
structural_sp.py 12 kB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 11:59:57 2020

@author: ljia

@references:

    [1] Suard F, Rakotomamonjy A, Bensrhair A. Kernel on Bag of Paths For
    Measuring Similarity of Shapes. In ESANN 2007 Apr 25 (pp. 355-360).
"""
import sys
from itertools import combinations_with_replacement, product
# from functools import partial
from multiprocessing import Pool

from tqdm import tqdm
# import networkx as nx
import numpy as np

from gklearn.utils.parallel import parallel_gm, parallel_me
from gklearn.utils.utils import get_shortest_paths, compute_vertex_kernels
from gklearn.kernels import GraphKernel
class StructuralSP(GraphKernel):

    def __init__(self, **kwargs):
        GraphKernel.__init__(self)
        self._node_labels = kwargs.get('node_labels', [])
        self._edge_labels = kwargs.get('edge_labels', [])
        self._node_attrs = kwargs.get('node_attrs', [])
        self._edge_attrs = kwargs.get('edge_attrs', [])
        self._edge_weight = kwargs.get('edge_weight', None)
        self._node_kernels = kwargs.get('node_kernels', None)
        self._edge_kernels = kwargs.get('edge_kernels', None)
        self._compute_method = kwargs.get('compute_method', 'naive')
        self._ds_infos = kwargs.get('ds_infos', {})
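        # Note on the kernel dicts (inferred from how they are used below):
        # `node_kernels` and `edge_kernels` are expected to map 'symb' to a
        # kernel on symbolic labels, 'nsymb' to a kernel on non-symbolic
        # attribute vectors, and 'mix' to a kernel combining both, while
        # `ds_infos['directed']` tells `get_shortest_paths` whether the
        # dataset is directed.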
    def _compute_gm_series(self):
        # get shortest paths of each graph in the dataset.
        splist = []
        if self._verbose >= 2:
            iterator = tqdm(self._graphs, desc='getting sp graphs', file=sys.stdout)
        else:
            iterator = self._graphs
        if self._compute_method == 'trie':
            for g in iterator:
                splist.append(self._get_sps_as_trie(g))
        else:
            for g in iterator:
                splist.append(get_shortest_paths(g, self._edge_weight, self._ds_infos['directed']))

        # compute Gram matrix.
        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))

        itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
        if self._verbose >= 2:
            iterator = tqdm(itr, desc='Computing kernels', file=sys.stdout)
        else:
            iterator = itr
        if self._compute_method == 'trie':
            for i, j in iterator:
                kernel = self._ssp_do_trie(self._graphs[i], self._graphs[j], splist[i], splist[j])
                gram_matrix[i][j] = kernel
                gram_matrix[j][i] = kernel  # the Gram matrix is symmetric
        else:
            for i, j in iterator:
                kernel = self._ssp_do_naive(self._graphs[i], self._graphs[j], splist[i], splist[j])
                gram_matrix[i][j] = kernel
                gram_matrix[j][i] = kernel  # the Gram matrix is symmetric
        return gram_matrix
    def _compute_gm_imap_unordered(self):
        # get shortest paths of each graph in the dataset.
        splist = [None] * len(self._graphs)
        pool = Pool(self._n_jobs)
        itr = zip(self._graphs, range(0, len(self._graphs)))
        if len(self._graphs) < 100 * self._n_jobs:
            chunksize = int(len(self._graphs) / self._n_jobs) + 1
        else:
            chunksize = 100
        # get shortest path graphs of self._graphs
        if self._compute_method == 'trie':
            get_sps_fun = self._wrapper_get_sps_trie
        else:
            get_sps_fun = self._wrapper_get_sps_naive
        if self._verbose >= 2:
            iterator = tqdm(pool.imap_unordered(get_sps_fun, itr, chunksize),
                            desc='getting shortest paths', file=sys.stdout)
        else:
            iterator = pool.imap_unordered(get_sps_fun, itr, chunksize)
        for i, sp in iterator:
            splist[i] = sp
        pool.close()
        pool.join()

        # compute Gram matrix.
        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))

        def init_worker(spl_toshare, gs_toshare):
            global G_spl, G_gs
            G_spl = spl_toshare
            G_gs = gs_toshare

        if self._compute_method == 'trie':
            do_fun = self._wrapper_ssp_do_trie
        else:
            do_fun = self._wrapper_ssp_do_naive
        parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
                    glbv=(splist, self._graphs), n_jobs=self._n_jobs, verbose=self._verbose)
        return gram_matrix
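    # Note: `init_worker` above copies the shortest-path lists and the graphs
    # into module-level globals of each worker process once, at pool start-up,
    # so that `parallel_gm` only has to ship (i, j) index pairs to the workers
    # instead of pickling the graphs for every entry of the Gram matrix.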
    def _compute_kernel_list_series(self, g1, g_list):
        # get shortest paths of g1 and each graph in g_list.
        sp1 = get_shortest_paths(g1, self._edge_weight, self._ds_infos['directed'])
        splist = []
        if self._verbose >= 2:
            iterator = tqdm(g_list, desc='getting sp graphs', file=sys.stdout)
        else:
            iterator = g_list
        if self._compute_method == 'trie':
            for g in iterator:
                splist.append(self._get_sps_as_trie(g))
        else:
            for g in iterator:
                splist.append(get_shortest_paths(g, self._edge_weight, self._ds_infos['directed']))

        # compute kernel list.
        kernel_list = [None] * len(g_list)
        if self._verbose >= 2:
            iterator = tqdm(range(len(g_list)), desc='Computing kernels', file=sys.stdout)
        else:
            iterator = range(len(g_list))
        if self._compute_method == 'trie':
            for i in iterator:
                kernel = self._ssp_do_trie(g1, g_list[i], sp1, splist[i])
                kernel_list[i] = kernel
        else:
            for i in iterator:
                kernel = self._ssp_do_naive(g1, g_list[i], sp1, splist[i])
                kernel_list[i] = kernel
        return kernel_list
    def _compute_kernel_list_imap_unordered(self, g1, g_list):
        # get shortest paths of g1 and each graph in g_list.
        sp1 = get_shortest_paths(g1, self._edge_weight, self._ds_infos['directed'])
        splist = [None] * len(g_list)
        pool = Pool(self._n_jobs)
        itr = zip(g_list, range(0, len(g_list)))
        if len(g_list) < 100 * self._n_jobs:
            chunksize = int(len(g_list) / self._n_jobs) + 1
        else:
            chunksize = 100
        # get shortest path graphs of g_list
        if self._compute_method == 'trie':
            get_sps_fun = self._wrapper_get_sps_trie
        else:
            get_sps_fun = self._wrapper_get_sps_naive
        if self._verbose >= 2:
            iterator = tqdm(pool.imap_unordered(get_sps_fun, itr, chunksize),
                            desc='getting shortest paths', file=sys.stdout)
        else:
            iterator = pool.imap_unordered(get_sps_fun, itr, chunksize)
        for i, sp in iterator:
            splist[i] = sp
        pool.close()
        pool.join()

        # compute kernel list.
        kernel_list = [None] * len(g_list)

        def init_worker(sp1_toshare, spl_toshare, g1_toshare, gl_toshare):
            global G_sp1, G_spl, G_g1, G_gl
            G_sp1 = sp1_toshare
            G_spl = spl_toshare
            G_g1 = g1_toshare
            G_gl = gl_toshare

        if self._compute_method == 'trie':
            do_fun = self._wrapper_ssp_do_trie
        else:
            do_fun = self._wrapper_kernel_list_do

        def func_assign(result, var_to_assign):
            var_to_assign[result[0]] = result[1]

        itr = range(len(g_list))
        len_itr = len(g_list)
        parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
                    init_worker=init_worker, glbv=(sp1, splist, g1, g_list),
                    method='imap_unordered', n_jobs=self._n_jobs,
                    itr_desc='Computing kernels', verbose=self._verbose)
        return kernel_list
    def _wrapper_kernel_list_do(self, itr):
        return itr, self._ssp_do_naive(G_g1, G_gl[itr], G_sp1, G_spl[itr])

    def _compute_single_kernel_series(self, g1, g2):
        sp1 = get_shortest_paths(g1, self._edge_weight, self._ds_infos['directed'])
        sp2 = get_shortest_paths(g2, self._edge_weight, self._ds_infos['directed'])
        if self._compute_method == 'trie':
            kernel = self._ssp_do_trie(g1, g2, sp1, sp2)
        else:
            kernel = self._ssp_do_naive(g1, g2, sp1, sp2)
        return kernel

    def _wrapper_get_sps_naive(self, itr_item):
        g = itr_item[0]
        i = itr_item[1]
        return i, get_shortest_paths(g, self._edge_weight, self._ds_infos['directed'])
    def _ssp_do_naive(self, g1, g2, spl1, spl2):
        kernel = 0

        # First, compute kernels between all pairs of nodes, method borrowed
        # from FCSP.
        vk_dict = self._get_all_node_kernels(g1, g2)

        # Then, compute kernels between all pairs of edges, which is an
        # extension of the FCSP idea. It suits sparse graphs, which is the
        # most common case; for dense graphs this would be slow.
        ek_dict = self._get_all_edge_kernels(g1, g2)

        # compute graph kernels
        if vk_dict:
            if ek_dict:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = vk_dict[(p1[0], p2[0])]
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= vk_dict[(p1[idx], p2[idx])] * \
                                    ek_dict[((p1[idx - 1], p1[idx]),
                                             (p2[idx - 1], p2[idx]))]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = vk_dict[(p1[0], p2[0])]
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= vk_dict[(p1[idx], p2[idx])]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
        else:
            if ek_dict:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        if len(p1) == 0:
                            kernel += 1
                        else:
                            kpath = 1
                            for idx in range(0, len(p1) - 1):
                                kpath *= ek_dict[((p1[idx], p1[idx + 1]),
                                                  (p2[idx], p2[idx + 1]))]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kernel += 1

        try:
            # normalize: average over all pairs of paths.
            kernel = kernel / (len(spl1) * len(spl2))
        except ZeroDivisionError:
            print(spl1, spl2)
            print(g1.nodes(data=True))
            print(g1.edges(data=True))
            raise

        # # ---- exact implementation of the Fast Computation of Shortest Path
        # # Kernel (FCSP), reference [2]; sadly it is slower than the current
        # # implementation.
        # # compute vertex kernel matrix
        # try:
        #     vk_mat = np.zeros((nx.number_of_nodes(g1),
        #                        nx.number_of_nodes(g2)))
        #     g1nl = enumerate(g1.nodes(data=True))
        #     g2nl = enumerate(g2.nodes(data=True))
        #     for i1, n1 in g1nl:
        #         for i2, n2 in g2nl:
        #             vk_mat[i1][i2] = kn(
        #                 n1[1][node_label], n2[1][node_label],
        #                 [n1[1]['attributes']], [n2[1]['attributes']])
        #
        #     range1 = range(0, len(edge_w_g[i]))
        #     range2 = range(0, len(edge_w_g[j]))
        #     for i1 in range1:
        #         x1 = edge_x_g[i][i1]
        #         y1 = edge_y_g[i][i1]
        #         w1 = edge_w_g[i][i1]
        #         for i2 in range2:
        #             x2 = edge_x_g[j][i2]
        #             y2 = edge_y_g[j][i2]
        #             w2 = edge_w_g[j][i2]
        #             ke = (w1 == w2)
        #             if ke > 0:
        #                 kn1 = vk_mat[x1][x2] * vk_mat[y1][y2]
        #                 kn2 = vk_mat[x1][y2] * vk_mat[y1][x2]
        #                 Kmatrix += kn1 + kn2

        return kernel
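    # Note: with SP(G) denoting the list of shortest paths of G, the value
    # returned by `_ssp_do_naive` is the normalized bag-of-paths kernel of
    # reference [1]:
    #
    #     k(G1, G2) = 1 / (|SP(G1)| * |SP(G2)|)
    #                 * sum_{p1 in SP(G1)} sum_{p2 in SP(G2)} k_path(p1, p2),
    #
    # where k_path(p1, p2) is the product of vertex kernels over aligned nodes
    # and edge kernels over aligned edges of two equal-length paths, and is 0
    # when the path lengths differ.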
    def _wrapper_ssp_do_naive(self, itr):
        i = itr[0]
        j = itr[1]
        return i, j, self._ssp_do_naive(G_gs[i], G_gs[j], G_spl[i], G_spl[j])

    def _get_all_node_kernels(self, g1, g2):
        return compute_vertex_kernels(g1, g2, self._node_kernels,
                                      node_labels=self._node_labels,
                                      node_attrs=self._node_attrs)
    def _get_all_edge_kernels(self, g1, g2):
        # Compute kernels between all pairs of edges, which is an extension of
        # the FCSP idea. It suits sparse graphs, which is the most common
        # case; for dense graphs this would be slow.
        ek_dict = {}  # dict of edge kernels
        if len(self._edge_labels) > 0:
            # edges with both symbolic and non-symbolic labels
            if len(self._edge_attrs) > 0:
                ke = self._edge_kernels['mix']
                for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
                    e1_labels = [e1[2][el] for el in self._edge_labels]
                    e2_labels = [e2[2][el] for el in self._edge_labels]
                    e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
                    e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
                    ek_temp = ke(e1_labels, e2_labels, e1_attrs, e2_attrs)
                    # store all four orientations, so that undirected edges
                    # can be looked up either way round.
                    ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
            # edges with symbolic labels only
            else:
                ke = self._edge_kernels['symb']
                for e1 in g1.edges(data=True):
                    for e2 in g2.edges(data=True):
                        e1_labels = [e1[2][el] for el in self._edge_labels]
                        e2_labels = [e2[2][el] for el in self._edge_labels]
                        ek_temp = ke(e1_labels, e2_labels)
                        ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
                        ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
                        ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
                        ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
        else:
            # edges with non-symbolic labels only
            if len(self._edge_attrs) > 0:
                ke = self._edge_kernels['nsymb']
                for e1 in g1.edges(data=True):
                    for e2 in g2.edges(data=True):
                        e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
                        e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
                        ek_temp = ke(e1_attrs, e2_attrs)
                        ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
                        ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
                        ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
                        ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
            # unlabeled edges
            else:
                pass
        return ek_dict
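
# ---- Usage sketch (editorial addition, not in the original file): a minimal,
# hedged example of how this kernel might be instantiated. It assumes NetworkX
# graphs carrying a symbolic node label 'atom' and edge label 'bond_type', the
# helper kernels deltakernel / gaussiankernel / kernelproduct from
# gklearn.utils.kernels, and that the GraphKernel base class exposes a
# `compute` method returning the Gram matrix and the run time; adapt the names
# to your data.
if __name__ == '__main__':
    from functools import partial
    import networkx as nx
    from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct

    # two toy labeled graphs.
    g1, g2 = nx.path_graph(3), nx.cycle_graph(4)
    for g in (g1, g2):
        nx.set_node_attributes(g, 'C', name='atom')
        nx.set_edge_attributes(g, '1', name='bond_type')

    # 'symb' compares symbolic labels, 'nsymb' non-symbolic attribute vectors,
    # and 'mix' both at once (cf. the keys used in _get_all_edge_kernels).
    mix_kernel = partial(kernelproduct, deltakernel, gaussiankernel)
    sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mix_kernel}

    ssp = StructuralSP(node_labels=['atom'], edge_labels=['bond_type'],
                       node_kernels=sub_kernels, edge_kernels=sub_kernels,
                       compute_method='naive', ds_infos={'directed': False})
    gram_matrix, run_time = ssp.compute([g1, g2], parallel=None, n_jobs=1, verbose=0)
    print(gram_matrix)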

A Python package for graph kernels, graph edit distances and the graph pre-image problem.