You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

structural_sp.py 17 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Mon Mar 30 11:59:57 2020
  5. @author: ljia
  6. @references:
  7. [1] Suard F, Rakotomamonjy A, Bensrhair A. Kernel on Bag of Paths For
  8. Measuring Similarity of Shapes. InESANN 2007 Apr 25 (pp. 355-360).
  9. """
  10. import sys
  11. from itertools import product
  12. # from functools import partial
  13. from multiprocessing import Pool
  14. from gklearn.utils import get_iters
  15. # import networkx as nx
  16. import numpy as np
  17. from gklearn.utils.parallel import parallel_gm, parallel_me
  18. from gklearn.utils.utils import get_shortest_paths, compute_vertex_kernels
  19. from gklearn.kernels import GraphKernel
  20. class StructuralSP(GraphKernel):
  21. def __init__(self, **kwargs):
  22. GraphKernel.__init__(self)
  23. self._node_labels = kwargs.get('node_labels', [])
  24. self._edge_labels = kwargs.get('edge_labels', [])
  25. self._node_attrs = kwargs.get('node_attrs', [])
  26. self._edge_attrs = kwargs.get('edge_attrs', [])
  27. self._edge_weight = kwargs.get('edge_weight', None)
  28. self._node_kernels = kwargs.get('node_kernels', None)
  29. self._edge_kernels = kwargs.get('edge_kernels', None)
  30. self._compute_method = kwargs.get('compute_method', 'naive')
  31. self._fcsp = kwargs.get('fcsp', True)
  32. self._ds_infos = kwargs.get('ds_infos', {})
  33. def _compute_gm_series(self):
  34. # get shortest paths of each graph in the graphs.
  35. splist = []
  36. iterator = get_iters(self._graphs, desc='getting sp graphs', file=sys.stdout, verbose=(self.verbose >= 2))
  37. if self._compute_method == 'trie':
  38. for g in iterator:
  39. splist.append(self._get_sps_as_trie(g))
  40. else:
  41. for g in iterator:
  42. splist.append(get_shortest_paths(g, self._edge_weight, self._ds_infos['directed']))
  43. # compute Gram matrix.
  44. gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
  45. from itertools import combinations_with_replacement
  46. itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
  47. len_itr = int(len(self._graphs) * (len(self._graphs) + 1) / 2)
  48. iterator = get_iters(itr, desc='Computing kernels', file=sys.stdout,
  49. length=len_itr, verbose=(self.verbose >= 2))
  50. if self._compute_method == 'trie':
  51. for i, j in iterator:
  52. kernel = self._ssp_do_trie(self._graphs[i], self._graphs[j], splist[i], splist[j])
  53. gram_matrix[i][j] = kernel
  54. gram_matrix[j][i] = kernel
  55. else:
  56. for i, j in iterator:
  57. kernel = self._ssp_do_naive(self._graphs[i], self._graphs[j], splist[i], splist[j])
  58. # if(kernel > 1):
  59. # print("error here ")
  60. gram_matrix[i][j] = kernel
  61. gram_matrix[j][i] = kernel
  62. return gram_matrix
    def _compute_gm_imap_unordered(self):
        """Compute the Gram matrix of ``self._graphs`` in parallel.

        Shortest paths are extracted with a multiprocessing pool first;
        pairwise kernels are then computed through ``parallel_gm``.
        Returns an (n, n) symmetric numpy array of kernel values.
        """
        # get shortest paths of each graph in the graphs.
        splist = [None] * len(self._graphs)
        pool = Pool(self.n_jobs)
        itr = zip(self._graphs, range(0, len(self._graphs)))
        # Batch tasks so that each worker receives a reasonable chunk.
        if len(self._graphs) < 100 * self.n_jobs:
            chunksize = int(len(self._graphs) / self.n_jobs) + 1
        else:
            chunksize = 100
        # get shortest path graphs of self._graphs
        if self._compute_method == 'trie':
            get_sps_fun = self._wrapper_get_sps_trie
        else:
            get_sps_fun = self._wrapper_get_sps_naive
        iterator = get_iters(pool.imap_unordered(get_sps_fun, itr, chunksize),
                             desc='getting shortest paths', file=sys.stdout,
                             length=len(self._graphs), verbose=(self.verbose >= 2))
        # Results arrive in arbitrary order; place each by its index.
        for i, sp in iterator:
            splist[i] = sp
        pool.close()
        pool.join()
        # compute Gram matrix.
        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))

        def init_worker(spl_toshare, gs_toshare):
            # Share path lists and graphs with workers via module globals,
            # avoiding re-pickling them for every task.
            global G_spl, G_gs
            G_spl = spl_toshare
            G_gs = gs_toshare

        if self._compute_method == 'trie':
            do_fun = self._wrapper_ssp_do_trie
        else:
            do_fun = self._wrapper_ssp_do_naive
        parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
                    glbv=(splist, self._graphs), n_jobs=self.n_jobs, verbose=self.verbose)
        return gram_matrix
  97. def _compute_kernel_list_series(self, g1, g_list):
  98. # get shortest paths of g1 and each graph in g_list.
  99. sp1 = get_shortest_paths(g1, self._edge_weight, self._ds_infos['directed'])
  100. splist = []
  101. iterator = get_iters(g_list, desc='getting sp graphs', file=sys.stdout,
  102. verbose=(self.verbose >= 2))
  103. if self._compute_method == 'trie':
  104. for g in iterator:
  105. splist.append(self._get_sps_as_trie(g))
  106. else:
  107. for g in iterator:
  108. splist.append(get_shortest_paths(g, self._edge_weight, self._ds_infos['directed']))
  109. # compute kernel list.
  110. kernel_list = [None] * len(g_list)
  111. iterator = get_iters(range(len(g_list)), desc='Computing kernels',
  112. file=sys.stdout, length=len(g_list), verbose=(self.verbose >= 2))
  113. if self._compute_method == 'trie':
  114. for i in iterator:
  115. kernel = self._ssp_do_trie(g1, g_list[i], sp1, splist[i])
  116. kernel_list[i] = kernel
  117. else:
  118. for i in iterator:
  119. kernel = self._ssp_do_naive(g1, g_list[i], sp1, splist[i])
  120. kernel_list[i] = kernel
  121. return kernel_list
  122. def _compute_kernel_list_imap_unordered(self, g1, g_list):
  123. # get shortest paths of g1 and each graph in g_list.
  124. sp1 = get_shortest_paths(g1, self._edge_weight, self._ds_infos['directed'])
  125. splist = [None] * len(g_list)
  126. pool = Pool(self.n_jobs)
  127. itr = zip(g_list, range(0, len(g_list)))
  128. if len(g_list) < 100 * self.n_jobs:
  129. chunksize = int(len(g_list) / self.n_jobs) + 1
  130. else:
  131. chunksize = 100
  132. # get shortest path graphs of g_list
  133. if self._compute_method == 'trie':
  134. get_sps_fun = self._wrapper_get_sps_trie
  135. else:
  136. get_sps_fun = self._wrapper_get_sps_naive
  137. iterator = get_iters(pool.imap_unordered(get_sps_fun, itr, chunksize),
  138. desc='getting shortest paths', file=sys.stdout,
  139. length=len(g_list), verbose=(self.verbose >= 2))
  140. for i, sp in iterator:
  141. splist[i] = sp
  142. pool.close()
  143. pool.join()
  144. # compute Gram matrix.
  145. kernel_list = [None] * len(g_list)
  146. def init_worker(sp1_toshare, spl_toshare, g1_toshare, gl_toshare):
  147. global G_sp1, G_spl, G_g1, G_gl
  148. G_sp1 = sp1_toshare
  149. G_spl = spl_toshare
  150. G_g1 = g1_toshare
  151. G_gl = gl_toshare
  152. if self._compute_method == 'trie':
  153. do_fun = self._wrapper_ssp_do_trie
  154. else:
  155. do_fun = self._wrapper_kernel_list_do
  156. def func_assign(result, var_to_assign):
  157. var_to_assign[result[0]] = result[1]
  158. itr = range(len(g_list))
  159. len_itr = len(g_list)
  160. parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
  161. init_worker=init_worker, glbv=(sp1, splist, g1, g_list), method='imap_unordered', n_jobs=self.n_jobs, itr_desc='Computing kernels', verbose=self.verbose)
  162. return kernel_list
    def _wrapper_kernel_list_do(self, itr):
        # One (g1, g_list[itr]) naive kernel; the G_* globals are set by the
        # pool initializer in _compute_kernel_list_imap_unordered.
        return itr, self._ssp_do_naive(G_g1, G_gl[itr], G_sp1, G_spl[itr])
  165. def _compute_single_kernel_series(self, g1, g2):
  166. sp1 = get_shortest_paths(g1, self._edge_weight, self._ds_infos['directed'])
  167. sp2 = get_shortest_paths(g2, self._edge_weight, self._ds_infos['directed'])
  168. if self._compute_method == 'trie':
  169. kernel = self._ssp_do_trie(g1, g2, sp1, sp2)
  170. else:
  171. kernel = self._ssp_do_naive(g1, g2, sp1, sp2)
  172. return kernel
  173. def _wrapper_get_sps_naive(self, itr_item):
  174. g = itr_item[0]
  175. i = itr_item[1]
  176. return i, get_shortest_paths(g, self._edge_weight, self._ds_infos['directed'])
  177. def _ssp_do_naive(self, g1, g2, spl1, spl2):
  178. if self._fcsp: # @todo: it may be put outside the _sp_do().
  179. return self._sp_do_naive_fcsp(g1, g2, spl1, spl2)
  180. else:
  181. return self._sp_do_naive_naive(g1, g2, spl1, spl2)
    def _sp_do_naive_fcsp(self, g1, g2, spl1, spl2):
        """Sum path kernels over all pairs of shortest paths of g1 and g2,
        using pre-computed node/edge kernel tables (FCSP-style).

        Only paths of equal length contribute; the result is averaged over
        all len(spl1) * len(spl2) path pairs.
        """
        kernel = 0
        # First, compute shortest path matrices, method borrowed from FCSP.
        vk_dict = self._get_all_node_kernels(g1, g2)
        # Then, compute kernels between all pairs of edges, which is an idea of
        # extension of FCSP. It suits sparse graphs, which is the most case we
        # went though. For dense graphs, this would be slow.
        ek_dict = self._get_all_edge_kernels(g1, g2)
        # compute graph kernels
        if vk_dict:
            if ek_dict:
                # Nodes and edges both labeled: a path kernel is the product
                # of node kernels and the kernels of connecting edges.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = vk_dict[(p1[0], p2[0])]
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= vk_dict[(p1[idx], p2[idx])] * \
                                         ek_dict[((p1[idx-1], p1[idx]),
                                                  (p2[idx-1], p2[idx]))]
                                if not kpath:
                                    # A zero factor kills the whole product.
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                # Only node information is available.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = vk_dict[(p1[0], p2[0])]
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= vk_dict[(p1[idx], p2[idx])]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
        else:
            if ek_dict:
                # Only edge information is available.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        if len(p1) == 0:
                            # Two single-node (edgeless) paths always match.
                            kernel += 1
                        else:
                            kpath = 1
                            for idx in range(0, len(p1) - 1):
                                kpath *= ek_dict[((p1[idx], p1[idx+1]),
                                                  (p2[idx], p2[idx+1]))]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                # Fully unlabeled: count pairs of equal-length paths.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kernel += 1
        try:
            kernel = kernel / (len(spl1) * len(spl2))  # Compute mean average
        except ZeroDivisionError:
            # A graph yielded no shortest paths; dump context before failing.
            print(spl1, spl2)
            print(g1.nodes(data=True))
            print(g1.edges(data=True))
            raise Exception

        # # ---- exact implementation of the Fast Computation of Shortest Path
        # # Kernel (FCSP), reference [2], sadly it is slower than the current
        # # implementation
        # # compute vertex kernel matrix
        # try:
        #     vk_mat = np.zeros((nx.number_of_nodes(g1),
        #                        nx.number_of_nodes(g2)))
        #     g1nl = enumerate(g1.nodes(data=True))
        #     g2nl = enumerate(g2.nodes(data=True))
        #     for i1, n1 in g1nl:
        #         for i2, n2 in g2nl:
        #             vk_mat[i1][i2] = kn(
        #                 n1[1][node_label], n2[1][node_label],
        #                 [n1[1]['attributes']], [n2[1]['attributes']])
        #     range1 = range(0, len(edge_w_g[i]))
        #     range2 = range(0, len(edge_w_g[j]))
        #     for i1 in range1:
        #         x1 = edge_x_g[i][i1]
        #         y1 = edge_y_g[i][i1]
        #         w1 = edge_w_g[i][i1]
        #         for i2 in range2:
        #             x2 = edge_x_g[j][i2]
        #             y2 = edge_y_g[j][i2]
        #             w2 = edge_w_g[j][i2]
        #             ke = (w1 == w2)
        #             if ke > 0:
        #                 kn1 = vk_mat[x1][x2] * vk_mat[y1][y2]
        #                 kn2 = vk_mat[x1][y2] * vk_mat[y1][x2]
        #                 Kmatrix += kn1 + kn2
        return kernel
    def _sp_do_naive_naive(self, g1, g2, spl1, spl2):
        """Sum path kernels over all pairs of shortest paths of g1 and g2,
        computing node/edge kernels on demand (no FCSP pre-computation).

        Only paths of equal length contribute; the result is averaged over
        all len(spl1) * len(spl2) path pairs.
        """
        kernel = 0
        # Define the function to compute kernels between vertices in each condition.
        if len(self._node_labels) > 0:
            # node symb and non-synb labeled
            if len(self._node_attrs) > 0:
                def compute_vk(n1, n2):
                    kn = self._node_kernels['mix']
                    n1_labels = [g1.nodes[n1][nl] for nl in self._node_labels]
                    n2_labels = [g2.nodes[n2][nl] for nl in self._node_labels]
                    n1_attrs = [g1.nodes[n1][na] for na in self._node_attrs]
                    n2_attrs = [g2.nodes[n2][na] for na in self._node_attrs]
                    return kn(n1_labels, n2_labels, n1_attrs, n2_attrs)
            # node symb labeled
            else:
                def compute_vk(n1, n2):
                    kn = self._node_kernels['symb']
                    n1_labels = [g1.nodes[n1][nl] for nl in self._node_labels]
                    n2_labels = [g2.nodes[n2][nl] for nl in self._node_labels]
                    return kn(n1_labels, n2_labels)
        else:
            # node non-synb labeled
            if len(self._node_attrs) > 0:
                def compute_vk(n1, n2):
                    kn = self._node_kernels['nsymb']
                    n1_attrs = [g1.nodes[n1][na] for na in self._node_attrs]
                    n2_attrs = [g2.nodes[n2][na] for na in self._node_attrs]
                    return kn(n1_attrs, n2_attrs)
            # # node unlabeled
            # else:
            #     for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
            #         if e1[2]['cost'] == e2[2]['cost']:
            #             kernel += 1
            #     return kernel
            # NOTE: when nodes are fully unlabeled, compute_vk stays undefined;
            # the dispatch below then takes branches that never call it.
        # Define the function to compute kernels between edges in each condition.
        if len(self._edge_labels) > 0:
            # edge symb and non-synb labeled
            if len(self._edge_attrs) > 0:
                def compute_ek(e1, e2):
                    ke = self._edge_kernels['mix']
                    e1_labels = [g1.edges[e1][el] for el in self._edge_labels]
                    e2_labels = [g2.edges[e2][el] for el in self._edge_labels]
                    e1_attrs = [g1.edges[e1][ea] for ea in self._edge_attrs]
                    e2_attrs = [g2.edges[e2][ea] for ea in self._edge_attrs]
                    return ke(e1_labels, e2_labels, e1_attrs, e2_attrs)
            # edge symb labeled
            else:
                def compute_ek(e1, e2):
                    ke = self._edge_kernels['symb']
                    e1_labels = [g1.edges[e1][el] for el in self._edge_labels]
                    e2_labels = [g2.edges[e2][el] for el in self._edge_labels]
                    return ke(e1_labels, e2_labels)
        else:
            # edge non-synb labeled
            if len(self._edge_attrs) > 0:
                def compute_ek(e1, e2):
                    ke = self._edge_kernels['nsymb']
                    e1_attrs = [g1.edges[e1][ea] for ea in self._edge_attrs]
                    e2_attrs = [g2.edges[e2][ea] for ea in self._edge_attrs]
                    return ke(e1_attrs, e2_attrs)
        # compute graph kernels
        if len(self._node_labels) > 0 or len(self._node_attrs) > 0:
            if len(self._edge_labels) > 0 or len(self._edge_attrs) > 0:
                # Nodes and edges both labeled: a path kernel is the product
                # of node kernels and the kernels of connecting edges.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = compute_vk(p1[0], p2[0])
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= compute_vk(p1[idx], p2[idx]) * \
                                         compute_ek((p1[idx-1], p1[idx]),
                                                    (p2[idx-1], p2[idx]))
                                if not kpath:
                                    # A zero factor kills the whole product.
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                # Only node information is available.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = compute_vk(p1[0], p2[0])
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= compute_vk(p1[idx], p2[idx])
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
        else:
            if len(self._edge_labels) > 0 or len(self._edge_attrs) > 0:
                # Only edge information is available.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        if len(p1) == 0:
                            # Two single-node (edgeless) paths always match.
                            kernel += 1
                        else:
                            kpath = 1
                            for idx in range(0, len(p1) - 1):
                                kpath *= compute_ek((p1[idx], p1[idx+1]),
                                                    (p2[idx], p2[idx+1]))
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                # Fully unlabeled: count pairs of equal-length paths.
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kernel += 1
        try:
            kernel = kernel / (len(spl1) * len(spl2))  # Compute mean average
        except ZeroDivisionError:
            # A graph yielded no shortest paths; dump context before failing.
            print(spl1, spl2)
            print(g1.nodes(data=True))
            print(g1.edges(data=True))
            raise Exception
        return kernel
  377. def _wrapper_ssp_do_naive(self, itr):
  378. i = itr[0]
  379. j = itr[1]
  380. return i, j, self._ssp_do_naive(G_gs[i], G_gs[j], G_spl[i], G_spl[j])
    def _get_all_node_kernels(self, g1, g2):
        # Kernel value for every node pair of g1 x g2, keyed by (node1, node2);
        # delegates to the shared helper with the configured node kernels.
        return compute_vertex_kernels(g1, g2, self._node_kernels, node_labels=self._node_labels, node_attrs=self._node_attrs)
  383. def _get_all_edge_kernels(self, g1, g2):
  384. # compute kernels between all pairs of edges, which is an idea of
  385. # extension of FCSP. It suits sparse graphs, which is the most case we
  386. # went though. For dense graphs, this would be slow.
  387. ek_dict = {} # dict of edge kernels
  388. if len(self._edge_labels) > 0:
  389. # edge symb and non-synb labeled
  390. if len(self._edge_attrs) > 0:
  391. ke = self._edge_kernels['mix']
  392. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  393. e1_labels = [e1[2][el] for el in self._edge_labels]
  394. e2_labels = [e2[2][el] for el in self._edge_labels]
  395. e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
  396. e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
  397. ek_temp = ke(e1_labels, e2_labels, e1_attrs, e2_attrs)
  398. ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
  399. ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
  400. ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
  401. ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
  402. # edge symb labeled
  403. else:
  404. ke = self._edge_kernels['symb']
  405. for e1 in g1.edges(data=True):
  406. for e2 in g2.edges(data=True):
  407. e1_labels = [e1[2][el] for el in self._edge_labels]
  408. e2_labels = [e2[2][el] for el in self._edge_labels]
  409. ek_temp = ke(e1_labels, e2_labels)
  410. ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
  411. ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
  412. ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
  413. ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
  414. else:
  415. # edge non-synb labeled
  416. if len(self._edge_attrs) > 0:
  417. ke = self._edge_kernels['nsymb']
  418. for e1 in g1.edges(data=True):
  419. for e2 in g2.edges(data=True):
  420. e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
  421. e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
  422. ek_temp = ke(e1_attrs, e2_attrs)
  423. ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
  424. ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
  425. ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
  426. ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
  427. # edge unlabeled
  428. else:
  429. pass
  430. return ek_dict

A Python package for graph kernels, graph edit distances and graph pre-image problem.