
structural_sp.py 14 kB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 11:59:57 2020
@author: ljia
"""
import sys
from itertools import product, combinations_with_replacement
from multiprocessing import Pool
from tqdm import tqdm
import numpy as np
from gklearn.utils.parallel import parallel_gm, parallel_me
from gklearn.utils.utils import get_shortest_paths
from gklearn.kernels import GraphKernel

class StructuralSP(GraphKernel):
    """Structural shortest path kernel.

    Keyword arguments: node_labels, edge_labels, node_attrs, edge_attrs
    (lists of attribute names), edge_weight (name of the edge attribute used
    as weight, or None), node_kernels and edge_kernels (dicts with 'symb',
    'nsymb' and 'mix' entries), compute_method ('naive' or 'trie') and
    ds_infos (dataset information, e.g. {'directed': False}).
    """

    def __init__(self, **kwargs):
        GraphKernel.__init__(self)
        self.__node_labels = kwargs.get('node_labels', [])
        self.__edge_labels = kwargs.get('edge_labels', [])
        self.__node_attrs = kwargs.get('node_attrs', [])
        self.__edge_attrs = kwargs.get('edge_attrs', [])
        self.__edge_weight = kwargs.get('edge_weight', None)
        self.__node_kernels = kwargs.get('node_kernels', None)
        self.__edge_kernels = kwargs.get('edge_kernels', None)
        self.__compute_method = kwargs.get('compute_method', 'naive')
        self.__ds_infos = kwargs.get('ds_infos', {})

    def _compute_gm_series(self):
        # Get the shortest paths of each graph.
        splist = []
        if self._verbose >= 2:
            iterator = tqdm(self._graphs, desc='getting sp graphs', file=sys.stdout)
        else:
            iterator = self._graphs
        if self.__compute_method == 'trie':
            for g in iterator:
                splist.append(self.__get_sps_as_trie(g))
        else:
            for g in iterator:
                splist.append(get_shortest_paths(g, self.__edge_weight, self.__ds_infos['directed']))

        # Compute the Gram matrix.
        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
        itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
        if self._verbose >= 2:
            iterator = tqdm(itr, desc='calculating kernels', file=sys.stdout)
        else:
            iterator = itr
        if self.__compute_method == 'trie':
            for i, j in iterator:
                kernel = self.__ssp_do_trie(self._graphs[i], self._graphs[j], splist[i], splist[j])
                gram_matrix[i][j] = kernel
                gram_matrix[j][i] = kernel
        else:
            for i, j in iterator:
                kernel = self.__ssp_do_naive(self._graphs[i], self._graphs[j], splist[i], splist[j])
                gram_matrix[i][j] = kernel
                gram_matrix[j][i] = kernel
        return gram_matrix

    def _compute_gm_imap_unordered(self):
        # Get the shortest paths of each graph, in parallel.
        splist = [None] * len(self._graphs)
        pool = Pool(self._n_jobs)
        itr = zip(self._graphs, range(0, len(self._graphs)))
        if len(self._graphs) < 100 * self._n_jobs:
            chunksize = int(len(self._graphs) / self._n_jobs) + 1
        else:
            chunksize = 100
        if self.__compute_method == 'trie':
            get_sps_fun = self._wrapper_get_sps_trie
        else:
            get_sps_fun = self._wrapper_get_sps_naive
        if self._verbose >= 2:
            iterator = tqdm(pool.imap_unordered(get_sps_fun, itr, chunksize),
                            desc='getting shortest paths', file=sys.stdout)
        else:
            iterator = pool.imap_unordered(get_sps_fun, itr, chunksize)
        for i, sp in iterator:
            splist[i] = sp
        pool.close()
        pool.join()

        # Compute the Gram matrix.
        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))

        def init_worker(spl_toshare, gs_toshare):
            global G_spl, G_gs
            G_spl = spl_toshare
            G_gs = gs_toshare

        if self.__compute_method == 'trie':
            do_fun = self.__wrapper_ssp_do_trie
        else:
            do_fun = self._wrapper_ssp_do_naive
        parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
                    glbv=(splist, self._graphs), n_jobs=self._n_jobs, verbose=self._verbose)
        return gram_matrix

    def _compute_kernel_list_series(self, g1, g_list):
        # Get the shortest paths of g1 and of each graph in g_list.
        sp1 = get_shortest_paths(g1, self.__edge_weight, self.__ds_infos['directed'])
        splist = []
        if self._verbose >= 2:
            iterator = tqdm(g_list, desc='getting sp graphs', file=sys.stdout)
        else:
            iterator = g_list
        if self.__compute_method == 'trie':
            for g in iterator:
                splist.append(self.__get_sps_as_trie(g))
        else:
            for g in iterator:
                splist.append(get_shortest_paths(g, self.__edge_weight, self.__ds_infos['directed']))

        # Compute the kernel list.
        kernel_list = [None] * len(g_list)
        if self._verbose >= 2:
            iterator = tqdm(range(len(g_list)), desc='calculating kernels', file=sys.stdout)
        else:
            iterator = range(len(g_list))
        if self.__compute_method == 'trie':
            for i in iterator:
                kernel_list[i] = self.__ssp_do_trie(g1, g_list[i], sp1, splist[i])
        else:
            for i in iterator:
                kernel_list[i] = self.__ssp_do_naive(g1, g_list[i], sp1, splist[i])
        return kernel_list

    def _compute_kernel_list_imap_unordered(self, g1, g_list):
        # Get the shortest paths of g1 and, in parallel, of each graph in g_list.
        sp1 = get_shortest_paths(g1, self.__edge_weight, self.__ds_infos['directed'])
        splist = [None] * len(g_list)
        pool = Pool(self._n_jobs)
        itr = zip(g_list, range(0, len(g_list)))
        if len(g_list) < 100 * self._n_jobs:
            chunksize = int(len(g_list) / self._n_jobs) + 1
        else:
            chunksize = 100
        if self.__compute_method == 'trie':
            get_sps_fun = self._wrapper_get_sps_trie
        else:
            get_sps_fun = self._wrapper_get_sps_naive
        if self._verbose >= 2:
            iterator = tqdm(pool.imap_unordered(get_sps_fun, itr, chunksize),
                            desc='getting shortest paths', file=sys.stdout)
        else:
            iterator = pool.imap_unordered(get_sps_fun, itr, chunksize)
        for i, sp in iterator:
            splist[i] = sp
        pool.close()
        pool.join()

        # Compute the kernel list.
        kernel_list = [None] * len(g_list)

        def init_worker(sp1_toshare, spl_toshare, g1_toshare, gl_toshare):
            global G_sp1, G_spl, G_g1, G_gl
            G_sp1 = sp1_toshare
            G_spl = spl_toshare
            G_g1 = g1_toshare
            G_gl = gl_toshare

        if self.__compute_method == 'trie':
            do_fun = self.__wrapper_ssp_do_trie
        else:
            do_fun = self._wrapper_kernel_list_do

        def func_assign(result, var_to_assign):
            var_to_assign[result[0]] = result[1]

        itr = range(len(g_list))
        len_itr = len(g_list)
        parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
                    init_worker=init_worker, glbv=(sp1, splist, g1, g_list),
                    method='imap_unordered', n_jobs=self._n_jobs,
                    itr_desc='calculating kernels', verbose=self._verbose)
        return kernel_list

    def _wrapper_kernel_list_do(self, itr):
        return itr, self.__ssp_do_naive(G_g1, G_gl[itr], G_sp1, G_spl[itr])

    def _compute_single_kernel_series(self, g1, g2):
        sp1 = get_shortest_paths(g1, self.__edge_weight, self.__ds_infos['directed'])
        sp2 = get_shortest_paths(g2, self.__edge_weight, self.__ds_infos['directed'])
        if self.__compute_method == 'trie':
            kernel = self.__ssp_do_trie(g1, g2, sp1, sp2)
        else:
            kernel = self.__ssp_do_naive(g1, g2, sp1, sp2)
        return kernel

    def _wrapper_get_sps_naive(self, itr_item):
        g, i = itr_item
        return i, get_shortest_paths(g, self.__edge_weight, self.__ds_infos['directed'])

    def __ssp_do_naive(self, g1, g2, spl1, spl2):
        kernel = 0

        # First, compute kernels between all pairs of nodes; the method is
        # borrowed from FCSP.
        vk_dict = self.__get_all_node_kernels(g1, g2)

        # Then, compute kernels between all pairs of edges, which extends the
        # FCSP idea. This suits sparse graphs, by far the most common case
        # encountered here; for dense graphs it would be slow.
        ek_dict = self.__get_all_edge_kernels(g1, g2)

        # Compute the graph kernel: sum, over all pairs of equal-length
        # shortest paths, the products of the node kernels along the two
        # paths interleaved with the edge kernels.
        if vk_dict:
            if ek_dict:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = vk_dict[(p1[0], p2[0])]
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= vk_dict[(p1[idx], p2[idx])] * \
                                         ek_dict[((p1[idx-1], p1[idx]),
                                                  (p2[idx-1], p2[idx]))]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kpath = vk_dict[(p1[0], p2[0])]
                        if kpath:
                            for idx in range(1, len(p1)):
                                kpath *= vk_dict[(p1[idx], p2[idx])]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
        else:
            if ek_dict:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        if len(p1) == 0:
                            kernel += 1
                        else:
                            kpath = 1
                            for idx in range(0, len(p1) - 1):
                                kpath *= ek_dict[((p1[idx], p1[idx+1]),
                                                  (p2[idx], p2[idx+1]))]
                                if not kpath:
                                    break
                            kernel += kpath  # add up kernels of all paths
            else:
                for p1, p2 in product(spl1, spl2):
                    if len(p1) == len(p2):
                        kernel += 1

        # Normalize: average over all pairs of shortest paths.
        try:
            kernel = kernel / (len(spl1) * len(spl2))
        except ZeroDivisionError:
            print(spl1, spl2)
            print(g1.nodes(data=True))
            print(g1.edges(data=True))
            raise Exception('The shortest path list of a graph is empty.')

        # ---- An exact implementation of the Fast Computation of Shortest
        # Path kernel (FCSP), reference [2]; sadly, it is slower than the
        # implementation above. Kept for reference; it uses variables that
        # are not defined in this method.
        # # compute the vertex kernel matrix
        # vk_mat = np.zeros((nx.number_of_nodes(g1), nx.number_of_nodes(g2)))
        # g1nl = enumerate(g1.nodes(data=True))
        # g2nl = enumerate(g2.nodes(data=True))
        # for i1, n1 in g1nl:
        #     for i2, n2 in g2nl:
        #         vk_mat[i1][i2] = kn(n1[1][node_label], n2[1][node_label],
        #                             [n1[1]['attributes']], [n2[1]['attributes']])
        # range1 = range(0, len(edge_w_g[i]))
        # range2 = range(0, len(edge_w_g[j]))
        # for i1 in range1:
        #     x1 = edge_x_g[i][i1]
        #     y1 = edge_y_g[i][i1]
        #     w1 = edge_w_g[i][i1]
        #     for i2 in range2:
        #         x2 = edge_x_g[j][i2]
        #         y2 = edge_y_g[j][i2]
        #         w2 = edge_w_g[j][i2]
        #         ke = (w1 == w2)
        #         if ke > 0:
        #             kn1 = vk_mat[x1][x2] * vk_mat[y1][y2]
        #             kn2 = vk_mat[x1][y2] * vk_mat[y1][x2]
        #             Kmatrix += kn1 + kn2
        return kernel

    def _wrapper_ssp_do_naive(self, itr):
        i, j = itr
        return i, j, self.__ssp_do_naive(G_gs[i], G_gs[j], G_spl[i], G_spl[j])

    def __get_all_node_kernels(self, g1, g2):
        # Compute kernels between all pairs of nodes; the method is borrowed
        # from FCSP.
        vk_dict = {}  # node kernel dict
        if len(self.__node_labels) > 0:
            # nodes are both symbolically and non-symbolically labeled
            if len(self.__node_attrs) > 0:
                kn = self.__node_kernels['mix']
                for n1, n2 in product(g1.nodes(data=True), g2.nodes(data=True)):
                    n1_labels = [n1[1][nl] for nl in self.__node_labels]
                    n2_labels = [n2[1][nl] for nl in self.__node_labels]
                    n1_attrs = [n1[1][na] for na in self.__node_attrs]
                    n2_attrs = [n2[1][na] for na in self.__node_attrs]
                    vk_dict[(n1[0], n2[0])] = kn(n1_labels, n2_labels, n1_attrs, n2_attrs)
            # nodes are only symbolically labeled
            else:
                kn = self.__node_kernels['symb']
                for n1, n2 in product(g1.nodes(data=True), g2.nodes(data=True)):
                    n1_labels = [n1[1][nl] for nl in self.__node_labels]
                    n2_labels = [n2[1][nl] for nl in self.__node_labels]
                    vk_dict[(n1[0], n2[0])] = kn(n1_labels, n2_labels)
        else:
            # nodes are only non-symbolically labeled
            if len(self.__node_attrs) > 0:
                kn = self.__node_kernels['nsymb']
                for n1, n2 in product(g1.nodes(data=True), g2.nodes(data=True)):
                    n1_attrs = [n1[1][na] for na in self.__node_attrs]
                    n2_attrs = [n2[1][na] for na in self.__node_attrs]
                    vk_dict[(n1[0], n2[0])] = kn(n1_attrs, n2_attrs)
            # nodes are unlabeled: leave vk_dict empty
            else:
                pass
        return vk_dict

    def __get_all_edge_kernels(self, g1, g2):
        # Compute kernels between all pairs of edges, which extends the FCSP
        # idea. This suits sparse graphs, by far the most common case
        # encountered here; for dense graphs it would be slow.
        ek_dict = {}  # edge kernel dict
        if len(self.__edge_labels) > 0:
            # edges are both symbolically and non-symbolically labeled
            if len(self.__edge_attrs) > 0:
                ke = self.__edge_kernels['mix']
                for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
                    e1_labels = [e1[2][el] for el in self.__edge_labels]
                    e2_labels = [e2[2][el] for el in self.__edge_labels]
                    e1_attrs = [e1[2][ea] for ea in self.__edge_attrs]
                    e2_attrs = [e2[2][ea] for ea in self.__edge_attrs]
                    ek_temp = ke(e1_labels, e2_labels, e1_attrs, e2_attrs)
                    # store the kernel under both orientations of each edge
                    ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
            # edges are only symbolically labeled
            else:
                ke = self.__edge_kernels['symb']
                for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
                    e1_labels = [e1[2][el] for el in self.__edge_labels]
                    e2_labels = [e2[2][el] for el in self.__edge_labels]
                    ek_temp = ke(e1_labels, e2_labels)
                    ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
        else:
            # edges are only non-symbolically labeled
            if len(self.__edge_attrs) > 0:
                ke = self.__edge_kernels['nsymb']
                for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
                    e1_attrs = [e1[2][ea] for ea in self.__edge_attrs]
                    e2_attrs = [e2[2][ea] for ea in self.__edge_attrs]
                    ek_temp = ke(e1_attrs, e2_attrs)
                    ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[0], e2[1]))] = ek_temp
                    ek_dict[((e1[0], e1[1]), (e2[1], e2[0]))] = ek_temp
                    ek_dict[((e1[1], e1[0]), (e2[1], e2[0]))] = ek_temp
            # edges are unlabeled: leave ek_dict empty
            else:
                pass
        return ek_dict
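
For orientation, a minimal usage sketch follows. Per __ssp_do_naive above, the kernel value for two graphs is the average, over all pairs of equal-length shortest paths (p1, p2), of the products of node kernels along the two paths interleaved with edge kernels. The sketch assumes the parts of gklearn not shown in this file: that the GraphKernel base class exposes a compute(...) entry point returning the Gram matrix and the run time, dispatching to the _compute_gm_* methods above, and that gklearn.utils.kernels provides the elementary deltakernel, gaussiankernel and kernelproduct functions used to fill the 'symb', 'nsymb' and 'mix' slots read by __get_all_node_kernels and __get_all_edge_kernels. Treat it as a sketch under those assumptions, not a verified snippet.

    import functools
    import networkx as nx
    from gklearn.kernels import StructuralSP
    from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct

    # Two toy graphs carrying a symbolic node label 'atom'.
    g1 = nx.path_graph(3)
    nx.set_node_attributes(g1, {0: 'C', 1: 'O', 2: 'C'}, name='atom')
    g2 = nx.cycle_graph(3)
    nx.set_node_attributes(g2, {0: 'C', 1: 'C', 2: 'O'}, name='atom')

    # Elementary kernels: delta for symbolic labels, Gaussian for numeric
    # attributes, and their product for the mixed case.
    sub_kernels = {'symb': deltakernel,
                   'nsymb': gaussiankernel,
                   'mix': functools.partial(kernelproduct, deltakernel, gaussiankernel)}

    ssp = StructuralSP(node_labels=['atom'], edge_labels=[],
                       node_kernels=sub_kernels, edge_kernels=sub_kernels,
                       compute_method='naive', ds_infos={'directed': False})

    # Assumed entry point inherited from GraphKernel; it dispatches to
    # _compute_gm_series or _compute_gm_imap_unordered depending on `parallel`.
    gram_matrix, run_time = ssp.compute([g1, g2], parallel=None, verbose=0)

The resulting 2 x 2 Gram matrix can then be fed to any kernel method, e.g. a kernel SVM.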

A Python package for graph kernels, graph edit distances and graph pre-image problem.