You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

shortest_path.py 11 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Tue Apr 7 15:24:58 2020
  5. @author: ljia
  6. @references:
  7. [1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData
  8. Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE.
  9. """
  10. import sys
  11. from itertools import product
  12. # from functools import partial
  13. from multiprocessing import Pool
  14. from tqdm import tqdm
  15. import numpy as np
  16. import networkx as nx
  17. from gklearn.utils.parallel import parallel_gm, parallel_me
  18. from gklearn.utils.utils import getSPGraph
  19. from gklearn.kernels import GraphKernel
  20. class ShortestPath(GraphKernel):
  21. def __init__(self, **kwargs):
  22. GraphKernel.__init__(self)
  23. self._node_labels = kwargs.get('node_labels', [])
  24. self._node_attrs = kwargs.get('node_attrs', [])
  25. self._edge_weight = kwargs.get('edge_weight', None)
  26. self._node_kernels = kwargs.get('node_kernels', None)
  27. self._fcsp = kwargs.get('fcsp', True)
  28. self._ds_infos = kwargs.get('ds_infos', {})
  29. def _compute_gm_series(self):
  30. self._all_graphs_have_edges(self._graphs)
  31. # get shortest path graph of each graph.
  32. if self._verbose >= 2:
  33. iterator = tqdm(self._graphs, desc='getting sp graphs', file=sys.stdout)
  34. else:
  35. iterator = self._graphs
  36. self._graphs = [getSPGraph(g, edge_weight=self._edge_weight) for g in iterator]
  37. # compute Gram matrix.
  38. gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
  39. from itertools import combinations_with_replacement
  40. itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
  41. if self._verbose >= 2:
  42. iterator = tqdm(itr, desc='Computing kernels', file=sys.stdout)
  43. else:
  44. iterator = itr
  45. for i, j in iterator:
  46. kernel = self._sp_do(self._graphs[i], self._graphs[j])
  47. gram_matrix[i][j] = kernel
  48. gram_matrix[j][i] = kernel
  49. return gram_matrix
  50. def _compute_gm_imap_unordered(self):
  51. self._all_graphs_have_edges(self._graphs)
  52. # get shortest path graph of each graph.
  53. pool = Pool(self._n_jobs)
  54. get_sp_graphs_fun = self._wrapper_get_sp_graphs
  55. itr = zip(self._graphs, range(0, len(self._graphs)))
  56. if len(self._graphs) < 100 * self._n_jobs:
  57. chunksize = int(len(self._graphs) / self._n_jobs) + 1
  58. else:
  59. chunksize = 100
  60. if self._verbose >= 2:
  61. iterator = tqdm(pool.imap_unordered(get_sp_graphs_fun, itr, chunksize),
  62. desc='getting sp graphs', file=sys.stdout)
  63. else:
  64. iterator = pool.imap_unordered(get_sp_graphs_fun, itr, chunksize)
  65. for i, g in iterator:
  66. self._graphs[i] = g
  67. pool.close()
  68. pool.join()
  69. # compute Gram matrix.
  70. gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
  71. def init_worker(gs_toshare):
  72. global G_gs
  73. G_gs = gs_toshare
  74. do_fun = self._wrapper_sp_do
  75. parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
  76. glbv=(self._graphs,), n_jobs=self._n_jobs, verbose=self._verbose)
  77. return gram_matrix
  78. def _compute_kernel_list_series(self, g1, g_list):
  79. self._all_graphs_have_edges([g1] + g_list)
  80. # get shortest path graphs of g1 and each graph in g_list.
  81. g1 = getSPGraph(g1, edge_weight=self._edge_weight)
  82. if self._verbose >= 2:
  83. iterator = tqdm(g_list, desc='getting sp graphs', file=sys.stdout)
  84. else:
  85. iterator = g_list
  86. g_list = [getSPGraph(g, edge_weight=self._edge_weight) for g in iterator]
  87. # compute kernel list.
  88. kernel_list = [None] * len(g_list)
  89. if self._verbose >= 2:
  90. iterator = tqdm(range(len(g_list)), desc='Computing kernels', file=sys.stdout)
  91. else:
  92. iterator = range(len(g_list))
  93. for i in iterator:
  94. kernel = self._sp_do(g1, g_list[i])
  95. kernel_list[i] = kernel
  96. return kernel_list
  97. def _compute_kernel_list_imap_unordered(self, g1, g_list):
  98. self._all_graphs_have_edges([g1] + g_list)
  99. # get shortest path graphs of g1 and each graph in g_list.
  100. g1 = getSPGraph(g1, edge_weight=self._edge_weight)
  101. pool = Pool(self._n_jobs)
  102. get_sp_graphs_fun = self._wrapper_get_sp_graphs
  103. itr = zip(g_list, range(0, len(g_list)))
  104. if len(g_list) < 100 * self._n_jobs:
  105. chunksize = int(len(g_list) / self._n_jobs) + 1
  106. else:
  107. chunksize = 100
  108. if self._verbose >= 2:
  109. iterator = tqdm(pool.imap_unordered(get_sp_graphs_fun, itr, chunksize),
  110. desc='getting sp graphs', file=sys.stdout)
  111. else:
  112. iterator = pool.imap_unordered(get_sp_graphs_fun, itr, chunksize)
  113. for i, g in iterator:
  114. g_list[i] = g
  115. pool.close()
  116. pool.join()
  117. # compute Gram matrix.
  118. kernel_list = [None] * len(g_list)
  119. def init_worker(g1_toshare, gl_toshare):
  120. global G_g1, G_gl
  121. G_g1 = g1_toshare
  122. G_gl = gl_toshare
  123. do_fun = self._wrapper_kernel_list_do
  124. def func_assign(result, var_to_assign):
  125. var_to_assign[result[0]] = result[1]
  126. itr = range(len(g_list))
  127. len_itr = len(g_list)
  128. parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
  129. init_worker=init_worker, glbv=(g1, g_list), method='imap_unordered', n_jobs=self._n_jobs, itr_desc='Computing kernels', verbose=self._verbose)
  130. return kernel_list
  131. def _wrapper_kernel_list_do(self, itr):
  132. return itr, self._sp_do(G_g1, G_gl[itr])
  133. def _compute_single_kernel_series(self, g1, g2):
  134. self._all_graphs_have_edges([g1] + [g2])
  135. g1 = getSPGraph(g1, edge_weight=self._edge_weight)
  136. g2 = getSPGraph(g2, edge_weight=self._edge_weight)
  137. kernel = self._sp_do(g1, g2)
  138. return kernel
  139. def _wrapper_get_sp_graphs(self, itr_item):
  140. g = itr_item[0]
  141. i = itr_item[1]
  142. return i, getSPGraph(g, edge_weight=self._edge_weight)
  143. def _sp_do(self, g1, g2):
  144. if self._fcsp: # @todo: it may be put outside the _sp_do().
  145. return self._sp_do_fcsp(g1, g2)
  146. else:
  147. return self._sp_do_naive(g1, g2)
  148. def _sp_do_fcsp(self, g1, g2):
  149. kernel = 0
  150. # compute shortest path matrices first, method borrowed from FCSP.
  151. vk_dict = {} # shortest path matrices dict
  152. if len(self._node_labels) > 0: # @todo: it may be put outside the _sp_do().
  153. # node symb and non-synb labeled
  154. if len(self._node_attrs) > 0:
  155. kn = self._node_kernels['mix']
  156. for n1, n2 in product(
  157. g1.nodes(data=True), g2.nodes(data=True)):
  158. n1_labels = [n1[1][nl] for nl in self._node_labels]
  159. n2_labels = [n2[1][nl] for nl in self._node_labels]
  160. n1_attrs = [n1[1][na] for na in self._node_attrs]
  161. n2_attrs = [n2[1][na] for na in self._node_attrs]
  162. vk_dict[(n1[0], n2[0])] = kn(n1_labels, n2_labels, n1_attrs, n2_attrs)
  163. # node symb labeled
  164. else:
  165. kn = self._node_kernels['symb']
  166. for n1 in g1.nodes(data=True):
  167. for n2 in g2.nodes(data=True):
  168. n1_labels = [n1[1][nl] for nl in self._node_labels]
  169. n2_labels = [n2[1][nl] for nl in self._node_labels]
  170. vk_dict[(n1[0], n2[0])] = kn(n1_labels, n2_labels)
  171. else:
  172. # node non-synb labeled
  173. if len(self._node_attrs) > 0:
  174. kn = self._node_kernels['nsymb']
  175. for n1 in g1.nodes(data=True):
  176. for n2 in g2.nodes(data=True):
  177. n1_attrs = [n1[1][na] for na in self._node_attrs]
  178. n2_attrs = [n2[1][na] for na in self._node_attrs]
  179. vk_dict[(n1[0], n2[0])] = kn(n1_attrs, n2_attrs)
  180. # node unlabeled
  181. else:
  182. for e1, e2 in product(
  183. g1.edges(data=True), g2.edges(data=True)):
  184. if e1[2]['cost'] == e2[2]['cost']:
  185. kernel += 1
  186. return kernel
  187. # compute graph kernels
  188. if self._ds_infos['directed']:
  189. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  190. if e1[2]['cost'] == e2[2]['cost']:
  191. nk11, nk22 = vk_dict[(e1[0], e2[0])], vk_dict[(e1[1], e2[1])]
  192. kn1 = nk11 * nk22
  193. kernel += kn1
  194. else:
  195. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  196. if e1[2]['cost'] == e2[2]['cost']:
  197. # each edge walk is counted twice, starting from both its extreme nodes.
  198. nk11, nk12, nk21, nk22 = vk_dict[(e1[0], e2[0])], vk_dict[(
  199. e1[0], e2[1])], vk_dict[(e1[1], e2[0])], vk_dict[(e1[1], e2[1])]
  200. kn1 = nk11 * nk22
  201. kn2 = nk12 * nk21
  202. kernel += kn1 + kn2
  203. # # ---- exact implementation of the Fast Computation of Shortest Path Kernel (FCSP), reference [2], sadly it is slower than the current implementation
  204. # # compute vertex kernels
  205. # try:
  206. # vk_mat = np.zeros((nx.number_of_nodes(g1),
  207. # nx.number_of_nodes(g2)))
  208. # g1nl = enumerate(g1.nodes(data=True))
  209. # g2nl = enumerate(g2.nodes(data=True))
  210. # for i1, n1 in g1nl:
  211. # for i2, n2 in g2nl:
  212. # vk_mat[i1][i2] = kn(
  213. # n1[1][node_label], n2[1][node_label],
  214. # [n1[1]['attributes']], [n2[1]['attributes']])
  215. # range1 = range(0, len(edge_w_g[i]))
  216. # range2 = range(0, len(edge_w_g[j]))
  217. # for i1 in range1:
  218. # x1 = edge_x_g[i][i1]
  219. # y1 = edge_y_g[i][i1]
  220. # w1 = edge_w_g[i][i1]
  221. # for i2 in range2:
  222. # x2 = edge_x_g[j][i2]
  223. # y2 = edge_y_g[j][i2]
  224. # w2 = edge_w_g[j][i2]
  225. # ke = (w1 == w2)
  226. # if ke > 0:
  227. # kn1 = vk_mat[x1][x2] * vk_mat[y1][y2]
  228. # kn2 = vk_mat[x1][y2] * vk_mat[y1][x2]
  229. # kernel += kn1 + kn2
  230. return kernel
  231. def _sp_do_naive(self, g1, g2):
  232. kernel = 0
  233. # Define the function to compute kernels between vertices in each condition.
  234. if len(self._node_labels) > 0:
  235. # node symb and non-synb labeled
  236. if len(self._node_attrs) > 0:
  237. def compute_vk(n1, n2):
  238. kn = self._node_kernels['mix']
  239. n1_labels = [g1.nodes[n1][nl] for nl in self._node_labels]
  240. n2_labels = [g2.nodes[n2][nl] for nl in self._node_labels]
  241. n1_attrs = [g1.nodes[n1][na] for na in self._node_attrs]
  242. n2_attrs = [g2.nodes[n2][na] for na in self._node_attrs]
  243. return kn(n1_labels, n2_labels, n1_attrs, n2_attrs)
  244. # node symb labeled
  245. else:
  246. def compute_vk(n1, n2):
  247. kn = self._node_kernels['symb']
  248. n1_labels = [g1.nodes[n1][nl] for nl in self._node_labels]
  249. n2_labels = [g2.nodes[n2][nl] for nl in self._node_labels]
  250. return kn(n1_labels, n2_labels)
  251. else:
  252. # node non-synb labeled
  253. if len(self._node_attrs) > 0:
  254. def compute_vk(n1, n2):
  255. kn = self._node_kernels['nsymb']
  256. n1_attrs = [g1.nodes[n1][na] for na in self._node_attrs]
  257. n2_attrs = [g2.nodes[n2][na] for na in self._node_attrs]
  258. return kn(n1_attrs, n2_attrs)
  259. # node unlabeled
  260. else:
  261. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  262. if e1[2]['cost'] == e2[2]['cost']:
  263. kernel += 1
  264. return kernel
  265. # compute graph kernels
  266. if self._ds_infos['directed']:
  267. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  268. if e1[2]['cost'] == e2[2]['cost']:
  269. nk11, nk22 = compute_vk(e1[0], e2[0]), compute_vk(e1[1], e2[1])
  270. kn1 = nk11 * nk22
  271. kernel += kn1
  272. else:
  273. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  274. if e1[2]['cost'] == e2[2]['cost']:
  275. # each edge walk is counted twice, starting from both its extreme nodes.
  276. nk11, nk12, nk21, nk22 = compute_vk(e1[0], e2[0]), compute_vk(
  277. e1[0], e2[1]), compute_vk(e1[1], e2[0]), compute_vk(e1[1], e2[1])
  278. kn1 = nk11 * nk22
  279. kn2 = nk12 * nk21
  280. kernel += kn1 + kn2
  281. return kernel
  282. def _wrapper_sp_do(self, itr):
  283. i = itr[0]
  284. j = itr[1]
  285. return i, j, self._sp_do(G_gs[i], G_gs[j])
  286. def _all_graphs_have_edges(self, graphs):
  287. for G in graphs:
  288. if nx.number_of_edges(G) == 0:
  289. raise ValueError('Not all graphs have edges!!!')

A Python package for graph kernels, graph edit distances and graph pre-image problem.