You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

path_up_to_h.py 8.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Fri Apr 10 18:33:13 2020
  5. @author: ljia
  6. @references:
  7. [1] Liva Ralaivola, Sanjay J Swamidass, Hiroto Saigo, and Pierre
  8. Baldi. Graph kernels for chemical informatics. Neural networks,
  9. 18(8):1093–1110, 2005.
  10. """
  11. import sys
  12. from itertools import product
  13. # from functools import partial
  14. from multiprocessing import Pool
  15. from tqdm import tqdm
  16. import numpy as np
  17. from gklearn.utils.parallel import parallel_gm, parallel_me
  18. from gklearn.utils.utils import getSPGraph
  19. from gklearn.kernels import GraphKernel
  20. class PathUpToH(GraphKernel):
  21. def __init__(self, **kwargs):
  22. GraphKernel.__init__(self)
  23. self.__node_labels = kwargs.get('node_labels', [])
  24. self.__edge_labels = kwargs.get('edge_labels', [])
  25. self.__depth = int(kwargs.get('depth', 10))
  26. self.__k_func = kwargs.get('k_func', 'MinMax')
  27. self.__compute_method = kwargs.get('compute_method', 'trie')
  28. self.__ds_infos = kwargs.get('ds_infos', {})
  29. def _compute_gm_series(self):
  30. # get shortest path graph of each graph.
  31. if self._verbose >= 2:
  32. iterator = tqdm(self._graphs, desc='getting sp graphs', file=sys.stdout)
  33. else:
  34. iterator = self._graphs
  35. self._graphs = [getSPGraph(g, edge_weight=self.__edge_weight) for g in iterator]
  36. # compute Gram matrix.
  37. gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
  38. from itertools import combinations_with_replacement
  39. itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
  40. if self._verbose >= 2:
  41. iterator = tqdm(itr, desc='calculating kernels', file=sys.stdout)
  42. else:
  43. iterator = itr
  44. for i, j in iterator:
  45. kernel = self.__sp_do_(self._graphs[i], self._graphs[j])
  46. gram_matrix[i][j] = kernel
  47. gram_matrix[j][i] = kernel
  48. return gram_matrix
  49. def _compute_gm_imap_unordered(self):
  50. # get shortest path graph of each graph.
  51. pool = Pool(self._n_jobs)
  52. get_sp_graphs_fun = self._wrapper_get_sp_graphs
  53. itr = zip(self._graphs, range(0, len(self._graphs)))
  54. if len(self._graphs) < 100 * self._n_jobs:
  55. chunksize = int(len(self._graphs) / self._n_jobs) + 1
  56. else:
  57. chunksize = 100
  58. if self._verbose >= 2:
  59. iterator = tqdm(pool.imap_unordered(get_sp_graphs_fun, itr, chunksize),
  60. desc='getting sp graphs', file=sys.stdout)
  61. else:
  62. iterator = pool.imap_unordered(get_sp_graphs_fun, itr, chunksize)
  63. for i, g in iterator:
  64. self._graphs[i] = g
  65. pool.close()
  66. pool.join()
  67. # compute Gram matrix.
  68. gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
  69. def init_worker(gs_toshare):
  70. global G_gs
  71. G_gs = gs_toshare
  72. do_fun = self._wrapper_sp_do
  73. parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
  74. glbv=(self._graphs,), n_jobs=self._n_jobs, verbose=self._verbose)
  75. return gram_matrix
  76. def _compute_kernel_list_series(self, g1, g_list):
  77. # get shortest path graphs of g1 and each graph in g_list.
  78. g1 = getSPGraph(g1, edge_weight=self.__edge_weight)
  79. if self._verbose >= 2:
  80. iterator = tqdm(g_list, desc='getting sp graphs', file=sys.stdout)
  81. else:
  82. iterator = g_list
  83. g_list = [getSPGraph(g, edge_weight=self.__edge_weight) for g in iterator]
  84. # compute kernel list.
  85. kernel_list = [None] * len(g_list)
  86. if self._verbose >= 2:
  87. iterator = tqdm(range(len(g_list)), desc='calculating kernels', file=sys.stdout)
  88. else:
  89. iterator = range(len(g_list))
  90. for i in iterator:
  91. kernel = self.__sp_do(g1, g_list[i])
  92. kernel_list[i] = kernel
  93. return kernel_list
  94. def _compute_kernel_list_imap_unordered(self, g1, g_list):
  95. # get shortest path graphs of g1 and each graph in g_list.
  96. g1 = getSPGraph(g1, edge_weight=self.__edge_weight)
  97. pool = Pool(self._n_jobs)
  98. get_sp_graphs_fun = self._wrapper_get_sp_graphs
  99. itr = zip(g_list, range(0, len(g_list)))
  100. if len(g_list) < 100 * self._n_jobs:
  101. chunksize = int(len(g_list) / self._n_jobs) + 1
  102. else:
  103. chunksize = 100
  104. if self._verbose >= 2:
  105. iterator = tqdm(pool.imap_unordered(get_sp_graphs_fun, itr, chunksize),
  106. desc='getting sp graphs', file=sys.stdout)
  107. else:
  108. iterator = pool.imap_unordered(get_sp_graphs_fun, itr, chunksize)
  109. for i, g in iterator:
  110. g_list[i] = g
  111. pool.close()
  112. pool.join()
  113. # compute Gram matrix.
  114. kernel_list = [None] * len(g_list)
  115. def init_worker(g1_toshare, gl_toshare):
  116. global G_g1, G_gl
  117. G_g1 = g1_toshare
  118. G_gl = gl_toshare
  119. do_fun = self._wrapper_kernel_list_do
  120. def func_assign(result, var_to_assign):
  121. var_to_assign[result[0]] = result[1]
  122. itr = range(len(g_list))
  123. len_itr = len(g_list)
  124. parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
  125. init_worker=init_worker, glbv=(g1, g_list), method='imap_unordered', n_jobs=self._n_jobs, itr_desc='calculating kernels', verbose=self._verbose)
  126. return kernel_list
  127. def _wrapper_kernel_list_do(self, itr):
  128. return itr, self.__sp_do(G_g1, G_gl[itr])
  129. def _compute_single_kernel_series(self, g1, g2):
  130. g1 = getSPGraph(g1, edge_weight=self.__edge_weight)
  131. g2 = getSPGraph(g2, edge_weight=self.__edge_weight)
  132. kernel = self.__sp_do(g1, g2)
  133. return kernel
  134. def _wrapper_get_sp_graphs(self, itr_item):
  135. g = itr_item[0]
  136. i = itr_item[1]
  137. return i, getSPGraph(g, edge_weight=self.__edge_weight)
  138. def __sp_do(self, g1, g2):
  139. kernel = 0
  140. # compute shortest path matrices first, method borrowed from FCSP.
  141. vk_dict = {} # shortest path matrices dict
  142. if len(self.__node_labels) > 0:
  143. # node symb and non-synb labeled
  144. if len(self.__node_attrs) > 0:
  145. kn = self.__node_kernels['mix']
  146. for n1, n2 in product(
  147. g1.nodes(data=True), g2.nodes(data=True)):
  148. n1_labels = [n1[1][nl] for nl in self.__node_labels]
  149. n2_labels = [n2[1][nl] for nl in self.__node_labels]
  150. n1_attrs = [n1[1][na] for na in self.__node_attrs]
  151. n2_attrs = [n2[1][na] for na in self.__node_attrs]
  152. vk_dict[(n1[0], n2[0])] = kn(n1_labels, n2_labels, n1_attrs, n2_attrs)
  153. # node symb labeled
  154. else:
  155. kn = self.__node_kernels['symb']
  156. for n1 in g1.nodes(data=True):
  157. for n2 in g2.nodes(data=True):
  158. n1_labels = [n1[1][nl] for nl in self.__node_labels]
  159. n2_labels = [n2[1][nl] for nl in self.__node_labels]
  160. vk_dict[(n1[0], n2[0])] = kn(n1_labels, n2_labels)
  161. else:
  162. # node non-synb labeled
  163. if len(self.__node_attrs) > 0:
  164. kn = self.__node_kernels['nsymb']
  165. for n1 in g1.nodes(data=True):
  166. for n2 in g2.nodes(data=True):
  167. n1_attrs = [n1[1][na] for na in self.__node_attrs]
  168. n2_attrs = [n2[1][na] for na in self.__node_attrs]
  169. vk_dict[(n1[0], n2[0])] = kn(n1_attrs, n2_attrs)
  170. # node unlabeled
  171. else:
  172. for e1, e2 in product(
  173. g1.edges(data=True), g2.edges(data=True)):
  174. if e1[2]['cost'] == e2[2]['cost']:
  175. kernel += 1
  176. return kernel
  177. # compute graph kernels
  178. if self.__ds_infos['directed']:
  179. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  180. if e1[2]['cost'] == e2[2]['cost']:
  181. nk11, nk22 = vk_dict[(e1[0], e2[0])], vk_dict[(e1[1], e2[1])]
  182. kn1 = nk11 * nk22
  183. kernel += kn1
  184. else:
  185. for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
  186. if e1[2]['cost'] == e2[2]['cost']:
  187. # each edge walk is counted twice, starting from both its extreme nodes.
  188. nk11, nk12, nk21, nk22 = vk_dict[(e1[0], e2[0])], vk_dict[(
  189. e1[0], e2[1])], vk_dict[(e1[1], e2[0])], vk_dict[(e1[1], e2[1])]
  190. kn1 = nk11 * nk22
  191. kn2 = nk12 * nk21
  192. kernel += kn1 + kn2
  193. # # ---- exact implementation of the Fast Computation of Shortest Path Kernel (FCSP), reference [2], sadly it is slower than the current implementation
  194. # # compute vertex kernels
  195. # try:
  196. # vk_mat = np.zeros((nx.number_of_nodes(g1),
  197. # nx.number_of_nodes(g2)))
  198. # g1nl = enumerate(g1.nodes(data=True))
  199. # g2nl = enumerate(g2.nodes(data=True))
  200. # for i1, n1 in g1nl:
  201. # for i2, n2 in g2nl:
  202. # vk_mat[i1][i2] = kn(
  203. # n1[1][node_label], n2[1][node_label],
  204. # [n1[1]['attributes']], [n2[1]['attributes']])
  205. # range1 = range(0, len(edge_w_g[i]))
  206. # range2 = range(0, len(edge_w_g[j]))
  207. # for i1 in range1:
  208. # x1 = edge_x_g[i][i1]
  209. # y1 = edge_y_g[i][i1]
  210. # w1 = edge_w_g[i][i1]
  211. # for i2 in range2:
  212. # x2 = edge_x_g[j][i2]
  213. # y2 = edge_y_g[j][i2]
  214. # w2 = edge_w_g[j][i2]
  215. # ke = (w1 == w2)
  216. # if ke > 0:
  217. # kn1 = vk_mat[x1][x2] * vk_mat[y1][y2]
  218. # kn2 = vk_mat[x1][y2] * vk_mat[y1][x2]
  219. # kernel += kn1 + kn2
  220. return kernel
  221. def _wrapper_sp_do(self, itr):
  222. i = itr[0]
  223. j = itr[1]
  224. return i, j, self.__sp_do(G_gs[i], G_gs[j])

A Python package for graph kernels, graph edit distances and the graph pre-image problem.