
commonWalkKernel.py 15 kB

  1. """
  2. @author: linlin
  3. @references:
  4. [1] Thomas Gärtner, Peter Flach, and Stefan Wrobel. On graph kernels:
  5. Hardness results and efficient alternatives. Learning Theory and Kernel
  6. Machines, pages 129–143, 2003.
  7. """
  8. import sys
  9. import time
  10. from collections import Counter
  11. from functools import partial
  12. import networkx as nx
  13. import numpy as np
  14. from gklearn.utils.utils import direct_product
  15. from gklearn.utils.graphdataset import get_dataset_attributes
  16. from gklearn.utils.parallel import parallel_gm


def commonwalkkernel(*args,
                     node_label='atom',
                     edge_label='bond_type',
                     # n=None,
                     weight=1,
                     compute_method=None,
                     n_jobs=None,
                     verbose=True):
    """Calculate common walk graph kernels between graphs.

    Parameters
    ----------
    Gn : List of NetworkX graph
        List of graphs between which the kernels are calculated.
    G1, G2 : NetworkX graphs
        Two graphs between which the kernel is calculated.
    node_label : string
        Node attribute used as symbolic label. The default node label is 'atom'.
    edge_label : string
        Edge attribute used as symbolic label. The default edge label is
        'bond_type'.
    weight : integer
        Weight coefficient for walks of different lengths; it represents beta
        in the 'exp' method and gamma in the 'geo' method.
    compute_method : string
        Method used to compute the walk kernel. The following choices are
        available:

        'exp': method based on the exponential series applied to the direct
        product graph, as shown in reference [1]. The time complexity is O(n^6)
        for graphs with n vertices.

        'geo': method based on the geometric series applied to the direct
        product graph, as shown in reference [1]. The time complexity is O(n^6)
        for graphs with n vertices.
    n_jobs : int
        Number of jobs for parallelization.

    Return
    ------
    Kmatrix : Numpy matrix
        Kernel matrix, each element of which is a common walk kernel between 2
        graphs.
    """
#    n : integer
#        Longest length of walks. Only useful when applying the 'brute' method.
#        'brute': brute force, simply search for all walks and compare them.
    compute_method = compute_method.lower()
    # arrange all graphs in a list
    Gn = args[0] if len(args) == 1 else [args[0], args[1]]

    # remove graphs with only 1 node, as they do not have adjacency matrices
    len_gn = len(Gn)
    Gn = [(idx, G) for idx, G in enumerate(Gn) if nx.number_of_nodes(G) != 1]
    idx = [G[0] for G in Gn]
    Gn = [G[1] for G in Gn]
    if len(Gn) != len_gn:
        if verbose:
            print('\n %d graphs are removed as they have only 1 node.\n' %
                  (len_gn - len(Gn)))

    ds_attrs = get_dataset_attributes(
        Gn,
        attr_names=['node_labeled', 'edge_labeled', 'is_directed'],
        node_label=node_label, edge_label=edge_label)
    if not ds_attrs['node_labeled']:
        for G in Gn:
            nx.set_node_attributes(G, '0', 'atom')
    if not ds_attrs['edge_labeled']:
        for G in Gn:
            nx.set_edge_attributes(G, '0', 'bond_type')
    if not ds_attrs['is_directed']:  # convert to directed graphs
        Gn = [G.to_directed() for G in Gn]

    start_time = time.time()

    Kmatrix = np.zeros((len(Gn), len(Gn)))

    # ---- use pool.imap_unordered to parallelize and track progress. ----
    def init_worker(gn_toshare):
        global G_gn
        G_gn = gn_toshare

    # direct product graph method - exponential
    if compute_method == 'exp':
        do_partial = partial(wrapper_cw_exp, node_label, edge_label, weight)
    # direct product graph method - geometric
    elif compute_method == 'geo':
        do_partial = partial(wrapper_cw_geo, node_label, edge_label, weight)

    parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker,
                glbv=(Gn,), n_jobs=n_jobs, verbose=verbose)
#    pool = Pool(n_jobs)
#    itr = zip(combinations_with_replacement(Gn, 2),
#              combinations_with_replacement(range(0, len(Gn)), 2))
#    len_itr = int(len(Gn) * (len(Gn) + 1) / 2)
#    if len_itr < 1000 * n_jobs:
#        chunksize = int(len_itr / n_jobs) + 1
#    else:
#        chunksize = 1000
#
#    # direct product graph method - exponential
#    if compute_method == 'exp':
#        do_partial = partial(wrapper_cw_exp, node_label, edge_label, weight)
#    # direct product graph method - geometric
#    elif compute_method == 'geo':
#        do_partial = partial(wrapper_cw_geo, node_label, edge_label, weight)
#
#    for i, j, kernel in tqdm(
#            pool.imap_unordered(do_partial, itr, chunksize),
#            desc='calculating kernels',
#            file=sys.stdout):
#        Kmatrix[i][j] = kernel
#        Kmatrix[j][i] = kernel
#    pool.close()
#    pool.join()

#    # ---- direct running, normally using a single CPU core. ----
#    # direct product graph method - exponential
#    itr = combinations_with_replacement(range(0, len(Gn)), 2)
#    if compute_method == 'exp':
#        for i, j in tqdm(itr, desc='calculating kernels', file=sys.stdout):
#            Kmatrix[i][j] = _commonwalkkernel_exp(Gn[i], Gn[j], node_label,
#                                                  edge_label, weight)
#            Kmatrix[j][i] = Kmatrix[i][j]
#
#    # direct product graph method - geometric
#    elif compute_method == 'geo':
#        for i, j in tqdm(itr, desc='calculating kernels', file=sys.stdout):
#            Kmatrix[i][j] = _commonwalkkernel_geo(Gn[i], Gn[j], node_label,
#                                                  edge_label, weight)
#            Kmatrix[j][i] = Kmatrix[i][j]

#    # search all walks using brute force.
#    elif compute_method == 'brute':
#        n = int(n)
#        # get all walks of all graphs before calculating kernels to save time,
#        # but this may cost a lot of memory for a large dataset.
#        all_walks = [
#            find_all_walks_until_length(Gn[i], n, node_label, edge_label)
#            for i in range(0, len(Gn))
#        ]
#
#        for i in range(0, len(Gn)):
#            for j in range(i, len(Gn)):
#                Kmatrix[i][j] = _commonwalkkernel_brute(
#                    all_walks[i],
#                    all_walks[j],
#                    node_label=node_label,
#                    edge_label=edge_label)
#                Kmatrix[j][i] = Kmatrix[i][j]
    run_time = time.time() - start_time
    if verbose:
        print("\n --- kernel matrix of common walk kernel of size %d built in %s seconds ---"
              % (len(Gn), run_time))

    return Kmatrix, run_time, idx
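
# Note on the return value: besides the kernel matrix and the run time, the
# function returns `idx`, the positions of the input graphs that survived the
# single-node filter above, so callers can map rows/columns of Kmatrix back to
# the original dataset, e.g. (with a hypothetical input list `graphs`):
#
#     kept_graphs = [graphs[i] for i in idx]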


def _commonwalkkernel_exp(g1, g2, node_label, edge_label, beta):
    """Calculate the common walk graph kernel between 2 graphs using the
    exponential series.

    Parameters
    ----------
    g1, g2 : NetworkX graphs
        Graphs between which the kernel is calculated.
    node_label : string
        Node attribute used as label.
    edge_label : string
        Edge attribute used as label.
    beta : integer
        Weight.

    Return
    ------
    kernel : float
        The common walk kernel between the 2 graphs.
    """
    # get the tensor product / direct product graph
    gp = direct_product(g1, g2, node_label, edge_label)
    # return 0 if the direct product graph has no more than 1 node.
    if nx.number_of_nodes(gp) < 2:
        return 0
    A = nx.adjacency_matrix(gp).todense()

    # reconstruct exp(beta * A) from the eigendecomposition of A.
    ew, ev = np.linalg.eig(A)
    D = np.zeros((len(ew), len(ew)))
    for i in range(len(ew)):
        D[i][i] = np.exp(beta * ew[i])
    exp_D = ev * D * ev.T

    return exp_D.sum()
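
# Note on _commonwalkkernel_exp: for undirected inputs, which commonwalkkernel
# symmetrizes via to_directed(), the adjacency matrix A of the direct product
# graph is symmetric, so A = V * diag(lambda_i) * V^T with orthogonal V and
# exp(beta * A) = V * diag(exp(beta * lambda_i)) * V^T, which is exactly what
# the eigendecomposition above reconstructs. The kernel value is the sum of
# all entries of exp(beta * A).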


def wrapper_cw_exp(node_label, edge_label, beta, itr):
    i = itr[0]
    j = itr[1]
    return i, j, _commonwalkkernel_exp(G_gn[i], G_gn[j], node_label,
                                       edge_label, beta)


def _commonwalkkernel_geo(g1, g2, node_label, edge_label, gamma):
    """Calculate the common walk graph kernel between 2 graphs using the
    geometric series.

    Parameters
    ----------
    g1, g2 : NetworkX graphs
        Graphs between which the kernel is calculated.
    node_label : string
        Node attribute used as label.
    edge_label : string
        Edge attribute used as label.
    gamma : integer
        Weight.

    Return
    ------
    kernel : float
        The common walk kernel between the 2 graphs.
    """
    # get the tensor product / direct product graph
    gp = direct_product(g1, g2, node_label, edge_label)
    # return 0 if the direct product graph has no more than 1 node.
    if nx.number_of_nodes(gp) < 2:
        return 0
    A = nx.adjacency_matrix(gp).todense()
    mat = np.identity(len(A)) - gamma * A
#    try:
    return mat.I.sum()
#    except np.linalg.LinAlgError:
#        return np.nan
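
# Note on _commonwalkkernel_geo: when gamma is smaller than the reciprocal of
# the spectral radius of A, the series sum_{n>=0} (gamma * A)^n converges to
# (I - gamma * A)^{-1}, so summing all entries of this inverse accumulates the
# weighted counts of common walks of every length at once.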


def wrapper_cw_geo(node_label, edge_label, gamma, itr):
    i = itr[0]
    j = itr[1]
    return i, j, _commonwalkkernel_geo(G_gn[i], G_gn[j], node_label,
                                       edge_label, gamma)


def _commonwalkkernel_brute(walks1,
                            walks2,
                            node_label='atom',
                            edge_label='bond_type',
                            labeled=True):
    """Calculate the common walk graph kernel between 2 graphs by brute force.

    Parameters
    ----------
    walks1, walks2 : list
        Lists of walks in the 2 graphs, where for unlabeled graphs each walk is
        represented by a list of nodes, while for labeled graphs each walk is
        represented by a string consisting of the labels of the nodes and edges
        on that walk.
    node_label : string
        Node attribute used as label. The default node label is 'atom'.
    edge_label : string
        Edge attribute used as label. The default edge label is 'bond_type'.
    labeled : boolean
        Whether the graphs are labeled. The default is True.

    Return
    ------
    kernel : float
        The common walk kernel between the 2 graphs.
    """
    counts_walks1 = dict(Counter(walks1))
    counts_walks2 = dict(Counter(walks2))
    all_walks = list(set(walks1 + walks2))

    vector1 = [(counts_walks1[walk] if walk in walks1 else 0)
               for walk in all_walks]
    vector2 = [(counts_walks2[walk] if walk in walks2 else 0)
               for walk in all_walks]
    kernel = np.dot(vector1, vector2)

    return kernel
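
# Worked example for _commonwalkkernel_brute (illustrative values): with
# walks1 = ['CC', 'CO', 'CC'] and walks2 = ['CC', 'OO'], the count vectors over
# the union {'CC', 'CO', 'OO'} are [2, 1, 0] and [1, 0, 1], so the kernel value
# is 2*1 + 1*0 + 0*1 = 2.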


# this method finds walks repetitively; it could be made faster.
def find_all_walks_until_length(G,
                                length,
                                node_label='atom',
                                edge_label='bond_type',
                                labeled=True):
    """Find all walks up to a certain maximum length in a graph.

    A recursive depth first search is applied.

    Parameters
    ----------
    G : NetworkX graph
        The graph in which walks are searched.
    length : integer
        The maximum length of walks.
    node_label : string
        Node attribute used as label. The default node label is 'atom'.
    edge_label : string
        Edge attribute used as label. The default edge label is 'bond_type'.
    labeled : boolean
        Whether the graphs are labeled. The default is True.

    Return
    ------
    walk : list
        List of walks retrieved, where for unlabeled graphs each walk is
        represented by a list of nodes, while for labeled graphs each walk
        is represented by a string consisting of the labels of the nodes and
        edges on that walk.
    """
    all_walks = []
    # @todo: in this way, the time complexity is close to N(d^n+d^(n+1)+...+1),
    # which could be optimized to O(Nd^n)
    for i in range(0, length + 1):
        new_walks = find_all_walks(G, i)
        if new_walks == []:
            break
        all_walks.extend(new_walks)

    if labeled == True:  # convert walks to strings
        walk_strs = []
        for walk in all_walks:
            # iterate by position rather than by node value, so that walks
            # that revisit a node are encoded correctly.
            strlist = [
                G.nodes[walk[i]][node_label] +
                G[walk[i]][walk[i + 1]][edge_label]
                for i in range(len(walk) - 1)
            ]
            walk_strs.append(''.join(strlist) + G.nodes[walk[-1]][node_label])

        return walk_strs

    return all_walks
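
# Example of the walk-to-string encoding above (illustrative labels): a walk
# [0, 1, 2] whose nodes carry atom labels 'C', 'C', 'O' and whose two edges
# carry bond_type label '1' is encoded as 'C' + '1' + 'C' + '1' + 'O', i.e.
# the string 'C1C1O'.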


def find_walks(G, source_node, length):
    """Find all walks with a certain length that start from a source node. A
    recursive depth first search is applied.

    Parameters
    ----------
    G : NetworkX graph
        The graph in which walks are searched.
    source_node : integer
        The node from which all walks start.
    length : integer
        The length of walks.

    Return
    ------
    walk : list of list
        List of walks retrieved, where each walk is represented by a list of
        nodes.
    """
    return [[source_node]] if length == 0 else \
        [[source_node] + walk for neighbor in G[source_node]
         for walk in find_walks(G, neighbor, length - 1)]
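
# Small example for find_walks (illustrative): on the undirected path graph
# 0 - 1 - 2, find_walks(G, 0, 2) returns [[0, 1, 0], [0, 1, 2]] (up to
# ordering), since walks, unlike paths, may revisit nodes.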


def find_all_walks(G, length):
    """Find all walks with a certain length in a graph. A recursive depth first
    search is applied.

    Parameters
    ----------
    G : NetworkX graph
        The graph in which walks are searched.
    length : integer
        The length of walks.

    Return
    ------
    walk : list of list
        List of walks retrieved, where each walk is represented by a list of
        nodes.
    """
    all_walks = []
    for node in G:
        all_walks.extend(find_walks(G, node, length))

#    # The following process is not carried out according to the original article:
#    all_paths_r = [path[::-1] for path in all_paths]
#
#    # For each path, two representations are retrieved from its two
#    # extremities. Remove one of them.
#    for idx, path in enumerate(all_paths[:-1]):
#        for path2 in all_paths_r[idx+1::]:
#            if path == path2:
#                all_paths[idx] = []
#                break
#
#    return list(filter(lambda a: a != [], all_paths))

    return all_walks
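

# A minimal end-to-end demo (an illustrative sketch, not part of the library's
# test suite): it builds two tiny labeled graphs and computes their common walk
# kernel with the geometric method, calling commonwalkkernel exactly as a user
# would. The weight (gamma) is kept small so that I - gamma * A stays
# invertible for the direct product graph; the run relies on gklearn's
# parallel_gm behaving as it is used above.
if __name__ == '__main__':
    g1 = nx.Graph()
    g1.add_nodes_from([(0, {'atom': 'C'}), (1, {'atom': 'C'}), (2, {'atom': 'O'})])
    g1.add_edges_from([(0, 1, {'bond_type': '1'}), (1, 2, {'bond_type': '1'})])

    g2 = nx.Graph()
    g2.add_nodes_from([(0, {'atom': 'C'}), (1, {'atom': 'O'}), (2, {'atom': 'C'})])
    g2.add_edges_from([(0, 1, {'bond_type': '1'}), (1, 2, {'bond_type': '1'})])

    Kmatrix, run_time, idx = commonwalkkernel([g1, g2],
                                              node_label='atom',
                                              edge_label='bond_type',
                                              weight=0.01,
                                              compute_method='geo',
                                              n_jobs=2,
                                              verbose=False)
    print(Kmatrix)
    print('computed in %s seconds' % run_time)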
