
spKernel.py 12 kB

  1. """
  2. @author: linlin
  3. @references:
  4. [1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData
  5. Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE.
  6. """
  7. import sys
  8. import time
  9. from itertools import product
  10. from functools import partial
  11. from multiprocessing import Pool
  12. from tqdm import tqdm
  13. import networkx as nx
  14. import numpy as np
  15. from gklearn.utils.utils import getSPGraph
  16. from gklearn.utils.graphdataset import get_dataset_attributes
  17. from gklearn.utils.parallel import parallel_gm
def spkernel(*args,
             node_label='atom',
             edge_weight=None,
             node_kernels=None,
             parallel='imap_unordered',
             n_jobs=None,
             verbose=True):
    """Calculate shortest-path kernels between graphs.

    Parameters
    ----------
    Gn : List of NetworkX graph
        List of graphs between which the kernels are calculated.

    G1, G2 : NetworkX graphs
        Two graphs between which the kernel is calculated.

    node_label : string
        Node attribute used as label. The default node label is 'atom'.

    edge_weight : string
        Edge attribute name corresponding to the edge weight.

    node_kernels : dict
        A dictionary of kernel functions for nodes, including 3 items: 'symb'
        for symbolic node labels, 'nsymb' for non-symbolic node labels, and
        'mix' for both. The first 2 functions take two node labels as
        parameters, and the 'mix' function takes 4 parameters: a symbolic and
        a non-symbolic label for each of the two nodes. Each label is in the
        form of a 2-D array (n_samples, n_features). Each function returns a
        number as the kernel value. Ignored when nodes are unlabeled.

    n_jobs : int
        Number of jobs for parallelization.

    Returns
    -------
    Kmatrix : Numpy matrix
        Kernel matrix, each element of which is the sp kernel between 2 graphs.
    """
    # pre-process
    Gn = args[0] if len(args) == 1 else [args[0], args[1]]
    Gn = [g.copy() for g in Gn]
    weight = None
    if edge_weight is None:
        if verbose:
            print('\n No edge weight specified. Set all weights to 1.\n')
    else:
        try:
            some_weight = list(
                nx.get_edge_attributes(Gn[0], edge_weight).values())[0]
            if isinstance(some_weight, (float, int)):
                weight = edge_weight
            else:
                if verbose:
                    print(
                        '\n Edge weight with name %s is not float or integer. Set all weights to 1.\n'
                        % edge_weight)
        except Exception:
            if verbose:
                print(
                    '\n Edge weight with name "%s" is not found in the edge attributes. Set all weights to 1.\n'
                    % edge_weight)
    ds_attrs = get_dataset_attributes(
        Gn,
        attr_names=['node_labeled', 'node_attr_dim', 'is_directed'],
        node_label=node_label)

    # remove graphs with no edges, as no sp can be found in their structures,
    # so the kernel between such a graph and itself will be zero.
    len_gn = len(Gn)
    Gn = [(idx, G) for idx, G in enumerate(Gn) if nx.number_of_edges(G) != 0]
    idx = [G[0] for G in Gn]
    Gn = [G[1] for G in Gn]
    if len(Gn) != len_gn:
        if verbose:
            print('\n %d graphs are removed as they don\'t contain edges.\n' %
                  (len_gn - len(Gn)))
    start_time = time.time()

    if parallel == 'imap_unordered':
        pool = Pool(n_jobs)
        # get shortest path graphs of Gn
        getsp_partial = partial(wrapper_getSPGraph, weight)
        itr = zip(Gn, range(0, len(Gn)))
        if len(Gn) < 100 * n_jobs:
            # # use default chunksize as pool.map when iterable is less than 100
            # chunksize, extra = divmod(len(Gn), n_jobs * 4)
            # if extra:
            #     chunksize += 1
            chunksize = int(len(Gn) / n_jobs) + 1
        else:
            chunksize = 100
        if verbose:
            iterator = tqdm(pool.imap_unordered(getsp_partial, itr, chunksize),
                            desc='getting sp graphs', file=sys.stdout)
        else:
            iterator = pool.imap_unordered(getsp_partial, itr, chunksize)
        for i, g in iterator:
            Gn[i] = g
        pool.close()
        pool.join()
    elif parallel is None:
        pass
        # # ---- direct running, normally use single CPU core. ----
        # for i in tqdm(range(len(Gn)), desc='getting sp graphs', file=sys.stdout):
        #     i, Gn[i] = wrapper_getSPGraph(weight, (Gn[i], i))

    # # ---- use pool.map to parallel ----
    # result_sp = pool.map(getsp_partial, range(0, len(Gn)))
    # for i in result_sp:
    #     Gn[i[0]] = i[1]
    # or
    # getsp_partial = partial(wrap_getSPGraph, Gn, weight)
    # for i, g in tqdm(
    #         pool.map(getsp_partial, range(0, len(Gn))),
    #         desc='getting sp graphs',
    #         file=sys.stdout):
    #     Gn[i] = g

    # # ---- only for the Fast Computation of Shortest Path Kernel (FCSP)
    # sp_ml = [0] * len(Gn)  # shortest path matrices
    # for i in result_sp:
    #     sp_ml[i[0]] = i[1]
    # edge_x_g = [[] for i in range(len(sp_ml))]
    # edge_y_g = [[] for i in range(len(sp_ml))]
    # edge_w_g = [[] for i in range(len(sp_ml))]
    # for idx, item in enumerate(sp_ml):
    #     for i1 in range(len(item)):
    #         for i2 in range(i1 + 1, len(item)):
    #             if item[i1, i2] != np.inf:
    #                 edge_x_g[idx].append(i1)
    #                 edge_y_g[idx].append(i2)
    #                 edge_w_g[idx].append(item[i1, i2])
    # print(len(edge_x_g[0]))
    # print(len(edge_y_g[0]))
    # print(len(edge_w_g[0]))
    Kmatrix = np.zeros((len(Gn), len(Gn)))

    # ---- use pool.imap_unordered to parallel and track progress. ----
    def init_worker(gn_toshare):
        global G_gn
        G_gn = gn_toshare
    do_partial = partial(wrapper_sp_do, ds_attrs, node_label, node_kernels)
    parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker,
                glbv=(Gn,), n_jobs=n_jobs, verbose=verbose)

    # # ---- use pool.map to parallel. ----
    # # result_perf = pool.map(do_partial, itr)
    # do_partial = partial(spkernel_do, Gn, ds_attrs, node_label, node_kernels)
    # itr = combinations_with_replacement(range(0, len(Gn)), 2)
    # for i, j, kernel in tqdm(
    #         pool.map(do_partial, itr), desc='calculating kernels',
    #         file=sys.stdout):
    #     Kmatrix[i][j] = kernel
    #     Kmatrix[j][i] = kernel
    # pool.close()
    # pool.join()

    # # ---- use joblib.Parallel to parallel and track progress. ----
    # result_perf = Parallel(
    #     n_jobs=n_jobs, verbose=10)(
    #         delayed(do_partial)(ij)
    #         for ij in combinations_with_replacement(range(0, len(Gn)), 2))
    # result_perf = [
    #     do_partial(ij)
    #     for ij in combinations_with_replacement(range(0, len(Gn)), 2)
    # ]
    # for i in result_perf:
    #     Kmatrix[i[0]][i[1]] = i[2]
    #     Kmatrix[i[1]][i[0]] = i[2]

    # # ---- direct running, normally use single CPU core. ----
    # from itertools import combinations_with_replacement
    # itr = combinations_with_replacement(range(0, len(Gn)), 2)
    # for i, j in tqdm(itr, desc='calculating kernels', file=sys.stdout):
    #     kernel = spkernel_do(Gn[i], Gn[j], ds_attrs, node_label, node_kernels)
    #     Kmatrix[i][j] = kernel
    #     Kmatrix[j][i] = kernel

    run_time = time.time() - start_time
    if verbose:
        print(
            "\n --- shortest path kernel matrix of size %d built in %s seconds ---"
            % (len(Gn), run_time))

    return Kmatrix, run_time, idx
def spkernel_do(g1, g2, ds_attrs, node_label, node_kernels):

    kernel = 0

    # compute vertex kernels first, method borrowed from FCSP.
    vk_dict = {}  # vertex kernel dict
    if ds_attrs['node_labeled']:
        # node symb and non-symb labeled
        if ds_attrs['node_attr_dim'] > 0:
            kn = node_kernels['mix']
            for n1, n2 in product(
                    g1.nodes(data=True), g2.nodes(data=True)):
                vk_dict[(n1[0], n2[0])] = kn(
                    n1[1][node_label], n2[1][node_label],
                    n1[1]['attributes'], n2[1]['attributes'])
        # node symb labeled
        else:
            kn = node_kernels['symb']
            for n1 in g1.nodes(data=True):
                for n2 in g2.nodes(data=True):
                    vk_dict[(n1[0], n2[0])] = kn(n1[1][node_label],
                                                 n2[1][node_label])
    else:
        # node non-symb labeled
        if ds_attrs['node_attr_dim'] > 0:
            kn = node_kernels['nsymb']
            for n1 in g1.nodes(data=True):
                for n2 in g2.nodes(data=True):
                    vk_dict[(n1[0], n2[0])] = kn(n1[1]['attributes'],
                                                 n2[1]['attributes'])
        # node unlabeled
        else:
            for e1, e2 in product(
                    g1.edges(data=True), g2.edges(data=True)):
                if e1[2]['cost'] == e2[2]['cost']:
                    kernel += 1
            return kernel
    # compute graph kernels
    if ds_attrs['is_directed']:
        for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
            if e1[2]['cost'] == e2[2]['cost']:
                nk11, nk22 = vk_dict[(e1[0], e2[0])], vk_dict[(e1[1], e2[1])]
                kn1 = nk11 * nk22
                kernel += kn1
    else:
        for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)):
            if e1[2]['cost'] == e2[2]['cost']:
                # each edge walk is counted twice, starting from both its
                # extreme nodes.
                nk11, nk12, nk21, nk22 = (vk_dict[(e1[0], e2[0])],
                                          vk_dict[(e1[0], e2[1])],
                                          vk_dict[(e1[1], e2[0])],
                                          vk_dict[(e1[1], e2[1])])
                kn1 = nk11 * nk22
                kn2 = nk12 * nk21
                kernel += kn1 + kn2
    # # ---- exact implementation of the Fast Computation of Shortest Path
    # # Kernel (FCSP), reference [2]; sadly it is slower than the current
    # # implementation.
    # # compute vertex kernels
    # try:
    #     vk_mat = np.zeros((nx.number_of_nodes(g1),
    #                        nx.number_of_nodes(g2)))
    #     g1nl = enumerate(g1.nodes(data=True))
    #     g2nl = enumerate(g2.nodes(data=True))
    #     for i1, n1 in g1nl:
    #         for i2, n2 in g2nl:
    #             vk_mat[i1][i2] = kn(
    #                 n1[1][node_label], n2[1][node_label],
    #                 [n1[1]['attributes']], [n2[1]['attributes']])
    #     range1 = range(0, len(edge_w_g[i]))
    #     range2 = range(0, len(edge_w_g[j]))
    #     for i1 in range1:
    #         x1 = edge_x_g[i][i1]
    #         y1 = edge_y_g[i][i1]
    #         w1 = edge_w_g[i][i1]
    #         for i2 in range2:
    #             x2 = edge_x_g[j][i2]
    #             y2 = edge_y_g[j][i2]
    #             w2 = edge_w_g[j][i2]
    #             ke = (w1 == w2)
    #             if ke > 0:
    #                 kn1 = vk_mat[x1][x2] * vk_mat[y1][y2]
    #                 kn2 = vk_mat[x1][y2] * vk_mat[y1][x2]
    #                 kernel += kn1 + kn2

    return kernel
def wrapper_sp_do(ds_attrs, node_label, node_kernels, itr):
    i = itr[0]
    j = itr[1]
    return i, j, spkernel_do(G_gn[i], G_gn[j], ds_attrs, node_label,
                             node_kernels)


#def wrapper_sp_do(ds_attrs, node_label, node_kernels, itr_item):
#    g1 = itr_item[0][0]
#    g2 = itr_item[0][1]
#    i = itr_item[1][0]
#    j = itr_item[1][1]
#    return i, j, spkernel_do(g1, g2, ds_attrs, node_label, node_kernels)


def wrapper_getSPGraph(weight, itr_item):
    g = itr_item[0]
    i = itr_item[1]
    return i, getSPGraph(g, edge_weight=weight)
    # return i, nx.floyd_warshall_numpy(g, weight=weight)
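
The following is a minimal usage sketch, not part of spKernel.py, showing how spkernel might be called on two small, symbolically labeled graphs. The import path gklearn.kernels.spKernel and the hand-rolled node kernel functions are assumptions for illustration; gklearn provides its own node kernel helpers whose names may differ.

# Minimal usage sketch -- illustrative only, not part of spKernel.py.
# The import path and the simple node kernels below are assumptions.
import networkx as nx
import numpy as np
from gklearn.kernels.spKernel import spkernel  # assumed module path


def delta_kernel(x, y):
    # kernel for symbolic node labels: 1 if the labels match, else 0
    return 1 if x == y else 0


def gaussian_kernel(x, y, gamma=1.0):
    # kernel for non-symbolic (vector) node labels: RBF on the difference
    d = np.asarray(x, dtype=float) - np.asarray(y, dtype=float)
    return np.exp(-gamma * np.dot(d, d))


def mixed_kernel(xs, ys, xa, ya):
    # 'mix' kernel: product of the symbolic and non-symbolic kernels
    return delta_kernel(xs, ys) * gaussian_kernel(xa, ya)


if __name__ == '__main__':
    node_kernels = {'symb': delta_kernel,
                    'nsymb': gaussian_kernel,
                    'mix': mixed_kernel}

    # two tiny molecule-like graphs labeled by the 'atom' node attribute
    g1 = nx.Graph()
    g1.add_nodes_from([(0, {'atom': 'C'}), (1, {'atom': 'O'}),
                       (2, {'atom': 'C'})])
    g1.add_edges_from([(0, 1), (1, 2)])

    g2 = nx.Graph()
    g2.add_nodes_from([(0, {'atom': 'C'}), (1, {'atom': 'C'})])
    g2.add_edges_from([(0, 1)])

    Kmatrix, run_time, idx = spkernel([g1, g2],
                                      node_label='atom',
                                      node_kernels=node_kernels,
                                      n_jobs=2,
                                      verbose=False)
    print(Kmatrix)  # 2 x 2 shortest-path kernel matrix

Since only symbolic 'atom' labels are present here, spkernel falls back to the 'symb' kernel internally; the 'nsymb' and 'mix' entries are included only to show the expected dictionary layout.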

A Python package for graph kernels, graph edit distances, and the graph pre-image problem.