
sylvester_equation.py 7.6 kB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 17:24:46 2020

@author: ljia

@references:

    [1] S Vichy N Vishwanathan, Nicol N Schraudolph, Risi Kondor, and Karsten M Borgwardt. Graph kernels. Journal of Machine Learning Research, 11(Apr):1201–1242, 2010.
"""
import sys
from gklearn.utils import get_iters
import numpy as np
import networkx as nx
from control import dlyap
from gklearn.utils.parallel import parallel_gm, parallel_me
from gklearn.kernels import RandomWalkMeta


class SylvesterEquation(RandomWalkMeta):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _compute_gm_series(self):
        self._check_edge_weight(self._graphs, self.verbose)
        self._check_graphs(self._graphs)
        if self.verbose >= 2:
            import warnings
            warnings.warn('All labels are ignored.')

        lmda = self._weight

        # compute Gram matrix.
        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))

        if self._q is None:
            # don't normalize adjacency matrices if q is a uniform vector. Note
            # A_wave_list actually contains the transposes of the adjacency matrices.
            iterator = get_iters(self._graphs, desc='compute adjacency matrices', file=sys.stdout, verbose=(self.verbose >= 2))
            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator]
            # # normalized adjacency matrices
            # A_wave_list = []
            # for G in tqdm(Gn, desc='compute adjacency matrices', file=sys.stdout):
            #     A_tilde = nx.adjacency_matrix(G, eweight).todense().transpose()
            #     norm = A_tilde.sum(axis=0)
            #     norm[norm == 0] = 1
            #     A_wave_list.append(A_tilde / norm)

            if self._p is None:  # p is uniform distribution as default.
                from itertools import combinations_with_replacement
                itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
                len_itr = int(len(self._graphs) * (len(self._graphs) + 1) / 2)
                iterator = get_iters(itr, desc='Computing kernels', file=sys.stdout, length=len_itr, verbose=(self.verbose >= 2))

                for i, j in iterator:
                    kernel = self._kernel_do(A_wave_list[i], A_wave_list[j], lmda)
                    gram_matrix[i][j] = kernel
                    gram_matrix[j][i] = kernel

            else:  # @todo
                pass
        else:  # @todo
            pass

        return gram_matrix

    def _compute_gm_imap_unordered(self):
        self._check_edge_weight(self._graphs, self.verbose)
        self._check_graphs(self._graphs)
        if self.verbose >= 2:
            import warnings
            warnings.warn('All labels are ignored.')

        # compute Gram matrix.
        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))

        if self._q is None:
            # don't normalize adjacency matrices if q is a uniform vector. Note
            # A_wave_list actually contains the transposes of the adjacency matrices.
            iterator = get_iters(self._graphs, desc='compute adjacency matrices', file=sys.stdout, verbose=(self.verbose >= 2))
            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator]  # @todo: parallel?

            if self._p is None:  # p is uniform distribution as default.

                def init_worker(A_wave_list_toshare):
                    global G_A_wave_list
                    G_A_wave_list = A_wave_list_toshare

                do_fun = self._wrapper_kernel_do
                parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
                            glbv=(A_wave_list,), n_jobs=self.n_jobs, verbose=self.verbose)

            else:  # @todo
                pass
        else:  # @todo
            pass

        return gram_matrix

    def _compute_kernel_list_series(self, g1, g_list):
        self._check_edge_weight(g_list + [g1], self.verbose)
        self._check_graphs(g_list + [g1])
        if self.verbose >= 2:
            import warnings
            warnings.warn('All labels are ignored.')

        lmda = self._weight

        # compute kernel list.
        kernel_list = [None] * len(g_list)

        if self._q is None:
            # don't normalize adjacency matrices if q is a uniform vector. Note
            # A_wave_list actually contains the transposes of the adjacency matrices.
            A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
            iterator = get_iters(g_list, desc='compute adjacency matrices', file=sys.stdout, verbose=(self.verbose >= 2))
            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator]

            if self._p is None:  # p is uniform distribution as default.
                iterator = get_iters(range(len(g_list)), desc='Computing kernels', file=sys.stdout, length=len(g_list), verbose=(self.verbose >= 2))

                for i in iterator:
                    kernel = self._kernel_do(A_wave_1, A_wave_list[i], lmda)
                    kernel_list[i] = kernel

            else:  # @todo
                pass
        else:  # @todo
            pass

        return kernel_list

    def _compute_kernel_list_imap_unordered(self, g1, g_list):
        self._check_edge_weight(g_list + [g1], self.verbose)
        self._check_graphs(g_list + [g1])
        if self.verbose >= 2:
            import warnings
            warnings.warn('All labels are ignored.')

        # compute kernel list.
        kernel_list = [None] * len(g_list)

        if self._q is None:
            # don't normalize adjacency matrices if q is a uniform vector. Note
            # A_wave_list actually contains the transposes of the adjacency matrices.
            A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
            iterator = get_iters(g_list, desc='compute adjacency matrices', file=sys.stdout, verbose=(self.verbose >= 2))
            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator]  # @todo: parallel?

            if self._p is None:  # p is uniform distribution as default.

                def init_worker(A_wave_1_toshare, A_wave_list_toshare):
                    global G_A_wave_1, G_A_wave_list
                    G_A_wave_1 = A_wave_1_toshare
                    G_A_wave_list = A_wave_list_toshare

                do_fun = self._wrapper_kernel_list_do

                def func_assign(result, var_to_assign):
                    var_to_assign[result[0]] = result[1]

                itr = range(len(g_list))
                len_itr = len(g_list)
                parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
                            init_worker=init_worker, glbv=(A_wave_1, A_wave_list), method='imap_unordered',
                            n_jobs=self.n_jobs, itr_desc='Computing kernels', verbose=self.verbose)

            else:  # @todo
                pass
        else:  # @todo
            pass

        return kernel_list

    def _wrapper_kernel_list_do(self, itr):
        return itr, self._kernel_do(G_A_wave_1, G_A_wave_list[itr], self._weight)

    def _compute_single_kernel_series(self, g1, g2):
        self._check_edge_weight([g1] + [g2], self.verbose)
        self._check_graphs([g1] + [g2])
        if self.verbose >= 2:
            import warnings
            warnings.warn('All labels are ignored.')

        lmda = self._weight

        if self._q is None:
            # don't normalize adjacency matrices if q is a uniform vector. Note
            # A_wave_1 and A_wave_2 are the transposes of the adjacency matrices.
            A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
            A_wave_2 = nx.adjacency_matrix(g2, self._edge_weight).todense().transpose()
            if self._p is None:  # p is uniform distribution as default.
                kernel = self._kernel_do(A_wave_1, A_wave_2, lmda)
            else:  # @todo
                pass
        else:  # @todo
            pass

        return kernel

    def _kernel_do(self, A_wave1, A_wave2, lmda):
        S = lmda * A_wave2
        T_t = A_wave1
        # use uniform distribution if there is no prior knowledge.
        nb_pd = len(A_wave1) * len(A_wave2)
        p_times_uni = 1 / nb_pd
        M0 = np.full((len(A_wave2), len(A_wave1)), p_times_uni)
        X = dlyap(S, T_t, M0)
        X = np.reshape(X, (-1, 1), order='F')
        # use uniform distribution if there is no prior knowledge.
        q_times = np.full((1, nb_pd), p_times_uni)
        return np.dot(q_times, X)

    def _wrapper_kernel_do(self, itr):
        i = itr[0]
        j = itr[1]
        return i, j, self._kernel_do(G_A_wave_list[i], G_A_wave_list[j], self._weight)

A Python package for graph kernels, graph edit distances and the graph pre-image problem.
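
For reference, `_kernel_do` above implements the Sylvester-equation formulation of the random-walk graph kernel from reference [1]: with uniform starting and stopping distributions, it solves a discrete Sylvester/Lyapunov-type equation via python-control's `dlyap` and contracts the vectorized solution with the stopping distribution. The following is a minimal sketch that reproduces this computation outside the class for two small unlabeled graphs; the example graphs, the decay weight value, and the variable names are illustrative assumptions, not part of gklearn, and it relies only on `numpy`, `networkx`, and the same `control.dlyap` solver the class already imports.

    # Illustrative sketch (not part of gklearn): mirror the computation in
    # SylvesterEquation._kernel_do for two small hypothetical graphs.
    import numpy as np
    import networkx as nx
    from control import dlyap

    g1 = nx.path_graph(3)    # hypothetical example graph (3 nodes)
    g2 = nx.cycle_graph(4)   # hypothetical example graph (4 nodes)
    lmda = 0.01              # decay weight, playing the role of self._weight

    # Transposed dense adjacency matrices, mirroring A_wave_1 / A_wave_2.
    A_wave1 = np.asarray(nx.adjacency_matrix(g1).todense()).T
    A_wave2 = np.asarray(nx.adjacency_matrix(g2).todense()).T

    # Uniform starting (p) and stopping (q) distributions over node pairs.
    nb_pd = len(A_wave1) * len(A_wave2)
    p_times_uni = 1 / nb_pd
    M0 = np.full((len(A_wave2), len(A_wave1)), p_times_uni)

    # Solve the Sylvester-type equation with dlyap, vectorize the solution
    # column-wise, and contract with the uniform stopping distribution.
    X = dlyap(lmda * A_wave2, A_wave1, M0)
    X = np.reshape(X, (-1, 1), order='F')
    q_times = np.full((1, nb_pd), p_times_uni)
    kernel = float(np.dot(q_times, X))
    print(kernel)

As in the class, the decay weight must be small enough for the underlying geometric series over walks to converge; the sketch prints a single kernel value, whereas the class methods assemble such values into a Gram matrix or kernel list.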