@@ -14,61 +14,56 @@ import sys
 from tqdm import tqdm
 import numpy as np
 import networkx as nx
-from control import dlyap
+from scipy import optimize
 from gklearn.utils.parallel import parallel_gm, parallel_me
-from gklearn.kernels import RandomWalk
+from gklearn.kernels import RandomWalkMeta
+from gklearn.utils.utils import compute_vertex_kernels

-class FixedPoint(RandomWalk):
+class FixedPoint(RandomWalkMeta):

     def __init__(self, **kwargs):
-        RandomWalk.__init__(self, **kwargs)
+        super().__init__(**kwargs)
+        self._node_kernels = kwargs.get('node_kernels', None)
+        self._edge_kernels = kwargs.get('edge_kernels', None)
+        self._node_labels = kwargs.get('node_labels', [])
+        self._edge_labels = kwargs.get('edge_labels', [])
+        self._node_attrs = kwargs.get('node_attrs', [])
+        self._edge_attrs = kwargs.get('edge_attrs', [])

     def _compute_gm_series(self):
-        self._check_edge_weight(self._graphs)
+        self._check_edge_weight(self._graphs, self._verbose)
         self._check_graphs(self._graphs)
-        if self._verbose >= 2:
-            import warnings
-            warnings.warn('All labels are ignored.')
         lmda = self._weight
-        # compute Gram matrix.
+        # Compute Gram matrix.
         gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
-        if self._q == None:
-            # don't normalize adjacency matrices if q is a uniform vector. Note
-            # A_wave_list actually contains the transposes of the adjacency matrices.
+        # Reindex nodes using consecutive integers for the convenience of kernel computation.
+        if self._verbose >= 2:
+            iterator = tqdm(self._graphs, desc='Reindex vertices', file=sys.stdout)
+        else:
+            iterator = self._graphs
+        self._graphs = [nx.convert_node_labels_to_integers(g, first_label=0, label_attribute='label_orignal') for g in iterator]
+        if self._p is None and self._q is None: # p and q are uniform distributions as default.
+            from itertools import combinations_with_replacement
+            itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
             if self._verbose >= 2:
-                iterator = tqdm(self._graphs, desc='compute adjacency matrices', file=sys.stdout)
+                iterator = tqdm(itr, desc='Computing kernels', file=sys.stdout)
             else:
-                iterator = self._graphs
-            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator]
-#            # normalized adjacency matrices
-#            A_wave_list = []
-#            for G in tqdm(Gn, desc='compute adjacency matrices', file=sys.stdout):
-#                A_tilde = nx.adjacency_matrix(G, eweight).todense().transpose()
-#                norm = A_tilde.sum(axis=0)
-#                norm[norm == 0] = 1
-#                A_wave_list.append(A_tilde / norm)
-            if self._p == None: # p is uniform distribution as default.
-                from itertools import combinations_with_replacement
-                itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
-                if self._verbose >= 2:
-                    iterator = tqdm(itr, desc='calculating kernels', file=sys.stdout)
-                else:
-                    iterator = itr
-                for i, j in iterator:
-                    kernel = self.__kernel_do(A_wave_list[i], A_wave_list[j], lmda)
-                    gram_matrix[i][j] = kernel
-                    gram_matrix[j][i] = kernel
-            else: # @todo
-                pass
+                iterator = itr
+            for i, j in iterator:
+                kernel = self.__kernel_do(self._graphs[i], self._graphs[j], lmda)
+                gram_matrix[i][j] = kernel
+                gram_matrix[j][i] = kernel
         else: # @todo
             pass
@@ -76,36 +71,31 @@ class FixedPoint(RandomWalk):

     def _compute_gm_imap_unordered(self):
-        self._check_edge_weight(self._graphs)
+        self._check_edge_weight(self._graphs, self._verbose)
         self._check_graphs(self._graphs)
-        if self._verbose >= 2:
-            import warnings
-            warnings.warn('All labels are ignored.')
-        # compute Gram matrix.
-        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
+        # Compute Gram matrix.
+        gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
-        if self._q == None:
-            # don't normalize adjacency matrices if q is a uniform vector. Note
-            # A_wave_list actually contains the transposes of the adjacency matrices.
-            if self._verbose >= 2:
-                iterator = tqdm(self._graphs, desc='compute adjacency matrices', file=sys.stdout)
-            else:
-                iterator = self._graphs
-            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator] # @todo: parallel?
-            if self._p == None: # p is uniform distribution as default.
-                def init_worker(A_wave_list_toshare):
-                    global G_A_wave_list
-                    G_A_wave_list = A_wave_list_toshare
-                do_fun = self._wrapper_kernel_do
-                parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
-                    glbv=(A_wave_list,), n_jobs=self._n_jobs, verbose=self._verbose)
-            else: # @todo
-                pass
+        # @todo: parallel this.
+        # Reindex nodes using consecutive integers for the convenience of kernel computation.
+        if self._verbose >= 2:
+            iterator = tqdm(self._graphs, desc='Reindex vertices', file=sys.stdout)
+        else:
+            iterator = self._graphs
+        self._graphs = [nx.convert_node_labels_to_integers(g, first_label=0, label_attribute='label_orignal') for g in iterator]
+        if self._p is None and self._q is None: # p and q are uniform distributions as default.
+            def init_worker(gn_toshare):
+                global G_gn
+                G_gn = gn_toshare
+            do_fun = self._wrapper_kernel_do
+            parallel_gm(do_fun, gram_matrix, self._graphs, init_worker=init_worker,
+                glbv=(self._graphs,), n_jobs=self._n_jobs, verbose=self._verbose)
         else: # @todo
             pass
@@ -113,39 +103,33 @@ class FixedPoint(RandomWalk):

     def _compute_kernel_list_series(self, g1, g_list):
-        self._check_edge_weight(g_list + [g1])
+        self._check_edge_weight(g_list + [g1], self._verbose)
         self._check_graphs(g_list + [g1])
-        if self._verbose >= 2:
-            import warnings
-            warnings.warn('All labels are ignored.')
         lmda = self._weight
         # compute kernel list.
         kernel_list = [None] * len(g_list)
+        # Reindex nodes using consecutive integers for the convenience of kernel computation.
+        g1 = nx.convert_node_labels_to_integers(g1, first_label=0, label_attribute='label_orignal')
+        if self._verbose >= 2:
+            iterator = tqdm(g_list, desc='Reindex vertices', file=sys.stdout)
+        else:
+            iterator = g_list
+        g_list = [nx.convert_node_labels_to_integers(g, first_label=0, label_attribute='label_orignal') for g in iterator]
-        if self._q == None:
-            # don't normalize adjacency matrices if q is a uniform vector. Note
-            # A_wave_list actually contains the transposes of the adjacency matrices.
-            A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
+        if self._p is None and self._q is None: # p and q are uniform distributions as default.
            if self._verbose >= 2:
-                iterator = tqdm(range(len(g_list)), desc='compute adjacency matrices', file=sys.stdout)
+                iterator = tqdm(range(len(g_list)), desc='Computing kernels', file=sys.stdout)
            else:
                iterator = range(len(g_list))
-            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator]
-            if self._p == None: # p is uniform distribution as default.
-                if self._verbose >= 2:
-                    iterator = tqdm(range(len(g_list)), desc='calculating kernels', file=sys.stdout)
-                else:
-                    iterator = range(len(g_list))
-                for i in iterator:
-                    kernel = self.__kernel_do(A_wave_1, A_wave_list[i], lmda)
-                    kernel_list[i] = kernel
-            else: # @todo
-                pass
+            for i in iterator:
+                kernel = self.__kernel_do(g1, g_list[i], lmda)
+                kernel_list[i] = kernel
         else: # @todo
             pass
@@ -153,43 +137,38 @@ class FixedPoint(RandomWalk):

     def _compute_kernel_list_imap_unordered(self, g1, g_list):
-        self._check_edge_weight(g_list + [g1])
+        self._check_edge_weight(g_list + [g1], self._verbose)
         self._check_graphs(g_list + [g1])
-        if self._verbose >= 2:
-            import warnings
-            warnings.warn('All labels are ignored.')
         # compute kernel list.
         kernel_list = [None] * len(g_list)
-        if self._q == None:
-            # don't normalize adjacency matrices if q is a uniform vector. Note
-            # A_wave_list actually contains the transposes of the adjacency matrices.
-            A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
-            if self._verbose >= 2:
-                iterator = tqdm(range(len(g_list)), desc='compute adjacency matrices', file=sys.stdout)
-            else:
-                iterator = range(len(g_list))
-            A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator] # @todo: parallel?
+        # Reindex nodes using consecutive integers for the convenience of kernel computation.
+        g1 = nx.convert_node_labels_to_integers(g1, first_label=0, label_attribute='label_orignal')
+        # @todo: parallel this.
+        if self._verbose >= 2:
+            iterator = tqdm(g_list, desc='Reindex vertices', file=sys.stdout)
+        else:
+            iterator = g_list
+        g_list = [nx.convert_node_labels_to_integers(g, first_label=0, label_attribute='label_orignal') for g in iterator]
+        if self._p is None and self._q is None: # p and q are uniform distributions as default.
-            if self._p == None: # p is uniform distribution as default.
-                def init_worker(A_wave_1_toshare, A_wave_list_toshare):
-                    global G_A_wave_1, G_A_wave_list
-                    G_A_wave_1 = A_wave_1_toshare
-                    G_A_wave_list = A_wave_list_toshare
+            def init_worker(g1_toshare, g_list_toshare):
+                global G_g1, G_g_list
+                G_g1 = g1_toshare
+                G_g_list = g_list_toshare
-                do_fun = self._wrapper_kernel_list_do
-                def func_assign(result, var_to_assign):
-                    var_to_assign[result[0]] = result[1]
-                itr = range(len(g_list))
-                len_itr = len(g_list)
-                parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
-                    init_worker=init_worker, glbv=(A_wave_1, A_wave_list), method='imap_unordered',
-                    n_jobs=self._n_jobs, itr_desc='calculating kernels', verbose=self._verbose)
+            do_fun = self._wrapper_kernel_list_do
+            def func_assign(result, var_to_assign):
+                var_to_assign[result[0]] = result[1]
+            itr = range(len(g_list))
+            len_itr = len(g_list)
+            parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
+                init_worker=init_worker, glbv=(g1, g_list), method='imap_unordered',
+                n_jobs=self._n_jobs, itr_desc='Computing kernels', verbose=self._verbose)
-            else: # @todo
-                pass
         else: # @todo
             pass
@@ -197,49 +176,146 @@ class FixedPoint(RandomWalk):

     def _wrapper_kernel_list_do(self, itr):
-        return itr, self._kernel_do(G_A_wave_1, G_A_wave_list[itr], self._weight)
+        return itr, self.__kernel_do(G_g1, G_g_list[itr], self._weight)

     def _compute_single_kernel_series(self, g1, g2):
-        self._check_edge_weight([g1] + [g2])
+        self._check_edge_weight([g1] + [g2], self._verbose)
         self._check_graphs([g1] + [g2])
-        if self._verbose >= 2:
-            import warnings
-            warnings.warn('All labels are ignored.')
         lmda = self._weight
-        if self._q == None:
-            # don't normalize adjacency matrices if q is a uniform vector. Note
-            # A_wave_list actually contains the transposes of the adjacency matrices.
-            A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
-            A_wave_2 = nx.adjacency_matrix(g2, self._edge_weight).todense().transpose()
-            if self._p == None: # p is uniform distribution as default.
-                kernel = self.__kernel_do(A_wave_1, A_wave_2, lmda)
-            else: # @todo
-                pass
+        # Reindex nodes using consecutive integers for the convenience of kernel computation.
+        g1 = nx.convert_node_labels_to_integers(g1, first_label=0, label_attribute='label_orignal')
+        g2 = nx.convert_node_labels_to_integers(g2, first_label=0, label_attribute='label_orignal')
+        if self._p is None and self._q is None: # p and q are uniform distributions as default.
+            kernel = self.__kernel_do(g1, g2, lmda)
         else: # @todo
             pass
         return kernel

-    def __kernel_do(self, A_wave1, A_wave2, lmda):
+    def __kernel_do(self, g1, g2, lmda):
-        S = lmda * A_wave2
-        T_t = A_wave1
+        # First, compute kernels between all pairs of nodes using the method
+        # borrowed from FCSP. It is faster than computing all edge kernels directly
+        # when $d_1 d_2 > 2$, where $d_1$ and $d_2$ are vertex degrees of the
+        # compared graphs, which is the case most of the time. For very sparse
+        # graphs, this would be slow.
+        vk_dict = self._compute_vertex_kernels(g1, g2)
+        # Compute the weight matrix of the direct product graph.
+        w_times, w_dim = self._compute_weight_matrix(g1, g2, vk_dict)
         # use uniform distribution if there is no prior knowledge.
-        nb_pd = len(A_wave1) * len(A_wave2)
-        p_times_uni = 1 / nb_pd
-        M0 = np.full((len(A_wave2), len(A_wave1)), p_times_uni)
-        X = dlyap(S, T_t, M0)
-        X = np.reshape(X, (-1, 1), order='F')
+        p_times_uni = 1 / w_dim
+        p_times = np.full((w_dim, 1), p_times_uni)
+        x = optimize.fixed_point(self._func_fp, p_times, args=(p_times, lmda, w_times), xtol=1e-06, maxiter=1000)
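+        # x solves the linear fixed-point system x = p_times + lmda * w_times @ x,
+        # i.e. x = (I - lmda * w_times)^{-1} p_times on the direct product graph.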
         # use uniform distribution if there is no prior knowledge.
-        q_times = np.full((1, nb_pd), p_times_uni)
-        return np.dot(q_times, X)
+        q_times = np.full((1, w_dim), p_times_uni)
+        return np.dot(q_times, x)

     def _wrapper_kernel_do(self, itr):
         i = itr[0]
         j = itr[1]
-        return i, j, self.__kernel_do(G_A_wave_list[i], G_A_wave_list[j], self._weight)
+        return i, j, self.__kernel_do(G_gn[i], G_gn[j], self._weight)

+    def _func_fp(self, x, p_times, lmda, w_times):
+        # One step of the fixed-point map x -> p + lmda * W x.
+        return p_times + lmda * np.dot(w_times, x)

+    def _compute_vertex_kernels(self, g1, g2):
+        """Compute vertex kernels between vertices of two graphs.
+        """
+        return compute_vertex_kernels(g1, g2, self._node_kernels, node_labels=self._node_labels, node_attrs=self._node_attrs)

+    # @todo: move if out to make it faster.
+    # @todo: node/edge kernels use direct function rather than dicts.
+    def _compute_weight_matrix(self, g1, g2, vk_dict):
+        """Compute the weight matrix of the direct product graph.
+        """
+        # Define edge kernels.
+        def compute_ek_11(e1, e2, ke):
+            e1_labels = [e1[2][el] for el in self._edge_labels]
+            e2_labels = [e2[2][el] for el in self._edge_labels]
+            e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
+            e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
+            return ke(e1_labels, e2_labels, e1_attrs, e2_attrs)
+        def compute_ek_10(e1, e2, ke):
+            e1_labels = [e1[2][el] for el in self._edge_labels]
+            e2_labels = [e2[2][el] for el in self._edge_labels]
+            return ke(e1_labels, e2_labels)
+        def compute_ek_01(e1, e2, ke):
+            e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
+            e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
+            return ke(e1_attrs, e2_attrs)
+        def compute_ek_00(e1, e2, ke):
+            return 1
+        # Select the proper edge kernel.
+        if len(self._edge_labels) > 0:
+            # edge symb and non-symb labeled
+            if len(self._edge_attrs) > 0:
+                ke = self._edge_kernels['mix']
+                ek_temp = compute_ek_11
+            # edge symb labeled
+            else:
+                ke = self._edge_kernels['symb']
+                ek_temp = compute_ek_10
+        else:
+            # edge non-symb labeled
+            if len(self._edge_attrs) > 0:
+                ke = self._edge_kernels['nsymb']
+                ek_temp = compute_ek_01
+            # edge unlabeled
+            else:
+                ke = None
+                ek_temp = compute_ek_00 # @todo: check how much slower this is.
+        # Compute the weight matrix.
+        w_dim = nx.number_of_nodes(g1) * nx.number_of_nodes(g2)
+        w_times = np.zeros((w_dim, w_dim))
+        if vk_dict: # node labeled
+            if self._ds_infos['directed']:
+                for e1 in g1.edges(data=True):
+                    for e2 in g2.edges(data=True):
+                        w_idx = (e1[0] * nx.number_of_nodes(g2) + e2[0], e1[1] * nx.number_of_nodes(g2) + e2[1])
+                        w_times[w_idx] = vk_dict[(e1[0], e2[0])] * ek_temp(e1, e2, ke) * vk_dict[(e1[1], e2[1])]
+            else: # undirected
+                for e1 in g1.edges(data=True):
+                    for e2 in g2.edges(data=True):
+                        w_idx = (e1[0] * nx.number_of_nodes(g2) + e2[0], e1[1] * nx.number_of_nodes(g2) + e2[1])
+                        w_times[w_idx] = vk_dict[(e1[0], e2[0])] * ek_temp(e1, e2, ke) * vk_dict[(e1[1], e2[1])] + vk_dict[(e1[0], e2[1])] * ek_temp(e1, e2, ke) * vk_dict[(e1[1], e2[0])]
+                        w_times[w_idx[1], w_idx[0]] = w_times[w_idx[0], w_idx[1]]
+                        w_idx2 = (e1[0] * nx.number_of_nodes(g2) + e2[1], e1[1] * nx.number_of_nodes(g2) + e2[0])
+                        w_times[w_idx2[0], w_idx2[1]] = w_times[w_idx[0], w_idx[1]]
+                        w_times[w_idx2[1], w_idx2[0]] = w_times[w_idx[0], w_idx[1]]
+        else: # node unlabeled
+            if self._ds_infos['directed']:
+                for e1 in g1.edges(data=True):
+                    for e2 in g2.edges(data=True):
+                        w_idx = (e1[0] * nx.number_of_nodes(g2) + e2[0], e1[1] * nx.number_of_nodes(g2) + e2[1])
+                        w_times[w_idx] = ek_temp(e1, e2, ke)
+            else: # undirected
+                for e1 in g1.edges(data=True):
+                    for e2 in g2.edges(data=True):
+                        w_idx = (e1[0] * nx.number_of_nodes(g2) + e2[0], e1[1] * nx.number_of_nodes(g2) + e2[1])
+                        w_times[w_idx] = ek_temp(e1, e2, ke)
+                        w_times[w_idx[1], w_idx[0]] = w_times[w_idx[0], w_idx[1]]
+                        w_idx2 = (e1[0] * nx.number_of_nodes(g2) + e2[1], e1[1] * nx.number_of_nodes(g2) + e2[0])
+                        w_times[w_idx2[0], w_idx2[1]] = w_times[w_idx[0], w_idx[1]]
+                        w_times[w_idx2[1], w_idx2[0]] = w_times[w_idx[0], w_idx[1]]
+        return w_times, w_dim
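
For reference, the refactored FixedPoint kernel evaluates the generalized random walk kernel q^T (I - lmda*W)^{-1} p on the direct product graph, where W is the weight matrix built by _compute_weight_matrix and p, q are uniform start/stop distributions; instead of inverting, it solves the fixed-point system x = p + lmda*W x with scipy.optimize.fixed_point. Below is a minimal standalone sketch for unlabeled, undirected graphs (all vertex and edge kernels equal to 1, so W reduces to the product graph's adjacency matrix); it assumes integer node ids 0..n-1, which the reindexing step above guarantees, and the names are illustrative rather than part of gklearn's API.

import numpy as np
import networkx as nx
from scipy import optimize

def fixed_point_rw_kernel(g1, g2, lmda=0.1):
    # Weight matrix of the direct product graph; for unlabeled graphs each
    # pair of matching edges contributes weight 1.
    n1, n2 = g1.number_of_nodes(), g2.number_of_nodes()
    w_dim = n1 * n2
    w = np.zeros((w_dim, w_dim))
    for u1, v1 in g1.edges():
        for u2, v2 in g2.edges():
            for a, b in [(u1 * n2 + u2, v1 * n2 + v2), (u1 * n2 + v2, v1 * n2 + u2)]:
                w[a, b] = 1
                w[b, a] = 1
    # Uniform start (p) and stop (q) distributions over product vertices.
    p = np.full((w_dim, 1), 1 / w_dim)
    q = np.full((1, w_dim), 1 / w_dim)
    # Solve x = p + lmda * W x by fixed-point iteration (lmda must be small
    # enough that lmda times the spectral radius of W stays below 1).
    func = lambda x, p, lmda, w: p + lmda * np.dot(w, x)
    x = optimize.fixed_point(func, p, args=(p, lmda, w), xtol=1e-6, maxiter=1000)
    return np.dot(q, x).item()

print(fixed_point_rw_kernel(nx.path_graph(3), nx.cycle_graph(3)))
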
@@ -16,19 +16,19 @@ import numpy as np
 import networkx as nx
 from scipy.sparse import kron
 from gklearn.utils.parallel import parallel_gm, parallel_me
-from gklearn.kernels import RandomWalk
+from gklearn.kernels import RandomWalkMeta

-class SpectralDecomposition(RandomWalk):
+class SpectralDecomposition(RandomWalkMeta):

     def __init__(self, **kwargs):
-        RandomWalk.__init__(self, **kwargs)
+        super().__init__(**kwargs)
         self._sub_kernel = kwargs.get('sub_kernel', None)

     def _compute_gm_series(self):
-        self._check_edge_weight(self._graphs)
+        self._check_edge_weight(self._graphs, self._verbose)
         self._check_graphs(self._graphs)
         if self._verbose >= 2:
             import warnings
@@ -37,7 +37,7 @@ class SpectralDecomposition(RandomWalk):
         # compute Gram matrix.
         gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
-        if self._q == None:
+        if self._q is None:
             # precompute the spectral decomposition of each graph.
             P_list = []
             D_list = []
@@ -54,14 +54,14 @@ class SpectralDecomposition(RandomWalk):
                 P_list.append(ev)
 #            P_inv_list = [p.T for p in P_list] # @todo: also works for directed graphs?
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 q_T_list = [np.full((1, nx.number_of_nodes(G)), 1 / nx.number_of_nodes(G)) for G in self._graphs]
 #                q_T_list = [q.T for q in q_list]
                 from itertools import combinations_with_replacement
                 itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
                 if self._verbose >= 2:
-                    iterator = tqdm(itr, desc='calculating kernels', file=sys.stdout)
+                    iterator = tqdm(itr, desc='Computing kernels', file=sys.stdout)
                 else:
                     iterator = itr
@@ -79,7 +79,7 @@ class SpectralDecomposition(RandomWalk):

     def _compute_gm_imap_unordered(self):
-        self._check_edge_weight(self._graphs)
+        self._check_edge_weight(self._graphs, self._verbose)
         self._check_graphs(self._graphs)
         if self._verbose >= 2:
             import warnings
@@ -88,7 +88,7 @@ class SpectralDecomposition(RandomWalk):
         # compute Gram matrix.
         gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
-        if self._q == None:
+        if self._q is None:
             # precompute the spectral decomposition of each graph.
             P_list = []
             D_list = []
@@ -104,7 +104,7 @@ class SpectralDecomposition(RandomWalk):
                 D_list.append(ew)
                 P_list.append(ev) # @todo: parallel?
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 q_T_list = [np.full((1, nx.number_of_nodes(G)), 1 / nx.number_of_nodes(G)) for G in self._graphs] # @todo: parallel?
                 def init_worker(q_T_list_toshare, P_list_toshare, D_list_toshare):
@@ -126,7 +126,7 @@ class SpectralDecomposition(RandomWalk):

     def _compute_kernel_list_series(self, g1, g_list):
-        self._check_edge_weight(g_list + [g1])
+        self._check_edge_weight(g_list + [g1], self._verbose)
         self._check_graphs(g_list + [g1])
         if self._verbose >= 2:
             import warnings
@@ -135,16 +135,16 @@ class SpectralDecomposition(RandomWalk):
         # compute kernel list.
         kernel_list = [None] * len(g_list)
-        if self._q == None:
+        if self._q is None:
             # precompute the spectral decomposition of each graph.
             A1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
             D1, P1 = np.linalg.eig(A1)
             P_list = []
             D_list = []
             if self._verbose >= 2:
-                iterator = tqdm(range(len(g_list)), desc='spectral decompose', file=sys.stdout)
+                iterator = tqdm(g_list, desc='spectral decompose', file=sys.stdout)
             else:
-                iterator = range(len(g_list))
+                iterator = g_list
             for G in iterator:
                 # don't normalize adjacency matrices if q is a uniform vector. Note
                 # A actually is the transpose of the adjacency matrix.
@@ -153,11 +153,11 @@ class SpectralDecomposition(RandomWalk):
                 D_list.append(ew)
                 P_list.append(ev)
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 q_T1 = 1 / nx.number_of_nodes(g1)
                 q_T_list = [np.full((1, nx.number_of_nodes(G)), 1 / nx.number_of_nodes(G)) for G in g_list]
                 if self._verbose >= 2:
-                    iterator = tqdm(range(len(g_list)), desc='calculating kernels', file=sys.stdout)
+                    iterator = tqdm(range(len(g_list)), desc='Computing kernels', file=sys.stdout)
                 else:
                     iterator = range(len(g_list))
@@ -174,7 +174,7 @@ class SpectralDecomposition(RandomWalk):

     def _compute_kernel_list_imap_unordered(self, g1, g_list):
-        self._check_edge_weight(g_list + [g1])
+        self._check_edge_weight(g_list + [g1], self._verbose)
         self._check_graphs(g_list + [g1])
         if self._verbose >= 2:
             import warnings
@@ -183,7 +183,7 @@ class SpectralDecomposition(RandomWalk):
         # compute kernel list.
         kernel_list = [None] * len(g_list)
-        if self._q == None:
+        if self._q is None:
             # precompute the spectral decomposition of each graph.
             A1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
             D1, P1 = np.linalg.eig(A1)
@@ -201,7 +201,7 @@ class SpectralDecomposition(RandomWalk):
                 D_list.append(ew)
                 P_list.append(ev) # @todo: parallel?
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 q_T1 = 1 / nx.number_of_nodes(g1)
                 q_T_list = [np.full((1, nx.number_of_nodes(G)), 1 / nx.number_of_nodes(G)) for G in g_list] # @todo: parallel?
@@ -221,7 +221,7 @@ class SpectralDecomposition(RandomWalk):
                 itr = range(len(g_list))
                 len_itr = len(g_list)
                 parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
-                    init_worker=init_worker, glbv=(q_T1, P1, D1, q_T_list, P_list, D_list), method='imap_unordered', n_jobs=self._n_jobs, itr_desc='calculating kernels', verbose=self._verbose)
+                    init_worker=init_worker, glbv=(q_T1, P1, D1, q_T_list, P_list, D_list), method='imap_unordered', n_jobs=self._n_jobs, itr_desc='Computing kernels', verbose=self._verbose)
             else: # @todo
                 pass
@@ -236,20 +236,20 @@ class SpectralDecomposition(RandomWalk):

     def _compute_single_kernel_series(self, g1, g2):
-        self._check_edge_weight([g1] + [g2])
+        self._check_edge_weight([g1] + [g2], self._verbose)
         self._check_graphs([g1] + [g2])
         if self._verbose >= 2:
             import warnings
             warnings.warn('All labels are ignored. Only works for undirected graphs.')
-        if self._q == None:
+        if self._q is None:
             # precompute the spectral decomposition of each graph.
             A1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
             D1, P1 = np.linalg.eig(A1)
             A2 = nx.adjacency_matrix(g2, self._edge_weight).todense().transpose()
             D2, P2 = np.linalg.eig(A2)
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 q_T1 = 1 / nx.number_of_nodes(g1)
                 q_T2 = 1 / nx.number_of_nodes(g2)
                 kernel = self.__kernel_do(q_T1, q_T2, P1, P2, D1, D2, self._weight, self._sub_kernel)
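
Much of this hunk set replaces `== None` with `is None` for self._p and self._q. The distinction matters because the start and stop distributions p and q may be NumPy arrays rather than None, and comparing an array to None with `==` is elementwise, so using it in an `if` raises an error. A small illustration, not gklearn code:

import numpy as np

q = np.full(4, 0.25)   # e.g. a stopping-probability vector
print(q is None)       # False -- identity test, always a single bool
print(q == None)       # [False False False False] -- elementwise comparison
try:
    if q == None:      # what the old comparison effectively does
        pass
except ValueError as err:
    print(err)         # "The truth value of an array ... is ambiguous"
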
@@ -16,18 +16,18 @@ import numpy as np
 import networkx as nx
 from control import dlyap
 from gklearn.utils.parallel import parallel_gm, parallel_me
-from gklearn.kernels import RandomWalk
+from gklearn.kernels import RandomWalkMeta

-class SylvesterEquation(RandomWalk):
+class SylvesterEquation(RandomWalkMeta):

     def __init__(self, **kwargs):
-        RandomWalk.__init__(self, **kwargs)
+        super().__init__(**kwargs)

     def _compute_gm_series(self):
-        self._check_edge_weight(self._graphs)
+        self._check_edge_weight(self._graphs, self._verbose)
         self._check_graphs(self._graphs)
         if self._verbose >= 2:
             import warnings
@@ -38,7 +38,7 @@ class SylvesterEquation(RandomWalk):
         # compute Gram matrix.
         gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
-        if self._q == None:
+        if self._q is None:
             # don't normalize adjacency matrices if q is a uniform vector. Note
             # A_wave_list actually contains the transposes of the adjacency matrices.
             if self._verbose >= 2:
@@ -54,16 +54,16 @@ class SylvesterEquation(RandomWalk):
 #                norm[norm == 0] = 1
 #                A_wave_list.append(A_tilde / norm)
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 from itertools import combinations_with_replacement
                 itr = combinations_with_replacement(range(0, len(self._graphs)), 2)
                 if self._verbose >= 2:
-                    iterator = tqdm(itr, desc='calculating kernels', file=sys.stdout)
+                    iterator = tqdm(itr, desc='Computing kernels', file=sys.stdout)
                 else:
                     iterator = itr
                 for i, j in iterator:
-                    kernel = self.__kernel_do(A_wave_list[i], A_wave_list[j], lmda)
+                    kernel = self._kernel_do(A_wave_list[i], A_wave_list[j], lmda)
                     gram_matrix[i][j] = kernel
                     gram_matrix[j][i] = kernel
@@ -76,7 +76,7 @@ class SylvesterEquation(RandomWalk):

     def _compute_gm_imap_unordered(self):
-        self._check_edge_weight(self._graphs)
+        self._check_edge_weight(self._graphs, self._verbose)
         self._check_graphs(self._graphs)
         if self._verbose >= 2:
             import warnings
@@ -85,7 +85,7 @@ class SylvesterEquation(RandomWalk):
         # compute Gram matrix.
         gram_matrix = np.zeros((len(self._graphs), len(self._graphs)))
-        if self._q == None:
+        if self._q is None:
             # don't normalize adjacency matrices if q is a uniform vector. Note
             # A_wave_list actually contains the transposes of the adjacency matrices.
             if self._verbose >= 2:
@@ -94,7 +94,7 @@ class SylvesterEquation(RandomWalk):
                 iterator = self._graphs
             A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator] # @todo: parallel?
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 def init_worker(A_wave_list_toshare):
                     global G_A_wave_list
                     G_A_wave_list = A_wave_list_toshare
@@ -113,7 +113,7 @@ class SylvesterEquation(RandomWalk):

     def _compute_kernel_list_series(self, g1, g_list):
-        self._check_edge_weight(g_list + [g1])
+        self._check_edge_weight(g_list + [g1], self._verbose)
         self._check_graphs(g_list + [g1])
         if self._verbose >= 2:
             import warnings
@@ -124,24 +124,24 @@ class SylvesterEquation(RandomWalk):
         # compute kernel list.
         kernel_list = [None] * len(g_list)
-        if self._q == None:
+        if self._q is None:
             # don't normalize adjacency matrices if q is a uniform vector. Note
             # A_wave_list actually contains the transposes of the adjacency matrices.
             A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
             if self._verbose >= 2:
-                iterator = tqdm(range(len(g_list)), desc='compute adjacency matrices', file=sys.stdout)
+                iterator = tqdm(g_list, desc='compute adjacency matrices', file=sys.stdout)
             else:
-                iterator = range(len(g_list))
+                iterator = g_list
             A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator]
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 if self._verbose >= 2:
-                    iterator = tqdm(range(len(g_list)), desc='calculating kernels', file=sys.stdout)
+                    iterator = tqdm(range(len(g_list)), desc='Computing kernels', file=sys.stdout)
                 else:
                     iterator = range(len(g_list))
                 for i in iterator:
-                    kernel = self.__kernel_do(A_wave_1, A_wave_list[i], lmda)
+                    kernel = self._kernel_do(A_wave_1, A_wave_list[i], lmda)
                     kernel_list[i] = kernel
             else: # @todo
@@ -153,7 +153,7 @@ class SylvesterEquation(RandomWalk):

     def _compute_kernel_list_imap_unordered(self, g1, g_list):
-        self._check_edge_weight(g_list + [g1])
+        self._check_edge_weight(g_list + [g1], self._verbose)
         self._check_graphs(g_list + [g1])
         if self._verbose >= 2:
             import warnings
@@ -162,17 +162,17 @@ class SylvesterEquation(RandomWalk):
         # compute kernel list.
         kernel_list = [None] * len(g_list)
-        if self._q == None:
+        if self._q is None:
             # don't normalize adjacency matrices if q is a uniform vector. Note
             # A_wave_list actually contains the transposes of the adjacency matrices.
             A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
             if self._verbose >= 2:
-                iterator = tqdm(range(len(g_list)), desc='compute adjacency matrices', file=sys.stdout)
+                iterator = tqdm(g_list, desc='compute adjacency matrices', file=sys.stdout)
             else:
-                iterator = range(len(g_list))
+                iterator = g_list
             A_wave_list = [nx.adjacency_matrix(G, self._edge_weight).todense().transpose() for G in iterator] # @todo: parallel?
-            if self._p == None: # p is uniform distribution as default.
+            if self._p is None: # p is uniform distribution as default.
                 def init_worker(A_wave_1_toshare, A_wave_list_toshare):
                     global G_A_wave_1, G_A_wave_list
                     G_A_wave_1 = A_wave_1_toshare
@@ -186,7 +186,7 @@ class SylvesterEquation(RandomWalk):
                 len_itr = len(g_list)
                 parallel_me(do_fun, func_assign, kernel_list, itr, len_itr=len_itr,
                     init_worker=init_worker, glbv=(A_wave_1, A_wave_list), method='imap_unordered',
-                    n_jobs=self._n_jobs, itr_desc='calculating kernels', verbose=self._verbose)
+                    n_jobs=self._n_jobs, itr_desc='Computing kernels', verbose=self._verbose)
             else: # @todo
                 pass
@@ -201,7 +201,7 @@ class SylvesterEquation(RandomWalk):

     def _compute_single_kernel_series(self, g1, g2):
-        self._check_edge_weight([g1] + [g2])
+        self._check_edge_weight([g1] + [g2], self._verbose)
         self._check_graphs([g1] + [g2])
         if self._verbose >= 2:
             import warnings
@@ -209,13 +209,13 @@ class SylvesterEquation(RandomWalk):
         lmda = self._weight
-        if self._q == None:
+        if self._q is None:
             # don't normalize adjacency matrices if q is a uniform vector. Note
             # A_wave_list actually contains the transposes of the adjacency matrices.
             A_wave_1 = nx.adjacency_matrix(g1, self._edge_weight).todense().transpose()
             A_wave_2 = nx.adjacency_matrix(g2, self._edge_weight).todense().transpose()
-            if self._p == None: # p is uniform distribution as default.
-                kernel = self.__kernel_do(A_wave_1, A_wave_2, lmda)
+            if self._p is None: # p is uniform distribution as default.
+                kernel = self._kernel_do(A_wave_1, A_wave_2, lmda)
             else: # @todo
                 pass
         else: # @todo
@@ -224,7 +224,7 @@ class SylvesterEquation(RandomWalk):
         return kernel

-    def __kernel_do(self, A_wave1, A_wave2, lmda):
+    def _kernel_do(self, A_wave1, A_wave2, lmda):
         S = lmda * A_wave2
         T_t = A_wave1
@@ -242,4 +242,4 @@ class SylvesterEquation(RandomWalk):
     def _wrapper_kernel_do(self, itr):
         i = itr[0]
         j = itr[1]
-        return i, j, self.__kernel_do(G_A_wave_list[i], G_A_wave_list[j], self._weight)
+        return i, j, self._kernel_do(G_A_wave_list[i], G_A_wave_list[j], self._weight)
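
The Sylvester-equation formulation used here (`S = lmda * A_wave2`, `T_t = A_wave1`, then dlyap) is one way of evaluating the same random walk kernel q^T (I - lmda*W)^{-1} p, with W the Kronecker product of the two (transposed) adjacency matrices; solving a small Sylvester equation avoids building the n1*n2 x n1*n2 product-graph system explicitly. A rough equivalence check for two tiny undirected graphs (so the transposes are immaterial), assuming python-control's dlyap(A, Q, C) solves A X Q^T - X + C = 0; this is a sketch, not gklearn code:

import numpy as np
import networkx as nx
from control import dlyap

A1 = nx.to_numpy_array(nx.path_graph(3)).T    # plays the role of A_wave_1
A2 = nx.to_numpy_array(nx.cycle_graph(3)).T   # plays the role of A_wave_2
lmda = 0.1
n = A1.shape[0] * A2.shape[0]

p = np.full((n, 1), 1 / n)    # uniform start distribution
q = np.full((1, n), 1 / n)    # uniform stop distribution

# Sylvester route: solve X = lmda * A2 @ X @ A1.T + M0, then k = q . vec(X).
M0 = np.full((A2.shape[0], A1.shape[0]), 1 / n)
X = dlyap(lmda * A2, A1, M0)
k_sylvester = (q @ np.reshape(X, (-1, 1), order='F')).item()

# Direct route on the product graph: k = q (I - lmda * (A1 kron A2))^{-1} p.
W = np.kron(A1, A2)
k_direct = (q @ np.linalg.solve(np.eye(n) - lmda * W, p)).item()

print(k_sylvester, k_direct)  # should agree up to numerical error under the stated dlyap convention
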