@@ -60,7 +60,7 @@ class ConjugateGradient(RandomWalkMeta):
 iterator = itr
 for i, j in iterator:
-kernel = self.__kernel_do(self._graphs[i], self._graphs[j], lmda)
+kernel = self._kernel_do(self._graphs[i], self._graphs[j], lmda)
 gram_matrix[i][j] = kernel
 gram_matrix[j][i] = kernel
@@ -127,7 +127,7 @@ class ConjugateGradient(RandomWalkMeta):
 iterator = range(len(g_list))
 for i in iterator:
-kernel = self.__kernel_do(g1, g_list[i], lmda)
+kernel = self._kernel_do(g1, g_list[i], lmda)
 kernel_list[i] = kernel
 else: # @todo
@@ -190,7 +190,7 @@ class ConjugateGradient(RandomWalkMeta):
 g2 = nx.convert_node_labels_to_integers(g2, first_label=0, label_attribute='label_orignal')
 if self._p is None and self._q is None: # p and q are uniform distributions as default.
-kernel = self.__kernel_do(g1, g2, lmda)
+kernel = self._kernel_do(g1, g2, lmda)
 else: # @todo
 pass
@@ -198,7 +198,7 @@ class ConjugateGradient(RandomWalkMeta):
 return kernel
-def __kernel_do(self, g1, g2, lmda):
+def _kernel_do(self, g1, g2, lmda):
 # Frist, compute kernels between all pairs of nodes using the method borrowed
 # from FCSP. It is faster than directly computing all edge kernels
@@ -222,7 +222,7 @@ class ConjugateGradient(RandomWalkMeta):
 def _wrapper_kernel_do(self, itr):
 i = itr[0]
 j = itr[1]
-return i, j, self.__kernel_do(G_gn[i], G_gn[j], self._weight)
+return i, j, self._kernel_do(G_gn[i], G_gn[j], self._weight)
 def _func_fp(x, p_times, lmda, w_times):
@@ -246,19 +246,19 @@ class ConjugateGradient(RandomWalkMeta):
 # Define edge kernels.
 def compute_ek_11(e1, e2, ke):
 e1_labels = [e1[2][el] for el in self._edge_labels]
-e2_labels = [e2[2][el] for el in self.__edge_labels]
+e2_labels = [e2[2][el] for el in self._edge_labels]
 e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
 e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
 return ke(e1_labels, e2_labels, e1_attrs, e2_attrs)
 def compute_ek_10(e1, e2, ke):
-e1_labels = [e1[2][el] for el in self.__edge_labels]
-e2_labels = [e2[2][el] for el in self.__edge_labels]
+e1_labels = [e1[2][el] for el in self._edge_labels]
+e2_labels = [e2[2][el] for el in self._edge_labels]
 return ke(e1_labels, e2_labels)
 def compute_ek_01(e1, e2, ke):
-e1_attrs = [e1[2][ea] for ea in self.__edge_attrs]
-e2_attrs = [e2[2][ea] for ea in self.__edge_attrs]
+e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
+e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
 return ke(e1_attrs, e2_attrs)
 def compute_ek_00(e1, e2, ke):
@@ -60,7 +60,7 @@ class FixedPoint(RandomWalkMeta):
 iterator = itr
 for i, j in iterator:
-kernel = self.__kernel_do(self._graphs[i], self._graphs[j], lmda)
+kernel = self._kernel_do(self._graphs[i], self._graphs[j], lmda)
 gram_matrix[i][j] = kernel
 gram_matrix[j][i] = kernel
@@ -127,7 +127,7 @@ class FixedPoint(RandomWalkMeta):
 iterator = range(len(g_list))
 for i in iterator:
-kernel = self.__kernel_do(g1, g_list[i], lmda)
+kernel = self._kernel_do(g1, g_list[i], lmda)
 kernel_list[i] = kernel
 else: # @todo
@@ -190,7 +190,7 @@ class FixedPoint(RandomWalkMeta):
 g2 = nx.convert_node_labels_to_integers(g2, first_label=0, label_attribute='label_orignal')
 if self._p is None and self._q is None: # p and q are uniform distributions as default.
-kernel = self.__kernel_do(g1, g2, lmda)
+kernel = self._kernel_do(g1, g2, lmda)
 else: # @todo
 pass
@@ -198,7 +198,7 @@ class FixedPoint(RandomWalkMeta):
 return kernel
-def __kernel_do(self, g1, g2, lmda):
+def _kernel_do(self, g1, g2, lmda):
 # Frist, compute kernels between all pairs of nodes using the method borrowed
 # from FCSP. It is faster than directly computing all edge kernels
@@ -221,10 +221,10 @@ class FixedPoint(RandomWalkMeta):
 def _wrapper_kernel_do(self, itr):
 i = itr[0]
 j = itr[1]
-return i, j, self.__kernel_do(G_gn[i], G_gn[j], self._weight)
+return i, j, self._kernel_do(G_gn[i], G_gn[j], self._weight)
-def _func_fp(x, p_times, lmda, w_times):
+def _func_fp(self, x, p_times, lmda, w_times):
 haha = w_times * x
 haha = lmda * haha
 haha = p_times + haha
@@ -245,19 +245,19 @@ class FixedPoint(RandomWalkMeta):
 # Define edge kernels.
 def compute_ek_11(e1, e2, ke):
 e1_labels = [e1[2][el] for el in self._edge_labels]
-e2_labels = [e2[2][el] for el in self.__edge_labels]
+e2_labels = [e2[2][el] for el in self._edge_labels]
 e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
 e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
 return ke(e1_labels, e2_labels, e1_attrs, e2_attrs)
 def compute_ek_10(e1, e2, ke):
-e1_labels = [e1[2][el] for el in self.__edge_labels]
-e2_labels = [e2[2][el] for el in self.__edge_labels]
+e1_labels = [e1[2][el] for el in self._edge_labels]
+e2_labels = [e2[2][el] for el in self._edge_labels]
 return ke(e1_labels, e2_labels)
 def compute_ek_01(e1, e2, ke):
-e1_attrs = [e1[2][ea] for ea in self.__edge_attrs]
-e2_attrs = [e2[2][ea] for ea in self.__edge_attrs]
+e1_attrs = [e1[2][ea] for ea in self._edge_attrs]
+e2_attrs = [e2[2][ea] for ea in self._edge_attrs]
 return ke(e1_attrs, e2_attrs)
 def compute_ek_00(e1, e2, ke):
@@ -66,7 +66,7 @@ class SpectralDecomposition(RandomWalkMeta):
 iterator = itr
 for i, j in iterator:
-kernel = self.__kernel_do(q_T_list[i], q_T_list[j], P_list[i], P_list[j], D_list[i], D_list[j], self._weight, self._sub_kernel)
+kernel = self._kernel_do(q_T_list[i], q_T_list[j], P_list[i], P_list[j], D_list[i], D_list[j], self._weight, self._sub_kernel)
 gram_matrix[i][j] = kernel
 gram_matrix[j][i] = kernel
@@ -162,7 +162,7 @@ class SpectralDecomposition(RandomWalkMeta):
 iterator = range(len(g_list))
 for i in iterator:
-kernel = self.__kernel_do(q_T1, q_T_list[i], P1, P_list[i], D1, D_list[i], self._weight, self._sub_kernel)
+kernel = self._kernel_do(q_T1, q_T_list[i], P1, P_list[i], D1, D_list[i], self._weight, self._sub_kernel)
 kernel_list[i] = kernel
 else: # @todo
@@ -190,9 +190,9 @@ class SpectralDecomposition(RandomWalkMeta):
 P_list = []
 D_list = []
 if self._verbose >= 2:
-iterator = tqdm(range(len(g_list)), desc='spectral decompose', file=sys.stdout)
+iterator = tqdm(g_list, desc='spectral decompose', file=sys.stdout)
 else:
-iterator = range(len(g_list))
+iterator = g_list
 for G in iterator:
 # don't normalize adjacency matrices if q is a uniform vector. Note
 # A actually is the transpose of the adjacency matrix.
@@ -252,7 +252,7 @@ class SpectralDecomposition(RandomWalkMeta):
 if self._p is None: # p is uniform distribution as default.
 q_T1 = 1 / nx.number_of_nodes(g1)
 q_T2 = 1 / nx.number_of_nodes(g2)
-kernel = self.__kernel_do(q_T1, q_T2, P1, P2, D1, D2, self._weight, self._sub_kernel)
+kernel = self._kernel_do(q_T1, q_T2, P1, P2, D1, D2, self._weight, self._sub_kernel)
 else: # @todo
 pass
 else: # @todo
@@ -261,7 +261,7 @@ class SpectralDecomposition(RandomWalkMeta):
 return kernel
-def __kernel_do(self, q_T1, q_T2, P1, P2, D1, D2, weight, sub_kernel):
+def _kernel_do(self, q_T1, q_T2, P1, P2, D1, D2, weight, sub_kernel):
 # use uniform distribution if there is no prior knowledge.
 kl = kron(np.dot(q_T1, P1), np.dot(q_T2, P2)).todense()
 # @todo: this is not needed when p = q (kr = kl.T) for undirected graphs.
@@ -280,4 +280,4 @@ class SpectralDecomposition(RandomWalkMeta):
 def _wrapper_kernel_do(self, itr):
 i = itr[0]
 j = itr[1]
-return i, j, self.__kernel_do(G_q_T_list[i], G_q_T_list[j], G_P_list[i], G_P_list[j], G_D_list[i], G_D_list[j], self._weight, self._sub_kernel)
+return i, j, self._kernel_do(G_q_T_list[i], G_q_T_list[j], G_P_list[i], G_P_list[j], G_D_list[i], G_D_list[j], self._weight, self._sub_kernel)
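
Note on the rename pattern above: every hunk replaces a name-mangled double-underscore member (`self.__kernel_do`, `self.__edge_labels`, `self.__edge_attrs`) with its single-underscore counterpart, adds the missing `self` parameter on `FixedPoint._func_fp`, and iterates over `g_list` directly where the loop body expects graph objects. For the attribute accesses this reads as a genuine bug fix: inside, say, `ConjugateGradient`, `self.__edge_labels` is mangled to `self._ConjugateGradient__edge_labels`, which never matches an attribute a base class stored as `_edge_labels`. A minimal sketch of that Python behaviour, using hypothetical class names rather than gklearn code:

```python
class Meta:
    def __init__(self):
        # Base class stores the attribute under a single-underscore name.
        self._edge_labels = ['bond_type']


class Kernel(Meta):
    def labels_mangled(self):
        # Name mangling rewrites this lookup to self._Kernel__edge_labels,
        # so the attribute set by Meta is never found.
        return self.__edge_labels

    def labels_plain(self):
        # A single leading underscore is only a convention; normal lookup applies.
        return self._edge_labels


k = Kernel()
print(k.labels_plain())    # ['bond_type']
try:
    k.labels_mangled()
except AttributeError as err:
    print(err)             # 'Kernel' object has no attribute '_Kernel__edge_labels'
```

The method renames (`__kernel_do` to `_kernel_do`) do not trip over mangling when the method is defined and called in the same class; switching them to a single underscore mainly keeps the naming consistent and leaves the methods reachable from subclasses and wrapper code without the mangled name.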