Browse Source

Add test for four random walk kernel classes.

v0.2.x
jajupmochi 4 years ago
parent
commit
5bdfb68551
1 changed file with 240 additions and 240 deletions
  1. +240
    -240
      gklearn/tests/test_graph_kernels.py

+ 240
- 240
gklearn/tests/test_graph_kernels.py View File

@@ -52,63 +52,63 @@ def chooseDataset(ds_name):
return dataset


# @pytest.mark.parametrize('ds_name', ['Alkane', 'AIDS'])
# @pytest.mark.parametrize('weight,compute_method', [(0.01, 'geo'), (1, 'exp')])
# @pytest.mark.parametrize('parallel', ['imap_unordered', None])
# def test_CommonWalk(ds_name, parallel, weight, compute_method):
# """Test common walk kernel.
# """
# from gklearn.kernels import CommonWalk
# import networkx as nx
#
# dataset = chooseDataset(ds_name)
# dataset.load_graphs([g for g in dataset.graphs if nx.number_of_nodes(g) > 1])
#
# try:
# graph_kernel = CommonWalk(node_labels=dataset.node_labels,
# edge_labels=dataset.edge_labels,
# ds_infos=dataset.get_dataset_infos(keys=['directed']),
# weight=weight,
# compute_method=compute_method)
# gram_matrix, run_time = graph_kernel.compute(dataset.graphs,
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel_list, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1:],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
@pytest.mark.parametrize('ds_name', ['Alkane', 'AIDS'])
@pytest.mark.parametrize('weight,compute_method', [(0.01, 'geo'), (1, 'exp')])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
def test_CommonWalk(ds_name, parallel, weight, compute_method):
    """Test common walk kernel.

    Computes the full Gram matrix, the kernel list between one graph and
    the rest, and a single pairwise kernel; any raised exception fails
    the test.
    """
    from gklearn.kernels import CommonWalk
    import networkx as nx

    dataset = chooseDataset(ds_name)
    # The common walk kernel is only defined for graphs with more than
    # one node, so filter out trivial graphs first.
    dataset.load_graphs([g for g in dataset.graphs if nx.number_of_nodes(g) > 1])

    try:
        graph_kernel = CommonWalk(node_labels=dataset.node_labels,
                                  edge_labels=dataset.edge_labels,
                                  ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                  weight=weight,
                                  compute_method=compute_method)
        gram_matrix, run_time = graph_kernel.compute(
            dataset.graphs,
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel_list, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1:],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
    except Exception as exception:
        assert False, exception
@pytest.mark.parametrize('ds_name', ['Alkane', 'AIDS'])
@pytest.mark.parametrize('remove_totters', [False])  # [True, False])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
def test_Marginalized(ds_name, parallel, remove_totters):
    """Test marginalized kernel.

    Computes the full Gram matrix, the kernel list between one graph and
    the rest, and a single pairwise kernel; any raised exception fails
    the test.
    """
    from gklearn.kernels import Marginalized

    dataset = chooseDataset(ds_name)

    try:
        graph_kernel = Marginalized(node_labels=dataset.node_labels,
                                    edge_labels=dataset.edge_labels,
                                    ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                    p_quit=0.5,
                                    n_iteration=2,
                                    remove_totters=remove_totters)
        gram_matrix, run_time = graph_kernel.compute(
            dataset.graphs,
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel_list, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1:],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
    except Exception as exception:
        assert False, exception
@pytest.mark.parametrize('ds_name', ['Acyclic'])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
def test_SylvesterEquation(ds_name, parallel):
@@ -239,203 +239,203 @@ def test_SpectralDecomposition(ds_name, sub_kernel, parallel):
except Exception as exception:
assert False, exception
#
#
# # @pytest.mark.parametrize(
# # 'compute_method,ds_name,sub_kernel',
# # [
# # ('sylvester', 'Alkane', None),
# # ('conjugate', 'Alkane', None),
# # ('conjugate', 'AIDS', None),
# # ('fp', 'Alkane', None),
# # ('fp', 'AIDS', None),
# # ('spectral', 'Alkane', 'exp'),
# # ('spectral', 'Alkane', 'geo'),
# # ]
# # )
# # @pytest.mark.parametrize('parallel', ['imap_unordered', None])
# # def test_RandomWalk(ds_name, compute_method, sub_kernel, parallel):
# # """Test random walk kernel.
# # """
# # from gklearn.kernels import RandomWalk
# # from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
# # import functools
# #
# # dataset = chooseDataset(ds_name)
# @pytest.mark.parametrize(
# 'compute_method,ds_name,sub_kernel',
# [
# ('sylvester', 'Alkane', None),
# ('conjugate', 'Alkane', None),
# ('conjugate', 'AIDS', None),
# ('fp', 'Alkane', None),
# ('fp', 'AIDS', None),
# ('spectral', 'Alkane', 'exp'),
# ('spectral', 'Alkane', 'geo'),
# ]
# )
# @pytest.mark.parametrize('parallel', ['imap_unordered', None])
# def test_RandomWalk(ds_name, compute_method, sub_kernel, parallel):
# """Test random walk kernel.
# """
# from gklearn.kernels import RandomWalk
# from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
# import functools
#
# dataset = chooseDataset(ds_name)

# # mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
# # sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
# # # try:
# # graph_kernel = RandomWalk(node_labels=dataset.node_labels,
# # node_attrs=dataset.node_attrs,
# # edge_labels=dataset.edge_labels,
# # edge_attrs=dataset.edge_attrs,
# # ds_infos=dataset.get_dataset_infos(keys=['directed']),
# # compute_method=compute_method,
# # weight=1e-3,
# # p=None,
# # q=None,
# # edge_weight=None,
# # node_kernels=sub_kernels,
# # edge_kernels=sub_kernels,
# # sub_kernel=sub_kernel)
# # gram_matrix, run_time = graph_kernel.compute(dataset.graphs,
# # parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# # kernel_list, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1:],
# # parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# # kernel, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1],
# # parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
# sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
# # try:
# graph_kernel = RandomWalk(node_labels=dataset.node_labels,
# node_attrs=dataset.node_attrs,
# edge_labels=dataset.edge_labels,
# edge_attrs=dataset.edge_attrs,
# ds_infos=dataset.get_dataset_infos(keys=['directed']),
# compute_method=compute_method,
# weight=1e-3,
# p=None,
# q=None,
# edge_weight=None,
# node_kernels=sub_kernels,
# edge_kernels=sub_kernels,
# sub_kernel=sub_kernel)
# gram_matrix, run_time = graph_kernel.compute(dataset.graphs,
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel_list, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1:],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)

# # except Exception as exception:
# # assert False, exception
# except Exception as exception:
# assert False, exception

#
# @pytest.mark.parametrize('ds_name', ['Alkane', 'Acyclic', 'Letter-med', 'AIDS', 'Fingerprint'])
# @pytest.mark.parametrize('parallel', ['imap_unordered', None])
# def test_ShortestPath(ds_name, parallel):
# """Test shortest path kernel.
# """
# from gklearn.kernels import ShortestPath
# from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
# import functools
#
# dataset = chooseDataset(ds_name)
#
# mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
# sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
# try:
# graph_kernel = ShortestPath(node_labels=dataset.node_labels,
# node_attrs=dataset.node_attrs,
# ds_infos=dataset.get_dataset_infos(keys=['directed']),
# node_kernels=sub_kernels)
# gram_matrix, run_time = graph_kernel.compute(dataset.graphs,
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel_list, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1:],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
@pytest.mark.parametrize('ds_name', ['Alkane', 'Acyclic', 'Letter-med', 'AIDS', 'Fingerprint'])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
def test_ShortestPath(ds_name, parallel):
    """Test shortest path kernel.

    Computes the full Gram matrix, the kernel list between one graph and
    the rest, and a single pairwise kernel; any raised exception fails
    the test.
    """
    from gklearn.kernels import ShortestPath
    from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
    import functools

    dataset = chooseDataset(ds_name)

    # Sub-kernels per attribute kind: delta for symbolic labels, Gaussian
    # for non-symbolic attributes, their product for mixed labels.
    mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
    sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
    try:
        graph_kernel = ShortestPath(node_labels=dataset.node_labels,
                                    node_attrs=dataset.node_attrs,
                                    ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                    node_kernels=sub_kernels)
        gram_matrix, run_time = graph_kernel.compute(
            dataset.graphs,
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel_list, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1:],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
    except Exception as exception:
        assert False, exception


# #@pytest.mark.parametrize('ds_name', ['Alkane', 'Acyclic', 'Letter-med', 'AIDS', 'Fingerprint'])
# @pytest.mark.parametrize('ds_name', ['Alkane', 'Acyclic', 'Letter-med', 'AIDS', 'Fingerprint', 'Fingerprint_edge', 'Cuneiform'])
# @pytest.mark.parametrize('parallel', ['imap_unordered', None])
# def test_StructuralSP(ds_name, parallel):
# """Test structural shortest path kernel.
# """
# from gklearn.kernels import StructuralSP
# from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
# import functools
#
# dataset = chooseDataset(ds_name)
#
# mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
# sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
# try:
# graph_kernel = StructuralSP(node_labels=dataset.node_labels,
# edge_labels=dataset.edge_labels,
# node_attrs=dataset.node_attrs,
# edge_attrs=dataset.edge_attrs,
# ds_infos=dataset.get_dataset_infos(keys=['directed']),
# node_kernels=sub_kernels,
# edge_kernels=sub_kernels)
# gram_matrix, run_time = graph_kernel.compute(dataset.graphs,
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel_list, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1:],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# @pytest.mark.parametrize('ds_name', ['Alkane', 'Acyclic', 'Letter-med', 'AIDS', 'Fingerprint'])
@pytest.mark.parametrize('ds_name', ['Alkane', 'Acyclic', 'Letter-med', 'AIDS', 'Fingerprint', 'Fingerprint_edge', 'Cuneiform'])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
def test_StructuralSP(ds_name, parallel):
    """Test structural shortest path kernel.

    Computes the full Gram matrix, the kernel list between one graph and
    the rest, and a single pairwise kernel; any raised exception fails
    the test.
    """
    from gklearn.kernels import StructuralSP
    from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
    import functools

    dataset = chooseDataset(ds_name)

    # Sub-kernels per attribute kind: delta for symbolic labels, Gaussian
    # for non-symbolic attributes, their product for mixed labels.
    mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
    sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
    try:
        graph_kernel = StructuralSP(node_labels=dataset.node_labels,
                                    edge_labels=dataset.edge_labels,
                                    node_attrs=dataset.node_attrs,
                                    edge_attrs=dataset.edge_attrs,
                                    ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                    node_kernels=sub_kernels,
                                    edge_kernels=sub_kernels)
        gram_matrix, run_time = graph_kernel.compute(
            dataset.graphs,
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel_list, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1:],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
    except Exception as exception:
        assert False, exception


# @pytest.mark.parametrize('ds_name', ['Alkane', 'AIDS'])
# @pytest.mark.parametrize('parallel', ['imap_unordered', None])
# #@pytest.mark.parametrize('k_func', ['MinMax', 'tanimoto', None])
# @pytest.mark.parametrize('k_func', ['MinMax', 'tanimoto'])
# @pytest.mark.parametrize('compute_method', ['trie', 'naive'])
# def test_PathUpToH(ds_name, parallel, k_func, compute_method):
# """Test path kernel up to length $h$.
# """
# from gklearn.kernels import PathUpToH
#
# dataset = chooseDataset(ds_name)
#
# try:
# graph_kernel = PathUpToH(node_labels=dataset.node_labels,
# edge_labels=dataset.edge_labels,
# ds_infos=dataset.get_dataset_infos(keys=['directed']),
# depth=2, k_func=k_func, compute_method=compute_method)
# gram_matrix, run_time = graph_kernel.compute(dataset.graphs,
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel_list, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1:],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# kernel, run_time = graph_kernel.compute(dataset.graphs[0], dataset.graphs[1],
# parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
# except Exception as exception:
# assert False, exception
#
#
# @pytest.mark.parametrize('ds_name', ['Alkane', 'AIDS'])
# @pytest.mark.parametrize('parallel', ['imap_unordered', None])
# def test_Treelet(ds_name, parallel):
# """Test treelet kernel.
# """
# from gklearn.kernels import Treelet
# from gklearn.utils.kernels import polynomialkernel
# import functools
#
# dataset = chooseDataset(ds_name)
@pytest.mark.parametrize('ds_name', ['Alkane', 'AIDS'])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
# @pytest.mark.parametrize('k_func', ['MinMax', 'tanimoto', None])
@pytest.mark.parametrize('k_func', ['MinMax', 'tanimoto'])
@pytest.mark.parametrize('compute_method', ['trie', 'naive'])
def test_PathUpToH(ds_name, parallel, k_func, compute_method):
    """Test path kernel up to length $h$.

    Computes the full Gram matrix, the kernel list between one graph and
    the rest, and a single pairwise kernel; any raised exception fails
    the test.
    """
    from gklearn.kernels import PathUpToH

    dataset = chooseDataset(ds_name)

    try:
        graph_kernel = PathUpToH(node_labels=dataset.node_labels,
                                 edge_labels=dataset.edge_labels,
                                 ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                 depth=2, k_func=k_func, compute_method=compute_method)
        gram_matrix, run_time = graph_kernel.compute(
            dataset.graphs,
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel_list, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1:],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
    except Exception as exception:
        assert False, exception
@pytest.mark.parametrize('ds_name', ['Alkane', 'AIDS'])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
def test_Treelet(ds_name, parallel):
    """Test treelet kernel.

    Computes the full Gram matrix, the kernel list between one graph and
    the rest, and a single pairwise kernel; any raised exception fails
    the test.
    """
    from gklearn.kernels import Treelet
    from gklearn.utils.kernels import polynomialkernel
    import functools

    dataset = chooseDataset(ds_name)

    # Polynomial sub-kernel applied to the treelet count vectors.
    pkernel = functools.partial(polynomialkernel, d=2, c=1e5)
    try:
        graph_kernel = Treelet(node_labels=dataset.node_labels,
                               edge_labels=dataset.edge_labels,
                               ds_infos=dataset.get_dataset_infos(keys=['directed']),
                               sub_kernel=pkernel)
        gram_matrix, run_time = graph_kernel.compute(
            dataset.graphs,
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel_list, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1:],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
    except Exception as exception:
        assert False, exception
@pytest.mark.parametrize('ds_name', ['Acyclic'])
# @pytest.mark.parametrize('base_kernel', ['subtree', 'sp', 'edge'])
# @pytest.mark.parametrize('base_kernel', ['subtree'])
@pytest.mark.parametrize('parallel', ['imap_unordered', None])
def test_WLSubtree(ds_name, parallel):
    """Test Weisfeiler-Lehman subtree kernel.

    Computes the full Gram matrix, the kernel list between one graph and
    the rest, and a single pairwise kernel; any raised exception fails
    the test.
    """
    from gklearn.kernels import WLSubtree

    dataset = chooseDataset(ds_name)

    try:
        graph_kernel = WLSubtree(node_labels=dataset.node_labels,
                                 edge_labels=dataset.edge_labels,
                                 ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                 height=2)
        gram_matrix, run_time = graph_kernel.compute(
            dataset.graphs,
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel_list, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1:],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
        kernel, run_time = graph_kernel.compute(
            dataset.graphs[0], dataset.graphs[1],
            parallel=parallel, n_jobs=multiprocessing.cpu_count(), verbose=True)
    except Exception as exception:
        assert False, exception

if __name__ == "__main__":
    # Ad-hoc entry point for running a single test case without pytest.
    # test_spkernel('Alkane', 'imap_unordered')
    test_StructuralSP('Fingerprint_edge', 'imap_unordered')
    # test_RandomWalk('Acyclic', 'sylvester', None, 'imap_unordered')
    # test_RandomWalk('Acyclic', 'conjugate', None, 'imap_unordered')
    # test_RandomWalk('Acyclic', 'fp', None, None)
    # test_RandomWalk('Acyclic', 'spectral', 'exp', 'imap_unordered')

Loading…
Cancel
Save