@@ -1,6 +1,6 @@
# About graph kernels.
## (Random walk) Sylvester equation kernel.
### ImportError: cannot import name 'frange' from 'matplotlib.mlab'
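This error comes from `frange` having been removed from `matplotlib.mlab` in recent Matplotlib releases. A minimal workaround sketch, assuming the calling code only needs the old inclusive float range (the helper below is illustrative and not part of this repository):

```python
import numpy as np

def frange(xini, xfin=None, delta=None):
    """Illustrative stand-in for the removed matplotlib.mlab.frange."""
    if xfin is None:
        xini, xfin = 0.0, xini
    if delta is None:
        delta = 1.0
    # Pad the end point so the range is inclusive, as mlab.frange's was.
    return np.arange(xini, xfin + delta, delta)
```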
@@ -18,4 +18,6 @@ Install MKL. Then add the following to your path:
export PATH=/opt/intel/bin:$PATH
export LD_LIBRARY_PATH=/opt/intel/lib/intel64:/opt/intel/mkl/lib/intel64:$LD_LIBRARY_PATH
export LD_PRELOAD=/opt/intel/mkl/lib/intel64/libmkl_def.so:/opt/intel/mkl/lib/intel64/libmkl_avx2.so:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_intel_lp64.so:/opt/intel/mkl/lib/intel64/libmkl_intel_thread.so:/opt/intel/lib/intel64_lin/libiomp5.so
```
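As a quick sanity check (not part of the original instructions), one can confirm from Python that the preloaded MKL libraries are actually mapped into the process, e.g. on Linux:

```python
# Assumed verification snippet: importing NumPy pulls in its BLAS/LAPACK
# backend, and /proc/self/maps lists every shared object loaded so far.
import numpy as np

with open('/proc/self/maps') as maps:
    mkl_libs = sorted({line.split()[-1] for line in maps if 'libmkl' in line})
print('\n'.join(mkl_libs) or 'no MKL libraries loaded')
```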
@@ -60,7 +60,7 @@ Check [`notebooks`](https://github.com/jajupmochi/graphkit-learn/tree/master/not
The docs of the library can be found [here](https://graphkit-learn.readthedocs.io/en/master/?badge=master).
## Main contents
### 1 List of graph kernels
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 21 10:34:26 2020

@author: ljia
"""
from utils import Graph_Kernel_List, Dataset_List, compute_graph_kernel
from gklearn.utils.graphdataset import load_predefined_dataset
import logging


def xp_runtimes_diff_chunksizes():
    # Run and save.
    import pickle
    import os
    save_dir = 'outputs/runtimes_diff_chunksizes/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    run_times = {}

    for kernel_name in Graph_Kernel_List:
        print()
        print('Kernel:', kernel_name)

        run_times[kernel_name] = []
        for ds_name in Dataset_List:
            print()
            print('Dataset:', ds_name)

            run_times[kernel_name].append([])
            for chunksize in [1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000]:
                print()
                print('Chunksize:', chunksize)

                # Get graphs.
                graphs, _ = load_predefined_dataset(ds_name)

                # Compute Gram matrix.
                run_time = 'error'
                try:
                    gram_matrix, run_time = compute_graph_kernel(graphs, kernel_name, chunksize=chunksize)
                except Exception as exp:
                    print('An exception occurred when running this experiment:')
                    LOG_FILENAME = save_dir + 'error.txt'
                    logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
                    logging.exception('')
                    print(repr(exp))
                run_times[kernel_name][-1].append(run_time)

                pickle.dump(run_time, open(save_dir + 'run_time.' + kernel_name + '.' + ds_name + '.' + str(chunksize) + '.pkl', 'wb'))

    # Save all.
    pickle.dump(run_times, open(save_dir + 'run_times.pkl', 'wb'))

    return


if __name__ == '__main__':
    xp_runtimes_diff_chunksizes()
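For completeness, a small sketch of how the pickles written above can be read back for inspection (the file layout follows the dumps in the script; the loop below is just an assumed way of looking at the results):

```python
import pickle
from utils import Dataset_List  # same module the experiment script uses

# run_times.pkl maps each kernel name to one list per dataset; each of those
# lists holds one runtime (or the string 'error') per chunksize tried above.
with open('outputs/runtimes_diff_chunksizes/run_times.pkl', 'rb') as f:
    run_times = pickle.load(f)

for kernel_name, per_dataset in run_times.items():
    for ds_name, times in zip(Dataset_List, per_dataset):
        print(kernel_name, ds_name, times)
```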
@@ -27,7 +27,7 @@ Graph_Kernel_List_ECon = ['ConjugateGradient', 'FixedPoint', 'StructuralSP']
Dataset_List = ['Alkane', 'Acyclic', 'MAO', 'PAH', 'MUTAG', 'Letter-med', 'ENZYMES', 'AIDS', 'NCI1', 'NCI109', 'DD']
def compute_graph_kernel(graphs, kernel_name, n_jobs=multiprocessing.cpu_count()):
def compute_graph_kernel(graphs, kernel_name, n_jobs=multiprocessing.cpu_count(), chunksize=None):
    if kernel_name == 'CommonWalk':
        from gklearn.kernels.commonWalkKernel import commonwalkkernel
@@ -105,6 +105,7 @@ def compute_graph_kernel(graphs, kernel_name, n_jobs=multiprocessing.cpu_count()
    # params['parallel'] = None
    params['n_jobs'] = n_jobs
    params['chunksize'] = chunksize
    params['verbose'] = True
    results = estimator(graphs, **params)
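For context, `chunksize` is simply forwarded to the kernel estimators through `params`; in multiprocessing-based parallelization it typically controls how many pairwise computations each worker receives per dispatch. An illustrative sketch of that general pattern (an assumption, not gklearn's actual implementation):

```python
import multiprocessing

def illustrative_parallel_map(func, jobs, n_jobs, chunksize):
    # A larger chunksize hands bigger batches of jobs to each worker process,
    # reducing scheduling overhead at the cost of coarser load balancing.
    with multiprocessing.Pool(processes=n_jobs) as pool:
        return list(pool.imap_unordered(func, jobs, chunksize=chunksize))
```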