
Add k-nn test.

Branch: v0.2.x
jajupmochi committed 5 years ago
commit 7e94ecaf31
12 changed files with 756 additions and 33 deletions:

1. gklearn/__init__.py (+1, -1)
2. gklearn/ged/util/util.py (+10, -3)
3. gklearn/kernels/graph_kernel.py (+3, -0)
4. gklearn/preimage/__init__.py (+1, -0)
5. gklearn/preimage/experiments/xp_1nn.py (+103, -0)
6. gklearn/preimage/kernel_knn_cv.py (+418, -0)
7. gklearn/preimage/median_preimage_generator.py (+2, -2)
8. gklearn/preimage/utils.py (+27, -25)
9. gklearn/utils/__init__.py (+2, -0)
10. gklearn/utils/dataset.py (+19, -1)
11. gklearn/utils/knn.py (+141, -0)
12. gklearn/utils/utils.py (+29, -1)

gklearn/__init__.py (+1, -1)

@@ -18,4 +18,4 @@ __date__ = "November 2017"
# import sub modules
# from gklearn import c_ext
# from gklearn import ged
from gklearn import utils
# from gklearn import utils

gklearn/ged/util/util.py (+10, -3)

@@ -46,7 +46,7 @@ def compute_ged(g1, g2, options):
return dis, pi_forward, pi_backward


def compute_geds(graphs, options={}, parallel=False):
def compute_geds(graphs, options={}, parallel=False, verbose=True):
# initialize ged env.
ged_env = gedlibpy.GEDEnv()
ged_env.set_edit_cost(options['edit_cost'], edit_cost_constant=options['edit_cost_constants'])
@@ -81,8 +81,11 @@ def compute_geds(graphs, options={}, parallel=False):
G_listID = listID_toshare
do_partial = partial(_wrapper_compute_ged_parallel, neo_options)
pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(graphs, ged_env, listID))
iterator = tqdm(pool.imap_unordered(do_partial, itr, chunksize),
if verbose:
iterator = tqdm(pool.imap_unordered(do_partial, itr, chunksize),
desc='computing GEDs', file=sys.stdout)
else:
iterator = pool.imap_unordered(do_partial, itr, chunksize)
# iterator = pool.imap_unordered(do_partial, itr, chunksize)
for i, j, dis, n_eo_tmp in iterator:
idx_itr = int(len(graphs) * i + j - (i + 1) * (i + 2) / 2)
@@ -98,7 +101,11 @@ def compute_geds(graphs, options={}, parallel=False):
else:
ged_vec = []
n_edit_operations = []
for i in tqdm(range(len(graphs)), desc='computing GEDs', file=sys.stdout):
if verbose:
iterator = tqdm(range(len(graphs)), desc='computing GEDs', file=sys.stdout)
else:
iterator = range(len(graphs))
for i in iterator:
# for i in range(len(graphs)):
for j in range(i + 1, len(graphs)):
dis, pi_forward, pi_backward = _compute_ged(ged_env, listID[i], listID[j], graphs[i], graphs[j])
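
The pattern introduced above, gating the tqdm progress bar behind the new verbose flag, as a self-contained sketch (the function name is illustrative, not part of gklearn):

import sys
from tqdm import tqdm

def process_all(items, verbose=True):
    # Wrap the iterable in a progress bar only when verbose output is requested;
    # otherwise iterate silently, as compute_geds now does in both branches.
    iterator = tqdm(items, desc='processing', file=sys.stdout) if verbose else items
    total = 0
    for item in iterator:
        total += item
    return total

print(process_all(range(100), verbose=False))  # 4950, printed with no progress bar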


gklearn/kernels/graph_kernel.py (+3, -0)

@@ -67,6 +67,9 @@ class GraphKernel(object):
def normalize_gm(self, gram_matrix):
import warnings
warnings.warn('gklearn.kernels.graph_kernel.normalize_gm will be deprecated, use gklearn.utils.normalize_gram_matrix instead', DeprecationWarning)

diag = gram_matrix.diagonal().copy()
for i in range(len(gram_matrix)):
for j in range(i, len(gram_matrix)):


gklearn/preimage/__init__.py (+1, -0)

@@ -12,3 +12,4 @@ __date__ = "March 2020"

from gklearn.preimage.preimage_generator import PreimageGenerator
from gklearn.preimage.median_preimage_generator import MedianPreimageGenerator
from gklearn.preimage.kernel_knn_cv import kernel_knn_cv

gklearn/preimage/experiments/xp_1nn.py (+103, -0)

@@ -0,0 +1,103 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 14:15:11 2020

@author: ljia
"""
import functools
import multiprocessing
import os
import sys
import logging
from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
from gklearn.preimage import kernel_knn_cv

dir_root = '../results/xp_1nn.init1.no_triangle_rule.allow_zeros/'
num_random = 10
initial_solutions = 1
triangle_rule = False
allow_zeros = True
update_order = False
test_sizes = [0.9, 0.7] # , 0.5, 0.3, 0.1]

def xp_knn_1_1():
for test_size in test_sizes:
ds_name = 'Letter-high'
knn_options = {'n_neighbors': 1,
'n_splits': 30,
'test_size': test_size,
'verbose': True}
mpg_options = {'fit_method': 'k-graphs',
'init_ecc': [0.675, 0.675, 0.75, 0.425, 0.425],
'ds_name': ds_name,
'parallel': True, # False
'time_limit_in_sec': 0,
'max_itrs': 100,
'max_itrs_without_update': 3,
'epsilon_residual': 0.01,
'epsilon_ec': 0.1,
'allow_zeros': allow_zeros,
'triangle_rule': triangle_rule,
'verbose': 1}
mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
kernel_options = {'name': 'StructuralSP',
'edge_weight': None,
'node_kernels': sub_kernels,
'edge_kernels': sub_kernels,
'compute_method': 'naive',
'parallel': 'imap_unordered',
# 'parallel': None,
'n_jobs': multiprocessing.cpu_count(),
'normalize': True,
'verbose': 0}
ged_options = {'method': 'IPFP',
'initialization_method': 'RANDOM', # 'NODE'
'initial_solutions': initial_solutions, # 1
'edit_cost': 'LETTER2',
'attr_distance': 'euclidean',
'ratio_runs_from_initial_solutions': 1,
'threads': multiprocessing.cpu_count(),
'init_option': 'EAGER_WITHOUT_SHUFFLED_COPIES'}
mge_options = {'init_type': 'MEDOID',
'random_inits': 10,
'time_limit': 0,
'verbose': 1,
'update_order': update_order,
'randomness': 'REAL',
'refine': False}
save_results = True
dir_save = dir_root + ds_name + '.' + kernel_options['name'] + '/' + ('update_order/' if update_order else '')
if not os.path.exists(dir_save):
os.makedirs(dir_save)
file_output = open(dir_save + 'output.txt', 'a')
# sys.stdout = file_output

# print settings.
print('parameters:')
print('dataset name:', ds_name)
print('mpg_options:', mpg_options)
print('kernel_options:', kernel_options)
print('ged_options:', ged_options)
print('mge_options:', mge_options)
print('save_results:', save_results)
for train_examples in ['k-graphs', 'expert', 'random', 'best-dataset', 'trainset']:
# for train_examples in ['expert']:
print('\n-------------------------------------')
print('train examples used:', train_examples, '\n')
mpg_options['fit_method'] = train_examples
# try:
kernel_knn_cv(ds_name, train_examples, knn_options, mpg_options, kernel_options, ged_options, mge_options, save_results=save_results, load_gm='auto', dir_save=dir_save, irrelevant_labels=None, edge_required=False, cut_range=None)
# except Exception as exp:
# print('An exception occurred when running this experiment:')
# LOG_FILENAME = dir_save + 'error.txt'
# logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
# logging.exception('')
# print(repr(exp))


if __name__ == '__main__':
xp_knn_1_1()
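
To reproduce, run the script directly, or import it (a sketch assuming the experiments sub-package is importable and the GEDLIB bindings are built):

# python3 xp_1nn.py
from gklearn.preimage.experiments.xp_1nn import xp_knn_1_1
xp_knn_1_1()  # results are written under dir_root defined at the top of the script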

gklearn/preimage/kernel_knn_cv.py (+418, -0)

@@ -0,0 +1,418 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 12:52:15 2020

@author: ljia
"""
import numpy as np
import csv
import os
import os.path
from gklearn.utils import Dataset
from sklearn.model_selection import ShuffleSplit
from gklearn.preimage import MedianPreimageGenerator
from gklearn.utils import normalize_gram_matrix, compute_distance_matrix
from gklearn.preimage.utils import get_same_item_indices
from gklearn.utils.knn import knn_classification
from gklearn.preimage.utils import compute_k_dis

def kernel_knn_cv(ds_name, train_examples, knn_options, mpg_options, kernel_options, ged_options, mge_options, save_results=True, load_gm='auto', dir_save='', irrelevant_labels=None, edge_required=False, cut_range=None):
# 1. get dataset.
print('1. getting dataset...')
dataset_all = Dataset()
dataset_all.load_predefined_dataset(ds_name)
dataset_all.trim_dataset(edge_required=edge_required)
if irrelevant_labels is not None:
dataset_all.remove_labels(**irrelevant_labels)
if cut_range is not None:
dataset_all.cut_graphs(cut_range)
# datasets = split_dataset_by_target(dataset_all)

if save_results:
# create result files.
print('creating output files...')
fn_output_detail, fn_output_summary = __init_output_file_knn(ds_name, kernel_options['name'], mpg_options['fit_method'], dir_save)
else:
fn_output_detail, fn_output_summary = None, None
# 2. compute/load Gram matrix a priori.
print('2. computing/loading Gram matrix...')
gram_matrix_unnorm, time_precompute_gm = __get_gram_matrix(load_gm, dir_save, ds_name, kernel_options, dataset_all)
# 3. perform k-nn CV.
print('3. performing k-nn CV...')
if train_examples == 'k-graphs' or train_examples == 'expert' or train_examples == 'random':
__kernel_knn_cv_median(dataset_all, ds_name, knn_options, mpg_options, kernel_options, mge_options, ged_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary)
elif train_examples == 'best-dataset':
__kernel_knn_cv_best_ds(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary)
elif train_examples == 'trainset':
__kernel_knn_cv_trainset(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary)

print('\ncomplete.\n')
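
A minimal call sketch for the 'trainset' branch, which needs no median computation; the kernel settings mirror xp_1nn.py above, and the empty dicts are an assumption that the mpg/ged/mge options stay unused on this path when save_results=False:

import functools
import multiprocessing
from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
from gklearn.preimage import kernel_knn_cv

mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}
kernel_options = {'name': 'StructuralSP', 'edge_weight': None,
                  'node_kernels': sub_kernels, 'edge_kernels': sub_kernels,
                  'compute_method': 'naive', 'parallel': 'imap_unordered',
                  'n_jobs': multiprocessing.cpu_count(), 'normalize': True, 'verbose': 0}
knn_options = {'n_neighbors': 1, 'n_splits': 30, 'test_size': 0.9, 'verbose': True}
kernel_knn_cv('Letter-high', 'trainset', knn_options, {}, kernel_options, {}, {},
              save_results=False, load_gm='auto', dir_save='')
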
def __kernel_knn_cv_median(dataset_all, ds_name, knn_options, mpg_options, kernel_options, mge_options, ged_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary):
Gn = dataset_all.graphs
y_all = dataset_all.targets
n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

# get shuffles.
train_indices, test_indices, train_nums, y_app = __get_shuffles(y_all, n_splits, test_size)
accuracies = [[], [], []]
for trial in range(len(train_indices)):
print('\ntrial =', trial)
train_index = train_indices[trial]
test_index = test_indices[trial]
G_app = [Gn[i] for i in train_index]
G_test = [Gn[i] for i in test_index]
y_test = [y_all[i] for i in test_index]
gm_unnorm_trial = gram_matrix_unnorm[train_index,:][:,train_index].copy()
# compute pre-images for each class.
medians = [[], [], []]
train_nums_tmp = [0] + train_nums
print('\ncomputing pre-image for each class...\n')
for i_class in range(len(train_nums_tmp) - 1):
print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes:')
i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
i_end = i_start + train_nums_tmp[i_class + 1]
median_set = G_app[i_start:i_end]
dataset = dataset_all.copy()
dataset.load_graphs(median_set.copy(), targets=None)
mge_options['update_order'] = True
mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[i_start:i_end,i_start:i_end].copy()
mpg_options['runtime_precompute_gm'] = 0
set_median, gen_median_uo = __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options)
mge_options['update_order'] = False
mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[i_start:i_end,i_start:i_end].copy()
mpg_options['runtime_precompute_gm'] = 0
_, gen_median = __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options)
medians[0].append(set_median)
medians[1].append(gen_median)
medians[2].append(gen_median_uo)
# for each set of medians.
print('\nperforming k-nn...')
for i_app, G_app in enumerate(medians):
# compute dis_mat between medians.
dataset = dataset_all.copy()
dataset.load_graphs(G_app.copy(), targets=None)
gm_app_unnorm, _ = __compute_gram_matrix_unnorm(dataset, kernel_options.copy())
# compute the entire Gram matrix.
graph_kernel = __get_graph_kernel(dataset.copy(), kernel_options.copy())
kernels_to_medians = []
for g in G_app:
kernels_to_median, _ = graph_kernel.compute(g, G_test, **kernel_options.copy())
kernels_to_medians.append(kernels_to_median)
kernels_to_medians = np.array(kernels_to_medians)
gm_all = np.concatenate((gm_app_unnorm, kernels_to_medians), axis=1)
gm_all = np.concatenate((gm_all, np.concatenate((kernels_to_medians.T, gram_matrix_unnorm[test_index,:][:,test_index].copy()), axis=1)), axis=0)
gm_all = normalize_gram_matrix(gm_all.copy())
dis_mat, _, _, _ = compute_distance_matrix(gm_all)
N = len(G_app)
d_app = dis_mat[range(N),:][:,range(N)].copy()
d_test = np.zeros((N, len(test_index)))
for i in range(N):
for j in range(len(test_index)):
d_test[i, j] = dis_mat[i, N + j] # test graphs occupy columns N..N+len(test_index)-1 of gm_all
accuracies[i_app].append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))
# write result detail.
if save_results:
f_detail = open(dir_save + fn_output_detail, 'a')
print('writing results to files...')
for i, median_type in enumerate(['set-median', 'gen median', 'gen median uo']):
csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
train_examples + ': ' + median_type, trial,
knn_options['n_neighbors'],
len(gm_all), knn_options['test_size'],
accuracies[i][-1][0], accuracies[i][-1][1]])
f_detail.close()
results = {}
results['ave_perf_train'] = [np.mean([i[0] for i in j], axis=0) for j in accuracies]
results['std_perf_train'] = [np.std([i[0] for i in j], axis=0, ddof=1) for j in accuracies]
results['ave_perf_test'] = [np.mean([i[1] for i in j], axis=0) for j in accuracies]
results['std_perf_test'] = [np.std([i[1] for i in j], axis=0, ddof=1) for j in accuracies]

# write result summary for each letter.
if save_results:
f_summary = open(dir_save + fn_output_summary, 'a')
for i, median_type in enumerate(['set-median', 'gen median', 'gen median uo']):
csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
train_examples + ': ' + median_type,
knn_options['n_neighbors'],
knn_options['test_size'], results['ave_perf_train'][i],
results['ave_perf_test'][i], results['std_perf_train'][i],
results['std_perf_test'][i], time_precompute_gm])
f_summary.close()
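
The Gram-matrix stitching inside the trial loop above, as a standalone numpy sketch: the app block, the test block, and the cross kernels form one symmetric matrix, so row i < N addresses a median and row N + j addresses test graph j (dimensions are illustrative):

import numpy as np

N, M = 3, 5                     # number of medians and of test graphs
K_app = np.eye(N)               # median-vs-median kernels
K_test = np.eye(M)              # test-vs-test kernels
K_cross = np.full((N, M), 0.1)  # median-vs-test kernels
K_all = np.block([[K_app, K_cross],
                  [K_cross.T, K_test]])
assert K_all.shape == (N + M, N + M)
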
def __kernel_knn_cv_best_ds(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary):
Gn = dataset_all.graphs
y_all = dataset_all.targets
n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

# get shuffles.
train_indices, test_indices, train_nums, y_app = __get_shuffles(y_all, n_splits, test_size)
accuracies = []
for trial in range(len(train_indices)):
print('\ntrial =', trial)
train_index = train_indices[trial]
test_index = test_indices[trial]
G_app = [Gn[i] for i in train_index]
G_test = [Gn[i] for i in test_index]
y_test = [y_all[i] for i in test_index]
gm_unnorm_trial = gram_matrix_unnorm[train_index,:][:,train_index].copy()
# get best graph from trainset according to distance in kernel space for each class.
best_graphs = []
train_nums_tmp = [0] + train_nums
print('\ngetting best graph from trainset for each class...')
for i_class in range(len(train_nums_tmp) - 1):
print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes.')
i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
i_end = i_start + train_nums_tmp[i_class + 1]
G_class = G_app[i_start:i_end]
gm_unnorm_class = gm_unnorm_trial[i_start:i_end,i_start:i_end]
gm_class = normalize_gram_matrix(gm_unnorm_class.copy())
k_dis_list = []
for idx in range(len(G_class)):
k_dis_list.append(compute_k_dis(idx, range(0, len(G_class)), [1 / len(G_class)] * len(G_class), gm_class, withterm3=False))
idx_k_dis_min = np.argmin(k_dis_list)
best_graphs.append(G_class[idx_k_dis_min].copy())
# perform k-nn.
print('\nperforming k-nn...')
# compute dis_mat between medians.
dataset = dataset_all.copy()
dataset.load_graphs(best_graphs.copy(), targets=None)
gm_app_unnorm, _ = __compute_gram_matrix_unnorm(dataset, kernel_options.copy())
# compute the entire Gram matrix.
graph_kernel = __get_graph_kernel(dataset.copy(), kernel_options.copy())
kernels_to_best_graphs = []
for g in best_graphs:
kernels_to_best_graph, _ = graph_kernel.compute(g, G_test, **kernel_options.copy())
kernels_to_best_graphs.append(kernels_to_best_graph)
kernels_to_best_graphs = np.array(kernels_to_best_graphs)
gm_all = np.concatenate((gm_app_unnorm, kernels_to_best_graphs), axis=1)
gm_all = np.concatenate((gm_all, np.concatenate((kernels_to_best_graphs.T, gram_matrix_unnorm[test_index,:][:,test_index].copy()), axis=1)), axis=0)
gm_all = normalize_gram_matrix(gm_all.copy())
dis_mat, _, _, _ = compute_distance_matrix(gm_all)
N = len(best_graphs)
d_app = dis_mat[range(N),:][:,range(N)].copy()
d_test = np.zeros((N, len(test_index)))
for i in range(N):
for j in range(len(test_index)):
d_test[i, j] = dis_mat[i, N + j] # test graphs occupy columns N..N+len(test_index)-1 of gm_all
accuracies.append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))
# write result detail.
if save_results:
f_detail = open(dir_save + fn_output_detail, 'a')
print('writing results to files...')
csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
train_examples, trial,
knn_options['n_neighbors'],
len(gm_all), knn_options['test_size'],
accuracies[-1][0], accuracies[-1][1]])
f_detail.close()
results = {}
results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
results['std_perf_train'] = np.std([i[0] for i in accuracies], axis=0, ddof=1)
results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
results['std_perf_test'] = np.std([i[1] for i in accuracies], axis=0, ddof=1)
# write result summary for each letter.
if save_results:
f_summary = open(dir_save + fn_output_summary, 'a')
csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
train_examples,
knn_options['n_neighbors'],
knn_options['test_size'], results['ave_perf_train'],
results['ave_perf_test'], results['std_perf_train'],
results['std_perf_test'], time_precompute_gm])
f_summary.close()
def __kernel_knn_cv_trainset(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary):
y_all = dataset_all.targets
n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']
# compute distance matrix.
gram_matrix = normalize_gram_matrix(gram_matrix_unnorm.copy())
dis_mat, _, _, _ = compute_distance_matrix(gram_matrix)

# get shuffles.
train_indices, test_indices, _, _ = __get_shuffles(y_all, n_splits, test_size)
accuracies = []
for trial in range(len(train_indices)):
print('\ntrial =', trial)
train_index = train_indices[trial]
test_index = test_indices[trial]
y_app = [y_all[i] for i in train_index]
y_test = [y_all[i] for i in test_index]
N = len(train_index)
d_app = dis_mat[train_index,:][:,train_index].copy()
d_test = np.zeros((N, len(test_index)))
for i in range(N):
for j in range(len(test_index)):
d_test[i, j] = dis_mat[train_index[i], test_index[j]]
accuracies.append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))
# write result detail.
if save_results:
print('writing results to files...')
f_detail = open(dir_save + fn_output_detail, 'a')
csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
train_examples, trial, knn_options['n_neighbors'],
len(gram_matrix), knn_options['test_size'],
accuracies[-1][0], accuracies[-1][1]])
f_detail.close()
results = {}
results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
results['std_perf_train'] = np.std([i[0] for i in accuracies], axis=0, ddof=1)
results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
results['std_perf_test'] = np.std([i[1] for i in accuracies], axis=0, ddof=1)

# write result summary for each letter.
if save_results:
f_summary = open(dir_save + fn_output_summary, 'a')
csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
train_examples, knn_options['n_neighbors'],
knn_options['test_size'], results['ave_perf_train'],
results['ave_perf_test'], results['std_perf_train'],
results['std_perf_test'], time_precompute_gm])
f_summary.close()
def __get_shuffles(y_all, n_splits, test_size):
rs = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=0)
train_indices = [[] for _ in range(n_splits)]
test_indices = [[] for _ in range(n_splits)]
idx_targets = get_same_item_indices(y_all)
train_nums = []
keys = []
for key, item in idx_targets.items():
i = 0
for train_i, test_i in rs.split(item): # @todo: careful when parallel.
train_indices[i] += [item[idx] for idx in train_i]
test_indices[i] += [item[idx] for idx in test_i]
i += 1
train_nums.append(len(train_i))
keys.append(key)
return train_indices, test_indices, train_nums, keys
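
What __get_shuffles does, reduced to toy data: each class is split separately with the same ShuffleSplit, and the per-class indices are concatenated, so every split keeps the class proportions of the whole set:

from sklearn.model_selection import ShuffleSplit

y_all = [0, 0, 0, 1, 1, 1]
idx_by_class = {0: [0, 1, 2], 1: [3, 4, 5]}  # as returned by get_same_item_indices
n_splits = 2
rs = ShuffleSplit(n_splits=n_splits, test_size=1/3, random_state=0)
train_indices = [[] for _ in range(n_splits)]
test_indices = [[] for _ in range(n_splits)]
for key, item in idx_by_class.items():
    for i, (train_i, test_i) in enumerate(rs.split(item)):
        train_indices[i] += [item[idx] for idx in train_i]
        test_indices[i] += [item[idx] for idx in test_i]
print(train_indices[0], test_indices[0])  # two graphs per class train, one per class tests
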
def __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options):
mpg = MedianPreimageGenerator()
mpg.dataset = dataset.copy()
mpg.set_options(**mpg_options.copy())
mpg.kernel_options = kernel_options.copy()
mpg.ged_options = ged_options.copy()
mpg.mge_options = mge_options.copy()
mpg.run()
return mpg.set_median, mpg.gen_median


def __get_gram_matrix(load_gm, dir_save, ds_name, kernel_options, dataset_all):
if load_gm == 'auto':
gm_fname = dir_save + 'gram_matrix_unnorm.' + ds_name + '.' + kernel_options['name'] + '.gm.npz'
gmfile_exist = os.path.isfile(os.path.abspath(gm_fname))
if gmfile_exist:
gmfile = np.load(gm_fname, allow_pickle=True) # @todo: may not be safe.
gram_matrix_unnorm = gmfile['gram_matrix_unnorm']
time_precompute_gm = float(gmfile['run_time'])
else:
gram_matrix_unnorm, time_precompute_gm = __compute_gram_matrix_unnorm(dataset_all, kernel_options)
np.savez(dir_save + 'gram_matrix_unnorm.' + ds_name + '.' + kernel_options['name'] + '.gm', gram_matrix_unnorm=gram_matrix_unnorm, run_time=time_precompute_gm)
elif not load_gm:
gram_matrix_unnorm, time_precompute_gm = __compute_gram_matrix_unnorm(dataset_all, kernel_options)
np.savez(dir_save + 'gram_matrix_unnorm.' + ds_name + '.' + kernel_options['name'] + '.gm', gram_matrix_unnorm=gram_matrix_unnorm, run_time=time_precompute_gm)
else:
gmfile = np.load(load_gm, allow_pickle=True) # assumes load_gm holds the path to a previously saved .npz; np.load() without a path raises
gram_matrix_unnorm = gmfile['gram_matrix_unnorm']
time_precompute_gm = float(gmfile['run_time'])
return gram_matrix_unnorm, time_precompute_gm
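
The .npz caching convention used above, round-tripped on a toy matrix (numpy appends .npz on save, which is why the load path differs from the save stem):

import numpy as np

gm = np.eye(3)
np.savez('gram_matrix_unnorm.Toy.Kernel.gm', gram_matrix_unnorm=gm, run_time=1.23)
gmfile = np.load('gram_matrix_unnorm.Toy.Kernel.gm.npz', allow_pickle=True)
print(float(gmfile['run_time']), gmfile['gram_matrix_unnorm'].shape)  # 1.23 (3, 3)
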


def __get_graph_kernel(dataset, kernel_options):
from gklearn.utils.utils import get_graph_kernel_by_name
graph_kernel = get_graph_kernel_by_name(kernel_options['name'],
node_labels=dataset.node_labels,
edge_labels=dataset.edge_labels,
node_attrs=dataset.node_attrs,
edge_attrs=dataset.edge_attrs,
ds_infos=dataset.get_dataset_infos(keys=['directed']),
kernel_options=kernel_options)
return graph_kernel
def __compute_gram_matrix_unnorm(dataset, kernel_options):
from gklearn.utils.utils import get_graph_kernel_by_name
graph_kernel = get_graph_kernel_by_name(kernel_options['name'],
node_labels=dataset.node_labels,
edge_labels=dataset.edge_labels,
node_attrs=dataset.node_attrs,
edge_attrs=dataset.edge_attrs,
ds_infos=dataset.get_dataset_infos(keys=['directed']),
kernel_options=kernel_options)
gram_matrix, run_time = graph_kernel.compute(dataset.graphs, **kernel_options)
gram_matrix_unnorm = graph_kernel.gram_matrix_unnorm
return gram_matrix_unnorm, run_time
def __init_output_file_knn(ds_name, gkernel, fit_method, dir_output):
if not os.path.exists(dir_output):
os.makedirs(dir_output)
fn_output_detail = 'results_detail_knn.' + ds_name + '.' + gkernel + '.csv'
f_detail = open(dir_output + fn_output_detail, 'a')
csv.writer(f_detail).writerow(['dataset', 'graph kernel',
'train examples', 'trial', 'num neighbors', 'num graphs', 'test size',
'perf train', 'perf test'])
f_detail.close()
fn_output_summary = 'results_summary_knn.' + ds_name + '.' + gkernel + '.csv'
f_summary = open(dir_output + fn_output_summary, 'a')
csv.writer(f_summary).writerow(['dataset', 'graph kernel',
'train examples', 'num neighbors', 'test size',
'ave perf train', 'ave perf test',
'std perf train', 'std perf test', 'time precompute gm'])
f_summary.close()
return fn_output_detail, fn_output_summary

gklearn/preimage/median_preimage_generator.py (+2, -2)

@@ -281,7 +281,7 @@ class MedianPreimageGenerator(PreimageGenerator):
options['edge_labels'] = self._dataset.edge_labels
options['node_attrs'] = self._dataset.node_attrs
options['edge_attrs'] = self._dataset.edge_attrs
ged_vec_init, ged_mat, n_edit_operations = compute_geds(graphs, options=options, parallel=self.__parallel)
ged_vec_init, ged_mat, n_edit_operations = compute_geds(graphs, options=options, parallel=self.__parallel, verbose=(self._verbose > 1))
residual_list = [np.sqrt(np.sum(np.square(np.array(ged_vec_init) - dis_k_vec)))]
time_list = [time.time() - time0]
edit_cost_list = [self.__init_ecc]
@@ -323,7 +323,7 @@ class MedianPreimageGenerator(PreimageGenerator):
options['edge_labels'] = self._dataset.edge_labels
options['node_attrs'] = self._dataset.node_attrs
options['edge_attrs'] = self._dataset.edge_attrs
ged_vec, ged_mat, n_edit_operations = compute_geds(graphs, options=options, parallel=self.__parallel)
ged_vec, ged_mat, n_edit_operations = compute_geds(graphs, options=options, parallel=self.__parallel, verbose=(self._verbose > 1))
residual_list.append(np.sqrt(np.sum(np.square(np.array(ged_vec) - dis_k_vec))))
time_list.append(time.time() - time0)
edit_cost_list.append(self.__edit_cost_constants)


gklearn/preimage/utils.py (+27, -25)

@@ -45,7 +45,7 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
if save_results:
# create result files.
print('creating output files...')
fn_output_detail, fn_output_summary = __init_output_file(ds_name, kernel_options['name'], mpg_options['fit_method'], dir_save)
fn_output_detail, fn_output_summary = __init_output_file_preimage(ds_name, kernel_options['name'], mpg_options['fit_method'], dir_save)
sod_sm_list = []
sod_gm_list = []
@@ -82,22 +82,22 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
gram_matrix_unnorm_list = []
time_precompute_gm_list = []
else:
gmfile = np.load()
gram_matrix_unnorm_list = gmfile['gram_matrix_unnorm_list']
time_precompute_gm_list = gmfile['run_time_list']
# repeats_better_sod_sm2gm = []
# repeats_better_dis_k_sm2gm = []
# repeats_better_dis_k_gi2sm = []
# repeats_better_dis_k_gi2gm = []
gmfile = np.load(gm_fname, allow_pickle=True) # @todo: may not be safe.
gram_matrix_unnorm_list = [item for item in gmfile['gram_matrix_unnorm_list']]
time_precompute_gm_list = gmfile['run_time_list'].tolist()
print('start generating preimage for each class of target...')
print('starting to generate preimage for each class of target...')
idx_offset = 0
for idx, dataset in enumerate(datasets):
target = dataset.targets[0]
print('\ntarget =', target, '\n')
# if target != 1:
# continue
num_graphs = len(dataset.graphs)
if num_graphs < 2:
@@ -148,7 +148,7 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
results['sod_set_median'], results['sod_gen_median'],
results['k_dis_set_median'], results['k_dis_gen_median'],
results['k_dis_dataset'], sod_sm2gm, dis_k_sm2gm,
dis_k_gi2sm, dis_k_gi2gm, results['edit_cost_constants'],
results['runtime_precompute_gm'], results['runtime_optimize_ec'],
results['runtime_generate_preimage'], results['runtime_total'],
results['itrs'], results['converged'],
@@ -177,7 +177,7 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
# # SOD SM -> GM
if results['sod_set_median'] > results['sod_gen_median']:
nb_sod_sm2gm[0] += 1
# repeats_better_sod_sm2gm.append(1)
elif results['sod_set_median'] == results['sod_gen_median']:
nb_sod_sm2gm[1] += 1
elif results['sod_set_median'] < results['sod_gen_median']:
@@ -185,7 +185,7 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
# # dis_k SM -> GM
if results['k_dis_set_median'] > results['k_dis_gen_median']:
nb_dis_k_sm2gm[0] += 1
# repeats_better_dis_k_sm2gm.append(1)
elif results['k_dis_set_median'] == results['k_dis_gen_median']:
nb_dis_k_sm2gm[1] += 1
elif results['k_dis_set_median'] < results['k_dis_gen_median']:
@@ -193,7 +193,7 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
# # dis_k gi -> SM
if results['k_dis_dataset'] > results['k_dis_set_median']:
nb_dis_k_gi2sm[0] += 1
# repeats_better_dis_k_gi2sm.append(1)
elif results['k_dis_dataset'] == results['k_dis_set_median']:
nb_dis_k_gi2sm[1] += 1
elif results['k_dis_dataset'] < results['k_dis_set_median']:
@@ -201,7 +201,7 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
# # dis_k gi -> GM
if results['k_dis_dataset'] > results['k_dis_gen_median']:
nb_dis_k_gi2gm[0] += 1
# repeats_better_dis_k_gi2gm.append(1)
elif results['k_dis_dataset'] == results['k_dis_gen_median']:
nb_dis_k_gi2gm[1] += 1
elif results['k_dis_dataset'] < results['k_dis_gen_median']:
@@ -225,7 +225,7 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
results['mge']['num_increase_order'] > 0,
results['mge']['num_converged_descents'] > 0,
nb_sod_sm2gm,
nb_dis_k_sm2gm, nb_dis_k_gi2sm, nb_dis_k_gi2gm])
f_summary.close()
# save median graphs.
@@ -235,15 +235,15 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
print('Saving median graphs to files...')
fn_pre_sm = dir_save + 'medians/set_median.' + mpg_options['fit_method'] + '.nbg' + str(num_graphs) + '.y' + str(target) + '.repeat' + str(1)
saveGXL(mpg.set_median, fn_pre_sm + '.gxl', method='default',
node_labels=dataset.node_labels, edge_labels=dataset.edge_labels,
node_attrs=dataset.node_attrs, edge_attrs=dataset.edge_attrs)
fn_pre_gm = dir_save + 'medians/gen_median.' + mpg_options['fit_method'] + '.nbg' + str(num_graphs) + '.y' + str(target) + '.repeat' + str(1)
saveGXL(mpg.gen_median, fn_pre_gm + '.gxl', method='default',
node_labels=dataset.node_labels, edge_labels=dataset.edge_labels,
node_attrs=dataset.node_attrs, edge_attrs=dataset.edge_attrs)
fn_best_dataset = dir_save + 'medians/g_best_dataset.' + mpg_options['fit_method'] + '.nbg' + str(num_graphs) + '.y' + str(target) + '.repeat' + str(1)
saveGXL(mpg.best_from_dataset, fn_best_dataset + '.gxl', method='default',
node_labels=dataset.node_labels, edge_labels=dataset.edge_labels,
node_attrs=dataset.node_attrs, edge_attrs=dataset.edge_attrs)
# plot median graphs.
@@ -304,10 +304,10 @@ def generate_median_preimages_by_class(ds_name, mpg_options, kernel_options, ged
if (load_gm == 'auto' and not gmfile_exist) or not load_gm:
np.savez(dir_save + 'gram_matrix_unnorm.' + ds_name + '.' + kernel_options['name'] + '.gm', gram_matrix_unnorm_list=gram_matrix_unnorm_list, run_time_list=time_precompute_gm_list)

print('\ncomplete.')
print('\ncomplete.\n')

def __init_output_file(ds_name, gkernel, fit_method, dir_output):
def __init_output_file_preimage(ds_name, gkernel, fit_method, dir_output):
if not os.path.exists(dir_output):
os.makedirs(dir_output)
# fn_output_detail = 'results_detail.' + ds_name + '.' + gkernel + '.' + fit_method + '.csv'
@@ -335,9 +335,9 @@ def __init_output_file(ds_name, gkernel, fit_method, dir_output):
'num updates ecc', 'mge num decrease order', 'mge num increase order',
'mge num converged', '# SOD SM -> GM', '# dis_k SM -> GM',
'# dis_k gi -> SM', '# dis_k gi -> GM'])
# 'repeats better SOD SM -> GM',
# 'repeats better dis_k SM -> GM', 'repeats better dis_k gi -> SM',
# 'repeats better dis_k gi -> GM'])
f_summary.close()
return fn_output_detail, fn_output_summary
@@ -462,6 +462,8 @@ def gram2distances(Kmatrix):

def kernel_distance_matrix(Gn, node_label, edge_label, Kmatrix=None,
gkernel=None, verbose=True):
import warnings
warnings.warn('gklearn.preimage.utils.kernel_distance_matrix is deprecated, use gklearn.kernels.graph_kernel.compute_distance_matrix or gklearn.utils.compute_distance_matrix instead', DeprecationWarning)
dis_mat = np.empty((len(Gn), len(Gn)))
if Kmatrix is None:
Kmatrix = compute_kernel(Gn, gkernel, node_label, edge_label, verbose)


gklearn/utils/__init__.py (+2, -0)

@@ -21,4 +21,6 @@ from gklearn.utils.timer import Timer
from gklearn.utils.utils import get_graph_kernel_by_name
from gklearn.utils.utils import compute_gram_matrices_by_class
from gklearn.utils.utils import SpecialLabel
from gklearn.utils.utils import normalize_gram_matrix, compute_distance_matrix
from gklearn.utils.trie import Trie
from gklearn.utils.knn import knn_cv, knn_classification

gklearn/utils/dataset.py (+19, -1)

@@ -522,6 +522,20 @@ class Dataset(object):
self.__targets = [self.__targets[i] for i in idx]
self.clean_labels()
def copy(self):
dataset = Dataset()
graphs = self.__graphs.copy() if self.__graphs is not None else None
target = self.__targets.copy() if self.__targets is not None else None
node_labels = self.__node_labels.copy() if self.__node_labels is not None else None
node_attrs = self.__node_attrs.copy() if self.__node_attrs is not None else None
edge_labels = self.__edge_labels.copy() if self.__edge_labels is not None else None
edge_attrs = self.__edge_attrs.copy() if self.__edge_attrs is not None else None
dataset.load_graphs(graphs, target)
dataset.set_labels(node_labels=node_labels, node_attrs=node_attrs, edge_labels=edge_labels, edge_attrs=edge_attrs)
# @todo: clean_labels and add other class members?
return dataset
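
A usage sketch for the new copy(); note it is a shallow copy, so the graph list is duplicated while the networkx graph objects are presumably still shared (consistent with the @todo above):

import networkx as nx
from gklearn.utils import Dataset

ds = Dataset()
ds.load_graphs([nx.path_graph(3)], targets=[0])
ds2 = ds.copy()
print(ds2.graphs is ds.graphs)        # False: the list itself is duplicated
print(ds2.graphs[0] is ds.graphs[0])  # expected True: graphs are not deep-copied
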
def __get_dataset_size(self):
return len(self.__graphs)
@@ -721,7 +735,11 @@ def split_dataset_by_target(dataset):
sub_graphs = [graphs[i] for i in val]
sub_dataset = Dataset()
sub_dataset.load_graphs(sub_graphs, [key] * len(val))
sub_dataset.set_labels(node_labels=dataset.node_labels, node_attrs=dataset.node_attrs, edge_labels=dataset.edge_labels, edge_attrs=dataset.edge_attrs)
node_labels = dataset.node_labels.copy() if dataset.node_labels is not None else None
node_attrs = dataset.node_attrs.copy() if dataset.node_attrs is not None else None
edge_labels = dataset.edge_labels.copy() if dataset.edge_labels is not None else None
edge_attrs = dataset.edge_attrs.copy() if dataset.edge_attrs is not None else None
sub_dataset.set_labels(node_labels=node_labels, node_attrs=node_attrs, edge_labels=edge_labels, edge_attrs=edge_attrs)
datasets.append(sub_dataset)
# @todo: clean_labels?
return datasets

gklearn/utils/knn.py (+141, -0)

@@ -0,0 +1,141 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 11:03:01 2020

@author: ljia
"""
import numpy as np
from sklearn.model_selection import ShuffleSplit
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from gklearn.utils.utils import get_graph_kernel_by_name
# from gklearn.preimage.utils import get_same_item_indices

def sum_squares(a, b):
"""
Return the sum of squared differences between a and b (the SSE, not the MSE).
"""
return np.sum([(a[i] - b[i])**2 for i in range(len(a))])


def euclid_d(x, y):
"""
1-D Euclidean distance; in one dimension this equals the absolute difference.
"""
return np.sqrt((x-y)**2)


def man_d(x, y):
"""
1-D Manhattan distance, i.e. the absolute difference.
"""
return np.abs((x-y))


def knn_regression(D_app, D_test, y_app, y_test, n_neighbors, verbose=True, text=None):

from sklearn.neighbors import KNeighborsRegressor
knn = KNeighborsRegressor(n_neighbors=n_neighbors, metric='precomputed')
knn.fit(D_app, y_app)
y_pred = knn.predict(D_app)
y_pred_test = knn.predict(D_test.T)
perf_app = np.sqrt(sum_squares(y_pred, y_app)/len(y_app))
perf_test = np.sqrt(sum_squares(y_pred_test, y_test)/len(y_test))

if (verbose):
print("Learning error with {} train examples : {}".format(text, perf_app))
print("Test error with {} train examples : {}".format(text, perf_test))

return perf_app, perf_test


def knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=None):
knn = KNeighborsClassifier(n_neighbors=n_neighbors, metric='precomputed')
knn.fit(d_app, y_app)
y_pred = knn.predict(d_app)
y_pred_test = knn.predict(d_test.T)
perf_app = accuracy_score(y_app, y_pred)
perf_test = accuracy_score(y_test, y_pred_test)

if (verbose):
print("Learning accuracy with {} costs : {}".format(text, perf_app))
print("Test accuracy with {} costs : {}".format(text, perf_test))
return perf_app, perf_test
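
A toy run of knn_classification with a hand-built precomputed distance matrix: four train points in two classes, two test points; rows of d_test are train points and columns are test points, which is why the function transposes it before predicting:

import numpy as np
from gklearn.utils.knn import knn_classification

d_app = np.array([[0., 1., 5., 6.],
                  [1., 0., 5., 6.],
                  [5., 5., 0., 1.],
                  [6., 6., 1., 0.]])
d_test = np.array([[0.5, 5.5],
                   [1.5, 5.5],
                   [5.5, 0.5],
                   [6.5, 1.5]])
y_app, y_test = [0, 0, 1, 1], [0, 1]
perf_app, perf_test = knn_classification(d_app, d_test, y_app, y_test,
                                         n_neighbors=1, verbose=False)
print(perf_app, perf_test)  # 1.0 1.0 on this cleanly separable toy example
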

def knn_cv(dataset, kernel_options, trainset=None, n_neighbors=1, n_splits=50, test_size=0.9, verbose=True):
'''
Perform a knn classification cross-validation on given dataset.
'''
# Gn = dataset.graphs
y_all = dataset.targets
# compute kernel distances.
dis_mat = __compute_kernel_distances(dataset, kernel_options, trainset=trainset)
rs = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=0)
# train_indices = [[] for _ in range(n_splits)]
# test_indices = [[] for _ in range(n_splits)]
# idx_targets = get_same_item_indices(y_all)
# for key, item in idx_targets.items():
# i = 0
# for train_i, test_i in rs.split(item): # @todo: careful when parallel.
# train_indices[i] += [item[idx] for idx in train_i]
# test_indices[i] += [item[idx] for idx in test_i]
# i += 1
accuracies = []
# for trial in range(len(train_indices)):
# train_index = train_indices[trial]
# test_index = test_indices[trial]
for train_index, test_index in rs.split(y_all):
# print(train_index, test_index)
# G_app = [Gn[i] for i in train_index]
# G_test = [Gn[i] for i in test_index]
y_app = [y_all[i] for i in train_index]
y_test = [y_all[i] for i in test_index]
N = len(train_index)
d_app = dis_mat.copy()
d_app = d_app[train_index,:]
d_app = d_app[:,train_index]
d_test = np.zeros((N, len(test_index)))
for i in range(N):
for j in range(len(test_index)):
d_test[i, j] = dis_mat[train_index[i], test_index[j]]
accuracies.append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=verbose, text=''))
results = {}
results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
results['std_perf_train'] = np.std([i[0] for i in accuracies], axis=0, ddof=1)
results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
results['std_perf_test'] = np.std([i[1] for i in accuracies], axis=0, ddof=1)

return results
def __compute_kernel_distances(dataset, kernel_options, trainset=None):
graph_kernel = get_graph_kernel_by_name(kernel_options['name'],
node_labels=dataset.node_labels,
edge_labels=dataset.edge_labels,
node_attrs=dataset.node_attrs,
edge_attrs=dataset.edge_attrs,
ds_infos=dataset.get_dataset_infos(keys=['directed']),
kernel_options=kernel_options)
gram_matrix, run_time = graph_kernel.compute(dataset.graphs, **kernel_options)

dis_mat, _, _, _ = graph_kernel.compute_distance_matrix()
if trainset is not None:
gram_matrix_unnorm = graph_kernel.gram_matrix_unnorm # @todo: currently unused; trainset handling appears unfinished

return dis_mat

gklearn/utils/utils.py (+29, -1)

@@ -467,9 +467,37 @@ def get_mlti_dim_edge_attrs(G, attr_names):
attributes.append(tuple(attrs[aname] for aname in attr_names))
return attributes


@unique
class SpecialLabel(Enum):
"""can be used to define special labels.
"""
DUMMY = 1 # The dummy label.
# DUMMY = auto # enum.auto does not exist in Python 3.5.
def normalize_gram_matrix(gram_matrix):
diag = gram_matrix.diagonal().copy()
for i in range(len(gram_matrix)):
for j in range(i, len(gram_matrix)):
gram_matrix[i][j] /= np.sqrt(diag[i] * diag[j])
gram_matrix[j][i] = gram_matrix[i][j]
return gram_matrix
def compute_distance_matrix(gram_matrix):
dis_mat = np.empty((len(gram_matrix), len(gram_matrix)))
for i in range(len(gram_matrix)):
for j in range(i, len(gram_matrix)):
dis = gram_matrix[i, i] + gram_matrix[j, j] - 2 * gram_matrix[i, j]
if dis < 0:
if dis > -1e-10:
dis = 0
else:
raise ValueError('The distance is negative.')
dis_mat[i, j] = np.sqrt(dis)
dis_mat[j, i] = dis_mat[i, j]
dis_max = np.max(np.max(dis_mat))
dis_min = np.min(np.min(dis_mat[dis_mat != 0]))
dis_mean = np.mean(np.mean(dis_mat))
return dis_mat, dis_max, dis_min, dis_mean
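
The two new helpers combine as below; the induced metric is the standard kernel distance d(i, j) = sqrt(K_ii + K_jj - 2 K_ij), checked here on a toy matrix:

import numpy as np
from gklearn.utils import normalize_gram_matrix, compute_distance_matrix

K = np.array([[4.0, 2.0],
              [2.0, 9.0]])
K = normalize_gram_matrix(K)  # K_ii becomes 1, K_01 becomes 1/3
dis_mat, d_max, d_min, d_mean = compute_distance_matrix(K)
print(dis_mat[0, 1], np.sqrt(2 - 2 * K[0, 1]))  # both ~1.1547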
