
kernel_knn_cv.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 12:52:15 2020

@author: ljia
"""
import numpy as np
import csv
import os
import os.path
from gklearn.utils import Dataset
from sklearn.model_selection import ShuffleSplit
from gklearn.preimage import MedianPreimageGenerator
from gklearn.utils import normalize_gram_matrix, compute_distance_matrix
from gklearn.preimage.utils import get_same_item_indices
from gklearn.utils.knn import knn_classification
from gklearn.preimage.utils import compute_k_dis


def kernel_knn_cv(ds_name, train_examples, knn_options, mpg_options, kernel_options, ged_options, mge_options, save_results=True, load_gm='auto', dir_save='', irrelevant_labels=None, edge_required=False, cut_range=None):
    # 1. get dataset.
    print('1. getting dataset...')
    dataset_all = Dataset()
    dataset_all.load_predefined_dataset(ds_name)
    dataset_all.trim_dataset(edge_required=edge_required)
    if irrelevant_labels is not None:
        dataset_all.remove_labels(**irrelevant_labels)
    if cut_range is not None:
        dataset_all.cut_graphs(cut_range)
    # datasets = split_dataset_by_target(dataset_all)

    if save_results:
        # create result files.
        print('creating output files...')
        fn_output_detail, fn_output_summary = __init_output_file_knn(ds_name, kernel_options['name'], mpg_options['fit_method'], dir_save)
    else:
        fn_output_detail, fn_output_summary = None, None

    # 2. compute/load Gram matrix a priori.
    print('2. computing/loading Gram matrix...')
    gram_matrix_unnorm, time_precompute_gm = __get_gram_matrix(load_gm, dir_save, ds_name, kernel_options, dataset_all)

    # 3. perform k-nn CV.
    print('3. performing k-nn CV...')
    if train_examples in ('k-graphs', 'expert', 'random'):
        __kernel_knn_cv_median(dataset_all, ds_name, knn_options, mpg_options, kernel_options, mge_options, ged_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary)
    elif train_examples == 'best-dataset':
        __kernel_knn_cv_best_ds(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary)
    elif train_examples == 'trainset':
        __kernel_knn_cv_trainset(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary)

    print('\ncomplete.\n')


def __kernel_knn_cv_median(dataset_all, ds_name, knn_options, mpg_options, kernel_options, mge_options, ged_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary):
    Gn = dataset_all.graphs
    y_all = dataset_all.targets
    n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

    # get shuffles.
    train_indices, test_indices, train_nums, y_app = __get_shuffles(y_all, n_splits, test_size)

    accuracies = [[], [], []]
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)
        train_index = train_indices[trial]
        test_index = test_indices[trial]
        G_app = [Gn[i] for i in train_index]
        G_test = [Gn[i] for i in test_index]
        y_test = [y_all[i] for i in test_index]
        gm_unnorm_trial = gram_matrix_unnorm[train_index,:][:,train_index].copy()

        # compute pre-images for each class.
        medians = [[], [], []]
        train_nums_tmp = [0] + train_nums
        print('\ncomputing pre-image for each class...\n')
        for i_class in range(len(train_nums_tmp) - 1):
            print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes:')
            i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
            i_end = i_start + train_nums_tmp[i_class + 1]
            median_set = G_app[i_start:i_end]

            dataset = dataset_all.copy()
            dataset.load_graphs(median_set.copy(), targets=None)
            mge_options['update_order'] = True
            mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[i_start:i_end, i_start:i_end].copy()
            mpg_options['runtime_precompute_gm'] = 0
            set_median, gen_median_uo = __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options)
            mge_options['update_order'] = False
            mpg_options['gram_matrix_unnorm'] = gm_unnorm_trial[i_start:i_end, i_start:i_end].copy()
            mpg_options['runtime_precompute_gm'] = 0
            _, gen_median = __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options)
            medians[0].append(set_median)
            medians[1].append(gen_median)
            medians[2].append(gen_median_uo)

        # for each set of medians.
        print('\nperforming k-nn...')
        for i_app, G_app in enumerate(medians):
            # compute dis_mat between medians.
            dataset = dataset_all.copy()
            dataset.load_graphs(G_app.copy(), targets=None)
            gm_app_unnorm, _ = __compute_gram_matrix_unnorm(dataset, kernel_options.copy())

            # compute the entire Gram matrix: medians occupy the first N
            # rows/columns, test graphs the remaining ones.
            graph_kernel = __get_graph_kernel(dataset.copy(), kernel_options.copy())
            kernels_to_medians = []
            for g in G_app:
                kernels_to_median, _ = graph_kernel.compute(g, G_test, **kernel_options.copy())
                kernels_to_medians.append(kernels_to_median)
            kernels_to_medians = np.array(kernels_to_medians)
            gm_all = np.concatenate((gm_app_unnorm, kernels_to_medians), axis=1)
            gm_all = np.concatenate((gm_all, np.concatenate((kernels_to_medians.T, gram_matrix_unnorm[test_index,:][:,test_index].copy()), axis=1)), axis=0)

            gm_all = normalize_gram_matrix(gm_all.copy())
            dis_mat, _, _, _ = compute_distance_matrix(gm_all)

            N = len(G_app)
            d_app = dis_mat[range(N),:][:,range(N)].copy()
            d_test = np.zeros((N, len(test_index)))
            for i in range(N):
                for j in range(len(test_index)):
                    # test graphs sit in columns N..N+len(test_index)-1 of gm_all.
                    d_test[i, j] = dis_mat[i, N + j]

            accuracies[i_app].append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))

        # write result detail.
        if save_results:
            f_detail = open(dir_save + fn_output_detail, 'a')
            print('writing results to files...')
            for i, median_type in enumerate(['set-median', 'gen median', 'gen median uo']):
                csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
                    train_examples + ': ' + median_type, trial,
                    knn_options['n_neighbors'],
                    len(gm_all), knn_options['test_size'],
                    accuracies[i][-1][0], accuracies[i][-1][1]])
            f_detail.close()

    results = {}
    results['ave_perf_train'] = [np.mean([i[0] for i in j], axis=0) for j in accuracies]
    results['std_perf_train'] = [np.std([i[0] for i in j], axis=0, ddof=1) for j in accuracies]
    results['ave_perf_test'] = [np.mean([i[1] for i in j], axis=0) for j in accuracies]
    results['std_perf_test'] = [np.std([i[1] for i in j], axis=0, ddof=1) for j in accuracies]

    # write result summary for each letter.
    if save_results:
        f_summary = open(dir_save + fn_output_summary, 'a')
        for i, median_type in enumerate(['set-median', 'gen median', 'gen median uo']):
            csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
                train_examples + ': ' + median_type,
                knn_options['n_neighbors'],
                knn_options['test_size'], results['ave_perf_train'][i],
                results['ave_perf_test'][i], results['std_perf_train'][i],
                results['std_perf_test'][i], time_precompute_gm])
        f_summary.close()


def __kernel_knn_cv_best_ds(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary):
    Gn = dataset_all.graphs
    y_all = dataset_all.targets
    n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

    # get shuffles.
    train_indices, test_indices, train_nums, y_app = __get_shuffles(y_all, n_splits, test_size)

    accuracies = []
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)
        train_index = train_indices[trial]
        test_index = test_indices[trial]
        G_app = [Gn[i] for i in train_index]
        G_test = [Gn[i] for i in test_index]
        y_test = [y_all[i] for i in test_index]
        gm_unnorm_trial = gram_matrix_unnorm[train_index,:][:,train_index].copy()

        # get the best graph from the trainset for each class, according to
        # distance in kernel space.
        best_graphs = []
        train_nums_tmp = [0] + train_nums
        print('\ngetting best graph from trainset for each class...')
        for i_class in range(len(train_nums_tmp) - 1):
            print(i_class + 1, 'of', len(train_nums_tmp) - 1, 'classes.')
            i_start = int(np.sum(train_nums_tmp[0:i_class + 1]))
            i_end = i_start + train_nums_tmp[i_class + 1]
            G_class = G_app[i_start:i_end]
            gm_unnorm_class = gm_unnorm_trial[i_start:i_end, i_start:i_end]
            gm_class = normalize_gram_matrix(gm_unnorm_class.copy())

            k_dis_list = []
            for idx in range(len(G_class)):
                k_dis_list.append(compute_k_dis(idx, range(0, len(G_class)), [1 / len(G_class)] * len(G_class), gm_class, withterm3=False))
            idx_k_dis_min = np.argmin(k_dis_list)
            best_graphs.append(G_class[idx_k_dis_min].copy())

        # perform k-nn.
        print('\nperforming k-nn...')
        # compute dis_mat between best graphs.
        dataset = dataset_all.copy()
        dataset.load_graphs(best_graphs.copy(), targets=None)
        gm_app_unnorm, _ = __compute_gram_matrix_unnorm(dataset, kernel_options.copy())

        # compute the entire Gram matrix: best graphs occupy the first N
        # rows/columns, test graphs the remaining ones.
        graph_kernel = __get_graph_kernel(dataset.copy(), kernel_options.copy())
        kernels_to_best_graphs = []
        for g in best_graphs:
            kernels_to_best_graph, _ = graph_kernel.compute(g, G_test, **kernel_options.copy())
            kernels_to_best_graphs.append(kernels_to_best_graph)
        kernels_to_best_graphs = np.array(kernels_to_best_graphs)
        gm_all = np.concatenate((gm_app_unnorm, kernels_to_best_graphs), axis=1)
        gm_all = np.concatenate((gm_all, np.concatenate((kernels_to_best_graphs.T, gram_matrix_unnorm[test_index,:][:,test_index].copy()), axis=1)), axis=0)

        gm_all = normalize_gram_matrix(gm_all.copy())
        dis_mat, _, _, _ = compute_distance_matrix(gm_all)

        N = len(best_graphs)
        d_app = dis_mat[range(N),:][:,range(N)].copy()
        d_test = np.zeros((N, len(test_index)))
        for i in range(N):
            for j in range(len(test_index)):
                # test graphs sit in columns N..N+len(test_index)-1 of gm_all.
                d_test[i, j] = dis_mat[i, N + j]

        accuracies.append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))

        # write result detail.
        if save_results:
            f_detail = open(dir_save + fn_output_detail, 'a')
            print('writing results to files...')
            csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
                train_examples, trial,
                knn_options['n_neighbors'],
                len(gm_all), knn_options['test_size'],
                accuracies[-1][0], accuracies[-1][1]])
            f_detail.close()

    results = {}
    results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
    results['std_perf_train'] = np.std([i[0] for i in accuracies], axis=0, ddof=1)
    results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
    results['std_perf_test'] = np.std([i[1] for i in accuracies], axis=0, ddof=1)

    # write result summary for each letter.
    if save_results:
        f_summary = open(dir_save + fn_output_summary, 'a')
        csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
            train_examples,
            knn_options['n_neighbors'],
            knn_options['test_size'], results['ave_perf_train'],
            results['ave_perf_test'], results['std_perf_train'],
            results['std_perf_test'], time_precompute_gm])
        f_summary.close()


def __kernel_knn_cv_trainset(dataset_all, ds_name, knn_options, kernel_options, gram_matrix_unnorm, time_precompute_gm, train_examples, save_results, dir_save, fn_output_detail, fn_output_summary):
    y_all = dataset_all.targets
    n_neighbors, n_splits, test_size = knn_options['n_neighbors'], knn_options['n_splits'], knn_options['test_size']

    # compute distance matrix.
    gram_matrix = normalize_gram_matrix(gram_matrix_unnorm.copy())
    dis_mat, _, _, _ = compute_distance_matrix(gram_matrix)

    # get shuffles.
    train_indices, test_indices, _, _ = __get_shuffles(y_all, n_splits, test_size)

    accuracies = []
    for trial in range(len(train_indices)):
        print('\ntrial =', trial)
        train_index = train_indices[trial]
        test_index = test_indices[trial]
        y_app = [y_all[i] for i in train_index]
        y_test = [y_all[i] for i in test_index]

        N = len(train_index)
        d_app = dis_mat[train_index,:][:,train_index].copy()
        d_test = np.zeros((N, len(test_index)))
        for i in range(N):
            for j in range(len(test_index)):
                d_test[i, j] = dis_mat[train_index[i], test_index[j]]

        accuracies.append(knn_classification(d_app, d_test, y_app, y_test, n_neighbors, verbose=True, text=train_examples))

        # write result detail.
        if save_results:
            print('writing results to files...')
            f_detail = open(dir_save + fn_output_detail, 'a')
            csv.writer(f_detail).writerow([ds_name, kernel_options['name'],
                train_examples, trial, knn_options['n_neighbors'],
                len(gram_matrix), knn_options['test_size'],
                accuracies[-1][0], accuracies[-1][1]])
            f_detail.close()

    results = {}
    results['ave_perf_train'] = np.mean([i[0] for i in accuracies], axis=0)
    results['std_perf_train'] = np.std([i[0] for i in accuracies], axis=0, ddof=1)
    results['ave_perf_test'] = np.mean([i[1] for i in accuracies], axis=0)
    results['std_perf_test'] = np.std([i[1] for i in accuracies], axis=0, ddof=1)

    # write result summary for each letter.
    if save_results:
        f_summary = open(dir_save + fn_output_summary, 'a')
        csv.writer(f_summary).writerow([ds_name, kernel_options['name'],
            train_examples, knn_options['n_neighbors'],
            knn_options['test_size'], results['ave_perf_train'],
            results['ave_perf_test'], results['std_perf_train'],
            results['std_perf_test'], time_precompute_gm])
        f_summary.close()


def __get_shuffles(y_all, n_splits, test_size):
    rs = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=0)
    train_indices = [[] for _ in range(n_splits)]
    test_indices = [[] for _ in range(n_splits)]
    idx_targets = get_same_item_indices(y_all)
    train_nums = []
    keys = []
    for key, item in idx_targets.items():
        i = 0
        for train_i, test_i in rs.split(item):  # @todo: careful when parallel.
            train_indices[i] += [item[idx] for idx in train_i]
            test_indices[i] += [item[idx] for idx in test_i]
            i += 1
        train_nums.append(len(train_i))
        keys.append(key)
    return train_indices, test_indices, train_nums, keys


def __generate_median_preimages(dataset, mpg_options, kernel_options, ged_options, mge_options):
    mpg = MedianPreimageGenerator()
    mpg.dataset = dataset.copy()
    mpg.set_options(**mpg_options.copy())
    mpg.kernel_options = kernel_options.copy()
    mpg.ged_options = ged_options.copy()
    mpg.mge_options = mge_options.copy()
    mpg.run()
    return mpg.set_median, mpg.gen_median


def __get_gram_matrix(load_gm, dir_save, ds_name, kernel_options, dataset_all):
    gm_fname = dir_save + 'gram_matrix_unnorm.' + ds_name + '.' + kernel_options['name'] + '.gm.npz'
    if load_gm == 'auto':
        gmfile_exist = os.path.isfile(os.path.abspath(gm_fname))
        if gmfile_exist:
            gmfile = np.load(gm_fname, allow_pickle=True)  # @todo: may not be safe.
            gram_matrix_unnorm = gmfile['gram_matrix_unnorm']
            time_precompute_gm = float(gmfile['run_time'])
        else:
            gram_matrix_unnorm, time_precompute_gm = __compute_gram_matrix_unnorm(dataset_all, kernel_options)
            np.savez(gm_fname, gram_matrix_unnorm=gram_matrix_unnorm, run_time=time_precompute_gm)
    elif not load_gm:
        gram_matrix_unnorm, time_precompute_gm = __compute_gram_matrix_unnorm(dataset_all, kernel_options)
        np.savez(gm_fname, gram_matrix_unnorm=gram_matrix_unnorm, run_time=time_precompute_gm)
    else:
        # np.load() requires a file name; assume the cached file follows the
        # same naming scheme as the 'auto' branch.
        gmfile = np.load(gm_fname, allow_pickle=True)
        gram_matrix_unnorm = gmfile['gram_matrix_unnorm']
        time_precompute_gm = float(gmfile['run_time'])
    return gram_matrix_unnorm, time_precompute_gm


def __get_graph_kernel(dataset, kernel_options):
    from gklearn.utils.utils import get_graph_kernel_by_name
    graph_kernel = get_graph_kernel_by_name(kernel_options['name'],
                                            node_labels=dataset.node_labels,
                                            edge_labels=dataset.edge_labels,
                                            node_attrs=dataset.node_attrs,
                                            edge_attrs=dataset.edge_attrs,
                                            ds_infos=dataset.get_dataset_infos(keys=['directed']),
                                            kernel_options=kernel_options)
    return graph_kernel


def __compute_gram_matrix_unnorm(dataset, kernel_options):
    # build the kernel the same way as __get_graph_kernel above.
    graph_kernel = __get_graph_kernel(dataset, kernel_options)
    gram_matrix, run_time = graph_kernel.compute(dataset.graphs, **kernel_options)
    gram_matrix_unnorm = graph_kernel.gram_matrix_unnorm
    return gram_matrix_unnorm, run_time


def __init_output_file_knn(ds_name, gkernel, fit_method, dir_output):
    if not os.path.exists(dir_output):
        os.makedirs(dir_output)

    fn_output_detail = 'results_detail_knn.' + ds_name + '.' + gkernel + '.csv'
    f_detail = open(dir_output + fn_output_detail, 'a')
    csv.writer(f_detail).writerow(['dataset', 'graph kernel',
        'train examples', 'trial', 'num neighbors', 'num graphs', 'test size',
        'perf train', 'perf test'])
    f_detail.close()

    fn_output_summary = 'results_summary_knn.' + ds_name + '.' + gkernel + '.csv'
    f_summary = open(dir_output + fn_output_summary, 'a')
    csv.writer(f_summary).writerow(['dataset', 'graph kernel',
        'train examples', 'num neighbors', 'test size',
        'ave perf train', 'ave perf test',
        'std perf train', 'std perf test', 'time precompute gm'])
    f_summary.close()

    return fn_output_detail, fn_output_summary
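
For reference, a minimal sketch of how this entry point might be invoked. The import path, dataset name, kernel name, and all option values below are illustrative assumptions based on gklearn conventions, not defaults taken from the library; a real run typically needs additional kernel-specific options, since kernel_options is unpacked into graph_kernel.compute().

    # Illustrative sketch only: names and option values are assumptions.
    from gklearn.preimage.kernel_knn_cv import kernel_knn_cv  # hypothetical import path

    knn_options = {'n_neighbors': 1,   # k in k-nn
                   'n_splits': 30,     # number of shuffle-split trials
                   'test_size': 0.1}   # fraction of each class held out per trial
    mpg_options = {'fit_method': 'k-graphs'}  # read by kernel_knn_cv when save_results=True
    kernel_options = {'name': 'ShortestPath'}

    # 'trainset' classifies test graphs against the whole training set, so the
    # pre-image options (ged_options, mge_options) can stay empty placeholders here.
    kernel_knn_cv('Letter-high', 'trainset', knn_options, mpg_options,
                  kernel_options, ged_options={}, mge_options={},
                  save_results=True, load_gm='auto', dir_save='outputs/')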

A Python package for graph kernels, graph edit distances, and the graph pre-image problem.