
run_vertex_differs_cw.py 3.6 kB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 23 17:57:18 2018
@author: ljia
"""
import sys
import numpy as np
import networkx as nx
sys.path.insert(0, "../../")
from gklearn.utils.graphfiles import loadDataset
from gklearn.utils.model_selection_precomputed import compute_gram_matrices
from sklearn.model_selection import ParameterGrid
from libs import *
import multiprocessing

# Datasets to benchmark; commented-out entries are kept for reference.
dslist = [
    # {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',
    #  'task': 'regression'},  # node symb
    # {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',
    #  'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'},
    # # contains single node graph, node symb
    # {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'},  # node/edge symb
    {'name': 'PAH', 'dataset': '../../datasets/PAH/dataset.ds'},  # unlabeled
    {'name': 'MUTAG', 'dataset': '../../datasets/MUTAG/MUTAG.mat',
     'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}},  # node/edge symb
    # {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},
    # # node nsymb
    {'name': 'ENZYMES', 'dataset': '../../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},
    # node symb/nsymb
]


def run_ms(dataset, y, ds):
    """Time Gram matrix computation of the common walk kernel over a grid of hyper-parameters."""
    from gklearn.kernels.commonWalkKernel import commonwalkkernel
    estimator = commonwalkkernel
    param_grid_precomputed = [{'compute_method': ['geo'],
                               'weight': np.linspace(0.01, 0.15, 15)},
                              # 'weight': np.logspace(-1, -10, num=10, base=10)},
                              {'compute_method': ['exp'], 'weight': range(0, 15)}]
    param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},
                  {'alpha': np.logspace(-10, 10, num=41, base=10)}]
    _, gram_matrix_time, _, _, _ = compute_gram_matrices(
        dataset, y, estimator, list(ParameterGrid(param_grid_precomputed)),
        '../../notebooks/results/' + estimator.__name__, ds['name'],
        n_jobs=multiprocessing.cpu_count(), verbose=False)
    average_gram_matrix_time = np.mean(gram_matrix_time)
    std_gram_matrix_time = np.std(gram_matrix_time, ddof=1)
    print('\n***** time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'
          .format(average_gram_matrix_time, std_gram_matrix_time))
    print()
    return average_gram_matrix_time, std_gram_matrix_time


for ds in dslist:
    print()
    print(ds['name'])
    Gn, y_all = loadDataset(
        ds['dataset'], filename_y=(ds['dataset_y'] if 'dataset_y' in ds else None),
        extra_params=(ds['extra_params'] if 'extra_params' in ds else None))

    # Sort graphs (and their targets) by vertex count, then split the dataset
    # into 5 equal-size pieces so runtime can be related to average graph size.
    vn_list = [nx.number_of_nodes(g) for g in Gn]
    idx_sorted = np.argsort(vn_list)
    vn_list.sort()
    Gn = [Gn[idx] for idx in idx_sorted]
    y_all = [y_all[idx] for idx in idx_sorted]
    len_1piece = int(len(Gn) / 5)
    ave_time = []
    std_time = []
    ave_vnb = []
    for piece in range(0, 5):
        print('piece', str(piece), ':')
        Gn_p = Gn[len_1piece * piece:len_1piece * (piece + 1)]
        y_all_p = y_all[len_1piece * piece:len_1piece * (piece + 1)]
        avevn = np.mean(vn_list[len_1piece * piece:len_1piece * (piece + 1)])
        ave_vnb.append(avevn)
        avet, stdt = run_ms(Gn_p, y_all_p, ds)
        ave_time.append(avet)
        std_time.append(stdt)

    print('\n****** for dataset', ds['name'], ', the average time is \n', ave_time,
          '\nthe time std is \n', std_time)
    print('corresponding average vertex numbers are', ave_vnb)
    print()
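
For context, here is a minimal, self-contained sketch of what the common walk kernel benchmarked above computes for a single pair of unlabeled graphs: it counts common walks via the direct (tensor) product graph, with either a geometric ('geo') or exponential ('exp') weighting over walk lengths, following Gärtner et al. (2003). The function names below are illustrative, not gklearn's commonwalkkernel API, which additionally handles node/edge labels and parallelism.

# Illustrative sketch of the 'geo' and 'exp' common walk kernels; not gklearn's API.
import numpy as np
import networkx as nx
from scipy.linalg import expm

def direct_product_adjacency(g1, g2):
    """Adjacency matrix of the tensor (direct) product of two unlabeled graphs:
    nodes are pairs (u, v); (u, v) ~ (u', v') iff u ~ u' in g1 and v ~ v' in g2."""
    return np.kron(nx.to_numpy_array(g1), nx.to_numpy_array(g2))

def common_walk_geo(g1, g2, weight):
    """Geometric common walk kernel: sum_k weight^k * (# common walks of length k),
    in closed form e^T (I - weight * Ax)^{-1} e; converges for weight < 1/rho(Ax)."""
    ax = direct_product_adjacency(g1, g2)
    return np.linalg.inv(np.eye(ax.shape[0]) - weight * ax).sum()

def common_walk_exp(g1, g2, beta):
    """Exponential common walk kernel: e^T expm(beta * Ax) e."""
    ax = direct_product_adjacency(g1, g2)
    return expm(beta * ax).sum()

g1, g2 = nx.path_graph(4), nx.cycle_graph(5)
print(common_walk_geo(g1, g2, weight=0.05))  # a 'geo' weight from the script's grid
print(common_walk_exp(g1, g2, beta=2.0))     # an 'exp' weight from the script's grid

The small 'geo' weights in the script (0.01 to 0.15) reflect the convergence condition: the geometric series only converges when the weight is below the reciprocal of the product graph's spectral radius.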

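Note also that param_grid_precomputed above is a list of two dicts; sklearn's ParameterGrid expands each dict independently and concatenates the results, so compute_gram_matrices times 15 'geo' settings plus 15 'exp' settings per dataset piece. A quick check of that expansion:

import numpy as np
from sklearn.model_selection import ParameterGrid

grid = list(ParameterGrid([
    {'compute_method': ['geo'], 'weight': np.linspace(0.01, 0.15, 15)},
    {'compute_method': ['exp'], 'weight': range(0, 15)},
]))
print(len(grid))   # 30 hyper-parameter settings in total
print(grid[0])     # {'compute_method': 'geo', 'weight': 0.01}
print(grid[15])    # {'compute_method': 'exp', 'weight': 0}
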
A Python package for graph kernels, graph edit distances, and the graph pre-image problem.