
run_vertex_differs_uhp.py 3.3 kB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 16:25:33 2019
@author: ljia
"""
import sys
import numpy as np
import networkx as nx
sys.path.insert(0, "../")
from pygraph.utils.graphfiles import loadDataset
from pygraph.utils.model_selection_precomputed import compute_gram_matrices
from sklearn.model_selection import ParameterGrid
from libs import *
import multiprocessing

dslist = [
#    {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',
#     'task': 'regression'},  # node symb
#    {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',
#     'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt', },
#    # contains single node graph, node symb
#    {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds', },  # node/edge symb
    {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds', },  # unlabeled
    {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG.mat',
     'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}},  # node/edge symb
#    {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},
#    # node nsymb
    {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},
    # node symb/nsymb
]


def run_ms(dataset, y, ds):
    from pygraph.kernels.untilHPathKernel import untilhpathkernel
    estimator = untilhpathkernel
    param_grid_precomputed = {'depth': np.linspace(1, 10, 10),  # [2],
                              'k_func': ['MinMax', 'tanimoto']}  # ['MinMax']}
    param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},
                  {'alpha': np.logspace(-10, 10, num=41, base=10)}]
    _, gram_matrix_time, _, _, _ = compute_gram_matrices(
        dataset, y, estimator, list(ParameterGrid(param_grid_precomputed)),
        '../notebooks/results/' + estimator.__name__, ds['name'],
        n_jobs=multiprocessing.cpu_count(), verbose=False)
    average_gram_matrix_time = np.mean(gram_matrix_time)
    std_gram_matrix_time = np.std(gram_matrix_time, ddof=1)
    print('\n***** time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'
          .format(average_gram_matrix_time, std_gram_matrix_time))
    print()
    return average_gram_matrix_time, std_gram_matrix_time


for ds in dslist:
    print()
    print(ds['name'])
    Gn, y_all = loadDataset(
        ds['dataset'], filename_y=(ds['dataset_y'] if 'dataset_y' in ds else None),
        extra_params=(ds['extra_params'] if 'extra_params' in ds else None))
    # sort graphs (and their targets) by increasing number of vertices
    vn_list = [nx.number_of_nodes(g) for g in Gn]
    idx_sorted = np.argsort(vn_list)
    vn_list.sort()
    Gn = [Gn[idx] for idx in idx_sorted]
    y_all = [y_all[idx] for idx in idx_sorted]
    # split the sorted dataset into 5 equally sized pieces and time each piece separately
    len_1piece = int(len(Gn) / 5)
    ave_time = []
    std_time = []
    for piece in range(0, 5):
        print('piece', str(piece), ':')
        Gn_p = Gn[len_1piece * piece:len_1piece * (piece + 1)]
        y_all_p = y_all[len_1piece * piece:len_1piece * (piece + 1)]
        avet, stdt = run_ms(Gn_p, y_all_p, ds)
        ave_time.append(avet)
        std_time.append(stdt)

    print('\n****** for dataset', ds['name'], ', the average time is \n', ave_time,
          '\nthe time std is \n', std_time)
    print()
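
For reference, the sort-and-split timing pattern the script relies on (ordering graphs by vertex count and timing each fifth of the dataset separately) can be exercised on its own. The following is a minimal sketch under stated assumptions: the random NetworkX graphs and the trivial edge-count "kernel" are hypothetical stand-ins for the pygraph datasets and the until-h-path kernel, used only to illustrate the slicing logic.

import time
import numpy as np
import networkx as nx

# stand-in dataset: 100 random graphs with 5 to 49 nodes (hypothetical, not a pygraph dataset)
Gn = [nx.gnp_random_graph(n, 0.3) for n in np.random.randint(5, 50, size=100)]
Gn.sort(key=nx.number_of_nodes)  # order graphs by vertex count, as in the script
len_1piece = len(Gn) // 5        # size of each of the 5 pieces

for piece in range(5):
    Gn_p = Gn[len_1piece * piece:len_1piece * (piece + 1)]
    start = time.time()
    # trivial placeholder for a pairwise kernel evaluation over the piece
    for g1 in Gn_p:
        for g2 in Gn_p:
            _ = nx.number_of_edges(g1) * nx.number_of_edges(g2)
    print('piece', piece, ': graphs with',
          nx.number_of_nodes(Gn_p[0]), 'to', nx.number_of_nodes(Gn_p[-1]), 'nodes,',
          '{:.4f}s'.format(time.time() - start))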

A Python package for graph kernels, graph edit distances and the graph pre-image problem.