You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

run_degree_differs_uhp.py 2.8 kB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Tue Jan 8 17:47:22 2019
  5. @author: ljia
  6. """
  7. import sys
  8. import numpy as np
  9. import networkx as nx
  10. sys.path.insert(0, "../")
  11. from pygraph.utils.graphfiles import loadDataset
  12. from pygraph.utils.model_selection_precomputed import compute_gram_matrices
  13. from sklearn.model_selection import ParameterGrid
  14. from libs import *
  15. import multiprocessing
  16. dslist = [
  17. {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},
  18. # node symb/nsymb
  19. ]
  20. def run_ms(dataset, y, ds):
  21. from pygraph.kernels.untilHPathKernel import untilhpathkernel
  22. estimator = untilhpathkernel
  23. param_grid_precomputed = {'depth': np.linspace(1, 10, 10), # [2],
  24. 'k_func': ['MinMax', 'tanimoto']} # ['MinMax']}
  25. param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},
  26. {'alpha': np.logspace(-10, 10, num=41, base=10)}]
  27. _, gram_matrix_time, _, _, _ = compute_gram_matrices(
  28. dataset, y, estimator, list(ParameterGrid(param_grid_precomputed)),
  29. '../notebooks/results/' + estimator.__name__, ds['name'],
  30. n_jobs=multiprocessing.cpu_count(), verbose=False)
  31. average_gram_matrix_time = np.mean(gram_matrix_time)
  32. std_gram_matrix_time = np.std(gram_matrix_time, ddof=1)
  33. print('\n***** time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'
  34. .format(average_gram_matrix_time, std_gram_matrix_time))
  35. print()
  36. return average_gram_matrix_time, std_gram_matrix_time
  37. for ds in dslist:
  38. print()
  39. print(ds['name'])
  40. Gn, y_all = loadDataset(
  41. ds['dataset'], filename_y=(ds['dataset_y'] if 'dataset_y' in ds else None),
  42. extra_params=(ds['extra_params'] if 'extra_params' in ds else None))
  43. degree_list = [np.mean(list(dict(g.degree()).values())) for g in Gn]
  44. idx_sorted = np.argsort(degree_list)
  45. degree_list.sort()
  46. Gn = [Gn[idx] for idx in idx_sorted]
  47. y_all = [y_all[idx] for idx in idx_sorted]
  48. len_1piece = int(len(Gn) / 5)
  49. ave_time = []
  50. std_time = []
  51. ave_degree = []
  52. for piece in range(1, 5):
  53. print('piece', str(piece), ':')
  54. Gn_p = Gn[len_1piece * piece:len_1piece * (piece + 1)]
  55. y_all_p = y_all[len_1piece * piece:len_1piece * (piece + 1)]
  56. aved = np.mean(degree_list[len_1piece * piece:len_1piece * (piece + 1)])
  57. ave_degree.append(aved)
  58. avet, stdt = run_ms(Gn_p, y_all_p, ds)
  59. ave_time.append(avet)
  60. std_time.append(stdt)
  61. print('\n****** for dataset', ds['name'], ', the average time is \n', ave_time,
  62. '\nthe time std is \n', std_time)
  63. print('corresponding average vertex degrees are', ave_degree)
  64. print()

A Python package for graph kernels, graph edit distances and graph pre-image problem.