
run_treeletkernel.py 4.5 kB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 21 11:19:33 2019

@author: ljia
"""
# `libs` is a project-local helper module; it is expected to provide `np`
# (numpy) and model_selection_for_precomputed_kernel used below.
from libs import *
import multiprocessing
import functools
from gklearn.kernels.treeletKernel import treeletkernel
from gklearn.utils.kernels import gaussiankernel, polynomialkernel

# Datasets to benchmark; all entries except D&D and Letter-med are commented out.
dslist = [
#    {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',
#     'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'},
#    # contains single node graph, node symb
#    {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',
#     'task': 'regression'},  # node symb
#    {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'},  # node/edge symb
#    {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'},  # unlabeled
#    {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'},  # node/edge symb
#    {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'},  # node symb
#    {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'},  # node symb
#    {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'},  # node symb/nsymb, edge symb
#    {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},
#    {'name': 'monoterpenoides', 'dataset': '../datasets/monoterpenoides/dataset_10+.ds'},  # node/edge symb
#    # node symb/nsymb
    {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'},  # node symb
    {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},
    # node nsymb
#
#    {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'},
#    # node/edge symb
#    {'name': 'COIL-DEL', 'dataset': '../datasets/COIL-DEL/COIL-DEL_A.txt'},  # edge symb, node nsymb
#    {'name': 'BZR', 'dataset': '../datasets/BZR_txt/BZR_A_sparse.txt'},  # node symb/nsymb
#    {'name': 'COX2', 'dataset': '../datasets/COX2_txt/COX2_A_sparse.txt'},  # node symb/nsymb
#    {'name': 'Fingerprint', 'dataset': '../datasets/Fingerprint/Fingerprint_A.txt'},
#
#    {'name': 'DHFR', 'dataset': '../datasets/DHFR_txt/DHFR_A_sparse.txt'},  # node symb/nsymb
#    {'name': 'SYNTHETIC', 'dataset': '../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'},  # node symb/nsymb
#    {'name': 'MSRC9', 'dataset': '../datasets/MSRC_9_txt/MSRC_9_A.txt'},  # node symb
#    {'name': 'MSRC21', 'dataset': '../datasets/MSRC_21_txt/MSRC_21_A.txt'},  # node symb
#    {'name': 'FIRSTMM_DB', 'dataset': '../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'},  # node symb/nsymb, edge nsymb
#    {'name': 'PROTEINS', 'dataset': '../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'},  # node symb/nsymb
#    {'name': 'PROTEINS_full', 'dataset': '../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'},  # node symb/nsymb
#    {'name': 'NCI-HIV', 'dataset': '../datasets/NCI-HIV/AIDO99SD.sdf',
#     'dataset_y': '../datasets/NCI-HIV/aids_conc_may04.txt'},  # node/edge symb
#
#    # not working below
#    {'name': 'PTC_FM', 'dataset': '../datasets/PTC/Train/FM.ds'},
#    {'name': 'PTC_FR', 'dataset': '../datasets/PTC/Train/FR.ds'},
#    {'name': 'PTC_MM', 'dataset': '../datasets/PTC/Train/MM.ds'},
#    {'name': 'PTC_MR', 'dataset': '../datasets/PTC/Train/MR.ds'},
]
estimator = treeletkernel

# Candidate sub-kernels for the treelet kernel: Gaussian and polynomial,
# each over a log-spaced parameter grid.
gkernels = [functools.partial(gaussiankernel, gamma=1 / ga)
#            for ga in np.linspace(1, 10, 10)]
            for ga in np.logspace(0, 10, num=11, base=10)]
pkernels = [functools.partial(polynomialkernel, d=d, c=c) for d in range(1, 11)
            for c in np.logspace(0, 10, num=11, base=10)]

param_grid_precomputed = {'sub_kernel': pkernels + gkernels}
# SVM C grid for classification, kernel ridge alpha grid for regression.
param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},
              {'alpha': np.logspace(-10, 10, num=41, base=10)}]

for ds in dslist:
    print()
    print(ds['name'])
    model_selection_for_precomputed_kernel(
        ds['dataset'],
        estimator,
        param_grid_precomputed,
        (param_grid[1] if ('task' in ds and ds['task']
                           == 'regression') else param_grid[0]),
        (ds['task'] if 'task' in ds else 'classification'),
        NUM_TRIALS=30,
        datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),
        extra_params=(ds['extra_params'] if 'extra_params' in ds else None),
        ds_name=ds['name'],
        n_jobs=multiprocessing.cpu_count(),
        read_gm_from_file=False,
        verbose=True)
    print()

A Python package for graph kernels, graph edit distances and the graph pre-image problem.
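For orientation, the script above wraps treeletkernel in a cross-validated grid search over sub-kernels and regularization parameters. The sketch below shows the underlying call pattern: load one dataset and compute a single treelet Gram matrix with one fixed sub-kernel. Only the sub_kernel parameter is confirmed by the grid above; the loadDataset helper, the 'atom'/'bond_type' label names and the (Gram matrix, run time) return value are assumptions about this version of gklearn and may need adjusting.

# Minimal sketch: one treelet Gram matrix, no grid search.
import functools
import multiprocessing

from gklearn.kernels.treeletKernel import treeletkernel
from gklearn.utils.kernels import polynomialkernel
from gklearn.utils.graphfiles import loadDataset  # assumed loader returning (graphs, targets)

# Assumed dataset path; any entry from dslist above would do.
graphs, targets = loadDataset('../datasets/MUTAG/MUTAG_A.txt')

# One fixed sub-kernel instead of the full pkernels + gkernels grid.
sub_kernel = functools.partial(polynomialkernel, d=2, c=1.0)

# Assumed keyword names and return tuple; 'sub_kernel' matches
# param_grid_precomputed in the script above.
gram_matrix, run_time = treeletkernel(graphs,
                                      sub_kernel=sub_kernel,
                                      node_label='atom',
                                      edge_label='bond_type',
                                      n_jobs=multiprocessing.cpu_count(),
                                      verbose=True)
print(gram_matrix.shape, run_time)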