
run_treeletkernel.ipynb (8.7 kB)

{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Acyclic\n",
      "\n",
      "--- This is a regression problem ---\n",
      "\n",
      "\n",
      "1. Loading dataset from file...\n",
      "\n",
      "2. Calculating gram matrices. This could take a while...\n",
      "getting canonkeys: 183it [00:00, 2869.32it/s]\n",
      "calculating kernels: 16836it [00:00, 289967.90it/s]\n",
      "\n",
      " --- treelet kernel matrix of size 183 built in 0.2736480236053467 seconds ---\n",
      "\n",
      "the gram matrix with parameters {'sub_kernel': <function gaussiankernel at 0x7f12f14ec730>, 'n_jobs': 8, 'verbose': True} is: \n",
      "\n",
      "\n",
      "getting canonkeys: 183it [00:00, 2431.05it/s]\n",
      "calculating kernels: 16836it [00:00, 225177.06it/s]\n",
      "\n",
      " --- treelet kernel matrix of size 183 built in 0.2614881992340088 seconds ---\n",
      "\n",
      "the gram matrix with parameters {'sub_kernel': <function polynomialkernel at 0x7f12f14ec7b8>, 'n_jobs': 8, 'verbose': True} is: \n",
      "\n",
      "\n",
      "\n",
      "2 gram matrices are calculated, 0 of which are ignored.\n",
      "\n",
      "3. Fitting and predicting using nested cross validation. This could really take a while...\n",
      "cross validation: 30it [00:06, 4.34it/s]\n",
      "\n",
      "4. Getting final performance...\n",
      "best_params_out: [{'sub_kernel': <function polynomialkernel at 0x7f12f14ec7b8>, 'n_jobs': 8, 'verbose': True}]\n",
      "best_params_in: [{'alpha': 0.01}]\n",
      "\n",
      "best_val_perf: 8.699254729880051\n",
      "best_val_std: 0.6859488791023038\n",
      "final_performance: [10.449041034883777]\n",
      "final_confidence: [5.005824863496953]\n",
      "train_performance: [1.3405521528763233]\n",
      "train_std: [0.0923786919637616]\n",
      "\n",
      "time to calculate gram matrix with different hyper-params: 0.27±0.01s\n",
      "time to calculate best gram matrix: 0.26±0.00s\n",
      "total training time with all hyper-param choices: 8.18s\n",
      "\n",
      "\n",
      "\n",
      "Alkane\n",
      "\n",
      "--- This is a regression problem ---\n",
      "\n",
      "\n",
      "1. Loading dataset from file...\n",
      "\n",
      "2. Calculating gram matrices. This could take a while...\n",
      "getting canonkeys: 150it [00:00, 1460.40it/s]\n",
      "calculating kernels: 11325it [00:00, 188753.18it/s]\n",
      "\n",
      " --- treelet kernel matrix of size 150 built in 0.452197790145874 seconds ---\n",
      "\n",
      "the gram matrix with parameters {'sub_kernel': <function gaussiankernel at 0x7f12f14ec730>, 'n_jobs': 8, 'verbose': True} is: \n",
      "\n",
      "\n",
      "getting canonkeys: 150it [00:00, 3273.02it/s]\n",
      "calculating kernels: 11325it [00:00, 223074.04it/s]\n",
      "\n",
      " --- treelet kernel matrix of size 150 built in 0.2638716697692871 seconds ---\n",
      "\n",
      "the gram matrix with parameters {'sub_kernel': <function polynomialkernel at 0x7f12f14ec7b8>, 'n_jobs': 8, 'verbose': True} is: \n",
      "\n",
      "\n",
      "\n",
      "2 gram matrices are calculated, 0 of which are ignored.\n",
      "\n",
      "3. Fitting and predicting using nested cross validation. This could really take a while...\n",
      "cross validation: 1it [00:01, 1.41s/it]"
     ]
    }
   ],
   "source": [
    "from libs import *\n",
    "import multiprocessing\n",
    "\n",
    "from gklearn.kernels.treeletKernel import treeletkernel\n",
    "from gklearn.utils.kernels import gaussiankernel, polynomialkernel\n",
    "\n",
    "dslist = [\n",
    "    {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n",
    "     'task': 'regression'}, # node symb\n",
    "    {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',\n",
    "     'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'},\n",
    "    # contains single node graph, node symb\n",
    "    {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb\n",
    "    {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled\n",
    "    {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb\n",
    "#    {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},\n",
    "#    # node nsymb\n",
    "    {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},\n",
    "    # node symb/nsymb\n",
    "#    {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'},\n",
    "#    # node/edge symb\n",
    "#    {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb\n",
    "\n",
    "    # {'name': 'COIL-DEL', 'dataset': '../datasets/COIL-DEL/COIL-DEL_A.txt'}, # edge symb, node nsymb\n",
    "    # # # {'name': 'BZR', 'dataset': '../datasets/BZR_txt/BZR_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # # {'name': 'COX2', 'dataset': '../datasets/COX2_txt/COX2_A_sparse.txt'}, # node symb/nsymb\n",
    "    # {'name': 'Fingerprint', 'dataset': '../datasets/Fingerprint/Fingerprint_A.txt'},\n",
    "    #\n",
    "    # # {'name': 'DHFR', 'dataset': '../datasets/DHFR_txt/DHFR_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # {'name': 'SYNTHETIC', 'dataset': '../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # {'name': 'MSRC9', 'dataset': '../datasets/MSRC_9_txt/MSRC_9_A.txt'}, # node symb\n",
    "    # # {'name': 'MSRC21', 'dataset': '../datasets/MSRC_21_txt/MSRC_21_A.txt'}, # node symb\n",
    "    # # {'name': 'FIRSTMM_DB', 'dataset': '../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'}, # node symb/nsymb, edge nsymb\n",
    "\n",
    "    # # {'name': 'PROTEINS', 'dataset': '../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # {'name': 'PROTEINS_full', 'dataset': '../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'}, # node symb/nsymb\n",
    "    {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb\n",
    "    # {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1.mat',\n",
    "    #  'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
    "    # {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109.mat',\n",
    "    #  'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
    "    # {'name': 'NCI-HIV', 'dataset': '../datasets/NCI-HIV/AIDO99SD.sdf',\n",
    "    #  'dataset_y': '../datasets/NCI-HIV/aids_conc_may04.txt'}, # node/edge symb\n",
    "\n",
    "    # # not working below\n",
    "    # {'name': 'PTC_FM', 'dataset': '../datasets/PTC/Train/FM.ds'},\n",
    "    # {'name': 'PTC_FR', 'dataset': '../datasets/PTC/Train/FR.ds'},\n",
    "    # {'name': 'PTC_MM', 'dataset': '../datasets/PTC/Train/MM.ds'},\n",
    "    # {'name': 'PTC_MR', 'dataset': '../datasets/PTC/Train/MR.ds'},\n",
    "]\n",
    "estimator = treeletkernel\n",
    "param_grid_precomputed = {'sub_kernel': [gaussiankernel, polynomialkernel]}\n",
    "param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},\n",
    "              {'alpha': np.logspace(-10, 10, num=41, base=10)}]\n",
    "\n",
    "for ds in dslist:\n",
    "    print()\n",
    "    print(ds['name'])\n",
    "    model_selection_for_precomputed_kernel(\n",
    "        ds['dataset'],\n",
    "        estimator,\n",
    "        param_grid_precomputed,\n",
    "        (param_grid[1] if ('task' in ds and ds['task']\n",
    "                           == 'regression') else param_grid[0]),\n",
    "        (ds['task'] if 'task' in ds else 'classification'),\n",
    "        NUM_TRIALS=30,\n",
    "        datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),\n",
    "        extra_params=(ds['extra_params'] if 'extra_params' in ds else None),\n",
    "        ds_name=ds['name'],\n",
    "        n_jobs=multiprocessing.cpu_count(),\n",
    "        read_gm_from_file=False,\n",
    "        verbose=True)\n",
    "    print()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
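For quick reference outside the notebook, here is a minimal standalone sketch of the gram-matrix step that the cell above drives through model_selection_for_precomputed_kernel. It reuses the notebook's own imports; the loadDataset helper's location and the (matrix, runtime) return value of treeletkernel are assumptions based on this era of gklearn's API and may differ in other versions.

# Minimal sketch of the gram-matrix step in the log above. Assumptions:
# gklearn's old functional API; loadDataset's module path and
# treeletkernel's (matrix, runtime) return shape are not guaranteed
# across gklearn versions.
import multiprocessing

from gklearn.utils.graphfiles import loadDataset  # assumed location
from gklearn.kernels.treeletKernel import treeletkernel
from gklearn.utils.kernels import gaussiankernel

# Load the Acyclic regression dataset from the first run of the log
# (183 molecular graphs with boiling points as regression targets).
graphs, targets = loadDataset('../datasets/acyclic/dataset_bps.ds')

# Build the 183 x 183 treelet gram matrix with a Gaussian sub-kernel,
# parallelized over all cores as in the notebook ('n_jobs': 8 in the log).
gram_matrix, run_time = treeletkernel(
    graphs,
    sub_kernel=gaussiankernel,
    n_jobs=multiprocessing.cpu_count(),
    verbose=True)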

A Python package for graph kernels, graph edit distances, and the graph pre-image problem.
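Step 3 of the log ("Fitting and predicting using nested cross validation") happens inside model_selection_for_precomputed_kernel. As a hedged illustration only, the inner model-selection loop for the regression datasets can be approximated with scikit-learn's KernelRidge on the precomputed gram matrix, using the same alpha grid as param_grid in the notebook; the full nested scheme repeats this inside an outer train/test split for NUM_TRIALS=30 trials. This is a simplified stand-in, not the repo's implementation.

# Simplified stand-in for the inner loop of step 3; assumes gram_matrix
# and targets from the sketch above. Not the repo's own code.
import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV, KFold

# With kernel='precomputed', scikit-learn slices the gram matrix on both
# axes when splitting, so X is simply the full kernel matrix.
search = GridSearchCV(
    KernelRidge(kernel='precomputed'),
    param_grid={'alpha': np.logspace(-10, 10, num=41, base=10)},
    scoring='neg_root_mean_squared_error',
    cv=KFold(n_splits=10, shuffle=True, random_state=0))
search.fit(gram_matrix, targets)

print(search.best_params_)  # the run above selected {'alpha': 0.01}
print(-search.best_score_)  # cross-validated RMSE for the best alpha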