
memory_profile.ipynb 70 kB

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Acyclic\n",
"\n",
"--- This is a regression problem ---\n",
"\n",
"\n",
"1. Loading dataset from file...\n",
"\n",
"2. Calculating gram matrices. This could take a while...\n",
"\n",
" None edge weight specified. Set all weight to 1.\n",
"\n",
"getting sp graphs: 183it [00:00, 1871.37it/s]\n",
"calculating kernels: 16836it [00:16, 1014.42it/s]\n",
"\n",
" --- shortest path kernel matrix of size 183 built in 16.947543382644653 seconds ---\n",
"\n",
"the gram matrix with parameters {'node_kernels': {'symb': <function deltakernel at 0x7f3a99093950>, 'nsymb': <function gaussiankernel at 0x7f3a990931e0>, 'mix': functools.partial(<function kernelproduct at 0x7f3a99088ae8>, <function deltakernel at 0x7f3a99093950>, <function gaussiankernel at 0x7f3a990931e0>)}, 'n_jobs': 8} is: \n",
"\n",
"\n",
"\n",
"1 gram matrices are calculated, 0 of which are ignored.\n",
"\n",
"3. Fitting and predicting using nested cross validation. This could really take a while...\n",
"cross validation: 30it [00:12, 2.03it/s]\n",
"\n",
"4. Getting final performance...\n",
"best_params_out: [{'node_kernels': {'symb': <function deltakernel at 0x7f3a99093950>, 'nsymb': <function gaussiankernel at 0x7f3a990931e0>, 'mix': functools.partial(<function kernelproduct at 0x7f3a99088ae8>, <function deltakernel at 0x7f3a99093950>, <function gaussiankernel at 0x7f3a990931e0>)}, 'n_jobs': 8}]\n",
"best_params_in: [{'alpha': 1e-06}]\n",
"\n",
"best_val_perf: 9.55244065682399\n",
"best_val_std: 0.5574811966683159\n",
"final_performance: [9.724426192585643]\n",
"final_confidence: [2.999822095078807]\n",
"train_performance: [6.141755071354953]\n",
"train_std: [0.2732168016478284]\n",
"\n",
"time to calculate gram matrix with different hyper-params: 16.95±nans\n",
"time to calculate best gram matrix: 16.95±nans\n",
"total training time with all hyper-param choices: 32.74s\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.6/dist-packages/numpy/core/_methods.py:140: RuntimeWarning: Degrees of freedom <= 0 for slice\n",
" keepdims=keepdims)\n",
"/usr/local/lib/python3.6/dist-packages/numpy/core/_methods.py:132: RuntimeWarning: invalid value encountered in double_scalars\n",
" ret = ret.dtype.type(ret / rcount)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Filename: ../../pygraph/utils/model_selection_precomputed.py\n",
"\n",
"Line # Mem usage Increment Line Contents\n",
"================================================\n",
" 24 115.2 MiB 115.2 MiB @profile\n",
" 25 def model_selection_for_precomputed_kernel(datafile,\n",
" 26 estimator,\n",
" 27 param_grid_precomputed,\n",
" 28 param_grid,\n",
" 29 model_type,\n",
" 30 NUM_TRIALS=30,\n",
" 31 datafile_y=None,\n",
" 32 extra_params=None,\n",
" 33 ds_name='ds-unknown',\n",
" 34 n_jobs=1,\n",
" 35 read_gm_from_file=False):\n",
  87. " 36 \"\"\"Perform model selection, fitting and testing for precomputed kernels using nested cv. Print out neccessary data during the process then finally the results.\n",
  88. " 37 \n",
  89. " 38 Parameters\n",
  90. " 39 ----------\n",
  91. " 40 datafile : string\n",
  92. " 41 Path of dataset file.\n",
  93. " 42 estimator : function\n",
  94. " 43 kernel function used to estimate. This function needs to return a gram matrix.\n",
  95. " 44 param_grid_precomputed : dictionary\n",
  96. " 45 Dictionary with names (string) of parameters used to calculate gram matrices as keys and lists of parameter settings to try as values. This enables searching over any sequence of parameter settings. Params with length 1 will be omitted.\n",
  97. " 46 param_grid : dictionary\n",
  98. " 47 Dictionary with names (string) of parameters used as penelties as keys and lists of parameter settings to try as values. This enables searching over any sequence of parameter settings. Params with length 1 will be omitted.\n",
  99. " 48 model_type : string\n",
  100. " 49 Typr of the problem, can be regression or classification.\n",
  101. " 50 NUM_TRIALS : integer\n",
  102. " 51 Number of random trials of outer cv loop. The default is 30.\n",
  103. " 52 datafile_y : string\n",
  104. " 53 Path of file storing y data. This parameter is optional depending on the given dataset file.\n",
  105. " 54 read_gm_from_file : boolean\n",
  106. " 55 Whether gram matrices are loaded from file.\n",
  107. " 56 \n",
  108. " 57 Examples\n",
  109. " 58 --------\n",
  110. " 59 >>> import numpy as np\n",
  111. " 60 >>> import sys\n",
  112. " 61 >>> sys.path.insert(0, \"../\")\n",
  113. " 62 >>> from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n",
  114. " 63 >>> from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n",
  115. " 64 >>>\n",
  116. " 65 >>> datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n",
  117. " 66 >>> estimator = weisfeilerlehmankernel\n",
  118. " 67 >>> param_grid_precomputed = {'height': [0,1,2,3,4,5,6,7,8,9,10], 'base_kernel': ['subtree']}\n",
  119. " 68 >>> param_grid = {\"alpha\": np.logspace(-2, 2, num = 10, base = 10)}\n",
  120. " 69 >>>\n",
  121. " 70 >>> model_selection_for_precomputed_kernel(datafile, estimator, param_grid_precomputed, param_grid, 'regression')\n",
  122. " 71 \"\"\"\n",
  123. " 72 115.2 MiB 0.0 MiB tqdm.monitor_interval = 0\n",
  124. " 73 \n",
  125. " 74 115.2 MiB 0.0 MiB results_dir = '../notebooks/results/' + estimator.__name__\n",
  126. " 75 115.2 MiB 0.0 MiB if not os.path.exists(results_dir):\n",
  127. " 76 os.makedirs(results_dir)\n",
  128. " 77 # a string to save all the results.\n",
  129. " 78 115.2 MiB 0.0 MiB str_fw = '###################### log time: ' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + '. ######################\\n\\n'\n",
  130. " 79 115.2 MiB 0.0 MiB str_fw += '# This file contains results of ' + estimator.__name__ + ' on dataset ' + ds_name + ',\\n# including gram matrices, serial numbers for gram matrix figures and performance.\\n\\n'\n",
  131. " 80 \n",
  132. " 81 # setup the model type\n",
  133. " 82 115.2 MiB 0.0 MiB model_type = model_type.lower()\n",
  134. " 83 115.2 MiB 0.0 MiB if model_type != 'regression' and model_type != 'classification':\n",
  135. " 84 raise Exception(\n",
  136. " 85 'The model type is incorrect! Please choose from regression or classification.'\n",
  137. " 86 )\n",
  138. " 87 115.2 MiB 0.0 MiB print()\n",
  139. " 88 115.2 MiB 0.0 MiB print('--- This is a %s problem ---' % model_type)\n",
  140. " 89 115.2 MiB 0.0 MiB str_fw += 'This is a %s problem.\\n' % model_type\n",
  141. " 90 \n",
  142. " 91 # calculate gram matrices rather than read them from file.\n",
  143. " 92 115.2 MiB 0.0 MiB if read_gm_from_file == False:\n",
  144. " 93 # Load the dataset\n",
  145. " 94 115.2 MiB 0.0 MiB print()\n",
  146. " 95 115.2 MiB 0.0 MiB print('\\n1. Loading dataset from file...')\n",
  147. " 96 115.2 MiB 0.0 MiB if isinstance(datafile, str):\n",
  148. " 97 115.2 MiB 0.0 MiB dataset, y_all = loadDataset(\n",
  149. " 98 116.3 MiB 1.1 MiB datafile, filename_y=datafile_y, extra_params=extra_params)\n",
  150. " 99 else: # load data directly from variable.\n",
  151. " 100 dataset = datafile\n",
  152. " 101 y_all = datafile_y \n",
  153. " 102 \n",
  154. " 103 # import matplotlib.pyplot as plt\n",
  155. " 104 # import networkx as nx\n",
  156. " 105 # nx.draw_networkx(dataset[30])\n",
  157. " 106 # plt.show()\n",
  158. " 107 \n",
  159. " 108 # Grid of parameters with a discrete number of values for each.\n",
  160. " 109 116.3 MiB 0.0 MiB param_list_precomputed = list(ParameterGrid(param_grid_precomputed))\n",
  161. " 110 116.3 MiB 0.0 MiB param_list = list(ParameterGrid(param_grid))\n",
  162. " 111 \n",
  163. " 112 116.3 MiB 0.0 MiB gram_matrices = [\n",
  164. " 113 ] # a list to store gram matrices for all param_grid_precomputed\n",
  165. " 114 116.3 MiB 0.0 MiB gram_matrix_time = [\n",
  166. " 115 ] # a list to store time to calculate gram matrices\n",
  167. " 116 116.3 MiB 0.0 MiB param_list_pre_revised = [\n",
  168. " 117 ] # list to store param grids precomputed ignoring the useless ones\n",
  169. " 118 \n",
  170. " 119 # calculate all gram matrices\n",
  171. " 120 116.3 MiB 0.0 MiB print()\n",
  172. " 121 116.3 MiB 0.0 MiB print('2. Calculating gram matrices. This could take a while...')\n",
  173. " 122 116.3 MiB 0.0 MiB str_fw += '\\nII. Gram matrices.\\n\\n'\n",
  174. " 123 116.3 MiB 0.0 MiB tts = time.time() # start training time\n",
  175. " 124 116.3 MiB 0.0 MiB nb_gm_ignore = 0 # the number of gram matrices those should not be considered, as they may contain elements that are not numbers (NaN)\n",
  176. " 125 145.3 MiB 0.0 MiB for idx, params_out in enumerate(param_list_precomputed):\n",
  177. " 126 116.3 MiB 0.0 MiB y = y_all[:]\n",
  178. " 127 116.3 MiB 0.0 MiB params_out['n_jobs'] = n_jobs\n",
  179. " 128 # print(dataset)\n",
  180. " 129 # import networkx as nx\n",
  181. " 130 # nx.draw_networkx(dataset[1])\n",
  182. " 131 # plt.show()\n",
  183. " 132 119.5 MiB 3.1 MiB rtn_data = estimator(dataset[:], **params_out)\n",
  184. " 133 119.5 MiB 0.0 MiB Kmatrix = rtn_data[0]\n",
  185. " 134 119.5 MiB 0.0 MiB current_run_time = rtn_data[1]\n",
  186. " 135 # for some kernels, some graphs in datasets may not meet the \n",
  187. " 136 # kernels' requirements for graph structure. These graphs are trimmed. \n",
  188. " 137 119.5 MiB 0.0 MiB if len(rtn_data) == 3:\n",
  189. " 138 119.5 MiB 0.0 MiB idx_trim = rtn_data[2] # the index of trimmed graph list\n",
  190. " 139 119.5 MiB 0.0 MiB y = [y[idxt] for idxt in idx_trim] # trim y accordingly\n",
  191. " 140 # Kmatrix = np.random.rand(2250, 2250)\n",
  192. " 141 # current_run_time = 0.1\n",
  193. " 142 \n",
  194. " 143 # remove graphs whose kernels with themselves are zeros\n",
  195. " 144 119.5 MiB 0.0 MiB Kmatrix_diag = Kmatrix.diagonal().copy()\n",
  196. " 145 119.5 MiB 0.0 MiB nb_g_ignore = 0\n",
  197. " 146 119.5 MiB 0.0 MiB for idxk, diag in enumerate(Kmatrix_diag):\n",
  198. " 147 119.5 MiB 0.0 MiB if diag == 0:\n",
  199. " 148 Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=0)\n",
  200. " 149 Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=1)\n",
  201. " 150 nb_g_ignore += 1\n",
  202. " 151 # normalization\n",
  203. " 152 119.5 MiB 0.0 MiB Kmatrix_diag = Kmatrix.diagonal().copy()\n",
  204. " 153 119.5 MiB 0.0 MiB for i in range(len(Kmatrix)):\n",
  205. " 154 119.5 MiB 0.0 MiB for j in range(i, len(Kmatrix)):\n",
  206. " 155 119.5 MiB 0.0 MiB Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])\n",
  207. " 156 119.5 MiB 0.0 MiB Kmatrix[j][i] = Kmatrix[i][j]\n",
  208. " 157 \n",
  209. " 158 119.5 MiB 0.0 MiB print()\n",
  210. " 159 119.5 MiB 0.0 MiB if params_out == {}:\n",
  211. " 160 print('the gram matrix is: ')\n",
  212. " 161 str_fw += 'the gram matrix is:\\n\\n'\n",
  213. " 162 else:\n",
  214. " 163 119.5 MiB 0.0 MiB print('the gram matrix with parameters', params_out, 'is: \\n\\n')\n",
  215. " 164 119.5 MiB 0.0 MiB str_fw += 'the gram matrix with parameters %s is:\\n\\n' % params_out\n",
  216. " 165 119.5 MiB 0.0 MiB if len(Kmatrix) < 2:\n",
  217. " 166 nb_gm_ignore += 1\n",
  218. " 167 print('ignored, as at most only one of all its diagonal value is non-zero.')\n",
  219. " 168 str_fw += 'ignored, as at most only one of all its diagonal value is non-zero.\\n\\n'\n",
  220. " 169 else: \n",
  221. " 170 119.5 MiB 0.0 MiB if np.isnan(Kmatrix).any(\n",
  222. " 171 ): # if the matrix contains elements that are not numbers\n",
  223. " 172 nb_gm_ignore += 1\n",
  224. " 173 print('ignored, as it contains elements that are not numbers.')\n",
  225. " 174 str_fw += 'ignored, as it contains elements that are not numbers.\\n\\n'\n",
  226. " 175 else:\n",
  227. " 176 # print(Kmatrix)\n",
  228. " 177 119.5 MiB 0.0 MiB str_fw += np.array2string(\n",
  229. " 178 119.5 MiB 0.0 MiB Kmatrix,\n",
  230. " 179 119.5 MiB 0.0 MiB separator=',') + '\\n\\n'\n",
  231. " 180 # separator=',',\n",
  232. " 181 # threshold=np.inf,\n",
  233. " 182 # floatmode='unique') + '\\n\\n'\n",
  234. " 183 \n",
  235. " 184 119.5 MiB 0.0 MiB fig_file_name = results_dir + '/GM[ds]' + ds_name\n",
  236. " 185 119.5 MiB 0.0 MiB if params_out != {}:\n",
  237. " 186 119.5 MiB 0.0 MiB fig_file_name += '[params]' + str(idx)\n",
  238. " 187 120.3 MiB 0.7 MiB plt.imshow(Kmatrix)\n",
  239. " 188 120.4 MiB 0.1 MiB plt.colorbar()\n",
  240. " 189 145.3 MiB 24.9 MiB plt.savefig(fig_file_name + '.eps', format='eps', dpi=300)\n",
  241. " 190 # plt.show()\n",
  242. " 191 145.3 MiB 0.0 MiB plt.clf()\n",
  243. " 192 145.3 MiB 0.0 MiB gram_matrices.append(Kmatrix)\n",
  244. " 193 145.3 MiB 0.0 MiB gram_matrix_time.append(current_run_time)\n",
  245. " 194 145.3 MiB 0.0 MiB param_list_pre_revised.append(params_out)\n",
  246. " 195 145.3 MiB 0.0 MiB if nb_g_ignore > 0:\n",
  247. " 196 print(', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore)\n",
  248. " 197 str_fw += ', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore\n",
  249. " 198 145.3 MiB 0.0 MiB print()\n",
  250. " 199 145.3 MiB 0.0 MiB print(\n",
  251. " 200 145.3 MiB 0.0 MiB '{} gram matrices are calculated, {} of which are ignored.'.format(\n",
  252. " 201 145.3 MiB 0.0 MiB len(param_list_precomputed), nb_gm_ignore))\n",
  253. " 202 145.3 MiB 0.0 MiB str_fw += '{} gram matrices are calculated, {} of which are ignored.\\n\\n'.format(len(param_list_precomputed), nb_gm_ignore)\n",
  254. " 203 145.3 MiB 0.0 MiB str_fw += 'serial numbers of gram matrix figures and their corresponding parameters settings:\\n\\n'\n",
  255. " 204 145.3 MiB 0.0 MiB str_fw += ''.join([\n",
  256. " 205 145.3 MiB 0.0 MiB '{}: {}\\n'.format(idx, params_out)\n",
  257. " 206 145.3 MiB 0.0 MiB for idx, params_out in enumerate(param_list_precomputed)\n",
  258. " 207 ])\n",
  259. " 208 \n",
  260. " 209 145.3 MiB 0.0 MiB print()\n",
  261. " 210 145.3 MiB 0.0 MiB if len(gram_matrices) == 0:\n",
  262. " 211 print('all gram matrices are ignored, no results obtained.')\n",
  263. " 212 str_fw += '\\nall gram matrices are ignored, no results obtained.\\n\\n'\n",
  264. " 213 else:\n",
  265. " 214 # save gram matrices to file.\n",
  266. " 215 145.4 MiB 0.1 MiB np.savez(results_dir + '/' + ds_name + '.gm', \n",
  267. " 216 145.4 MiB 0.0 MiB gms=gram_matrices, params=param_list_pre_revised, y=y, \n",
  268. " 217 145.4 MiB 0.0 MiB gmtime=gram_matrix_time)\n",
  269. " 218 \n",
  270. " 219 145.4 MiB 0.0 MiB print(\n",
  271. " 220 145.4 MiB 0.0 MiB '3. Fitting and predicting using nested cross validation. This could really take a while...'\n",
  272. " 221 )\n",
  273. " 222 \n",
  274. " 223 # ---- use pool.imap_unordered to parallel and track progress. ----\n",
  275. " 224 # train_pref = []\n",
  276. " 225 # val_pref = []\n",
  277. " 226 # test_pref = []\n",
  278. " 227 # def func_assign(result, var_to_assign):\n",
  279. " 228 # for idx, itm in enumerate(var_to_assign):\n",
  280. " 229 # itm.append(result[idx]) \n",
  281. " 230 # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, y, model_type)\n",
  282. " 231 # \n",
  283. " 232 # parallel_me(trial_do_partial, range(NUM_TRIALS), func_assign, \n",
  284. " 233 # [train_pref, val_pref, test_pref], glbv=gram_matrices,\n",
  285. " 234 # method='imap_unordered', n_jobs=n_jobs, chunksize=1,\n",
  286. " 235 # itr_desc='cross validation')\n",
  287. " 236 \n",
  288. " 237 145.4 MiB 0.0 MiB def init_worker(gms_toshare):\n",
  289. " 238 global G_gms\n",
  290. " 239 G_gms = gms_toshare\n",
  291. " 240 \n",
  292. " 241 # gram_matrices = np.array(gram_matrices)\n",
  293. " 242 # gms_shape = gram_matrices.shape\n",
  294. " 243 # gms_array = Array('d', np.reshape(gram_matrices.copy(), -1, order='C'))\n",
  295. " 244 # pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gms_array, gms_shape))\n",
  296. " 245 145.4 MiB 0.0 MiB pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,))\n",
  297. " 246 145.4 MiB 0.0 MiB trial_do_partial = partial(parallel_trial_do, param_list_pre_revised, param_list, y, model_type)\n",
  298. " 247 145.4 MiB 0.0 MiB train_pref = []\n",
  299. " 248 145.4 MiB 0.0 MiB val_pref = []\n",
  300. " 249 145.4 MiB 0.0 MiB test_pref = []\n",
  301. " 250 # if NUM_TRIALS < 1000 * n_jobs:\n",
  302. " 251 # chunksize = int(NUM_TRIALS / n_jobs) + 1\n",
  303. " 252 # else:\n",
  304. " 253 # chunksize = 1000\n",
  305. " 254 145.4 MiB 0.0 MiB chunksize = 1\n",
  306. " 255 145.4 MiB 0.0 MiB for o1, o2, o3 in tqdm(pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout):\n",
  307. " 256 145.4 MiB 0.0 MiB train_pref.append(o1)\n",
  308. " 257 145.4 MiB 0.0 MiB val_pref.append(o2)\n",
  309. " 258 145.4 MiB 0.0 MiB test_pref.append(o3)\n",
  310. " 259 145.4 MiB 0.0 MiB pool.close()\n",
  311. " 260 145.4 MiB 0.0 MiB pool.join()\n",
  312. " 261 \n",
  313. " 262 # # ---- use pool.map to parallel. ----\n",
  314. " 263 # pool = Pool(n_jobs)\n",
  315. " 264 # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y[0:250], model_type)\n",
  316. " 265 # result_perf = pool.map(trial_do_partial, range(NUM_TRIALS))\n",
  317. " 266 # train_pref = [item[0] for item in result_perf]\n",
  318. " 267 # val_pref = [item[1] for item in result_perf]\n",
  319. " 268 # test_pref = [item[2] for item in result_perf]\n",
  320. " 269 \n",
  321. " 270 # # ---- direct running, normally use a single CPU core. ----\n",
  322. " 271 # train_pref = []\n",
  323. " 272 # val_pref = []\n",
  324. " 273 # test_pref = []\n",
  325. " 274 # for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout):\n",
  326. " 275 # o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i)\n",
  327. " 276 # train_pref.append(o1)\n",
  328. " 277 # val_pref.append(o2)\n",
  329. " 278 # test_pref.append(o3)\n",
  330. " 279 # print()\n",
  331. " 280 \n",
  332. " 281 145.4 MiB 0.0 MiB print()\n",
  333. " 282 145.4 MiB 0.0 MiB print('4. Getting final performance...')\n",
  334. " 283 145.4 MiB 0.0 MiB str_fw += '\\nIII. Performance.\\n\\n'\n",
  335. " 284 # averages and confidences of performances on outer trials for each combination of parameters\n",
  336. " 285 145.4 MiB 0.0 MiB average_train_scores = np.mean(train_pref, axis=0)\n",
  337. " 286 # print('val_pref: ', val_pref[0][0])\n",
  338. " 287 145.4 MiB 0.0 MiB average_val_scores = np.mean(val_pref, axis=0)\n",
  339. " 288 # print('test_pref: ', test_pref[0][0])\n",
  340. " 289 145.4 MiB 0.0 MiB average_perf_scores = np.mean(test_pref, axis=0)\n",
  341. " 290 # sample std is used here\n",
  342. " 291 145.4 MiB 0.0 MiB std_train_scores = np.std(train_pref, axis=0, ddof=1)\n",
  343. " 292 145.4 MiB 0.0 MiB std_val_scores = np.std(val_pref, axis=0, ddof=1)\n",
  344. " 293 145.4 MiB 0.0 MiB std_perf_scores = np.std(test_pref, axis=0, ddof=1)\n",
  345. " 294 \n",
  346. " 295 145.4 MiB 0.0 MiB if model_type == 'regression':\n",
  347. " 296 145.4 MiB 0.0 MiB best_val_perf = np.amin(average_val_scores)\n",
  348. " 297 else:\n",
  349. " 298 best_val_perf = np.amax(average_val_scores)\n",
  350. " 299 # print('average_val_scores: ', average_val_scores)\n",
  351. " 300 # print('best_val_perf: ', best_val_perf)\n",
  352. " 301 # print()\n",
  353. " 302 145.4 MiB 0.0 MiB best_params_index = np.where(average_val_scores == best_val_perf)\n",
  354. " 303 # find smallest val std with best val perf.\n",
  355. " 304 best_val_stds = [\n",
  356. " 305 145.4 MiB 0.0 MiB std_val_scores[value][best_params_index[1][idx]]\n",
  357. " 306 145.4 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n",
  358. " 307 ]\n",
  359. " 308 145.4 MiB 0.0 MiB min_val_std = np.amin(best_val_stds)\n",
  360. " 309 145.4 MiB 0.0 MiB best_params_index = np.where(std_val_scores == min_val_std)\n",
  361. " 310 best_params_out = [\n",
  362. " 311 145.4 MiB 0.0 MiB param_list_pre_revised[i] for i in best_params_index[0]\n",
  363. " 312 ]\n",
  364. " 313 145.4 MiB 0.0 MiB best_params_in = [param_list[i] for i in best_params_index[1]]\n",
  365. " 314 145.4 MiB 0.0 MiB print('best_params_out: ', best_params_out)\n",
  366. " 315 145.4 MiB 0.0 MiB print('best_params_in: ', best_params_in)\n",
  367. " 316 145.4 MiB 0.0 MiB print()\n",
  368. " 317 145.4 MiB 0.0 MiB print('best_val_perf: ', best_val_perf)\n",
  369. " 318 145.4 MiB 0.0 MiB print('best_val_std: ', min_val_std)\n",
  370. " 319 145.4 MiB 0.0 MiB str_fw += 'best settings of hyper-params to build gram matrix: %s\\n' % best_params_out\n",
  371. " 320 145.4 MiB 0.0 MiB str_fw += 'best settings of other hyper-params: %s\\n\\n' % best_params_in\n",
  372. " 321 145.4 MiB 0.0 MiB str_fw += 'best_val_perf: %s\\n' % best_val_perf\n",
  373. " 322 145.4 MiB 0.0 MiB str_fw += 'best_val_std: %s\\n' % min_val_std\n",
  374. " 323 \n",
  375. " 324 # print(best_params_index)\n",
  376. " 325 # print(best_params_index[0])\n",
  377. " 326 # print(average_perf_scores)\n",
  378. " 327 final_performance = [\n",
  379. " 328 145.4 MiB 0.0 MiB average_perf_scores[value][best_params_index[1][idx]]\n",
  380. " 329 145.4 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n",
  381. " 330 ]\n",
  382. " 331 final_confidence = [\n",
  383. " 332 145.4 MiB 0.0 MiB std_perf_scores[value][best_params_index[1][idx]]\n",
  384. " 333 145.4 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n",
  385. " 334 ]\n",
  386. " 335 145.4 MiB 0.0 MiB print('final_performance: ', final_performance)\n",
  387. " 336 145.4 MiB 0.0 MiB print('final_confidence: ', final_confidence)\n",
  388. " 337 145.4 MiB 0.0 MiB str_fw += 'final_performance: %s\\n' % final_performance\n",
  389. " 338 145.4 MiB 0.0 MiB str_fw += 'final_confidence: %s\\n' % final_confidence\n",
  390. " 339 train_performance = [\n",
  391. " 340 145.4 MiB 0.0 MiB average_train_scores[value][best_params_index[1][idx]]\n",
  392. " 341 145.4 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n",
  393. " 342 ]\n",
  394. " 343 train_std = [\n",
  395. " 344 145.4 MiB 0.0 MiB std_train_scores[value][best_params_index[1][idx]]\n",
  396. " 345 145.4 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n",
  397. " 346 ]\n",
  398. " 347 145.4 MiB 0.0 MiB print('train_performance: %s' % train_performance)\n",
  399. " 348 145.4 MiB 0.0 MiB print('train_std: ', train_std)\n",
  400. " 349 145.4 MiB 0.0 MiB str_fw += 'train_performance: %s\\n' % train_performance\n",
  401. " 350 145.4 MiB 0.0 MiB str_fw += 'train_std: %s\\n\\n' % train_std\n",
  402. " 351 \n",
  403. " 352 145.4 MiB 0.0 MiB print()\n",
  404. " 353 145.4 MiB 0.0 MiB tt_total = time.time() - tts # training time for all hyper-parameters\n",
  405. " 354 145.4 MiB 0.0 MiB average_gram_matrix_time = np.mean(gram_matrix_time)\n",
  406. " 355 145.4 MiB 0.0 MiB std_gram_matrix_time = np.std(gram_matrix_time, ddof=1)\n",
  407. " 356 best_gram_matrix_time = [\n",
  408. " 357 145.4 MiB 0.0 MiB gram_matrix_time[i] for i in best_params_index[0]\n",
  409. " 358 ]\n",
  410. " 359 145.4 MiB 0.0 MiB ave_bgmt = np.mean(best_gram_matrix_time)\n",
  411. " 360 145.4 MiB 0.0 MiB std_bgmt = np.std(best_gram_matrix_time, ddof=1)\n",
  412. " 361 145.4 MiB 0.0 MiB print(\n",
  413. " 362 145.4 MiB 0.0 MiB 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'\n",
  414. " 363 145.4 MiB 0.0 MiB .format(average_gram_matrix_time, std_gram_matrix_time))\n",
  415. " 364 145.4 MiB 0.0 MiB print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format(\n",
  416. " 365 145.4 MiB 0.0 MiB ave_bgmt, std_bgmt))\n",
  417. " 366 145.4 MiB 0.0 MiB print(\n",
  418. " 367 145.4 MiB 0.0 MiB 'total training time with all hyper-param choices: {:.2f}s'.format(\n",
  419. " 368 145.4 MiB 0.0 MiB tt_total))\n",
  420. " 369 145.4 MiB 0.0 MiB str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\\n'.format(average_gram_matrix_time, std_gram_matrix_time)\n",
  421. " 370 145.4 MiB 0.0 MiB str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\\n'.format(ave_bgmt, std_bgmt)\n",
  422. " 371 145.4 MiB 0.0 MiB str_fw += 'total training time with all hyper-param choices: {:.2f}s\\n\\n'.format(tt_total)\n",
  423. " 372 \n",
  424. " 373 # # save results to file\n",
  425. " 374 # np.savetxt(results_name_pre + 'average_train_scores.dt',\n",
  426. " 375 # average_train_scores)\n",
  427. " 376 # np.savetxt(results_name_pre + 'average_val_scores', average_val_scores)\n",
  428. " 377 # np.savetxt(results_name_pre + 'average_perf_scores.dt',\n",
  429. " 378 # average_perf_scores)\n",
  430. " 379 # np.savetxt(results_name_pre + 'std_train_scores.dt', std_train_scores)\n",
  431. " 380 # np.savetxt(results_name_pre + 'std_val_scores.dt', std_val_scores)\n",
  432. " 381 # np.savetxt(results_name_pre + 'std_perf_scores.dt', std_perf_scores)\n",
  433. " 382 \n",
  434. " 383 # np.save(results_name_pre + 'best_params_index', best_params_index)\n",
  435. " 384 # np.save(results_name_pre + 'best_params_pre.dt', best_params_out)\n",
  436. " 385 # np.save(results_name_pre + 'best_params_in.dt', best_params_in)\n",
  437. " 386 # np.save(results_name_pre + 'best_val_perf.dt', best_val_perf)\n",
  438. " 387 # np.save(results_name_pre + 'best_val_std.dt', best_val_std)\n",
  439. " 388 # np.save(results_name_pre + 'final_performance.dt', final_performance)\n",
  440. " 389 # np.save(results_name_pre + 'final_confidence.dt', final_confidence)\n",
  441. " 390 # np.save(results_name_pre + 'train_performance.dt', train_performance)\n",
  442. " 391 # np.save(results_name_pre + 'train_std.dt', train_std)\n",
  443. " 392 \n",
  444. " 393 # np.save(results_name_pre + 'gram_matrix_time.dt', gram_matrix_time)\n",
  445. " 394 # np.save(results_name_pre + 'average_gram_matrix_time.dt',\n",
  446. " 395 # average_gram_matrix_time)\n",
  447. " 396 # np.save(results_name_pre + 'std_gram_matrix_time.dt',\n",
  448. " 397 # std_gram_matrix_time)\n",
  449. " 398 # np.save(results_name_pre + 'best_gram_matrix_time.dt',\n",
  450. " 399 # best_gram_matrix_time)\n",
  451. " 400 \n",
  452. " 401 # print out as table.\n",
  453. " 402 145.4 MiB 0.0 MiB from collections import OrderedDict\n",
  454. " 403 145.4 MiB 0.0 MiB from tabulate import tabulate\n",
  455. " 404 145.4 MiB 0.0 MiB table_dict = {}\n",
  456. " 405 145.4 MiB 0.0 MiB if model_type == 'regression':\n",
  457. " 406 145.6 MiB 0.0 MiB for param_in in param_list:\n",
  458. " 407 145.6 MiB 0.2 MiB param_in['alpha'] = '{:.2e}'.format(param_in['alpha'])\n",
  459. " 408 else:\n",
  460. " 409 for param_in in param_list:\n",
  461. " 410 param_in['C'] = '{:.2e}'.format(param_in['C'])\n",
  462. " 411 145.6 MiB 0.0 MiB table_dict['params'] = [{**param_out, **param_in}\n",
  463. " 412 145.6 MiB 0.0 MiB for param_in in param_list for param_out in param_list_pre_revised]\n",
  464. " 413 table_dict['gram_matrix_time'] = [\n",
  465. " 414 145.6 MiB 0.0 MiB '{:.2f}'.format(gram_matrix_time[index_out])\n",
  466. " 415 145.6 MiB 0.0 MiB for param_in in param_list\n",
  467. " 416 145.6 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n",
  468. " 417 ]\n",
  469. " 418 table_dict['valid_perf'] = [\n",
  470. " 419 145.6 MiB 0.0 MiB '{:.2f}±{:.2f}'.format(average_val_scores[index_out][index_in],\n",
  471. " 420 std_val_scores[index_out][index_in])\n",
  472. " 421 145.6 MiB 0.0 MiB for index_in, _ in enumerate(param_list)\n",
  473. " 422 145.6 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n",
  474. " 423 ]\n",
  475. " 424 table_dict['test_perf'] = [\n",
  476. " 425 145.6 MiB 0.0 MiB '{:.2f}±{:.2f}'.format(average_perf_scores[index_out][index_in],\n",
  477. " 426 std_perf_scores[index_out][index_in])\n",
  478. " 427 145.6 MiB 0.0 MiB for index_in, _ in enumerate(param_list)\n",
  479. " 428 145.6 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n",
  480. " 429 ]\n",
  481. " 430 table_dict['train_perf'] = [\n",
  482. " 431 145.6 MiB 0.0 MiB '{:.2f}±{:.2f}'.format(average_train_scores[index_out][index_in],\n",
  483. " 432 std_train_scores[index_out][index_in])\n",
  484. " 433 145.6 MiB 0.0 MiB for index_in, _ in enumerate(param_list)\n",
  485. " 434 145.6 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n",
  486. " 435 ]\n",
  487. " 436 keyorder = [\n",
  488. " 437 145.6 MiB 0.0 MiB 'params', 'train_perf', 'valid_perf', 'test_perf',\n",
  489. " 438 145.6 MiB 0.0 MiB 'gram_matrix_time'\n",
  490. " 439 ]\n",
  491. " 440 145.6 MiB 0.0 MiB print()\n",
  492. " 441 145.6 MiB 0.0 MiB tb_print = tabulate(\n",
  493. " 442 145.6 MiB 0.0 MiB OrderedDict(\n",
  494. " 443 145.6 MiB 0.0 MiB sorted(table_dict.items(),\n",
  495. " 444 145.6 MiB 0.0 MiB key=lambda i: keyorder.index(i[0]))),\n",
  496. " 445 145.6 MiB 0.0 MiB headers='keys')\n",
  497. " 446 # print(tb_print)\n",
  498. " 447 145.6 MiB 0.0 MiB str_fw += 'table of performance v.s. hyper-params:\\n\\n%s\\n\\n' % tb_print\n",
  499. " 448 \n",
  500. " 449 # read gram matrices from file.\n",
  501. " 450 else: \n",
  502. " 451 # Grid of parameters with a discrete number of values for each.\n",
  503. " 452 # param_list_precomputed = list(ParameterGrid(param_grid_precomputed))\n",
  504. " 453 param_list = list(ParameterGrid(param_grid))\n",
  505. " 454 \n",
  506. " 455 # read gram matrices from file.\n",
  507. " 456 print()\n",
  508. " 457 print('2. Reading gram matrices from file...')\n",
  509. " 458 str_fw += '\\nII. Gram matrices.\\n\\nGram matrices are read from file, see last log for detail.\\n'\n",
  510. " 459 gmfile = np.load(results_dir + '/' + ds_name + '.gm.npz')\n",
  511. " 460 gram_matrices = gmfile['gms'] # a list to store gram matrices for all param_grid_precomputed\n",
  512. " 461 gram_matrix_time = gmfile['gmtime'] # time used to compute the gram matrices\n",
  513. " 462 param_list_pre_revised = gmfile['params'] # list to store param grids precomputed ignoring the useless ones\n",
  514. " 463 y = gmfile['y'].tolist()\n",
  515. " 464 \n",
  516. " 465 tts = time.time() # start training time\n",
  517. " 466 # nb_gm_ignore = 0 # the number of gram matrices those should not be considered, as they may contain elements that are not numbers (NaN) \n",
  518. " 467 print(\n",
  519. " 468 '3. Fitting and predicting using nested cross validation. This could really take a while...'\n",
  520. " 469 )\n",
  521. " 470 \n",
  522. " 471 # ---- use pool.imap_unordered to parallel and track progress. ----\n",
  523. " 472 def init_worker(gms_toshare):\n",
  524. " 473 global G_gms\n",
  525. " 474 G_gms = gms_toshare\n",
  526. " 475 \n",
  527. " 476 pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,))\n",
  528. " 477 trial_do_partial = partial(parallel_trial_do, param_list_pre_revised, param_list, y, model_type)\n",
  529. " 478 train_pref = []\n",
  530. " 479 val_pref = []\n",
  531. " 480 test_pref = []\n",
  532. " 481 chunksize = 1\n",
  533. " 482 for o1, o2, o3 in tqdm(pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout):\n",
  534. " 483 train_pref.append(o1)\n",
  535. " 484 val_pref.append(o2)\n",
  536. " 485 test_pref.append(o3)\n",
  537. " 486 pool.close()\n",
  538. " 487 pool.join()\n",
  539. " 488 \n",
  540. " 489 # # ---- use pool.map to parallel. ----\n",
  541. " 490 # result_perf = pool.map(trial_do_partial, range(NUM_TRIALS))\n",
  542. " 491 # train_pref = [item[0] for item in result_perf]\n",
  543. " 492 # val_pref = [item[1] for item in result_perf]\n",
  544. " 493 # test_pref = [item[2] for item in result_perf]\n",
  545. " 494 \n",
  546. " 495 # # ---- use joblib.Parallel to parallel and track progress. ----\n",
  547. " 496 # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y, model_type)\n",
  548. " 497 # result_perf = Parallel(n_jobs=n_jobs, verbose=10)(delayed(trial_do_partial)(trial) for trial in range(NUM_TRIALS))\n",
  549. " 498 # train_pref = [item[0] for item in result_perf]\n",
  550. " 499 # val_pref = [item[1] for item in result_perf]\n",
  551. " 500 # test_pref = [item[2] for item in result_perf]\n",
  552. " 501 \n",
  553. " 502 # # ---- direct running, normally use a single CPU core. ----\n",
  554. " 503 # train_pref = []\n",
  555. " 504 # val_pref = []\n",
  556. " 505 # test_pref = []\n",
  557. " 506 # for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout):\n",
  558. " 507 # o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i)\n",
  559. " 508 # train_pref.append(o1)\n",
  560. " 509 # val_pref.append(o2)\n",
  561. " 510 # test_pref.append(o3)\n",
  562. " 511 \n",
  563. " 512 print()\n",
  564. " 513 print('4. Getting final performance...')\n",
  565. " 514 str_fw += '\\nIII. Performance.\\n\\n'\n",
  566. " 515 # averages and confidences of performances on outer trials for each combination of parameters\n",
  567. " 516 average_train_scores = np.mean(train_pref, axis=0)\n",
  568. " 517 average_val_scores = np.mean(val_pref, axis=0)\n",
  569. " 518 average_perf_scores = np.mean(test_pref, axis=0)\n",
  570. " 519 # sample std is used here\n",
  571. " 520 std_train_scores = np.std(train_pref, axis=0, ddof=1)\n",
  572. " 521 std_val_scores = np.std(val_pref, axis=0, ddof=1)\n",
  573. " 522 std_perf_scores = np.std(test_pref, axis=0, ddof=1)\n",
  574. " 523 \n",
  575. " 524 if model_type == 'regression':\n",
  576. " 525 best_val_perf = np.amin(average_val_scores)\n",
  577. " 526 else:\n",
  578. " 527 best_val_perf = np.amax(average_val_scores)\n",
  579. " 528 best_params_index = np.where(average_val_scores == best_val_perf)\n",
  580. " 529 # find smallest val std with best val perf.\n",
  581. " 530 best_val_stds = [\n",
  582. " 531 std_val_scores[value][best_params_index[1][idx]]\n",
  583. " 532 for idx, value in enumerate(best_params_index[0])\n",
  584. " 533 ]\n",
  585. " 534 min_val_std = np.amin(best_val_stds)\n",
  586. " 535 best_params_index = np.where(std_val_scores == min_val_std)\n",
  587. " 536 best_params_out = [\n",
  588. " 537 param_list_pre_revised[i] for i in best_params_index[0]\n",
  589. " 538 ]\n",
  590. " 539 best_params_in = [param_list[i] for i in best_params_index[1]]\n",
  591. " 540 print('best_params_out: ', best_params_out)\n",
  592. " 541 print('best_params_in: ', best_params_in)\n",
  593. " 542 print()\n",
  594. " 543 print('best_val_perf: ', best_val_perf)\n",
  595. " 544 print('best_val_std: ', min_val_std)\n",
  596. " 545 str_fw += 'best settings of hyper-params to build gram matrix: %s\\n' % best_params_out\n",
  597. " 546 str_fw += 'best settings of other hyper-params: %s\\n\\n' % best_params_in\n",
  598. " 547 str_fw += 'best_val_perf: %s\\n' % best_val_perf\n",
  599. " 548 str_fw += 'best_val_std: %s\\n' % min_val_std\n",
  600. " 549 \n",
  601. " 550 final_performance = [\n",
  602. " 551 average_perf_scores[value][best_params_index[1][idx]]\n",
  603. " 552 for idx, value in enumerate(best_params_index[0])\n",
  604. " 553 ]\n",
  605. " 554 final_confidence = [\n",
  606. " 555 std_perf_scores[value][best_params_index[1][idx]]\n",
  607. " 556 for idx, value in enumerate(best_params_index[0])\n",
  608. " 557 ]\n",
  609. " 558 print('final_performance: ', final_performance)\n",
  610. " 559 print('final_confidence: ', final_confidence)\n",
  611. " 560 str_fw += 'final_performance: %s\\n' % final_performance\n",
  612. " 561 str_fw += 'final_confidence: %s\\n' % final_confidence\n",
  613. " 562 train_performance = [\n",
  614. " 563 average_train_scores[value][best_params_index[1][idx]]\n",
  615. " 564 for idx, value in enumerate(best_params_index[0])\n",
  616. " 565 ]\n",
  617. " 566 train_std = [\n",
  618. " 567 std_train_scores[value][best_params_index[1][idx]]\n",
  619. " 568 for idx, value in enumerate(best_params_index[0])\n",
  620. " 569 ]\n",
  621. " 570 print('train_performance: %s' % train_performance)\n",
  622. " 571 print('train_std: ', train_std)\n",
  623. " 572 str_fw += 'train_performance: %s\\n' % train_performance\n",
  624. " 573 str_fw += 'train_std: %s\\n\\n' % train_std\n",
  625. " 574 \n",
  626. " 575 print()\n",
  627. " 576 average_gram_matrix_time = np.mean(gram_matrix_time)\n",
  628. " 577 std_gram_matrix_time = np.std(gram_matrix_time, ddof=1)\n",
  629. " 578 best_gram_matrix_time = [\n",
  630. " 579 gram_matrix_time[i] for i in best_params_index[0]\n",
  631. " 580 ]\n",
  632. " 581 ave_bgmt = np.mean(best_gram_matrix_time)\n",
  633. " 582 std_bgmt = np.std(best_gram_matrix_time, ddof=1)\n",
  634. " 583 print(\n",
  635. " 584 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'\n",
  636. " 585 .format(average_gram_matrix_time, std_gram_matrix_time))\n",
  637. " 586 print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format(\n",
  638. " 587 ave_bgmt, std_bgmt))\n",
  639. " 588 tt_poster = time.time() - tts # training time with hyper-param choices who did not participate in calculation of gram matrices\n",
  640. " 589 print(\n",
  641. " 590 'training time with hyper-param choices who did not participate in calculation of gram matrices: {:.2f}s'.format(\n",
  642. " 591 tt_poster))\n",
  643. " 592 print('total training time with all hyper-param choices: {:.2f}s'.format(\n",
  644. " 593 tt_poster + np.sum(gram_matrix_time)))\n",
  645. " 594 # str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\\n'.format(average_gram_matrix_time, std_gram_matrix_time)\n",
  646. " 595 # str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\\n'.format(ave_bgmt, std_bgmt)\n",
  647. " 596 str_fw += 'training time with hyper-param choices who did not participate in calculation of gram matrices: {:.2f}s\\n\\n'.format(tt_poster)\n",
  648. " 597 \n",
  649. " 598 # print out as table.\n",
  650. " 599 from collections import OrderedDict\n",
  651. " 600 from tabulate import tabulate\n",
  652. " 601 table_dict = {}\n",
  653. " 602 if model_type == 'regression':\n",
  654. " 603 for param_in in param_list:\n",
  655. " 604 param_in['alpha'] = '{:.2e}'.format(param_in['alpha'])\n",
  656. " 605 else:\n",
  657. " 606 for param_in in param_list:\n",
  658. " 607 param_in['C'] = '{:.2e}'.format(param_in['C'])\n",
  659. " 608 table_dict['params'] = [{**param_out, **param_in}\n",
  660. " 609 for param_in in param_list for param_out in param_list_pre_revised]\n",
  661. " 610 # table_dict['gram_matrix_time'] = [\n",
  662. " 611 # '{:.2f}'.format(gram_matrix_time[index_out])\n",
  663. " 612 # for param_in in param_list\n",
  664. " 613 # for index_out, _ in enumerate(param_list_pre_revised)\n",
  665. " 614 # ]\n",
  666. " 615 table_dict['valid_perf'] = [\n",
  667. " 616 '{:.2f}±{:.2f}'.format(average_val_scores[index_out][index_in],\n",
  668. " 617 std_val_scores[index_out][index_in])\n",
  669. " 618 for index_in, _ in enumerate(param_list)\n",
  670. " 619 for index_out, _ in enumerate(param_list_pre_revised)\n",
  671. " 620 ]\n",
  672. " 621 table_dict['test_perf'] = [\n",
  673. " 622 '{:.2f}±{:.2f}'.format(average_perf_scores[index_out][index_in],\n",
  674. " 623 std_perf_scores[index_out][index_in])\n",
  675. " 624 for index_in, _ in enumerate(param_list)\n",
  676. " 625 for index_out, _ in enumerate(param_list_pre_revised)\n",
  677. " 626 ]\n",
  678. " 627 table_dict['train_perf'] = [\n",
  679. " 628 '{:.2f}±{:.2f}'.format(average_train_scores[index_out][index_in],\n",
  680. " 629 std_train_scores[index_out][index_in])\n",
  681. " 630 for index_in, _ in enumerate(param_list)\n",
  682. " 631 for index_out, _ in enumerate(param_list_pre_revised)\n",
  683. " 632 ]\n",
  684. " 633 keyorder = [\n",
  685. " 634 'params', 'train_perf', 'valid_perf', 'test_perf'\n",
  686. " 635 ]\n",
  687. " 636 print()\n",
  688. " 637 tb_print = tabulate(\n",
  689. " 638 OrderedDict(\n",
  690. " 639 sorted(table_dict.items(),\n",
  691. " 640 key=lambda i: keyorder.index(i[0]))),\n",
  692. " 641 headers='keys')\n",
  693. " 642 # print(tb_print)\n",
  694. " 643 str_fw += 'table of performance v.s. hyper-params:\\n\\n%s\\n\\n' % tb_print\n",
  695. " 644 \n",
  696. " 645 # open file to save all results for this dataset.\n",
  697. " 646 if not os.path.exists(results_dir):\n",
  698. " 647 os.makedirs(results_dir)\n",
  699. " 648 \n",
  700. " 649 # open file to save all results for this dataset.\n",
  701. " 650 145.6 MiB 0.0 MiB if not os.path.exists(results_dir + '/' + ds_name + '.output.txt'):\n",
  702. " 651 with open(results_dir + '/' + ds_name + '.output.txt', 'w') as f:\n",
  703. " 652 f.write(str_fw)\n",
  704. " 653 else:\n",
  705. " 654 145.6 MiB 0.0 MiB with open(results_dir + '/' + ds_name + '.output.txt', 'r+') as f:\n",
  706. " 655 145.6 MiB 0.0 MiB content = f.read()\n",
  707. " 656 145.6 MiB 0.0 MiB f.seek(0, 0)\n",
  708. " 657 145.6 MiB 0.0 MiB f.write(str_fw + '\\n\\n\\n' + content)\n",
  709. "\n",
  710. "\n",
  711. "\n"
  712. ]
  713. }
  714. ],
  715. "source": [
  716. "import functools\n",
  717. "import sys\n",
  718. "sys.path.insert(0, \"../\")\n",
  719. "sys.path.insert(0, \"../../\")\n",
  720. "from libs import *\n",
  721. "import multiprocessing\n",
  722. "\n",
  723. "from pygraph.kernels.spKernel import spkernel\n",
  724. "from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n",
  725. "#from pygraph.utils.model_selection_precomputed import trial_do\n",
  726. "\n",
  727. "dslist = [\n",
  728. " {'name': 'Acyclic', 'dataset': '../../datasets/acyclic/dataset_bps.ds',\n",
  729. " 'task': 'regression'}, # node symb\n",
  730. "# {'name': 'Alkane', 'dataset': '../../datasets/Alkane/dataset.ds', 'task': 'regression',\n",
  731. "# 'dataset_y': '../../datasets/Alkane/dataset_boiling_point_names.txt', }, \n",
  732. "# # contains single node graph, node symb\n",
  733. "# {'name': 'MAO', 'dataset': '../../datasets/MAO/dataset.ds', }, # node/edge symb\n",
  734. "# {'name': 'PAH', 'dataset': '../../datasets/PAH/dataset.ds', }, # unlabeled\n",
  735. "# {'name': 'MUTAG', 'dataset': '../../datasets/MUTAG/MUTAG.mat',\n",
  736. "# 'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}}, # node/edge symb\n",
  737. "# {'name': 'Letter-med', 'dataset': '../../datasets/Letter-med/Letter-med_A.txt'},\n",
  738. "# # node nsymb\n",
  739. "# {'name': 'ENZYMES', 'dataset': '../../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},\n",
  740. "# # node symb/nsymb\n",
  741. "# {'name': 'Mutagenicity', 'dataset': '../../datasets/Mutagenicity/Mutagenicity_A.txt'},\n",
  742. "# # node/edge symb\n",
  743. "# {'name': 'D&D', 'dataset': '../../datasets/D&D/DD.mat',\n",
  744. "# 'extra_params': {'am_sp_al_nl_el': [0, 1, 2, 1, -1]}}, # node symb\n",
  745. "\n",
  746. " # {'name': 'COIL-DEL', 'dataset': '../../datasets/COIL-DEL/COIL-DEL_A.txt'}, # edge symb, node nsymb\n",
  747. " # # # {'name': 'BZR', 'dataset': '../../datasets/BZR_txt/BZR_A_sparse.txt'}, # node symb/nsymb\n",
  748. " # # # {'name': 'COX2', 'dataset': '../../datasets/COX2_txt/COX2_A_sparse.txt'}, # node symb/nsymb\n",
  749. " # {'name': 'Fingerprint', 'dataset': '../../datasets/Fingerprint/Fingerprint_A.txt'},\n",
  750. " #\n",
  751. " # # {'name': 'DHFR', 'dataset': '../../datasets/DHFR_txt/DHFR_A_sparse.txt'}, # node symb/nsymb\n",
  752. " # # {'name': 'SYNTHETIC', 'dataset': '../../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'}, # node symb/nsymb\n",
  753. " # # {'name': 'MSRC9', 'dataset': '../../datasets/MSRC_9_txt/MSRC_9_A.txt'}, # node symb\n",
  754. " # # {'name': 'MSRC21', 'dataset': '../../datasets/MSRC_21_txt/MSRC_21_A.txt'}, # node symb\n",
  755. " # # {'name': 'FIRSTMM_DB', 'dataset': '../../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'}, # node symb/nsymb ,edge nsymb\n",
  756. "\n",
  757. " # # {'name': 'PROTEINS', 'dataset': '../../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'}, # node symb/nsymb\n",
  758. " # # {'name': 'PROTEINS_full', 'dataset': '../../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'}, # node symb/nsymb\n",
  759. " # # {'name': 'AIDS', 'dataset': '../../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb\n",
  760. " # {'name': 'NCI1', 'dataset': '../../datasets/NCI1/NCI1.mat',\n",
  761. " # 'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
  762. " # {'name': 'NCI109', 'dataset': '../../datasets/NCI109/NCI109.mat',\n",
  763. " # 'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
  764. " # {'name': 'NCI-HIV', 'dataset': '../../datasets/NCI-HIV/AIDO99SD.sdf',\n",
  765. " # 'dataset_y': '../../datasets/NCI-HIV/aids_conc_may04.txt',}, # node/edge symb\n",
  766. "\n",
  767. " # # not working below\n",
  768. " # {'name': 'PTC_FM', 'dataset': '../../datasets/PTC/Train/FM.ds',},\n",
  769. " # {'name': 'PTC_FR', 'dataset': '../../datasets/PTC/Train/FR.ds',},\n",
  770. " # {'name': 'PTC_MM', 'dataset': '../../datasets/PTC/Train/MM.ds',},\n",
  771. " # {'name': 'PTC_MR', 'dataset': '../../datasets/PTC/Train/MR.ds',},\n",
  772. "]\n",
  773. "estimator = spkernel\n",
  774. "mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)\n",
  775. "param_grid_precomputed = {'node_kernels': [\n",
  776. " {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}]}\n",
  777. "param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},\n",
  778. " {'alpha': np.logspace(-10, 10, num=41, base=10)}]\n",
  779. "\n",
  780. "for ds in dslist:\n",
  781. " print()\n",
  782. " print(ds['name'])\n",
  783. " model_selection_for_precomputed_kernel(\n",
  784. " ds['dataset'],\n",
  785. " estimator,\n",
  786. " param_grid_precomputed,\n",
  787. " (param_grid[1] if ('task' in ds and ds['task']\n",
  788. " == 'regression') else param_grid[0]),\n",
  789. " (ds['task'] if 'task' in ds else 'classification'),\n",
  790. " NUM_TRIALS=30,\n",
  791. " datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),\n",
  792. " extra_params=(ds['extra_params'] if 'extra_params' in ds else None),\n",
  793. " ds_name=ds['name'],\n",
  794. " n_jobs=multiprocessing.cpu_count(),\n",
  795. " read_gm_from_file=False)\n",
  796. " print()"
  797. ]
  798. }
  799. ],
  800. "metadata": {
  801. "kernelspec": {
  802. "display_name": "Python 3",
  803. "language": "python",
  804. "name": "python3"
  805. },
  806. "language_info": {
  807. "codemirror_mode": {
  808. "name": "ipython",
  809. "version": 3
  810. },
  811. "file_extension": ".py",
  812. "mimetype": "text/x-python",
  813. "name": "python",
  814. "nbconvert_exporter": "python",
  815. "pygments_lexer": "ipython3",
  816. "version": "3.6.7"
  817. }
  818. },
  819. "nbformat": 4,
  820. "nbformat_minor": 2
  821. }
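Note on the "16.95±nans" lines in the timing summary above: only one gram matrix is computed in this run, and the sample standard deviation (ddof=1) of a single value divides by len - 1 == 0, which yields NaN and the two NumPy RuntimeWarnings captured in the stderr stream. A minimal sketch reproducing this, assuming plain NumPy outside the notebook:

import numpy as np

gram_matrix_time = [16.95]  # one hyper-param choice, hence a single timing
print(np.mean(gram_matrix_time))         # 16.95
print(np.std(gram_matrix_time, ddof=1))  # nan, with the RuntimeWarnings shown above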

A Python package for graph kernels, graph edit distances and the graph pre-image problem.
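The normalization loop in the profiled source above (Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])) is the standard cosine normalization of a gram matrix, which scales every graph to unit self-similarity. A vectorized sketch of the same step, assuming a symmetric NumPy array K with a strictly positive diagonal:

import numpy as np

def normalize_gram_matrix(K):
    # divide each entry K[i, j] by sqrt(K[i, i] * K[j, j]);
    # afterwards every diagonal entry equals 1.
    d = np.sqrt(np.diag(K))
    return K / np.outer(d, d)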