{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "scrolled": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Acyclic\n", "\n", "--- This is a regression problem ---\n", "\n", "\n", "1. Loading dataset from file...\n", "\n", "2. Calculating gram matrices. This could take a while...\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "getting sp graphs: 183it [00:00, 2198.32it/s]\n", "calculating kernels: 16836it [00:17, 983.99it/s] \n", "\n", " --- shortest path kernel matrix of size 183 built in 17.32457208633423 seconds ---\n", "\n", "the gram matrix with parameters {'node_kernels': {'symb': , 'nsymb': , 'mix': functools.partial(, , )}, 'n_jobs': 8} is: \n", "\n", "1 gram matrices are calculated, 0 of which are ignored.\n", "\n", "3. Fitting and predicting using nested cross validation. This could really take a while...\n", "cross validation: 30it [00:12, 2.48it/s]\n", "\n", "4. Getting final performance...\n", "best_params_out: [{'node_kernels': {'symb': , 'nsymb': , 'mix': functools.partial(, , )}, 'n_jobs': 8}]\n", "best_params_in: [{'alpha': 3.1622776601683795e-10}]\n", "\n", "best_val_perf: 9.64631220504699\n", "best_val_std: 0.6555235266552757\n", "final_performance: [9.306976995404987]\n", "final_confidence: [2.317244919360123]\n", "train_performance: [6.190191405968441]\n", "train_std: [0.21512408952827894]\n", "\n", "time to calculate gram matrix with different hyper-params: 17.32±nans\n", "time to calculate best gram matrix: 17.32±nans\n", "total training time with all hyper-param choices: 33.16s\n", "\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/usr/local/lib/python3.6/dist-packages/numpy/core/_methods.py:140: RuntimeWarning: Degrees of freedom <= 0 for slice\n", " keepdims=keepdims)\n", "/usr/local/lib/python3.6/dist-packages/numpy/core/_methods.py:132: RuntimeWarning: invalid value encountered in double_scalars\n", " ret = ret.dtype.type(ret / rcount)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Filename: ../pygraph/utils/model_selection_precomputed.py\n", "\n", "Line # Mem usage Increment Line Contents\n", "================================================\n", " 24 115.1 MiB 115.1 MiB @profile\n", " 25 def model_selection_for_precomputed_kernel(datafile,\n", " 26 estimator,\n", " 27 param_grid_precomputed,\n", " 28 param_grid,\n", " 29 model_type,\n", " 30 NUM_TRIALS=30,\n", " 31 datafile_y=None,\n", " 32 extra_params=None,\n", " 33 ds_name='ds-unknown',\n", " 34 n_jobs=1,\n", " 35 read_gm_from_file=False):\n", " 36 \"\"\"Perform model selection, fitting and testing for precomputed kernels using nested cv. Print out neccessary data during the process then finally the results.\n", " 37 \n", " 38 Parameters\n", " 39 ----------\n", " 40 datafile : string\n", " 41 Path of dataset file.\n", " 42 estimator : function\n", " 43 kernel function used to estimate. This function needs to return a gram matrix.\n", " 44 param_grid_precomputed : dictionary\n", " 45 Dictionary with names (string) of parameters used to calculate gram matrices as keys and lists of parameter settings to try as values. This enables searching over any sequence of parameter settings. Params with length 1 will be omitted.\n", " 46 param_grid : dictionary\n", " 47 Dictionary with names (string) of parameters used as penelties as keys and lists of parameter settings to try as values. This enables searching over any sequence of parameter settings. 
Params with length 1 will be omitted.\n", " 48 model_type : string\n", " 49 Type of the problem, can be regression or classification.\n", " 50 NUM_TRIALS : integer\n", " 51 Number of random trials of outer cv loop. The default is 30.\n", " 52 datafile_y : string\n", " 53 Path of file storing y data. This parameter is optional depending on the given dataset file.\n", " 54 read_gm_from_file : boolean\n", " 55 Whether gram matrices are loaded from file.\n", " 56 \n", " 57 Examples\n", " 58 --------\n", " 59 >>> import numpy as np\n", " 60 >>> import sys\n", " 61 >>> sys.path.insert(0, \"../\")\n", " 62 >>> from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", " 63 >>> from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", " 64 >>>\n", " 65 >>> datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", " 66 >>> estimator = weisfeilerlehmankernel\n", " 67 >>> param_grid_precomputed = {'height': [0,1,2,3,4,5,6,7,8,9,10], 'base_kernel': ['subtree']}\n", " 68 >>> param_grid = {\"alpha\": np.logspace(-2, 2, num = 10, base = 10)}\n", " 69 >>>\n", " 70 >>> model_selection_for_precomputed_kernel(datafile, estimator, param_grid_precomputed, param_grid, 'regression')\n", " 71 \"\"\"\n", " 72 115.1 MiB 0.0 MiB tqdm.monitor_interval = 0\n", " 73 \n", " 74 115.1 MiB 0.0 MiB results_dir = '../notebooks/results/' + estimator.__name__\n", " 75 115.1 MiB 0.0 MiB if not os.path.exists(results_dir):\n", " 76 os.makedirs(results_dir)\n", " 77 # a string to save all the results.\n", " 78 115.1 MiB 0.0 MiB str_fw = '###################### log time: ' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + '. ######################\\n\\n'\n", " 79 115.1 MiB 0.0 MiB str_fw += '# This file contains results of ' + estimator.__name__ + ' on dataset ' + ds_name + ',\\n# including gram matrices, serial numbers for gram matrix figures and performance.\\n\\n'\n", " 80 \n", " 81 # set up the model type\n", " 82 115.1 MiB 0.0 MiB model_type = model_type.lower()\n", " 83 115.1 MiB 0.0 MiB if model_type != 'regression' and model_type != 'classification':\n", " 84 raise Exception(\n", " 85 'The model type is incorrect! Please choose from regression or classification.'\n", " 86 )\n", " 87 115.1 MiB 0.0 MiB print()\n", " 88 115.1 MiB 0.0 MiB print('--- This is a %s problem ---' % model_type)\n", " 89 115.1 MiB 0.0 MiB str_fw += 'This is a %s problem.\\n' % model_type\n", " 90 \n", " 91 # calculate gram matrices rather than read them from file.\n", " 92 115.1 MiB 0.0 MiB if read_gm_from_file == False:\n", " 93 # Load the dataset\n", " 94 115.1 MiB 0.0 MiB print()\n", " 95 115.1 MiB 0.0 MiB print('\\n1. 
Loading dataset from file...')\n", " 96 115.1 MiB 0.0 MiB if isinstance(datafile, str):\n", " 97 115.1 MiB 0.0 MiB dataset, y_all = loadDataset(\n", " 98 116.3 MiB 1.2 MiB datafile, filename_y=datafile_y, extra_params=extra_params)\n", " 99 else: # load data directly from variable.\n", " 100 dataset = datafile\n", " 101 y_all = datafile_y \n", " 102 \n", " 103 # import matplotlib.pyplot as plt\n", " 104 # import networkx as nx\n", " 105 # nx.draw_networkx(dataset[30])\n", " 106 # plt.show()\n", " 107 \n", " 108 # Grid of parameters with a discrete number of values for each.\n", " 109 116.3 MiB 0.0 MiB param_list_precomputed = list(ParameterGrid(param_grid_precomputed))\n", " 110 116.3 MiB 0.0 MiB param_list = list(ParameterGrid(param_grid))\n", " 111 \n", " 112 116.3 MiB 0.0 MiB gram_matrices = [\n", " 113 ] # a list to store gram matrices for all param_grid_precomputed\n", " 114 116.3 MiB 0.0 MiB gram_matrix_time = [\n", " 115 ] # a list to store time to calculate gram matrices\n", " 116 116.3 MiB 0.0 MiB param_list_pre_revised = [\n", " 117 ] # list to store param grids precomputed ignoring the useless ones\n", " 118 \n", " 119 # calculate all gram matrices\n", " 120 116.3 MiB 0.0 MiB print()\n", " 121 116.3 MiB 0.0 MiB print('2. Calculating gram matrices. This could take a while...')\n", " 122 116.3 MiB 0.0 MiB str_fw += '\\nII. Gram matrices.\\n\\n'\n", " 123 116.3 MiB 0.0 MiB tts = time.time() # start training time\n", " 124 116.3 MiB 0.0 MiB nb_gm_ignore = 0 # the number of gram matrices that should not be considered, as they may contain elements that are not numbers (NaN)\n", " 125 144.8 MiB 0.0 MiB for idx, params_out in enumerate(param_list_precomputed):\n", " 126 116.3 MiB 0.0 MiB y = y_all[:]\n", " 127 116.3 MiB 0.0 MiB params_out['n_jobs'] = n_jobs\n", " 128 # print(dataset)\n", " 129 # import networkx as nx\n", " 130 # nx.draw_networkx(dataset[1])\n", " 131 # plt.show()\n", " 132 119.1 MiB 2.8 MiB rtn_data = estimator(dataset[:], **params_out)\n", " 133 119.1 MiB 0.0 MiB Kmatrix = rtn_data[0]\n", " 134 119.1 MiB 0.0 MiB current_run_time = rtn_data[1]\n", " 135 # for some kernels, some graphs in datasets may not meet the \n", " 136 # kernels' requirements for graph structure. These graphs are trimmed. 
\n", " 137 119.1 MiB 0.0 MiB if len(rtn_data) == 3:\n", " 138 119.1 MiB 0.0 MiB idx_trim = rtn_data[2] # the index of trimmed graph list\n", " 139 119.1 MiB 0.0 MiB y = [y[idxt] for idxt in idx_trim] # trim y accordingly\n", " 140 # Kmatrix = np.random.rand(2250, 2250)\n", " 141 # current_run_time = 0.1\n", " 142 \n", " 143 119.1 MiB 0.0 MiB Kmatrix_diag = Kmatrix.diagonal().copy()\n", " 144 # remove graphs whose kernels with themselves are zeros\n", " 145 119.1 MiB 0.0 MiB nb_g_ignore = 0\n", " 146 119.1 MiB 0.0 MiB for idxk, diag in enumerate(Kmatrix_diag):\n", " 147 119.1 MiB 0.0 MiB if diag == 0:\n", " 148 Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=0)\n", " 149 Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=1)\n", " 150 nb_g_ignore += 1\n", " 151 # normalization\n", " 152 119.1 MiB 0.0 MiB for i in range(len(Kmatrix)):\n", " 153 119.1 MiB 0.0 MiB for j in range(i, len(Kmatrix)):\n", " 154 119.1 MiB 0.0 MiB Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])\n", " 155 119.1 MiB 0.0 MiB Kmatrix[j][i] = Kmatrix[i][j]\n", " 156 \n", " 157 119.1 MiB 0.0 MiB print()\n", " 158 119.1 MiB 0.0 MiB if params_out == {}:\n", " 159 print('the gram matrix is: ')\n", " 160 str_fw += 'the gram matrix is:\\n\\n'\n", " 161 else:\n", " 162 119.1 MiB 0.0 MiB print('the gram matrix with parameters', params_out, 'is: ')\n", " 163 119.1 MiB 0.0 MiB str_fw += 'the gram matrix with parameters %s is:\\n\\n' % params_out\n", " 164 119.1 MiB 0.0 MiB if len(Kmatrix) < 2:\n", " 165 nb_gm_ignore += 1\n", " 166 print('ignored, as at most only one of all its diagonal value is non-zero.')\n", " 167 str_fw += 'ignored, as at most only one of all its diagonal value is non-zero.\\n\\n'\n", " 168 else: \n", " 169 119.1 MiB 0.0 MiB if np.isnan(Kmatrix).any(\n", " 170 ): # if the matrix contains elements that are not numbers\n", " 171 nb_gm_ignore += 1\n", " 172 print('ignored, as it contains elements that are not numbers.')\n", " 173 str_fw += 'ignored, as it contains elements that are not numbers.\\n\\n'\n", " 174 else:\n", " 175 # print(Kmatrix)\n", " 176 119.1 MiB 0.0 MiB str_fw += np.array2string(\n", " 177 119.1 MiB 0.0 MiB Kmatrix,\n", " 178 119.1 MiB 0.0 MiB separator=',') + '\\n\\n'\n", " 179 # separator=',',\n", " 180 # threshold=np.inf,\n", " 181 # floatmode='unique') + '\\n\\n'\n", " 182 \n", " 183 119.1 MiB 0.0 MiB fig_file_name = results_dir + '/GM[ds]' + ds_name\n", " 184 119.1 MiB 0.0 MiB if params_out != {}:\n", " 185 119.1 MiB 0.0 MiB fig_file_name += '[params]' + str(idx)\n", " 186 119.8 MiB 0.7 MiB plt.imshow(Kmatrix)\n", " 187 119.9 MiB 0.1 MiB plt.colorbar()\n", " 188 144.8 MiB 24.9 MiB plt.savefig(fig_file_name + '.eps', format='eps', dpi=300)\n", " 189 # plt.show()\n", " 190 144.8 MiB 0.0 MiB plt.clf()\n", " 191 144.8 MiB 0.0 MiB gram_matrices.append(Kmatrix)\n", " 192 144.8 MiB 0.0 MiB gram_matrix_time.append(current_run_time)\n", " 193 144.8 MiB 0.0 MiB param_list_pre_revised.append(params_out)\n", " 194 144.8 MiB 0.0 MiB if nb_g_ignore > 0:\n", " 195 print(', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore)\n", " 196 str_fw += ', where %d graphs are ignored as their graph kernels with themselves are zeros.' 
% nb_g_ignore\n", " 197 144.8 MiB 0.0 MiB print()\n", " 198 144.8 MiB 0.0 MiB print(\n", " 199 144.8 MiB 0.0 MiB '{} gram matrices are calculated, {} of which are ignored.'.format(\n", " 200 144.8 MiB 0.0 MiB len(param_list_precomputed), nb_gm_ignore))\n", " 201 144.8 MiB 0.0 MiB str_fw += '{} gram matrices are calculated, {} of which are ignored.\\n\\n'.format(len(param_list_precomputed), nb_gm_ignore)\n", " 202 144.8 MiB 0.0 MiB str_fw += 'serial numbers of gram matrix figures and their corresponding parameter settings:\\n\\n'\n", " 203 144.8 MiB 0.0 MiB str_fw += ''.join([\n", " 204 144.8 MiB 0.0 MiB '{}: {}\\n'.format(idx, params_out)\n", " 205 144.8 MiB 0.0 MiB for idx, params_out in enumerate(param_list_precomputed)\n", " 206 ])\n", " 207 \n", " 208 144.8 MiB 0.0 MiB print()\n", " 209 144.8 MiB 0.0 MiB if len(gram_matrices) == 0:\n", " 210 print('all gram matrices are ignored, no results obtained.')\n", " 211 str_fw += '\\nall gram matrices are ignored, no results obtained.\\n\\n'\n", " 212 else:\n", " 213 # save gram matrices to file.\n", " 214 144.8 MiB 0.0 MiB np.savez(results_dir + '/' + ds_name + '.gm', \n", " 215 144.8 MiB 0.0 MiB gms=gram_matrices, params=param_list_pre_revised, y=y, \n", " 216 144.9 MiB 0.1 MiB gmtime=gram_matrix_time)\n", " 217 \n", " 218 144.9 MiB 0.0 MiB print(\n", " 219 144.9 MiB 0.0 MiB '3. Fitting and predicting using nested cross validation. This could really take a while...'\n", " 220 )\n", " 221 \n", " 222 # ---- use pool.imap_unordered to parallelize and track progress. ----\n", " 223 # train_pref = []\n", " 224 # val_pref = []\n", " 225 # test_pref = []\n", " 226 # def func_assign(result, var_to_assign):\n", " 227 # for idx, itm in enumerate(var_to_assign):\n", " 228 # itm.append(result[idx]) \n", " 229 # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, y, model_type)\n", " 230 # \n", " 231 # parallel_me(trial_do_partial, range(NUM_TRIALS), func_assign, \n", " 232 # [train_pref, val_pref, test_pref], glbv=gram_matrices,\n", " 233 # method='imap_unordered', n_jobs=n_jobs, chunksize=1,\n", " 234 # itr_desc='cross validation')\n", " 235 \n", " 236 144.9 MiB 0.0 MiB def init_worker(gms_toshare):\n", " 237 global G_gms\n", " 238 G_gms = gms_toshare\n", " 239 \n", " 240 # gram_matrices = np.array(gram_matrices)\n", " 241 # gms_shape = gram_matrices.shape\n", " 242 # gms_array = Array('d', np.reshape(gram_matrices.copy(), -1, order='C'))\n", " 243 # pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gms_array, gms_shape))\n", " 244 144.9 MiB 0.1 MiB pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,))\n", " 245 144.9 MiB 0.0 MiB trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, y, model_type)\n", " 246 144.9 MiB 0.0 MiB train_pref = []\n", " 247 144.9 MiB 0.0 MiB val_pref = []\n", " 248 144.9 MiB 0.0 MiB test_pref = []\n", " 249 # if NUM_TRIALS < 1000 * n_jobs:\n", " 250 # chunksize = int(NUM_TRIALS / n_jobs) + 1\n", " 251 # else:\n", " 252 # chunksize = 1000\n", " 253 144.9 MiB 0.0 MiB chunksize = 1\n", " 254 145.1 MiB 0.1 MiB for o1, o2, o3 in tqdm(pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout):\n", " 255 145.1 MiB 0.0 MiB train_pref.append(o1)\n", " 256 145.1 MiB 0.0 MiB val_pref.append(o2)\n", " 257 145.1 MiB 0.0 MiB test_pref.append(o3)\n", " 258 145.1 MiB 0.0 MiB pool.close()\n", " 259 145.1 MiB 0.0 MiB pool.join()\n", " 260 \n", " 261 # # ---- use pool.map to parallelize. 
----\n", " 262 # pool = Pool(n_jobs)\n", " 263 # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y[0:250], model_type)\n", " 264 # result_perf = pool.map(trial_do_partial, range(NUM_TRIALS))\n", " 265 # train_pref = [item[0] for item in result_perf]\n", " 266 # val_pref = [item[1] for item in result_perf]\n", " 267 # test_pref = [item[2] for item in result_perf]\n", " 268 \n", " 269 # # ---- direct running, normally use a single CPU core. ----\n", " 270 # train_pref = []\n", " 271 # val_pref = []\n", " 272 # test_pref = []\n", " 273 # for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout):\n", " 274 # o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i)\n", " 275 # train_pref.append(o1)\n", " 276 # val_pref.append(o2)\n", " 277 # test_pref.append(o3)\n", " 278 # print()\n", " 279 \n", " 280 145.1 MiB 0.0 MiB print()\n", " 281 145.1 MiB 0.0 MiB print('4. Getting final performance...')\n", " 282 145.1 MiB 0.0 MiB str_fw += '\\nIII. Performance.\\n\\n'\n", " 283 # averages and confidences of performances on outer trials for each combination of parameters\n", " 284 145.1 MiB 0.0 MiB average_train_scores = np.mean(train_pref, axis=0)\n", " 285 # print('val_pref: ', val_pref[0][0])\n", " 286 145.1 MiB 0.0 MiB average_val_scores = np.mean(val_pref, axis=0)\n", " 287 # print('test_pref: ', test_pref[0][0])\n", " 288 145.1 MiB 0.0 MiB average_perf_scores = np.mean(test_pref, axis=0)\n", " 289 # sample std is used here\n", " 290 145.1 MiB 0.0 MiB std_train_scores = np.std(train_pref, axis=0, ddof=1)\n", " 291 145.1 MiB 0.0 MiB std_val_scores = np.std(val_pref, axis=0, ddof=1)\n", " 292 145.1 MiB 0.0 MiB std_perf_scores = np.std(test_pref, axis=0, ddof=1)\n", " 293 \n", " 294 145.1 MiB 0.0 MiB if model_type == 'regression':\n", " 295 145.1 MiB 0.0 MiB best_val_perf = np.amin(average_val_scores)\n", " 296 else:\n", " 297 best_val_perf = np.amax(average_val_scores)\n", " 298 # print('average_val_scores: ', average_val_scores)\n", " 299 # print('best_val_perf: ', best_val_perf)\n", " 300 # print()\n", " 301 145.1 MiB 0.0 MiB best_params_index = np.where(average_val_scores == best_val_perf)\n", " 302 # find smallest val std with best val perf.\n", " 303 best_val_stds = [\n", " 304 145.1 MiB 0.0 MiB std_val_scores[value][best_params_index[1][idx]]\n", " 305 145.1 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n", " 306 ]\n", " 307 145.1 MiB 0.0 MiB min_val_std = np.amin(best_val_stds)\n", " 308 145.1 MiB 0.0 MiB best_params_index = np.where(std_val_scores == min_val_std)\n", " 309 best_params_out = [\n", " 310 145.1 MiB 0.0 MiB param_list_pre_revised[i] for i in best_params_index[0]\n", " 311 ]\n", " 312 145.1 MiB 0.0 MiB best_params_in = [param_list[i] for i in best_params_index[1]]\n", " 313 145.1 MiB 0.0 MiB print('best_params_out: ', best_params_out)\n", " 314 145.1 MiB 0.0 MiB print('best_params_in: ', best_params_in)\n", " 315 145.1 MiB 0.0 MiB print()\n", " 316 145.1 MiB 0.0 MiB print('best_val_perf: ', best_val_perf)\n", " 317 145.1 MiB 0.0 MiB print('best_val_std: ', min_val_std)\n", " 318 145.1 MiB 0.0 MiB str_fw += 'best settings of hyper-params to build gram matrix: %s\\n' % best_params_out\n", " 319 145.1 MiB 0.0 MiB str_fw += 'best settings of other hyper-params: %s\\n\\n' % best_params_in\n", " 320 145.1 MiB 0.0 MiB str_fw += 'best_val_perf: %s\\n' % best_val_perf\n", " 321 145.1 MiB 0.0 MiB str_fw += 'best_val_std: %s\\n' % min_val_std\n", " 322 \n", " 323 # 
print(best_params_index)\n", " 324 # print(best_params_index[0])\n", " 325 # print(average_perf_scores)\n", " 326 final_performance = [\n", " 327 145.1 MiB 0.0 MiB average_perf_scores[value][best_params_index[1][idx]]\n", " 328 145.1 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n", " 329 ]\n", " 330 final_confidence = [\n", " 331 145.1 MiB 0.0 MiB std_perf_scores[value][best_params_index[1][idx]]\n", " 332 145.1 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n", " 333 ]\n", " 334 145.1 MiB 0.0 MiB print('final_performance: ', final_performance)\n", " 335 145.1 MiB 0.0 MiB print('final_confidence: ', final_confidence)\n", " 336 145.1 MiB 0.0 MiB str_fw += 'final_performance: %s\\n' % final_performance\n", " 337 145.1 MiB 0.0 MiB str_fw += 'final_confidence: %s\\n' % final_confidence\n", " 338 train_performance = [\n", " 339 145.1 MiB 0.0 MiB average_train_scores[value][best_params_index[1][idx]]\n", " 340 145.1 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n", " 341 ]\n", " 342 train_std = [\n", " 343 145.1 MiB 0.0 MiB std_train_scores[value][best_params_index[1][idx]]\n", " 344 145.1 MiB 0.0 MiB for idx, value in enumerate(best_params_index[0])\n", " 345 ]\n", " 346 145.1 MiB 0.0 MiB print('train_performance: %s' % train_performance)\n", " 347 145.1 MiB 0.0 MiB print('train_std: ', train_std)\n", " 348 145.1 MiB 0.0 MiB str_fw += 'train_performance: %s\\n' % train_performance\n", " 349 145.1 MiB 0.0 MiB str_fw += 'train_std: %s\\n\\n' % train_std\n", " 350 \n", " 351 145.1 MiB 0.0 MiB print()\n", " 352 145.1 MiB 0.0 MiB tt_total = time.time() - tts # training time for all hyper-parameters\n", " 353 145.1 MiB 0.0 MiB average_gram_matrix_time = np.mean(gram_matrix_time)\n", " 354 145.1 MiB 0.0 MiB std_gram_matrix_time = np.std(gram_matrix_time, ddof=1)\n", " 355 best_gram_matrix_time = [\n", " 356 145.1 MiB 0.0 MiB gram_matrix_time[i] for i in best_params_index[0]\n", " 357 ]\n", " 358 145.1 MiB 0.0 MiB ave_bgmt = np.mean(best_gram_matrix_time)\n", " 359 145.1 MiB 0.0 MiB std_bgmt = np.std(best_gram_matrix_time, ddof=1)\n", " 360 145.1 MiB 0.0 MiB print(\n", " 361 145.1 MiB 0.0 MiB 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'\n", " 362 145.1 MiB 0.0 MiB .format(average_gram_matrix_time, std_gram_matrix_time))\n", " 363 145.1 MiB 0.0 MiB print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format(\n", " 364 145.1 MiB 0.0 MiB ave_bgmt, std_bgmt))\n", " 365 145.1 MiB 0.0 MiB print(\n", " 366 145.1 MiB 0.0 MiB 'total training time with all hyper-param choices: {:.2f}s'.format(\n", " 367 145.1 MiB 0.0 MiB tt_total))\n", " 368 145.1 MiB 0.0 MiB str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\\n'.format(average_gram_matrix_time, std_gram_matrix_time)\n", " 369 145.1 MiB 0.0 MiB str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\\n'.format(ave_bgmt, std_bgmt)\n", " 370 145.1 MiB 0.0 MiB str_fw += 'total training time with all hyper-param choices: {:.2f}s\\n\\n'.format(tt_total)\n", " 371 \n", " 372 # # save results to file\n", " 373 # np.savetxt(results_name_pre + 'average_train_scores.dt',\n", " 374 # average_train_scores)\n", " 375 # np.savetxt(results_name_pre + 'average_val_scores', average_val_scores)\n", " 376 # np.savetxt(results_name_pre + 'average_perf_scores.dt',\n", " 377 # average_perf_scores)\n", " 378 # np.savetxt(results_name_pre + 'std_train_scores.dt', std_train_scores)\n", " 379 # np.savetxt(results_name_pre + 'std_val_scores.dt', std_val_scores)\n", 
" 380 # np.savetxt(results_name_pre + 'std_perf_scores.dt', std_perf_scores)\n", " 381 \n", " 382 # np.save(results_name_pre + 'best_params_index', best_params_index)\n", " 383 # np.save(results_name_pre + 'best_params_pre.dt', best_params_out)\n", " 384 # np.save(results_name_pre + 'best_params_in.dt', best_params_in)\n", " 385 # np.save(results_name_pre + 'best_val_perf.dt', best_val_perf)\n", " 386 # np.save(results_name_pre + 'best_val_std.dt', best_val_std)\n", " 387 # np.save(results_name_pre + 'final_performance.dt', final_performance)\n", " 388 # np.save(results_name_pre + 'final_confidence.dt', final_confidence)\n", " 389 # np.save(results_name_pre + 'train_performance.dt', train_performance)\n", " 390 # np.save(results_name_pre + 'train_std.dt', train_std)\n", " 391 \n", " 392 # np.save(results_name_pre + 'gram_matrix_time.dt', gram_matrix_time)\n", " 393 # np.save(results_name_pre + 'average_gram_matrix_time.dt',\n", " 394 # average_gram_matrix_time)\n", " 395 # np.save(results_name_pre + 'std_gram_matrix_time.dt',\n", " 396 # std_gram_matrix_time)\n", " 397 # np.save(results_name_pre + 'best_gram_matrix_time.dt',\n", " 398 # best_gram_matrix_time)\n", " 399 \n", " 400 # print out as table.\n", " 401 145.1 MiB 0.0 MiB from collections import OrderedDict\n", " 402 145.1 MiB 0.0 MiB from tabulate import tabulate\n", " 403 145.1 MiB 0.0 MiB table_dict = {}\n", " 404 145.1 MiB 0.0 MiB if model_type == 'regression':\n", " 405 145.1 MiB 0.0 MiB for param_in in param_list:\n", " 406 145.1 MiB 0.0 MiB param_in['alpha'] = '{:.2e}'.format(param_in['alpha'])\n", " 407 else:\n", " 408 for param_in in param_list:\n", " 409 param_in['C'] = '{:.2e}'.format(param_in['C'])\n", " 410 145.1 MiB 0.0 MiB table_dict['params'] = [{**param_out, **param_in}\n", " 411 145.1 MiB 0.0 MiB for param_in in param_list for param_out in param_list_pre_revised]\n", " 412 table_dict['gram_matrix_time'] = [\n", " 413 145.1 MiB 0.0 MiB '{:.2f}'.format(gram_matrix_time[index_out])\n", " 414 145.1 MiB 0.0 MiB for param_in in param_list\n", " 415 145.1 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n", " 416 ]\n", " 417 table_dict['valid_perf'] = [\n", " 418 145.1 MiB 0.0 MiB '{:.2f}±{:.2f}'.format(average_val_scores[index_out][index_in],\n", " 419 std_val_scores[index_out][index_in])\n", " 420 145.1 MiB 0.0 MiB for index_in, _ in enumerate(param_list)\n", " 421 145.1 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n", " 422 ]\n", " 423 table_dict['test_perf'] = [\n", " 424 145.1 MiB 0.0 MiB '{:.2f}±{:.2f}'.format(average_perf_scores[index_out][index_in],\n", " 425 std_perf_scores[index_out][index_in])\n", " 426 145.1 MiB 0.0 MiB for index_in, _ in enumerate(param_list)\n", " 427 145.1 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n", " 428 ]\n", " 429 table_dict['train_perf'] = [\n", " 430 145.1 MiB 0.0 MiB '{:.2f}±{:.2f}'.format(average_train_scores[index_out][index_in],\n", " 431 std_train_scores[index_out][index_in])\n", " 432 145.1 MiB 0.0 MiB for index_in, _ in enumerate(param_list)\n", " 433 145.1 MiB 0.0 MiB for index_out, _ in enumerate(param_list_pre_revised)\n", " 434 ]\n", " 435 keyorder = [\n", " 436 145.1 MiB 0.0 MiB 'params', 'train_perf', 'valid_perf', 'test_perf',\n", " 437 145.1 MiB 0.0 MiB 'gram_matrix_time'\n", " 438 ]\n", " 439 145.1 MiB 0.0 MiB print()\n", " 440 145.1 MiB 0.0 MiB tb_print = tabulate(\n", " 441 145.1 MiB 0.0 MiB OrderedDict(\n", " 442 145.1 MiB 0.0 MiB sorted(table_dict.items(),\n", " 443 145.1 MiB 0.0 MiB key=lambda i: 
keyorder.index(i[0]))),\n", " 444 145.1 MiB 0.0 MiB headers='keys')\n", " 445 # print(tb_print)\n", " 446 145.1 MiB 0.0 MiB str_fw += 'table of performance v.s. hyper-params:\\n\\n%s\\n\\n' % tb_print\n", " 447 \n", " 448 # read gram matrices from file.\n", " 449 else: \n", " 450 # Grid of parameters with a discrete number of values for each.\n", " 451 # param_list_precomputed = list(ParameterGrid(param_grid_precomputed))\n", " 452 param_list = list(ParameterGrid(param_grid))\n", " 453 \n", " 454 # read gram matrices from file.\n", " 455 print()\n", " 456 print('2. Reading gram matrices from file...')\n", " 457 str_fw += '\\nII. Gram matrices.\\n\\nGram matrices are read from file, see the last log for details.\\n'\n", " 458 gmfile = np.load(results_dir + '/' + ds_name + '.gm.npz')\n", " 459 gram_matrices = gmfile['gms'] # a list to store gram matrices for all param_grid_precomputed\n", " 460 gram_matrix_time = gmfile['gmtime'] # time used to compute the gram matrices\n", " 461 param_list_pre_revised = gmfile['params'] # list to store param grids precomputed ignoring the useless ones\n", " 462 y = gmfile['y'].tolist()\n", " 463 \n", " 464 tts = time.time() # start training time\n", " 465 # nb_gm_ignore = 0 # the number of gram matrices that should not be considered, as they may contain elements that are not numbers (NaN) \n", " 466 print(\n", " 467 '3. Fitting and predicting using nested cross validation. This could really take a while...'\n", " 468 )\n", " 469 \n", " 470 # ---- use pool.imap_unordered to parallelize and track progress. ----\n", " 471 def init_worker(gms_toshare):\n", " 472 global G_gms\n", " 473 G_gms = gms_toshare\n", " 474 \n", " 475 pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,))\n", " 476 trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, y, model_type)\n", " 477 train_pref = []\n", " 478 val_pref = []\n", " 479 test_pref = []\n", " 480 chunksize = 1\n", " 481 for o1, o2, o3 in tqdm(pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout):\n", " 482 train_pref.append(o1)\n", " 483 val_pref.append(o2)\n", " 484 test_pref.append(o3)\n", " 485 pool.close()\n", " 486 pool.join()\n", " 487 \n", " 488 # # ---- use pool.map to parallelize. ----\n", " 489 # result_perf = pool.map(trial_do_partial, range(NUM_TRIALS))\n", " 490 # train_pref = [item[0] for item in result_perf]\n", " 491 # val_pref = [item[1] for item in result_perf]\n", " 492 # test_pref = [item[2] for item in result_perf]\n", " 493 \n", " 494 # # ---- use joblib.Parallel to parallelize and track progress. ----\n", " 495 # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y, model_type)\n", " 496 # result_perf = Parallel(n_jobs=n_jobs, verbose=10)(delayed(trial_do_partial)(trial) for trial in range(NUM_TRIALS))\n", " 497 # train_pref = [item[0] for item in result_perf]\n", " 498 # val_pref = [item[1] for item in result_perf]\n", " 499 # test_pref = [item[2] for item in result_perf]\n", " 500 \n", " 501 # # ---- direct running, normally use a single CPU core. ----\n", " 502 # train_pref = []\n", " 503 # val_pref = []\n", " 504 # test_pref = []\n", " 505 # for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout):\n", " 506 # o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i)\n", " 507 # train_pref.append(o1)\n", " 508 # val_pref.append(o2)\n", " 509 # test_pref.append(o3)\n", " 510 \n", " 511 print()\n", " 512 print('4. 
Getting final performance...')\n", " 513 str_fw += '\\nIII. Performance.\\n\\n'\n", " 514 # averages and confidences of performances on outer trials for each combination of parameters\n", " 515 average_train_scores = np.mean(train_pref, axis=0)\n", " 516 average_val_scores = np.mean(val_pref, axis=0)\n", " 517 average_perf_scores = np.mean(test_pref, axis=0)\n", " 518 # sample std is used here\n", " 519 std_train_scores = np.std(train_pref, axis=0, ddof=1)\n", " 520 std_val_scores = np.std(val_pref, axis=0, ddof=1)\n", " 521 std_perf_scores = np.std(test_pref, axis=0, ddof=1)\n", " 522 \n", " 523 if model_type == 'regression':\n", " 524 best_val_perf = np.amin(average_val_scores)\n", " 525 else:\n", " 526 best_val_perf = np.amax(average_val_scores)\n", " 527 best_params_index = np.where(average_val_scores == best_val_perf)\n", " 528 # find smallest val std with best val perf.\n", " 529 best_val_stds = [\n", " 530 std_val_scores[value][best_params_index[1][idx]]\n", " 531 for idx, value in enumerate(best_params_index[0])\n", " 532 ]\n", " 533 min_val_std = np.amin(best_val_stds)\n", " 534 best_params_index = np.where(std_val_scores == min_val_std)\n", " 535 best_params_out = [\n", " 536 param_list_pre_revised[i] for i in best_params_index[0]\n", " 537 ]\n", " 538 best_params_in = [param_list[i] for i in best_params_index[1]]\n", " 539 print('best_params_out: ', best_params_out)\n", " 540 print('best_params_in: ', best_params_in)\n", " 541 print()\n", " 542 print('best_val_perf: ', best_val_perf)\n", " 543 print('best_val_std: ', min_val_std)\n", " 544 str_fw += 'best settings of hyper-params to build gram matrix: %s\\n' % best_params_out\n", " 545 str_fw += 'best settings of other hyper-params: %s\\n\\n' % best_params_in\n", " 546 str_fw += 'best_val_perf: %s\\n' % best_val_perf\n", " 547 str_fw += 'best_val_std: %s\\n' % min_val_std\n", " 548 \n", " 549 final_performance = [\n", " 550 average_perf_scores[value][best_params_index[1][idx]]\n", " 551 for idx, value in enumerate(best_params_index[0])\n", " 552 ]\n", " 553 final_confidence = [\n", " 554 std_perf_scores[value][best_params_index[1][idx]]\n", " 555 for idx, value in enumerate(best_params_index[0])\n", " 556 ]\n", " 557 print('final_performance: ', final_performance)\n", " 558 print('final_confidence: ', final_confidence)\n", " 559 str_fw += 'final_performance: %s\\n' % final_performance\n", " 560 str_fw += 'final_confidence: %s\\n' % final_confidence\n", " 561 train_performance = [\n", " 562 average_train_scores[value][best_params_index[1][idx]]\n", " 563 for idx, value in enumerate(best_params_index[0])\n", " 564 ]\n", " 565 train_std = [\n", " 566 std_train_scores[value][best_params_index[1][idx]]\n", " 567 for idx, value in enumerate(best_params_index[0])\n", " 568 ]\n", " 569 print('train_performance: %s' % train_performance)\n", " 570 print('train_std: ', train_std)\n", " 571 str_fw += 'train_performance: %s\\n' % train_performance\n", " 572 str_fw += 'train_std: %s\\n\\n' % train_std\n", " 573 \n", " 574 print()\n", " 575 average_gram_matrix_time = np.mean(gram_matrix_time)\n", " 576 std_gram_matrix_time = np.std(gram_matrix_time, ddof=1)\n", " 577 best_gram_matrix_time = [\n", " 578 gram_matrix_time[i] for i in best_params_index[0]\n", " 579 ]\n", " 580 ave_bgmt = np.mean(best_gram_matrix_time)\n", " 581 std_bgmt = np.std(best_gram_matrix_time, ddof=1)\n", " 582 print(\n", " 583 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'\n", " 584 .format(average_gram_matrix_time, std_gram_matrix_time))\n", 
" 585 print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format(\n", " 586 ave_bgmt, std_bgmt))\n", " 587 tt_poster = time.time() - tts # training time with hyper-param choices who did not participate in calculation of gram matrices\n", " 588 print(\n", " 589 'training time with hyper-param choices who did not participate in calculation of gram matrices: {:.2f}s'.format(\n", " 590 tt_poster))\n", " 591 print('total training time with all hyper-param choices: {:.2f}s'.format(\n", " 592 tt_poster + np.sum(gram_matrix_time)))\n", " 593 # str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\\n'.format(average_gram_matrix_time, std_gram_matrix_time)\n", " 594 # str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\\n'.format(ave_bgmt, std_bgmt)\n", " 595 str_fw += 'training time with hyper-param choices who did not participate in calculation of gram matrices: {:.2f}s\\n\\n'.format(tt_poster)\n", " 596 \n", " 597 # print out as table.\n", " 598 from collections import OrderedDict\n", " 599 from tabulate import tabulate\n", " 600 table_dict = {}\n", " 601 if model_type == 'regression':\n", " 602 for param_in in param_list:\n", " 603 param_in['alpha'] = '{:.2e}'.format(param_in['alpha'])\n", " 604 else:\n", " 605 for param_in in param_list:\n", " 606 param_in['C'] = '{:.2e}'.format(param_in['C'])\n", " 607 table_dict['params'] = [{**param_out, **param_in}\n", " 608 for param_in in param_list for param_out in param_list_pre_revised]\n", " 609 # table_dict['gram_matrix_time'] = [\n", " 610 # '{:.2f}'.format(gram_matrix_time[index_out])\n", " 611 # for param_in in param_list\n", " 612 # for index_out, _ in enumerate(param_list_pre_revised)\n", " 613 # ]\n", " 614 table_dict['valid_perf'] = [\n", " 615 '{:.2f}±{:.2f}'.format(average_val_scores[index_out][index_in],\n", " 616 std_val_scores[index_out][index_in])\n", " 617 for index_in, _ in enumerate(param_list)\n", " 618 for index_out, _ in enumerate(param_list_pre_revised)\n", " 619 ]\n", " 620 table_dict['test_perf'] = [\n", " 621 '{:.2f}±{:.2f}'.format(average_perf_scores[index_out][index_in],\n", " 622 std_perf_scores[index_out][index_in])\n", " 623 for index_in, _ in enumerate(param_list)\n", " 624 for index_out, _ in enumerate(param_list_pre_revised)\n", " 625 ]\n", " 626 table_dict['train_perf'] = [\n", " 627 '{:.2f}±{:.2f}'.format(average_train_scores[index_out][index_in],\n", " 628 std_train_scores[index_out][index_in])\n", " 629 for index_in, _ in enumerate(param_list)\n", " 630 for index_out, _ in enumerate(param_list_pre_revised)\n", " 631 ]\n", " 632 keyorder = [\n", " 633 'params', 'train_perf', 'valid_perf', 'test_perf'\n", " 634 ]\n", " 635 print()\n", " 636 tb_print = tabulate(\n", " 637 OrderedDict(\n", " 638 sorted(table_dict.items(),\n", " 639 key=lambda i: keyorder.index(i[0]))),\n", " 640 headers='keys')\n", " 641 # print(tb_print)\n", " 642 str_fw += 'table of performance v.s. 
hyper-params:\\n\\n%s\\n\\n' % tb_print\n", " 643 \n", " 644 # open file to save all results for this dataset.\n", " 645 if not os.path.exists(results_dir):\n", " 646 os.makedirs(results_dir)\n", " 647 \n", " 648 # open file to save all results for this dataset.\n", " 649 145.1 MiB 0.0 MiB if not os.path.exists(results_dir + '/' + ds_name + '.output.txt'):\n", " 650 with open(results_dir + '/' + ds_name + '.output.txt', 'w') as f:\n", " 651 f.write(str_fw)\n", " 652 else:\n", " 653 145.1 MiB 0.0 MiB with open(results_dir + '/' + ds_name + '.output.txt', 'r+') as f:\n", " 654 145.1 MiB 0.0 MiB content = f.read()\n", " 655 145.1 MiB 0.0 MiB f.seek(0, 0)\n", " 656 145.1 MiB 0.0 MiB f.write(str_fw + '\\n\\n\\n' + content)\n", "\n", "\n", "\n" ] } ], "source": [ "import functools\n", "from libs import *\n", "import multiprocessing\n", "\n", "from pygraph.kernels.spKernel import spkernel\n", "from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", "#from pygraph.utils.model_selection_precomputed import trial_do\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n", " 'task': 'regression'}, # node symb\n", "# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',\n", "# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt', }, \n", "# # contains single node graph, node symb\n", "# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds', }, # node/edge symb\n", "# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds', }, # unlabeled\n", "# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG.mat',\n", "# 'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}}, # node/edge symb\n", "# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},\n", "# # node nsymb\n", "# {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},\n", "# # node symb/nsymb\n", "# {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'},\n", "# # node/edge symb\n", "# {'name': 'D&D', 'dataset': '../datasets/D&D/DD.mat',\n", "# 'extra_params': {'am_sp_al_nl_el': [0, 1, 2, 1, -1]}}, # node symb\n", "\n", " # {'name': 'COIL-DEL', 'dataset': '../datasets/COIL-DEL/COIL-DEL_A.txt'}, # edge symb, node nsymb\n", " # # # {'name': 'BZR', 'dataset': '../datasets/BZR_txt/BZR_A_sparse.txt'}, # node symb/nsymb\n", " # # # {'name': 'COX2', 'dataset': '../datasets/COX2_txt/COX2_A_sparse.txt'}, # node symb/nsymb\n", " # {'name': 'Fingerprint', 'dataset': '../datasets/Fingerprint/Fingerprint_A.txt'},\n", " #\n", " # # {'name': 'DHFR', 'dataset': '../datasets/DHFR_txt/DHFR_A_sparse.txt'}, # node symb/nsymb\n", " # # {'name': 'SYNTHETIC', 'dataset': '../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'}, # node symb/nsymb\n", " # # {'name': 'MSRC9', 'dataset': '../datasets/MSRC_9_txt/MSRC_9_A.txt'}, # node symb\n", " # # {'name': 'MSRC21', 'dataset': '../datasets/MSRC_21_txt/MSRC_21_A.txt'}, # node symb\n", " # # {'name': 'FIRSTMM_DB', 'dataset': '../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'}, # node symb/nsymb ,edge nsymb\n", "\n", " # # {'name': 'PROTEINS', 'dataset': '../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'}, # node symb/nsymb\n", " # # {'name': 'PROTEINS_full', 'dataset': '../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'}, # node symb/nsymb\n", " # # {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb\n", " # {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1.mat',\n", " # 'extra_params': {'am_sp_al_nl_el': [1, 1, 
2, 0, -1]}}, # node symb\n", " # {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109.mat',\n", " # 'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n", " # {'name': 'NCI-HIV', 'dataset': '../datasets/NCI-HIV/AIDO99SD.sdf',\n", " # 'dataset_y': '../datasets/NCI-HIV/aids_conc_may04.txt',}, # node/edge symb\n", "\n", " # # not working below\n", " # {'name': 'PTC_FM', 'dataset': '../datasets/PTC/Train/FM.ds',},\n", " # {'name': 'PTC_FR', 'dataset': '../datasets/PTC/Train/FR.ds',},\n", " # {'name': 'PTC_MM', 'dataset': '../datasets/PTC/Train/MM.ds',},\n", " # {'name': 'PTC_MR', 'dataset': '../datasets/PTC/Train/MR.ds',},\n", "]\n", "estimator = spkernel\n", "mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)\n", "param_grid_precomputed = {'node_kernels': [\n", " {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}]}\n", "param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},\n", " {'alpha': np.logspace(-10, 10, num=41, base=10)}]\n", "\n", "for ds in dslist:\n", " print()\n", " print(ds['name'])\n", " model_selection_for_precomputed_kernel(\n", " ds['dataset'],\n", " estimator,\n", " param_grid_precomputed,\n", " (param_grid[1] if ('task' in ds and ds['task']\n", " == 'regression') else param_grid[0]),\n", " (ds['task'] if 'task' in ds else 'classification'),\n", " NUM_TRIALS=30,\n", " datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),\n", " extra_params=(ds['extra_params'] if 'extra_params' in ds else None),\n", " ds_name=ds['name'],\n", " n_jobs=multiprocessing.cpu_count(),\n", " read_gm_from_file=False)\n", " print()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" } }, "nbformat": 4, "nbformat_minor": 2 }