{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "Acyclic\n", "\n", "--- This is a regression problem ---\n", "\n", "\n", "1. Loading dataset from file...\n", "\n", "2. Calculating gram matrices. This could take a while...\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5308.25it/s]\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "../gklearn/kernels/randomWalkKernel.py:108: UserWarning: All labels are ignored.\n", " warnings.warn('All labels are ignored.')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "calculating kernels: 16836it [00:00, 65408.89it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.4157981872558594 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 0.1, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5205.09it/s]\n", "calculating kernels: 16836it [00:00, 73715.56it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.36714887619018555 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 0.01, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5344.96it/s]\n", "calculating kernels: 16836it [00:00, 68817.65it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.3666379451751709 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 0.001, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5295.73it/s]\n", "calculating kernels: 16836it [00:00, 74865.49it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.36979222297668457 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 0.0001, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5040.80it/s]\n", "calculating kernels: 16836it [00:00, 70923.54it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.3692610263824463 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 1e-05, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5326.60it/s]\n", "calculating kernels: 16836it [00:00, 73697.55it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.37317800521850586 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 1e-06, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. 
Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5705.98it/s]\n", "calculating kernels: 16836it [00:00, 64238.65it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.36565732955932617 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 1e-07, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 4833.15it/s]\n", "calculating kernels: 16836it [00:00, 69971.77it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.37798523902893066 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 1e-08, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 4170.94it/s]\n", "calculating kernels: 16836it [00:00, 64187.38it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.39433860778808594 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 1e-09, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "compute adjacency matrices: 100%|██████████| 183/183 [00:00<00:00, 5273.43it/s]\n", "calculating kernels: 16836it [00:00, 69555.28it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 0.3833920955657959 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'sylvester', 'weight': 1e-10, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", "10 gram matrices are calculated, 0 of which are ignored.\n", "\n", "3. Fitting and predicting using nested cross validation. This could really take a while...\n", "cross validation: 30it [00:33, 1.11s/it]\n", "\n", "4. Getting final performance...\n", "best_params_out: [{'compute_method': 'sylvester', 'weight': 0.01, 'n_jobs': 8, 'verbose': True}]\n", "best_params_in: [{'alpha': 1e-10}]\n", "\n", "best_val_perf: 31.76835551233969\n", "best_val_std: 0.43269972907929183\n", "final_performance: [32.391882524496765]\n", "final_confidence: [2.6542337929023336]\n", "train_performance: [30.70127313658435]\n", "train_std: [0.31861204198126475]\n", "\n", "time to calculate gram matrix with different hyper-params: 0.38±0.02s\n", "time to calculate best gram matrix: 0.37±0.00s\n", "total training time with all hyper-param choices: 40.53s\n", "\n", "\n", "--- This is a regression problem ---\n", "\n", "\n", "1. Loading dataset from file...\n", "\n", "2. Calculating gram matrices. This could take a while...\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "reindex vertices: 100%|██████████| 183/183 [00:00<00:00, 28950.24it/s]\n", "calculating kernels: 16836it [00:02, 6540.43it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 2.6675093173980713 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'conjugate', 'edge_kernels': {'symb': , 'nsymb': , 'mix': functools.partial(, , )}, 'node_kernels': {'symb': , 'nsymb': , 'mix': functools.partial(, , )}, 'weight': 0.1, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. 
Set all weight to 1.\n", "\n", "reindex vertices: 100%|██████████| 183/183 [00:00<00:00, 28019.19it/s]\n", "calculating kernels: 16836it [00:02, 7963.48it/s]\n", "\n", " --- kernel matrix of random walk kernel of size 183 built in 2.2675061225891113 seconds ---\n", "\n", "the gram matrix with parameters {'compute_method': 'conjugate', 'edge_kernels': {'symb': , 'nsymb': , 'mix': functools.partial(, , )}, 'node_kernels': {'symb': , 'nsymb': , 'mix': functools.partial(, , )}, 'weight': 0.01, 'n_jobs': 8, 'verbose': True} is: \n", "\n", "\n", "\n", " None edge weight specified. Set all weight to 1.\n", "\n", "reindex vertices: 100%|██████████| 183/183 [00:00<00:00, 23036.63it/s]\n", "calculating kernels: 12801it [00:01, 8043.11it/s]" ] } ], "source": [
    "# %load_ext line_profiler\n",
    "# %matplotlib inline\n",
    "import functools\n",
    "from libs import *  # provides model_selection_for_precomputed_kernel used below\n",
    "import multiprocessing\n",
    "\n",
    "from gklearn.kernels.randomWalkKernel import randomwalkkernel\n",
    "from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "dslist = [\n",
    "    {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n",
    "     'task': 'regression'},  # node symb\n",
    "    {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',\n",
    "     'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'},\n",
    "    # contains single node graph, node symb\n",
    "    {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'},  # node/edge symb\n",
    "    {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'},  # unlabeled\n",
    "    {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'},  # node/edge symb\n",
    "    {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},\n",
    "    # node nsymb\n",
    "    {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},\n",
    "    # node symb/nsymb\n",
    "# {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'},\n",
    "# # node/edge symb\n",
    "# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb\n",
    "\n",
    "    # {'name': 'COIL-DEL', 'dataset': '../datasets/COIL-DEL/COIL-DEL_A.txt'}, # edge symb, node nsymb\n",
    "    # # # {'name': 'BZR', 'dataset': '../datasets/BZR_txt/BZR_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # # {'name': 'COX2', 'dataset': '../datasets/COX2_txt/COX2_A_sparse.txt'}, # node symb/nsymb\n",
    "    # {'name': 'Fingerprint', 'dataset': '../datasets/Fingerprint/Fingerprint_A.txt'},\n",
    "    #\n",
    "    # # {'name': 'DHFR', 'dataset': '../datasets/DHFR_txt/DHFR_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # {'name': 'SYNTHETIC', 'dataset': '../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'}, # node symb/nsymb\n",
    "# {'name': 'MSRC9', 'dataset': '../datasets/MSRC_9_txt/MSRC_9_A.txt'}, # node symb, missing values\n",
    "# {'name': 'MSRC21', 'dataset': '../datasets/MSRC_21_txt/MSRC_21_A.txt'}, # node symb, missing values\n",
    "    # # {'name': 'FIRSTMM_DB', 'dataset': '../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'}, # node symb/nsymb, edge nsymb\n",
    "\n",
    "    # # {'name': 'PROTEINS', 'dataset': '../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # {'name': 'PROTEINS_full', 'dataset': '../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'}, # node symb/nsymb\n",
    "    # # {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb\n",
    "    # {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1.mat',\n",
    "    #  'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
    "    # {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109.mat',\n",
    "    #  'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
    "    # {'name': 'NCI-HIV', 'dataset': '../datasets/NCI-HIV/AIDO99SD.sdf',\n",
    "    #  'dataset_y': '../datasets/NCI-HIV/aids_conc_may04.txt',}, # node/edge symb\n",
    "\n",
    "# # not working below\n",
    "# {'name': 'PTC_FM', 'dataset': '../datasets/PTC/Train/FM.ds',},\n",
    "    # {'name': 'PTC_FR', 'dataset': '../datasets/PTC/Train/FR.ds',},\n",
    "    # {'name': 'PTC_MM', 'dataset': '../datasets/PTC/Train/MM.ds',},\n",
    "    # {'name': 'PTC_MR', 'dataset': '../datasets/PTC/Train/MR.ds',},\n",
    "]\n",
    "estimator = randomwalkkernel\n",
    "# param_grid[0] ('C') is used for classification tasks, param_grid[1] ('alpha') for regression (chosen per task below).\n",
    "param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},\n",
    "              {'alpha': np.logspace(-10, 10, num=41, base=10)}]\n",
    "\n",
    "for ds in dslist:\n",
    "    print()\n",
    "    print(ds['name'])\n",
    "    for compute_method in ['sylvester', 'conjugate', 'fp', 'spectral']:\n",
    "        if compute_method == 'sylvester':\n",
    "            param_grid_precomputed = {'compute_method': ['sylvester'],\n",
    "#                                      'weight': np.linspace(0.01, 0.10, 10)}\n",
    "                                      'weight': np.logspace(-1, -10, num=10, base=10)}\n",
    "        elif compute_method == 'conjugate':\n",
    "            mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)\n",
    "            param_grid_precomputed = {'compute_method': ['conjugate'],\n",
    "                                      'node_kernels':\n",
    "                                          [{'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}],\n",
    "                                      'edge_kernels':\n",
    "                                          [{'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}],\n",
    "                                      'weight': np.logspace(-1, -10, num=10, base=10)}\n",
    "        elif compute_method == 'fp':\n",
    "            mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)\n",
    "            param_grid_precomputed = {'compute_method': ['fp'],\n",
    "                                      'node_kernels':\n",
    "                                          [{'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}],\n",
    "                                      'edge_kernels':\n",
    "                                          [{'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}],\n",
    "                                      'weight': np.logspace(-3, -10, num=8, base=10)}\n",
    "        elif compute_method == 'spectral':\n",
    "            param_grid_precomputed = {'compute_method': ['spectral'],\n",
    "                                      'weight': np.logspace(-1, -10, num=10, base=10),\n",
    "                                      'sub_kernel': ['geo', 'exp']}\n",
    "        # nested cross-validation over the gram-matrix params above and the estimator's param grid\n",
    "        model_selection_for_precomputed_kernel(\n",
    "            ds['dataset'],\n",
    "            estimator,\n",
    "            param_grid_precomputed,\n",
    "            (param_grid[1] if ('task' in ds and ds['task']\n",
    "                               == 'regression') else param_grid[0]),\n",
    "            (ds['task'] if 'task' in ds else 'classification'),\n",
    "            NUM_TRIALS=30,\n",
    "            datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),\n",
    "            extra_params=(ds['extra_params'] if 'extra_params' in ds else None),\n",
    "            ds_name=ds['name'],\n",
    "            n_jobs=multiprocessing.cpu_count(),\n",
    "            read_gm_from_file=False,\n",
    "            verbose=True)\n",
    "        print()"
   ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" } }, "nbformat": 4, "nbformat_minor": 2 }