From 6222e1984f60eafbdc1d4c04bda28132b4ef8cc9 Mon Sep 17 00:00:00 2001
From: jajupmochi
Date: Tue, 21 May 2019 17:22:14 +0200
Subject: [PATCH] update README

---
 README.md                         |   4 +
 notebooks/run_treeletkernel.ipynb | 192 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 196 insertions(+)
 create mode 100644 notebooks/run_treeletkernel.ipynb

diff --git a/README.md b/README.md
index 06e4b4c..67a82d4 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,8 @@ Simply clone this repository and voilà! Then check [`notebooks`](https://github
 * The path kernel up to length h [6]
 * The Tanimoto kernel
 * The MinMax kernel
+* Non-linear kernels
+  * The treelet kernel [10]
 
 ## Computation optimization methods
 
@@ -88,6 +90,8 @@ Linlin Jia, Benoit Gaüzère, and Paul Honeine. Graph Kernels Based on Linear Pa
 [9] Edward Fredkin. Trie memory. Communications of the ACM, 3(9):490–499, 1960.
 
+[10] Benoit Gaüzère, Luc Brun, and Didier Villemin. Two new graphs kernels in chemoinformatics. Pattern Recognition Letters, 33(15):2038–2047, 2012.
+
 ## Authors
 
 * [Linlin Jia](https://github.com/jajupmochi), LITIS, INSA Rouen Normandie
 
diff --git a/notebooks/run_treeletkernel.ipynb b/notebooks/run_treeletkernel.ipynb
new file mode 100644
index 0000000..2d7d2a2
--- /dev/null
+++ b/notebooks/run_treeletkernel.ipynb
@@ -0,0 +1,192 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "Acyclic\n",
+      "\n",
+      "--- This is a regression problem ---\n",
+      "\n",
+      "\n",
+      "1. Loading dataset from file...\n",
+      "\n",
+      "2. Calculating gram matrices. This could take a while...\n",
+      "getting canonkeys: 183it [00:00, 2869.32it/s]\n",
+      "calculating kernels: 16836it [00:00, 289967.90it/s]\n",
+      "\n",
+      " --- treelet kernel matrix of size 183 built in 0.2736480236053467 seconds ---\n",
+      "\n",
+      "the gram matrix with parameters {'sub_kernel': <function gaussiankernel at 0x...>, 'n_jobs': 8, 'verbose': True} is: \n",
+      "\n",
+      "\n",
+      "getting canonkeys: 183it [00:00, 2431.05it/s]\n",
+      "calculating kernels: 16836it [00:00, 225177.06it/s]\n",
+      "\n",
+      " --- treelet kernel matrix of size 183 built in 0.2614881992340088 seconds ---\n",
+      "\n",
+      "the gram matrix with parameters {'sub_kernel': <function polynomialkernel at 0x...>, 'n_jobs': 8, 'verbose': True} is: \n",
+      "\n",
+      "\n",
+      "\n",
+      "2 gram matrices are calculated, 0 of which are ignored.\n",
+      "\n",
+      "3. Fitting and predicting using nested cross validation. This could really take a while...\n",
+      "cross validation: 30it [00:06, 4.34it/s]\n",
+      "\n",
+      "4. Getting final performance...\n",
+      "best_params_out: [{'sub_kernel': , 'n_jobs': 8, 'verbose': True}]\n",
+      "best_params_in: [{'alpha': 0.01}]\n",
+      "\n",
+      "best_val_perf: 8.699254729880051\n",
+      "best_val_std: 0.6859488791023038\n",
+      "final_performance: [10.449041034883777]\n",
+      "final_confidence: [5.005824863496953]\n",
+      "train_performance: [1.3405521528763233]\n",
+      "train_std: [0.0923786919637616]\n",
+      "\n",
+      "time to calculate gram matrix with different hyper-params: 0.27±0.01s\n",
+      "time to calculate best gram matrix: 0.26±0.00s\n",
+      "total training time with all hyper-param choices: 8.18s\n",
+      "\n",
+      "\n",
+      "\n",
+      "Alkane\n",
+      "\n",
+      "--- This is a regression problem ---\n",
+      "\n",
+      "\n",
+      "1. Loading dataset from file...\n",
+      "\n",
+      "2. Calculating gram matrices. This could take a while...\n",
+      "getting canonkeys: 150it [00:00, 1460.40it/s]\n",
+      "calculating kernels: 11325it [00:00, 188753.18it/s]\n",
+      "\n",
+      " --- treelet kernel matrix of size 150 built in 0.452197790145874 seconds ---\n",
+      "\n",
+      "the gram matrix with parameters {'sub_kernel': <function gaussiankernel at 0x...>, 'n_jobs': 8, 'verbose': True} is: \n",
+      "\n",
+      "\n",
+      "getting canonkeys: 150it [00:00, 3273.02it/s]\n",
+      "calculating kernels: 11325it [00:00, 223074.04it/s]\n",
+      "\n",
+      " --- treelet kernel matrix of size 150 built in 0.2638716697692871 seconds ---\n",
+      "\n",
+      "the gram matrix with parameters {'sub_kernel': <function polynomialkernel at 0x...>, 'n_jobs': 8, 'verbose': True} is: \n",
+      "\n",
+      "\n",
+      "\n",
+      "2 gram matrices are calculated, 0 of which are ignored.\n",
+      "\n",
+      "3. Fitting and predicting using nested cross validation. This could really take a while...\n",
+      "cross validation: 1it [00:01, 1.41s/it]"
+     ]
+    }
+   ],
+   "source": [
+    "from libs import *\n",
+    "import multiprocessing\n",
+    "\n",
+    "from pygraph.kernels.treeletKernel import treeletkernel\n",
+    "from pygraph.utils.kernels import gaussiankernel, polynomialkernel\n",
+    "\n",
+    "dslist = [\n",
+    "    {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n",
+    "     'task': 'regression'}, # node symb\n",
+    "    {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',\n",
+    "     'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt', },\n",
+    "    # contains single node graph, node symb\n",
+    "    {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds', }, # node/edge symb\n",
+    "    {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds', }, # unlabeled\n",
+    "    {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG.mat',\n",
+    "     'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}}, # node/edge symb\n",
+    "#     {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},\n",
+    "#     # node nsymb\n",
+    "    {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},\n",
+    "    # node symb/nsymb\n",
+    "#     {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'},\n",
+    "#     # node/edge symb\n",
+    "#     {'name': 'D&D', 'dataset': '../datasets/D&D/DD.mat',\n",
+    "#      'extra_params': {'am_sp_al_nl_el': [0, 1, 2, 1, -1]}}, # node symb\n",
+    "\n",
+    "    # {'name': 'COIL-DEL', 'dataset': '../datasets/COIL-DEL/COIL-DEL_A.txt'}, # edge symb, node nsymb\n",
+    "    # # # {'name': 'BZR', 'dataset': '../datasets/BZR_txt/BZR_A_sparse.txt'}, # node symb/nsymb\n",
+    "    # # # {'name': 'COX2', 'dataset': '../datasets/COX2_txt/COX2_A_sparse.txt'}, # node symb/nsymb\n",
+    "    # {'name': 'Fingerprint', 'dataset': '../datasets/Fingerprint/Fingerprint_A.txt'},\n",
+    "    #\n",
+    "    # # {'name': 'DHFR', 'dataset': '../datasets/DHFR_txt/DHFR_A_sparse.txt'}, # node symb/nsymb\n",
+    "    # # {'name': 'SYNTHETIC', 'dataset': '../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'}, # node symb/nsymb\n",
+    "    # # {'name': 'MSRC9', 'dataset': '../datasets/MSRC_9_txt/MSRC_9_A.txt'}, # node symb\n",
+    "    # # {'name': 'MSRC21', 'dataset': '../datasets/MSRC_21_txt/MSRC_21_A.txt'}, # node symb\n",
+    "    # # {'name': 'FIRSTMM_DB', 'dataset': '../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'}, # node symb/nsymb ,edge nsymb\n",
+    "\n",
+    "    # # {'name': 'PROTEINS', 'dataset': '../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'}, # node symb/nsymb\n",
+    "    # # {'name': 'PROTEINS_full', 'dataset': '../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'}, # node symb/nsymb\n",
+    "    {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb\n",
+    "    # {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1.mat',\n",
+    "    # 'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
+    "    # {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109.mat',\n",
+    "    # 'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
+    "    # {'name': 'NCI-HIV', 'dataset': '../datasets/NCI-HIV/AIDO99SD.sdf',\n",
+    "    # 'dataset_y': '../datasets/NCI-HIV/aids_conc_may04.txt',}, # node/edge symb\n",
+    "\n",
+    "    # # not working below\n",
+    "    # {'name': 'PTC_FM', 'dataset': '../datasets/PTC/Train/FM.ds',},\n",
+    "    # {'name': 'PTC_FR', 'dataset': '../datasets/PTC/Train/FR.ds',},\n",
+    "    # {'name': 'PTC_MM', 'dataset': '../datasets/PTC/Train/MM.ds',},\n",
+    "    # {'name': 'PTC_MR', 'dataset': '../datasets/PTC/Train/MR.ds',},\n",
+    "]\n",
+    "estimator = treeletkernel\n",
+    "param_grid_precomputed = {'sub_kernel': [gaussiankernel, polynomialkernel]}\n",
+    "param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},\n",
+    "              {'alpha': np.logspace(-10, 10, num=41, base=10)}]\n",
+    "\n",
+    "for ds in dslist:\n",
+    "    print()\n",
+    "    print(ds['name'])\n",
+    "    model_selection_for_precomputed_kernel(\n",
+    "        ds['dataset'],\n",
+    "        estimator,\n",
+    "        param_grid_precomputed,\n",
+    "        (param_grid[1] if ('task' in ds and ds['task']\n",
+    "                           == 'regression') else param_grid[0]),\n",
+    "        (ds['task'] if 'task' in ds else 'classification'),\n",
+    "        NUM_TRIALS=30,\n",
+    "        datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),\n",
+    "        extra_params=(ds['extra_params'] if 'extra_params' in ds else None),\n",
+    "        ds_name=ds['name'],\n",
+    "        n_jobs=multiprocessing.cpu_count(),\n",
+    "        read_gm_from_file=False,\n",
+    "        verbose=True)\n",
+    "    print()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
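
For readers skimming this patch without running the notebook: the treelet kernel of [10] compares two graphs by counting their common treelets (trees with at most six nodes, identified by canonical keys, the "canonkeys" the log refers to) and summing a sub-kernel over the per-treelet counts. The sketch below is a minimal illustration of that formula, not pygraph's implementation; the toy canonical keys, the count tables, and the `gamma` parameterization of the gaussian sub-kernel are assumptions made for the example.

```python
import math
from itertools import combinations_with_replacement

def gaussian_sub_kernel(x, y, gamma=1.0):
    # Gaussian sub-kernel on two treelet counts: exp(-gamma * (x - y)^2).
    return math.exp(-gamma * (x - y) ** 2)

def treelet_kernel_value(counts_1, counts_2, sub_kernel=gaussian_sub_kernel):
    # Treelet kernel of Gauzere et al. (2012): sum the sub-kernel over the
    # counts of every treelet pattern the two graphs have in common.
    common_keys = set(counts_1) & set(counts_2)
    return sum(sub_kernel(counts_1[k], counts_2[k]) for k in common_keys)

# Toy canonical-key -> count tables standing in for two small molecules.
graphs = [
    {'C': 4, 'C-C': 3, 'C-C-C': 2},
    {'C': 3, 'C-C': 2, 'O': 1},
]

# Build the gram matrix; only the upper triangle is computed and then
# mirrored, since the kernel is symmetric.
n = len(graphs)
gram = [[0.0] * n for _ in range(n)]
for i, j in combinations_with_replacement(range(n), 2):
    gram[i][j] = gram[j][i] = treelet_kernel_value(graphs[i], graphs[j])
print(gram)
```

Computing only the upper triangle is the same economy the notebook's progress bars show: 183 graphs yield 16836 = 183 * 184 / 2 kernel evaluations, and 150 graphs yield 11325.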
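The heavy lifting in the notebook cell is delegated to `model_selection_for_precomputed_kernel`, whose call signature the patch shows but whose internals it does not. As a rough sketch of what such a nested scheme does for the regression datasets (Acyclic, Alkane), here is one way to pair a precomputed gram matrix with scikit-learn's `KernelRidge`: an inner cross-validation picks `alpha`, repeated outer splits estimate the generalization error. The 90/10 split, the fixed seed, the RMSE scoring, and the helper name are choices made for this sketch, not necessarily what the library does.

```python
import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import KFold, cross_val_score

def nested_cv_regression(gram, y, alphas, n_trials=30, seed=0):
    """Nested CV on a precomputed gram matrix: inner CV selects alpha,
    repeated outer splits estimate the test RMSE."""
    rng = np.random.RandomState(seed)
    outer_rmse = []
    for _ in range(n_trials):
        # Shuffle graph indices and hold out 10% as the outer test set.
        idx = rng.permutation(len(y))
        test, train = idx[:len(y) // 10], idx[len(y) // 10:]
        K_train = gram[np.ix_(train, train)]  # rows AND columns sliced
        # Inner loop: pick the alpha with the best inner-CV score.
        # cross_val_score slices both axes of K_train because the
        # estimator is treated as pairwise when kernel='precomputed'.
        best_alpha = max(alphas, key=lambda a: cross_val_score(
            KernelRidge(alpha=a, kernel='precomputed'),
            K_train, y[train], cv=KFold(n_splits=10),
            scoring='neg_mean_squared_error').mean())
        model = KernelRidge(alpha=best_alpha, kernel='precomputed')
        model.fit(K_train, y[train])
        # Test rows: kernel values between test graphs and training graphs.
        y_pred = model.predict(gram[np.ix_(test, train)])
        outer_rmse.append(np.sqrt(np.mean((y_pred - y[test]) ** 2)))
    return np.mean(outer_rmse), np.std(outer_rmse)
```

Run with one of the treelet gram matrices and `alphas = np.logspace(-10, 10, num=41, base=10)` from the cell above, the returned mean and spread play the role of `final_performance` and `final_confidence` in the log, up to the library's exact split and scoring conventions; for the classification datasets the same scheme would swap in `sklearn.svm.SVC(kernel='precomputed')` with the `C` grid.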