Browse Source

New translations run_commonwalkkernel.ipynb (Chinese Simplified)

l10n_v0.2.x
linlin 4 years ago
parent
commit
030cb7a98f
1 changed files with 166 additions and 0 deletions
  1. +166
    -0
      lang/zh/notebooks/run_commonwalkkernel.ipynb

+ 166
- 0
lang/zh/notebooks/run_commonwalkkernel.ipynb View File

@@ -0,0 +1,166 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Acyclic\n",
"\n",
"--- This is a regression problem ---\n",
"\n",
"\n",
"1. Loading dataset from file...\n",
"\n",
"2. Calculating gram matrices. This could take a while...\n",
"calculating kernels: 16836it [00:04, 3664.86it/s]\n",
"\n",
" --- kernel matrix of common walk kernel of size 183 built in 4.640817880630493 seconds ---\n",
"\n",
"the gram matrix with parameters {'compute_method': 'geo', 'weight': 0.01, 'n_jobs': 8} is: \n",
"\n",
"\n",
"calculating kernels: 16836it [00:04, 3737.97it/s]\n",
"\n",
" --- kernel matrix of common walk kernel of size 183 built in 4.647830486297607 seconds ---\n",
"\n",
"the gram matrix with parameters {'compute_method': 'geo', 'weight': 0.019999999999999997, 'n_jobs': 8} is: \n",
"\n",
"\n",
"calculating kernels: 16836it [00:04, 3385.68it/s]\n",
"\n",
" --- kernel matrix of common walk kernel of size 183 built in 5.04192042350769 seconds ---\n",
"\n",
"the gram matrix with parameters {'compute_method': 'geo', 'weight': 0.03, 'n_jobs': 8} is: \n",
"\n",
"\n",
"calculating kernels: 16836it [00:04, 3624.84it/s]\n",
"\n",
" --- kernel matrix of common walk kernel of size 183 built in 4.778176546096802 seconds ---\n",
"\n",
"the gram matrix with parameters {'compute_method': 'geo', 'weight': 0.039999999999999994, 'n_jobs': 8} is: \n",
"\n",
"\n",
"calculating kernels: 16836it [00:04, 3619.18it/s]\n",
"\n",
" --- kernel matrix of common walk kernel of size 183 built in 4.761460542678833 seconds ---\n",
"\n",
"the gram matrix with parameters {'compute_method': 'geo', 'weight': 0.049999999999999996, 'n_jobs': 8} is: \n",
"\n",
"\n",
"calculating kernels: 10201it [00:02, 3556.34it/s]"
]
}
],
"source": [
"# Compute common-walk graph kernels ('geo' and 'exp' variants) for a list of\n",
"# benchmark datasets, then run model selection on the precomputed gram matrices.\n",
"# %load_ext line_profiler\n",
"# %matplotlib inline\n",
"# NOTE(review): wildcard import — `np` and `model_selection_for_precomputed_kernel`\n",
"# used below are presumably supplied by `libs`; confirm against libs module.\n",
"from libs import *\n",
"import multiprocessing\n",
"from sklearn.metrics.pairwise import rbf_kernel\n",
"\n",
"from gklearn.kernels.commonWalkKernel import commonwalkkernel\n",
"from gklearn.utils.kernels import deltakernel, kernelproduct\n",
"\n",
"# Datasets to evaluate. Each entry: display name, dataset path, optional\n",
"# 'task' (defaults to classification below), optional separate target file\n",
"# ('dataset_y') and loader options ('extra_params'). Commented entries are\n",
"# skipped in this run.\n",
"dslist = [\n",
" {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n",
" 'task': 'regression'}, # node symb\n",
" {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression',\n",
" 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, \n",
" # contains single node graph, node symb\n",
" {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb\n",
" {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled\n",
" {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb\n",
" {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},\n",
" # node nsymb\n",
" {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},\n",
" # node symb/nsymb\n",
"# {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'},\n",
"# # node/edge symb\n",
"# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb\n",
"\n",
" # {'name': 'COIL-DEL', 'dataset': '../datasets/COIL-DEL/COIL-DEL_A.txt'}, # edge symb, node nsymb\n",
" # # # {'name': 'BZR', 'dataset': '../datasets/BZR_txt/BZR_A_sparse.txt'}, # node symb/nsymb\n",
" # # # {'name': 'COX2', 'dataset': '../datasets/COX2_txt/COX2_A_sparse.txt'}, # node symb/nsymb\n",
" # {'name': 'Fingerprint', 'dataset': '../datasets/Fingerprint/Fingerprint_A.txt'},\n",
" #\n",
" # # {'name': 'DHFR', 'dataset': '../datasets/DHFR_txt/DHFR_A_sparse.txt'}, # node symb/nsymb\n",
" # # {'name': 'SYNTHETIC', 'dataset': '../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'}, # node symb/nsymb\n",
" # # {'name': 'MSRC9', 'dataset': '../datasets/MSRC_9_txt/MSRC_9_A.txt'}, # node symb\n",
" # # {'name': 'MSRC21', 'dataset': '../datasets/MSRC_21_txt/MSRC_21_A.txt'}, # node symb\n",
" # # {'name': 'FIRSTMM_DB', 'dataset': '../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'}, # node symb/nsymb ,edge nsymb\n",
"\n",
" # # {'name': 'PROTEINS', 'dataset': '../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'}, # node symb/nsymb\n",
" # # {'name': 'PROTEINS_full', 'dataset': '../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'}, # node symb/nsymb\n",
" # # {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb\n",
" # {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1.mat',\n",
" # 'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
" # {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109.mat',\n",
" # 'extra_params': {'am_sp_al_nl_el': [1, 1, 2, 0, -1]}}, # node symb\n",
" # {'name': 'NCI-HIV', 'dataset': '../datasets/NCI-HIV/AIDO99SD.sdf',\n",
" # 'dataset_y': '../datasets/NCI-HIV/aids_conc_may04.txt',}, # node/edge symb\n",
"\n",
" # # not working below\n",
" # {'name': 'PTC_FM', 'dataset': '../datasets/PTC/Train/FM.ds',},\n",
" # {'name': 'PTC_FR', 'dataset': '../datasets/PTC/Train/FR.ds',},\n",
" # {'name': 'PTC_MM', 'dataset': '../datasets/PTC/Train/MM.ds',},\n",
" # {'name': 'PTC_MR', 'dataset': '../datasets/PTC/Train/MR.ds',},\n",
"]\n",
"estimator = commonwalkkernel\n",
"# Kernel hyper-parameter grids: 'geo' sweeps 15 real-valued decay weights in\n",
"# [0.01, 0.15]; 'exp' sweeps integer weights 0..14.\n",
"param_grid_precomputed = [{'compute_method': ['geo'], \n",
" 'weight': np.linspace(0.01, 0.15, 15)},\n",
"# 'weight': np.logspace(-1, -10, num=10, base=10)},\n",
" {'compute_method': ['exp'], 'weight': range(0, 15)}]\n",
"# Estimator grids: the 'C' grid is used for classification tasks, the 'alpha'\n",
"# grid for regression (selected per dataset in the loop below).\n",
"param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},\n",
" {'alpha': np.logspace(-10, 10, num=41, base=10)}]\n",
"\n",
"# For each dataset: run model selection with 30 trials on the precomputed gram\n",
"# matrices, using all CPU cores; 'task' defaults to classification when absent.\n",
"for ds in dslist:\n",
" print()\n",
" print(ds['name'])\n",
" model_selection_for_precomputed_kernel(\n",
" ds['dataset'],\n",
" estimator,\n",
" param_grid_precomputed,\n",
" (param_grid[1] if ('task' in ds and ds['task']\n",
" == 'regression') else param_grid[0]),\n",
" (ds['task'] if 'task' in ds else 'classification'),\n",
" NUM_TRIALS=30,\n",
" datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),\n",
" extra_params=(ds['extra_params'] if 'extra_params' in ds else None),\n",
" ds_name=ds['name'],\n",
" n_jobs=multiprocessing.cpu_count(),\n",
" read_gm_from_file=False,\n",
" verbose=True)\n",
" print()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Loading…
Cancel
Save