From 342d513da425765e3bf1a317e4e31cf6bbb98fb6 Mon Sep 17 00:00:00 2001
From: linlin
Date: Tue, 6 Oct 2020 17:24:42 +0200
Subject: [PATCH] New translations run_sp_symonly.py (Chinese Simplified)

---
 lang/zh/notebooks/else/run_sp_symonly.py | 61 ++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)
 create mode 100644 lang/zh/notebooks/else/run_sp_symonly.py

diff --git a/lang/zh/notebooks/else/run_sp_symonly.py b/lang/zh/notebooks/else/run_sp_symonly.py
new file mode 100644
index 0000000..427dd42
--- /dev/null
+++ b/lang/zh/notebooks/else/run_sp_symonly.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Dec 21 17:59:28 2018
+
+@author: ljia
+"""
+
+import functools
+from libs import *
+import multiprocessing
+
+from gklearn.kernels.sp_sym import spkernel
+from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct
+#from gklearn.utils.model_selection_precomputed import trial_do
+
+dslist = [
+    {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'},
+    # node nsymb
+    {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'},
+    # node symb/nsymb
+
+    # {'name': 'COIL-DEL', 'dataset': '../datasets/COIL-DEL/COIL-DEL_A.txt'}, # edge symb, node nsymb
+    # # # {'name': 'BZR', 'dataset': '../datasets/BZR_txt/BZR_A_sparse.txt'}, # node symb/nsymb
+    # # # {'name': 'COX2', 'dataset': '../datasets/COX2_txt/COX2_A_sparse.txt'}, # node symb/nsymb
+    # {'name': 'Fingerprint', 'dataset': '../datasets/Fingerprint/Fingerprint_A.txt'},
+    #
+    # # {'name': 'DHFR', 'dataset': '../datasets/DHFR_txt/DHFR_A_sparse.txt'}, # node symb/nsymb
+    # # {'name': 'SYNTHETIC', 'dataset': '../datasets/SYNTHETIC_txt/SYNTHETIC_A_sparse.txt'}, # node symb/nsymb
+    # # {'name': 'MSRC9', 'dataset': '../datasets/MSRC_9_txt/MSRC_9_A.txt'}, # node symb
+    # # {'name': 'MSRC21', 'dataset': '../datasets/MSRC_21_txt/MSRC_21_A.txt'}, # node symb
+    # # {'name': 'FIRSTMM_DB', 'dataset': '../datasets/FIRSTMM_DB/FIRSTMM_DB_A.txt'}, # node symb/nsymb ,edge nsymb
+
+    # # {'name': 'PROTEINS', 'dataset': '../datasets/PROTEINS_txt/PROTEINS_A_sparse.txt'}, # node symb/nsymb
+    # # {'name': 'PROTEINS_full', 'dataset': '../datasets/PROTEINS_full_txt/PROTEINS_full_A_sparse.txt'}, # node symb/nsymb
+    # # {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb
+]
+estimator = spkernel
+mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel)
+param_grid_precomputed = {'node_kernels': [
+    {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}]}
+param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)},
+              {'alpha': np.logspace(-10, 10, num=41, base=10)}]
+
+for ds in dslist:
+    print()
+    print(ds['name'])
+    model_selection_for_precomputed_kernel(
+        ds['dataset'],
+        estimator,
+        param_grid_precomputed,
+        (param_grid[1] if ('task' in ds and ds['task']
+                           == 'regression') else param_grid[0]),
+        (ds['task'] if 'task' in ds else 'classification'),
+        NUM_TRIALS=30,
+        datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None),
+        extra_params=(ds['extra_params'] if 'extra_params' in ds else None),
+        ds_name=ds['name'],
+        n_jobs=multiprocessing.cpu_count(),
+        read_gm_from_file=False)
+    print()