You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

run_pathkernel_acyclic-checkpoint.ipynb 42 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849
  1. {
  2. "cells": [
  3. {
  4. "cell_type": "code",
  5. "execution_count": 53,
  6. "metadata": {},
  7. "outputs": [
  8. {
  9. "name": "stdout",
  10. "output_type": "stream",
  11. "text": [
  12. "[[0, 3, 1], [0, 3, 4, 2], [0, 3], [0, 3, 4], [1, 3, 4, 2], [1, 3], [1, 3, 4], [2, 4, 3], [2, 4], [3, 4]]\n",
  13. "10\n",
  14. "[[0, 4, 1], [0, 4, 5, 2], [0, 4, 5, 6, 3], [0, 4], [0, 4, 5], [0, 4, 5, 6], [1, 4, 5, 2], [1, 4, 5, 6, 3], [1, 4], [1, 4, 5], [1, 4, 5, 6], [2, 5, 6, 3], [2, 5, 4], [2, 5], [2, 5, 6], [3, 6, 5, 4], [3, 6, 5], [3, 6], [4, 5], [4, 5, 6], [5, 6]]\n",
  15. "21\n",
  16. "1\n",
  17. "yes\n",
  18. "1\n",
  19. "yes\n",
  20. "1\n",
  21. "yes\n",
  22. "1\n",
  23. "yes\n",
  24. "1\n",
  25. "yes\n",
  26. "1\n",
  27. "yes\n",
  28. "1\n",
  29. "yes\n",
  30. "1\n",
  31. "yes\n",
  32. "1\n",
  33. "yes\n",
  34. "1\n",
  35. "yes\n",
  36. "1\n",
  37. "yes\n",
  38. "1\n",
  39. "yes\n",
  40. "1\n",
  41. "yes\n",
  42. "1\n",
  43. "yes\n",
  44. "1\n",
  45. "yes\n",
  46. "1\n",
  47. "yes\n",
  48. "1\n",
  49. "yes\n",
  50. "1\n",
  51. "yes\n",
  52. "1\n",
  53. "yes\n",
  54. "1\n",
  55. "yes\n",
  56. "1\n",
  57. "yes\n",
  58. "1\n",
  59. "yes\n",
  60. "1\n",
  61. "yes\n",
  62. "1\n",
  63. "yes\n",
  64. "1\n",
  65. "yes\n",
  66. "1\n",
  67. "yes\n",
  68. "1\n",
  69. "yes\n",
  70. "1\n",
  71. "yes\n",
  72. "1\n",
  73. "yes\n",
  74. "1\n",
  75. "yes\n",
  76. "1\n",
  77. "yes\n",
  78. "1\n",
  79. "yes\n",
  80. "1\n",
  81. "yes\n",
  82. "1\n",
  83. "yes\n",
  84. "1\n",
  85. "yes\n",
  86. "1\n",
  87. "yes\n",
  88. "1\n",
  89. "yes\n",
  90. "1\n",
  91. "yes\n",
  92. "1\n",
  93. "yes\n",
  94. "1\n",
  95. "yes\n",
  96. "1\n",
  97. "yes\n",
  98. "1\n",
  99. "yes\n",
  100. "1\n",
  101. "yes\n",
  102. "1\n",
  103. "yes\n",
  104. "1\n",
  105. "yes\n",
  106. "1\n",
  107. "yes\n",
  108. "1\n",
  109. "yes\n",
  110. "1\n",
  111. "yes\n",
  112. "1\n",
  113. "yes\n",
  114. "1\n",
  115. "yes\n",
  116. "1\n",
  117. "yes\n",
  118. "1\n",
  119. "yes\n",
  120. "1\n",
  121. "yes\n",
  122. "1\n",
  123. "yes\n",
  124. "1\n",
  125. "yes\n",
  126. "1\n",
  127. "yes\n",
  128. "1\n",
  129. "yes\n",
  130. "1\n",
  131. "yes\n",
  132. "1\n",
  133. "yes\n",
  134. "1\n",
  135. "yes\n",
  136. "1\n",
  137. "yes\n",
  138. "1\n",
  139. "yes\n",
  140. "1\n",
  141. "yes\n",
  142. "1\n",
  143. "yes\n",
  144. "0.10952380952380952\n"
  145. ]
  146. }
  147. ],
  148. "source": [
  149. "import sys\n",
  150. "import networkx as nx\n",
  151. "sys.path.insert(0, \"../\")\n",
  152. "from pygraph.utils.graphfiles import loadDataset\n",
  153. "from pygraph.kernels.deltaKernel import deltaKernel\n",
  154. "\n",
  155. "dataset, y = loadDataset(\"../../../../datasets/acyclic/Acyclic/dataset_bps.ds\")\n",
  156. "G1 = dataset[12]\n",
  157. "G2 = dataset[55]\n",
  158. "sp1 = []\n",
  159. "num_nodes = G1.number_of_nodes()\n",
  160. "for node1 in range(num_nodes):\n",
  161. " for node2 in range(node1 + 1, num_nodes):\n",
  162. " sp1.append(nx.shortest_path(G1, node1, node2, weight = 'cost'))\n",
  163. "print(sp1)\n",
  164. "print(len(sp1))\n",
  165. "sp2 = []\n",
  166. "num_nodes = G2.number_of_nodes()\n",
  167. "for node1 in range(num_nodes):\n",
  168. " for node2 in range(node1 + 1, num_nodes):\n",
  169. " sp2.append(nx.shortest_path(G2, node1, node2, weight = 'cost'))\n",
  170. "print(sp2)\n",
  171. "print(len(sp2))\n",
  172. "\n",
  173. "kernel = 0\n",
  174. "for path1 in sp1:\n",
  175. " for path2 in sp2:\n",
  176. " if len(path1) == len(path2):\n",
  177. " kernel_path = deltaKernel(G1.node[path1[0]]['label'] == G2.node[path2[0]]['label'])\n",
  178. " print(kernel_path)\n",
  179. " if kernel_path:\n",
  180. " print('yes')\n",
  181. " for i in range(1, len(path1)):\n",
  182. " kernel_path *= deltaKernel(G1[path1[i - 1]][path1[i]]['label'] == G2[path2[i - 1]][path2[i]]['label']) * deltaKernel(G1.node[path1[i]]['label'] == G2.node[path2[i]]['label'])\n",
  183. " kernel += kernel_path\n",
  184. " \n",
  185. "kernel = kernel / (len(sp1) * len(sp2))\n",
  186. "\n",
  187. "print(kernel)"
  188. ]
  189. },
  190. {
  191. "cell_type": "code",
  192. "execution_count": 30,
  193. "metadata": {
  194. "scrolled": false
  195. },
  196. "outputs": [
  197. {
  198. "name": "stdout",
  199. "output_type": "stream",
  200. "text": [
  201. "\n",
  202. "- This script take as input a kernel matrix\n",
  203. "and returns the classification or regression performance\n",
  204. "- The kernel matrix can be calculated using any of the graph kernels approaches\n",
  205. "- The criteria used for prediction are SVM for classification and kernel Ridge regression for regression\n",
  206. "- For predition we divide the data in training, validation and test. For each split, we first train on the train data, \n",
  207. "then evaluate the performance on the validation. We choose the optimal parameters for the validation set and finally\n",
  208. "provide the corresponding performance on the test set. If more than one split is performed, the final results \n",
  209. "correspond to the average of the performances on the test sets. \n",
  210. "\n",
  211. "@references\n",
  212. " https://github.com/eghisu/GraphKernels/blob/master/GraphKernelsCollection/python_scripts/compute_perf_gk.py\n",
  213. "\n",
  214. "\n",
  215. " --- This is a regression problem ---\n",
  216. "\n",
  217. " Normalizing output y...\n",
  218. "\n",
  219. " Loading the train set kernel matrix from file...\n",
  220. "[[ 0.15254237 0.08333333 0.0625 ..., 0.11363636 0.11363636\n",
  221. " 0.11363636]\n",
  222. " [ 0.08333333 0.18518519 0.15591398 ..., 0.16617791 0.16617791\n",
  223. " 0.16890214]\n",
  224. " [ 0.0625 0.15591398 0.15254237 ..., 0.12987013 0.12987013\n",
  225. " 0.13163636]\n",
  226. " ..., \n",
  227. " [ 0.11363636 0.16617791 0.12987013 ..., 0.26383753 0.2639004\n",
  228. " 0.26156557]\n",
  229. " [ 0.11363636 0.16617791 0.12987013 ..., 0.2639004 0.26396688\n",
  230. " 0.26162729]\n",
  231. " [ 0.11363636 0.16890214 0.13163636 ..., 0.26156557 0.26162729\n",
  232. " 0.25964592]]\n",
  233. "\n",
  234. " Loading the test set kernel matrix from file...\n",
  235. "[[ 0.18518519 0.1715847 0.11111111 0.16588603 0.11904762 0.16450216\n",
  236. " 0.17281421 0.14285714 0.125 0.16477273 0.16880154 0.14583333\n",
  237. " 0.1660693 0.16906445 0.13333333 0.16612903 0.16420966 0.16441006\n",
  238. " 0.15151515]\n",
  239. " [ 0.1715847 0.19988118 0.15173333 0.18435596 0.16465263 0.21184723\n",
  240. " 0.18985964 0.19960191 0.16819723 0.21540115 0.19575264 0.2041482\n",
  241. " 0.21842419 0.20001664 0.18754969 0.2205599 0.20506165 0.22256445\n",
  242. " 0.2141792 ]\n",
  243. " [ 0.11111111 0.15173333 0.16303156 0.13416478 0.16903494 0.16960573\n",
  244. " 0.13862936 0.18511129 0.16989276 0.17395417 0.14762351 0.18709221\n",
  245. " 0.17706477 0.15293506 0.17970939 0.17975775 0.16082785 0.18295252\n",
  246. " 0.19186573]\n",
  247. " [ 0.16588603 0.18435596 0.13416478 0.17413923 0.14529511 0.19230449\n",
  248. " 0.17775828 0.17598858 0.14892223 0.19462663 0.18166555 0.17986029\n",
  249. " 0.1964604 0.18450695 0.16510376 0.19788853 0.1876399 0.19921541\n",
  250. " 0.18843419]\n",
  251. " [ 0.11904762 0.16465263 0.16903494 0.14529511 0.17703225 0.18464872\n",
  252. " 0.15002895 0.19785455 0.17779663 0.18950917 0.16010081 0.2005743\n",
  253. " 0.19306131 0.16599977 0.19113529 0.1960531 0.175064 0.19963794\n",
  254. " 0.20696464]\n",
  255. " [ 0.16450216 0.21184723 0.16960573 0.19230449 0.18464872 0.23269314\n",
  256. " 0.19681552 0.22450276 0.1871932 0.23765844 0.20733248 0.22967925\n",
  257. " 0.241199 0.21337314 0.21125341 0.24426963 0.22285333 0.24802555\n",
  258. " 0.24156669]\n",
  259. " [ 0.17281421 0.18985964 0.13862936 0.17775828 0.15002895 0.19681552\n",
  260. " 0.18309269 0.18152273 0.15411585 0.19935309 0.18641218 0.18556038\n",
  261. " 0.20169527 0.18946029 0.17030032 0.20320694 0.19192382 0.2042596\n",
  262. " 0.19428999]\n",
  263. " [ 0.14285714 0.19960191 0.18511129 0.17598858 0.19785455 0.22450276\n",
  264. " 0.18152273 0.23269314 0.20168735 0.23049584 0.19407926 0.23694176\n",
  265. " 0.23486084 0.20134404 0.22042984 0.23854906 0.21275711 0.24302959\n",
  266. " 0.24678197]\n",
  267. " [ 0.125 0.16819723 0.16989276 0.14892223 0.17779663 0.1871932\n",
  268. " 0.15411585 0.20168735 0.18391356 0.19188588 0.16365606 0.20428161\n",
  269. " 0.1952436 0.16940489 0.1919249 0.19815511 0.17760881 0.20152837\n",
  270. " 0.20988805]\n",
  271. " [ 0.16477273 0.21540115 0.17395417 0.19462663 0.18950917 0.23765844\n",
  272. " 0.19935309 0.23049584 0.19188588 0.24296859 0.21058278 0.23586086\n",
  273. " 0.24679036 0.21702635 0.21699483 0.25006701 0.22724646 0.25407837\n",
  274. " 0.24818625]\n",
  275. " [ 0.16880154 0.19575264 0.14762351 0.18166555 0.16010081 0.20733248\n",
  276. " 0.18641218 0.19407926 0.16365606 0.21058278 0.19214629 0.19842989\n",
  277. " 0.21317298 0.19609213 0.18225175 0.2151567 0.20088139 0.2171273\n",
  278. " 0.20810339]\n",
  279. " [ 0.14583333 0.2041482 0.18709221 0.17986029 0.2005743 0.22967925\n",
  280. " 0.18556038 0.23694176 0.20428161 0.23586086 0.19842989 0.24154885\n",
  281. " 0.24042054 0.20590264 0.22439219 0.24421452 0.21769149 0.24880304\n",
  282. " 0.25200246]\n",
  283. " [ 0.1660693 0.21842419 0.17706477 0.1964604 0.19306131 0.241199\n",
  284. " 0.20169527 0.23486084 0.1952436 0.24679036 0.21317298 0.24042054\n",
  285. " 0.25107069 0.21988195 0.22126548 0.25446921 0.23058896 0.25855949\n",
  286. " 0.25312182]\n",
  287. " [ 0.16906445 0.20001664 0.15293506 0.18450695 0.16599977 0.21337314\n",
  288. " 0.18946029 0.20134404 0.16940489 0.21702635 0.19609213 0.20590264\n",
  289. " 0.21988195 0.20052959 0.18917551 0.22212027 0.2061696 0.22441239\n",
  290. " 0.21607563]\n",
  291. " [ 0.13333333 0.18754969 0.17970939 0.16510376 0.19113529 0.21125341\n",
  292. " 0.17030032 0.22042984 0.1919249 0.21699483 0.18225175 0.22439219\n",
  293. " 0.22126548 0.18917551 0.2112185 0.224781 0.20021961 0.22904467\n",
  294. " 0.23356012]\n",
  295. " [ 0.16612903 0.2205599 0.17975775 0.19788853 0.1960531 0.24426963\n",
  296. " 0.20320694 0.23854906 0.19815511 0.25006701 0.2151567 0.24421452\n",
  297. " 0.25446921 0.22212027 0.224781 0.25800115 0.23326559 0.26226067\n",
  298. " 0.25717144]\n",
  299. " [ 0.16420966 0.20506165 0.16082785 0.1876399 0.175064 0.22285333\n",
  300. " 0.19192382 0.21275711 0.17760881 0.22724646 0.20088139 0.21769149\n",
  301. " 0.23058896 0.2061696 0.20021961 0.23326559 0.21442192 0.2364528\n",
  302. " 0.22891788]\n",
  303. " [ 0.16441006 0.22256445 0.18295252 0.19921541 0.19963794 0.24802555\n",
  304. " 0.2042596 0.24302959 0.20152837 0.25407837 0.2171273 0.24880304\n",
  305. " 0.25855949 0.22441239 0.22904467 0.26226067 0.2364528 0.26687384\n",
  306. " 0.26210305]\n",
  307. " [ 0.15151515 0.2141792 0.19186573 0.18843419 0.20696464 0.24156669\n",
  308. " 0.19428999 0.24678197 0.20988805 0.24818625 0.20810339 0.25200246\n",
  309. " 0.25312182 0.21607563 0.23356012 0.25717144 0.22891788 0.26210305\n",
  310. " 0.26386999]]\n"
  311. ]
  312. },
  313. {
  314. "ename": "ValueError",
  315. "evalue": "Precomputed metric requires shape (n_queries, n_indexed). Got (19, 19) for 164 indexed.",
  316. "output_type": "error",
  317. "traceback": [
  318. "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
  319. "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
  320. "\u001b[0;32m<ipython-input-30-d4c5f46d5abf>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 133\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m \u001b[0;31m# predict on the test set\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 135\u001b[0;31m \u001b[0my_pred_test\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mKR\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mKmatrix_test\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 136\u001b[0m \u001b[0;31m# print(y_pred)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 137\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
  321. "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/sklearn/kernel_ridge.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, X)\u001b[0m\n\u001b[1;32m 182\u001b[0m \"\"\"\n\u001b[1;32m 183\u001b[0m \u001b[0mcheck_is_fitted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m\"X_fit_\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"dual_coef_\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 184\u001b[0;31m \u001b[0mK\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_kernel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mX_fit_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 185\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mK\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdual_coef_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  322. "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/sklearn/kernel_ridge.py\u001b[0m in \u001b[0;36m_get_kernel\u001b[0;34m(self, X, Y)\u001b[0m\n\u001b[1;32m 119\u001b[0m \"coef0\": self.coef0}\n\u001b[1;32m 120\u001b[0m return pairwise_kernels(X, Y, metric=self.kernel,\n\u001b[0;32m--> 121\u001b[0;31m filter_params=True, **params)\n\u001b[0m\u001b[1;32m 122\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  323. "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/sklearn/metrics/pairwise.py\u001b[0m in \u001b[0;36mpairwise_kernels\u001b[0;34m(X, Y, metric, filter_params, n_jobs, **kwds)\u001b[0m\n\u001b[1;32m 1389\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1390\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmetric\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"precomputed\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1391\u001b[0;31m \u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcheck_pairwise_arrays\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprecomputed\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1392\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1393\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmetric\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mGPKernel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  324. "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/sklearn/metrics/pairwise.py\u001b[0m in \u001b[0;36mcheck_pairwise_arrays\u001b[0;34m(X, Y, precomputed, dtype)\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0;34m\"(n_queries, n_indexed). Got (%d, %d) \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0;34m\"for %d indexed.\"\u001b[0m \u001b[0;34m%\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 119\u001b[0;31m (X.shape[0], X.shape[1], Y.shape[0]))\n\u001b[0m\u001b[1;32m 120\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 121\u001b[0m raise ValueError(\"Incompatible dimension for X and Y matrices: \"\n",
  325. "\u001b[0;31mValueError\u001b[0m: Precomputed metric requires shape (n_queries, n_indexed). Got (19, 19) for 164 indexed."
  326. ]
  327. }
  328. ],
  329. "source": [
  330. "# Author: Elisabetta Ghisu\n",
  331. "\n",
  332. "\"\"\"\n",
  333. "- This script take as input a kernel matrix\n",
  334. "and returns the classification or regression performance\n",
  335. "- The kernel matrix can be calculated using any of the graph kernels approaches\n",
  336. "- The criteria used for prediction are SVM for classification and kernel Ridge regression for regression\n",
  337. "- For predition we divide the data in training, validation and test. For each split, we first train on the train data, \n",
  338. "then evaluate the performance on the validation. We choose the optimal parameters for the validation set and finally\n",
  339. "provide the corresponding performance on the test set. If more than one split is performed, the final results \n",
  340. "correspond to the average of the performances on the test sets. \n",
  341. "\n",
  342. "@references\n",
  343. " https://github.com/eghisu/GraphKernels/blob/master/GraphKernelsCollection/python_scripts/compute_perf_gk.py\n",
  344. "\"\"\"\n",
  345. "\n",
  346. "print(__doc__)\n",
  347. "\n",
  348. "import sys\n",
  349. "import pathlib\n",
  350. "import os\n",
  351. "sys.path.insert(0, \"../\")\n",
  352. "from tabulate import tabulate\n",
  353. "\n",
  354. "import random\n",
  355. "import numpy as np\n",
  356. "import matplotlib.pyplot as plt\n",
  357. "\n",
  358. "from sklearn.kernel_ridge import KernelRidge # 0.17\n",
  359. "from sklearn.metrics import accuracy_score, mean_squared_error\n",
  360. "from sklearn import svm\n",
  361. "\n",
  362. "from pygraph.kernels.pathKernel import pathKernel\n",
  363. "from pygraph.utils.graphfiles import loadDataset\n",
  364. "\n",
  365. "# print('\\n Loading dataset from file...')\n",
  366. "# dataset, y = loadDataset(\"/home/ljia/Documents/research-repo/datasets/acyclic/Acyclic/dataset_bps.ds\")\n",
  367. "# y = np.array(y)\n",
  368. "# print(y)\n",
  369. "\n",
  370. "# kernel_file_path = 'marginalizedkernelmatrix.ds'\n",
  371. "# path = pathlib.Path(kernel_file_path)\n",
  372. "# if path.is_file():\n",
  373. "# print('\\n Loading the matrix from file...')\n",
  374. "# Kmatrix = np.loadtxt(kernel_file_path)\n",
  375. "# print(Kmatrix)\n",
  376. "# else:\n",
  377. "# print('\\n Calculating kernel matrix, this could take a while...')\n",
  378. "# Kmatrix = marginalizeKernel(dataset)\n",
  379. "# print(Kmatrix)\n",
  380. "# print('Saving kernel matrix to file...')\n",
  381. "# np.savetxt(kernel_file_path, Kmatrix)\n",
  382. "\n",
  383. "# setup the parameters\n",
  384. "model_type = 'regression' # Regression or classification problem\n",
  385. "print('\\n --- This is a %s problem ---' % model_type)\n",
  386. "\n",
  387. "# datasize = len(dataset)\n",
  388. "trials = 100 # Trials for hyperparameters random search\n",
  389. "splits = 100 # Number of splits of the data\n",
  390. "alpha_grid = np.linspace(0.01, 100, num = trials) # corresponds to (2*C)^-1 in other linear models such as LogisticRegression\n",
  391. "# C_grid = np.linspace(0.0001, 10, num = trials)\n",
  392. "random.seed(20) # Set the seed for uniform parameter distribution\n",
  393. "data_dir = '/home/ljia/Documents/research-repo/datasets/acyclic/Acyclic/'\n",
  394. "\n",
  395. "# set the output path\n",
  396. "kernel_file_path = 'kernelmatrices_marginalized_acyclic/'\n",
  397. "if not os.path.exists(kernel_file_path):\n",
  398. " os.makedirs(kernel_file_path)\n",
  399. "\n",
  400. "\n",
  401. "\"\"\"\n",
  402. "- Here starts the main program\n",
  403. "- First we permute the data, then for each split we evaluate corresponding performances\n",
  404. "- In the end, the performances are averaged over the test sets\n",
  405. "\"\"\"\n",
  406. "\n",
  407. "# Initialize the performance of the best parameter trial on validation with the corresponding performance on test\n",
  408. "val_split = []\n",
  409. "test_split = []\n",
  410. "\n",
  411. "p_quit = 0.5\n",
  412. "\n",
  413. "# for each split of the data\n",
  414. "for j in range(10):\n",
  415. " dataset_train, y_train = loadDataset(data_dir + 'trainset_' + str(j) + '.ds')\n",
  416. " dataset_test, y_test = loadDataset(data_dir + 'testset_' + str(j) + '.ds')\n",
  417. " \n",
  418. " # Normalization step (for real valued targets only)\n",
  419. " if model_type == 'regression':\n",
  420. " print('\\n Normalizing output y...')\n",
  421. " y_train_mean = np.mean(y_train)\n",
  422. " y_train_std = np.std(y_train)\n",
  423. " y_train = (y_train - y_train_mean) / float(y_train_std)\n",
  424. "# print(y)\n",
  425. " \n",
  426. " # save kernel matrices to files / read kernel matrices from files\n",
  427. " kernel_file_train = kernel_file_path + 'train' + str(j) + '_pquit_' + str(p_quit)\n",
  428. " kernel_file_test = kernel_file_path + 'test' + str(j) + '_pquit_' + str(p_quit)\n",
  429. " path_train = pathlib.Path(kernel_file_train)\n",
  430. " path_test = pathlib.Path(kernel_file_test)\n",
  431. " # get train set kernel matrix\n",
  432. " if path_train.is_file():\n",
  433. " print('\\n Loading the train set kernel matrix from file...')\n",
  434. " Kmatrix_train = np.loadtxt(kernel_file_train)\n",
  435. " print(Kmatrix_train)\n",
  436. " else:\n",
  437. " print('\\n Calculating train set kernel matrix, this could take a while...')\n",
  438. " Kmatrix_train = marginalizedkernel(dataset_train, p_quit, 20)\n",
  439. " print(Kmatrix_train)\n",
  440. " print('\\n Saving train set kernel matrix to file...')\n",
  441. " np.savetxt(kernel_file_train, Kmatrix_train)\n",
  442. " # get test set kernel matrix\n",
  443. " if path_test.is_file():\n",
  444. " print('\\n Loading the test set kernel matrix from file...')\n",
  445. " Kmatrix_test = np.loadtxt(kernel_file_test)\n",
  446. " print(Kmatrix_test)\n",
  447. " else:\n",
  448. " print('\\n Calculating test set kernel matrix, this could take a while...')\n",
  449. " Kmatrix_test = marginalizedkernel(dataset_test, p_quit, 20)\n",
  450. " print(Kmatrix_test)\n",
  451. " print('\\n Saving test set kernel matrix to file...')\n",
  452. " np.savetxt(kernel_file_test, Kmatrix_test)\n",
  453. "\n",
  454. " # For each parameter trial\n",
  455. " for i in range(trials):\n",
  456. " # For regression use the Kernel Ridge method\n",
  457. " if model_type == 'regression':\n",
  458. " # print('\\n Starting experiment for trial %d and parameter alpha = %3f\\n ' % (i, alpha_grid[i]))\n",
  459. "\n",
  460. " # Fit the kernel ridge model\n",
  461. " KR = KernelRidge(kernel = 'precomputed', alpha = alpha_grid[i])\n",
  462. " KR.fit(Kmatrix_train, y_train)\n",
  463. "\n",
  464. " # predict on the test set\n",
  465. " y_pred_test = KR.predict(Kmatrix_test)\n",
  466. " # print(y_pred)\n",
  467. "\n",
  468. " # adjust prediction: needed because the training targets have been normalized\n",
  469. " y_pred_test = y_pred_test * float(y_train_std) + y_train_mean\n",
  470. " # print(y_pred_test)\n",
  471. "\n",
  472. " # root mean squared error in test \n",
  473. " rmse_test = np.sqrt(mean_squared_error(y_test, y_pred_test))\n",
  474. " perf_all_test.append(rmse_test)\n",
  475. "\n",
  476. " # print('The performance on the validation set is: %3f' % rmse)\n",
  477. " # print('The performance on the test set is: %3f' % rmse_test)\n",
  478. "\n",
  479. " # --- FIND THE OPTIMAL PARAMETERS --- #\n",
  480. " # For regression: minimise the mean squared error\n",
  481. " if model_type == 'regression':\n",
  482. "\n",
  483. " # get optimal parameter on test (argmin mean squared error)\n",
  484. " min_idx = np.argmin(perf_all_test)\n",
  485. " alpha_opt = alpha_grid[min_idx]\n",
  486. "\n",
  487. " # corresponding performance on test for the same parameter\n",
  488. " perf_test_opt = perf_all_test[min_idx]\n",
  489. "\n",
  490. " print('The best performance is for trial %d with parameter alpha = %3f' % (min_idx, alpha_opt))\n",
  491. " print('The corresponding performance on test set is: %3f' % perf_test_opt)\n",
  492. " \n",
  493. " \n",
  494. " \n",
  495. "\n",
  496. "# For each split of the data\n",
  497. "for j in range(10, 10 + splits):\n",
  498. " print('Starting split %d...' % j)\n",
  499. "\n",
  500. " # Set the random set for data permutation\n",
  501. " random_state = int(j)\n",
  502. " np.random.seed(random_state)\n",
  503. " idx_perm = np.random.permutation(datasize)\n",
  504. "# print(idx_perm)\n",
  505. " \n",
  506. " # Permute the data\n",
  507. " y_perm = y[idx_perm] # targets permutation\n",
  508. "# print(y_perm)\n",
  509. " Kmatrix_perm = Kmatrix[:, idx_perm] # inputs permutation\n",
  510. "# print(Kmatrix_perm)\n",
  511. " Kmatrix_perm = Kmatrix_perm[idx_perm, :] # inputs permutation\n",
  512. " \n",
  513. " # Set the training, validation and test\n",
  514. " # Note: the percentage can be set up by the user\n",
  515. " num_train_val = int((datasize * 90) / 100) # 90% (of entire dataset) for training and validation\n",
  516. " num_test = datasize - num_train_val # 10% (of entire dataset) for test\n",
  517. " num_train = int((num_train_val * 90) / 100) # 90% (of train + val) for training\n",
  518. " num_val = num_train_val - num_train # 10% (of train + val) for validation\n",
  519. " \n",
  520. " # Split the kernel matrix\n",
  521. " Kmatrix_train = Kmatrix_perm[0:num_train, 0:num_train]\n",
  522. " Kmatrix_val = Kmatrix_perm[num_train:(num_train + num_val), 0:num_train]\n",
  523. " Kmatrix_test = Kmatrix_perm[(num_train + num_val):datasize, 0:num_train]\n",
  524. "\n",
  525. " # Split the targets\n",
  526. " y_train = y_perm[0:num_train]\n",
  527. "\n",
  528. " # Normalization step (for real valued targets only)\n",
  529. " print('\\n Normalizing output y...')\n",
  530. " if model_type == 'regression':\n",
  531. " y_train_mean = np.mean(y_train)\n",
  532. " y_train_std = np.std(y_train)\n",
  533. " y_train = (y_train - y_train_mean) / float(y_train_std)\n",
  534. "# print(y)\n",
  535. " \n",
  536. " y_val = y_perm[num_train:(num_train + num_val)]\n",
  537. " y_test = y_perm[(num_train + num_val):datasize]\n",
  538. " \n",
  539. " # Record the performance for each parameter trial respectively on validation and test set\n",
  540. " perf_all_val = []\n",
  541. " perf_all_test = []\n",
  542. " \n",
  543. " "
  544. ]
  545. },
  546. {
  547. "cell_type": "code",
  548. "execution_count": 2,
  549. "metadata": {},
  550. "outputs": [
  551. {
  552. "name": "stdout",
  553. "output_type": "stream",
  554. "text": [
  555. "\n",
  556. "- This script take as input a kernel matrix\n",
  557. "and returns the classification or regression performance\n",
  558. "- The kernel matrix can be calculated using any of the graph kernels approaches\n",
  559. "- The criteria used for prediction are SVM for classification and kernel Ridge regression for regression\n",
  560. "- For predition we divide the data in training, validation and test. For each split, we first train on the train data, \n",
  561. "then evaluate the performance on the validation. We choose the optimal parameters for the validation set and finally\n",
  562. "provide the corresponding performance on the test set. If more than one split is performed, the final results \n",
  563. "correspond to the average of the performances on the test sets. \n",
  564. "\n",
  565. "@references\n",
  566. " https://github.com/eghisu/GraphKernels/blob/master/GraphKernelsCollection/python_scripts/compute_perf_gk.py\n",
  567. "\n",
  568. "\n",
  569. " Loading dataset from file...\n",
  570. "[ -23.7 14. 37.3 109.7 10.8 39. 42. 66.6 135. 148.5\n",
  571. " 40. 34.6 32. 63. 53.5 67. 64.4 84.7 95.5 92.\n",
  572. " 84.4 154. 156. 166. 183. 70.3 63.6 52.5 59. 59.5\n",
  573. " 55.2 88. 83. 104.5 102. 92. 107.4 123.2 112.5 118.5\n",
  574. " 101.5 173.7 165.5 181. 99.5 92.3 90.1 80.2 82. 91.2\n",
  575. " 91.5 81.2 93. 69. 86.3 82. 103. 103.5 96. 112. 104.\n",
  576. " 132.5 123.5 120.3 145. 144.2 142.8 132. 134.2 137. 139.\n",
  577. " 133.6 120.4 120. 137. 195.8 177.2 181. 185.9 175.7 186. 211.\n",
  578. " 125. 118. 117.1 107. 102.5 112. 97.4 91.5 87.6 106.5\n",
  579. " 101. 99.3 90. 137. 114. 126. 124. 140.5 157.5 146. 145.\n",
  580. " 141. 171. 166. 155. 145. 159. 138. 142. 159. 163.5\n",
  581. " 229.5 142. 125. 132. 130.5 125. 122. 121. 122.2 112. 106.\n",
  582. " 114.5 151. 128.5 109.5 126. 147. 158. 147. 165. 188.9\n",
  583. " 170. 178. 148.5 165. 177. 167. 195. 226. 215. 201. 205.\n",
  584. " 151.5 165.5 157. 139. 163. 153.5 139. 162. 173. 159.5\n",
  585. " 159.5 155.5 141. 126. 164. 163. 166.5 146. 165. 159. 195.\n",
  586. " 218. 250. 235. 186.5 156.5 162. 162. 170.2 173.2 186.8\n",
  587. " 173. 187. 174. 188.5 199. 228. 215. 216. 240. ]\n",
  588. "\n",
  589. " --- This is a regression problem ---\n",
  590. "\n",
  591. " Calculating kernel matrix, this could take a while...\n",
  592. "--- mean average path kernel matrix of size 185 built in 38.70095658302307 seconds ---\n",
  593. "[[ 0.55555556 0.22222222 0. ..., 0. 0. 0. ]\n",
  594. " [ 0.22222222 0.27777778 0. ..., 0. 0. 0. ]\n",
  595. " [ 0. 0. 0.55555556 ..., 0.03030303 0.03030303\n",
  596. " 0.03030303]\n",
  597. " ..., \n",
  598. " [ 0. 0. 0.03030303 ..., 0.08297521 0.05553719\n",
  599. " 0.05256198]\n",
  600. " [ 0. 0. 0.03030303 ..., 0.05553719 0.07239669\n",
  601. " 0.0538843 ]\n",
  602. " [ 0. 0. 0.03030303 ..., 0.05256198 0.0538843\n",
  603. " 0.07438017]]\n",
  604. "\n",
  605. " Saving kernel matrix to file...\n",
  606. "\n",
  607. " Mean performance on val set: 11.907089\n",
  608. "With standard deviation: 4.781924\n",
  609. "\n",
  610. " Mean performance on test set: 14.270816\n",
  611. "With standard deviation: 6.366698\n"
  612. ]
  613. }
  614. ],
  615. "source": [
  616. "# Author: Elisabetta Ghisu\n",
  617. "\n",
  618. "\"\"\"\n",
  619. "- This script take as input a kernel matrix\n",
  620. "and returns the classification or regression performance\n",
  621. "- The kernel matrix can be calculated using any of the graph kernels approaches\n",
  622. "- The criteria used for prediction are SVM for classification and kernel Ridge regression for regression\n",
  623. "- For prediction we divide the data in training, validation and test. For each split, we first train on the train data, \n",
  624. "then evaluate the performance on the validation. We choose the optimal parameters for the validation set and finally\n",
  625. "provide the corresponding performance on the test set. If more than one split is performed, the final results \n",
  626. "correspond to the average of the performances on the test sets. \n",
  627. "\n",
  628. "@references\n",
  629. " https://github.com/eghisu/GraphKernels/blob/master/GraphKernelsCollection/python_scripts/compute_perf_gk.py\n",
  630. "\"\"\"\n",
  631. "\n",
  632. "print(__doc__)\n",
  633. "\n",
  634. "import sys\n",
  635. "import os\n",
  636. "import pathlib\n",
  637. "sys.path.insert(0, \"../\")\n",
  638. "from tabulate import tabulate\n",
  639. "\n",
  640. "import random\n",
  641. "import numpy as np\n",
  642. "import matplotlib.pyplot as plt\n",
  643. "\n",
  644. "from sklearn.kernel_ridge import KernelRidge # 0.17\n",
  645. "from sklearn.metrics import accuracy_score, mean_squared_error\n",
  646. "from sklearn import svm\n",
  647. "\n",
  648. "from pygraph.kernels.pathKernel import pathkernel\n",
  649. "from pygraph.utils.graphfiles import loadDataset\n",
  650. "\n",
  651. "print('\\n Loading dataset from file...')\n",
  652. "dataset, y = loadDataset(\"../../../../datasets/acyclic/Acyclic/dataset_bps.ds\")\n",
  653. "y = np.array(y)\n",
  654. "print(y)\n",
  655. "\n",
  656. "# setup the parameters\n",
  657. "model_type = 'regression' # Regression or classification problem\n",
  658. "print('\\n --- This is a %s problem ---' % model_type)\n",
  659. "\n",
  660. "datasize = len(dataset)\n",
  661. "trials = 100 # Trials for hyperparameters random search\n",
  662. "splits = 10 # Number of splits of the data\n",
  663. "alpha_grid = np.logspace(-10, 10, num = trials, base = 10) # corresponds to (2*C)^-1 in other linear models such as LogisticRegression\n",
  664. "C_grid = np.logspace(-10, 10, num = trials, base = 10)\n",
  665. "random.seed(20) # Set the seed for uniform parameter distribution\n",
  666. "\n",
  667. "# set the output path\n",
  668. "kernel_file_path = 'kernelmatrices_path_acyclic/'\n",
  669. "if not os.path.exists(kernel_file_path):\n",
  670. " os.makedirs(kernel_file_path)\n",
  671. "\n",
  672. "\n",
  673. "\"\"\"\n",
  674. "- Here starts the main program\n",
  675. "- First we permute the data, then for each split we evaluate corresponding performances\n",
  676. "- In the end, the performances are averaged over the test sets\n",
  677. "\"\"\"\n",
  678. "\n",
  679. "# save kernel matrices to files / read kernel matrices from files\n",
  680. "kernel_file = kernel_file_path + 'km.ds'\n",
  681. "path = pathlib.Path(kernel_file)\n",
  682. "# get train set kernel matrix\n",
  683. "if path.is_file():\n",
  684. " print('\\n Loading the kernel matrix from file...')\n",
  685. " Kmatrix = np.loadtxt(kernel_file)\n",
  686. " print(Kmatrix)\n",
  687. "else:\n",
  688. " print('\\n Calculating kernel matrix, this could take a while...')\n",
  689. " Kmatrix, run_time = pathkernel(dataset, node_label = 'atom', edge_label = 'bond_type')\n",
  690. " print(Kmatrix)\n",
  691. " print('\\n Saving kernel matrix to file...')\n",
  692. " np.savetxt(kernel_file, Kmatrix)\n",
  693. "\n",
  694. "# Initialize the performance of the best parameter trial on validation with the corresponding performance on test\n",
  695. "val_split = []\n",
  696. "test_split = []\n",
  697. "\n",
  698. "# For each split of the data\n",
  699. "for j in range(10, 10 + splits):\n",
  700. "# print('\\n Starting split %d...' % j)\n",
  701. "\n",
  702. " # Set the random set for data permutation\n",
  703. " random_state = int(j)\n",
  704. " np.random.seed(random_state)\n",
  705. " idx_perm = np.random.permutation(datasize)\n",
  706. "# print(idx_perm)\n",
  707. "\n",
  708. " # Permute the data\n",
  709. " y_perm = y[idx_perm] # targets permutation\n",
  710. "# print(y_perm)\n",
  711. " Kmatrix_perm = Kmatrix[:, idx_perm] # inputs permutation\n",
  712. "# print(Kmatrix_perm)\n",
  713. " Kmatrix_perm = Kmatrix_perm[idx_perm, :] # inputs permutation\n",
  714. "\n",
  715. " # Set the training, validation and test\n",
  716. " # Note: the percentage can be set up by the user\n",
  717. " num_train_val = int((datasize * 90) / 100) # 90% (of entire dataset) for training and validation\n",
  718. " num_test = datasize - num_train_val # 10% (of entire dataset) for test\n",
  719. " num_train = int((num_train_val * 90) / 100) # 90% (of train + val) for training\n",
  720. " num_val = num_train_val - num_train # 10% (of train + val) for validation\n",
  721. "\n",
  722. " # Split the kernel matrix\n",
  723. " Kmatrix_train = Kmatrix_perm[0:num_train, 0:num_train]\n",
  724. " Kmatrix_val = Kmatrix_perm[num_train:(num_train + num_val), 0:num_train]\n",
  725. " Kmatrix_test = Kmatrix_perm[(num_train + num_val):datasize, 0:num_train]\n",
  726. "\n",
  727. " # Split the targets\n",
  728. " y_train = y_perm[0:num_train]\n",
  729. "\n",
  730. " # Normalization step (for real valued targets only)\n",
  731. " if model_type == 'regression':\n",
  732. "# print('\\n Normalizing output y...')\n",
  733. " y_train_mean = np.mean(y_train)\n",
  734. " y_train_std = np.std(y_train)\n",
  735. " y_train = (y_train - y_train_mean) / float(y_train_std)\n",
  736. "# print(y)\n",
  737. "\n",
  738. " y_val = y_perm[num_train:(num_train + num_val)]\n",
  739. " y_test = y_perm[(num_train + num_val):datasize]\n",
  740. "\n",
  741. " # Record the performance for each parameter trial respectively on validation and test set\n",
  742. " perf_all_val = []\n",
  743. " perf_all_test = []\n",
  744. "\n",
  745. " # For each parameter trial\n",
  746. " for i in range(trials):\n",
  747. " # For regression use the Kernel Ridge method\n",
  748. " if model_type == 'regression':\n",
  749. "# print('\\n Starting experiment for trial %d and parameter alpha = %3f\\n ' % (i, alpha_grid[i]))\n",
  750. "\n",
  751. " # Fit the kernel ridge model\n",
  752. " KR = KernelRidge(kernel = 'precomputed', alpha = alpha_grid[i])\n",
  753. "# KR = svm.SVR(kernel = 'precomputed', C = C_grid[i])\n",
  754. " KR.fit(Kmatrix_train, y_train)\n",
  755. "\n",
  756. " # predict on the validation and test set\n",
  757. " y_pred = KR.predict(Kmatrix_val)\n",
  758. " y_pred_test = KR.predict(Kmatrix_test)\n",
  759. "# print(y_pred)\n",
  760. "\n",
  761. " # adjust prediction: needed because the training targets have been normalized\n",
  762. " y_pred = y_pred * float(y_train_std) + y_train_mean\n",
  763. "# print(y_pred)\n",
  764. " y_pred_test = y_pred_test * float(y_train_std) + y_train_mean\n",
  765. "# print(y_pred_test)\n",
  766. "\n",
  767. " # root mean squared error on validation\n",
  768. " rmse = np.sqrt(mean_squared_error(y_val, y_pred))\n",
  769. " perf_all_val.append(rmse)\n",
  770. "\n",
  771. " # root mean squared error in test \n",
  772. " rmse_test = np.sqrt(mean_squared_error(y_test, y_pred_test))\n",
  773. " perf_all_test.append(rmse_test)\n",
  774. "\n",
  775. "# print('The performance on the validation set is: %3f' % rmse)\n",
  776. "# print('The performance on the test set is: %3f' % rmse_test)\n",
  777. "\n",
  778. " # --- FIND THE OPTIMAL PARAMETERS --- #\n",
  779. " # For regression: minimise the mean squared error\n",
  780. " if model_type == 'regression':\n",
  781. "\n",
  782. " # get optimal parameter on validation (argmin mean squared error)\n",
  783. " min_idx = np.argmin(perf_all_val)\n",
  784. " alpha_opt = alpha_grid[min_idx]\n",
  785. "\n",
  786. " # performance corresponding to optimal parameter on val\n",
  787. " perf_val_opt = perf_all_val[min_idx]\n",
  788. "\n",
  789. " # corresponding performance on test for the same parameter\n",
  790. " perf_test_opt = perf_all_test[min_idx]\n",
  791. "\n",
  792. "# print('The best performance is for trial %d with parameter alpha = %3f' % (min_idx, alpha_opt))\n",
  793. "# print('The best performance on the validation set is: %3f' % perf_val_opt)\n",
  794. "# print('The corresponding performance on test set is: %3f' % perf_test_opt)\n",
  795. "\n",
  796. " # append the best performance on validation\n",
  797. " # at the current split\n",
  798. " val_split.append(perf_val_opt)\n",
  799. "\n",
  800. " # append the corresponding performance on the test set\n",
  801. " test_split.append(perf_test_opt)\n",
  802. "\n",
  803. "# average the results\n",
  804. "# mean of the validation performances over the splits\n",
  805. "val_mean = np.mean(np.asarray(val_split))\n",
  806. "# std deviation of validation over the splits\n",
  807. "val_std = np.std(np.asarray(val_split))\n",
  808. "\n",
  809. "# mean of the test performances over the splits\n",
  810. "test_mean = np.mean(np.asarray(test_split))\n",
  811. "# std deviation of the test over the splits\n",
  812. "test_std = np.std(np.asarray(test_split))\n",
  813. "\n",
  814. "print('\\n Mean performance on val set: %3f' % val_mean)\n",
  815. "print('With standard deviation: %3f' % val_std)\n",
  816. "print('\\n Mean performance on test set: %3f' % test_mean)\n",
  817. "print('With standard deviation: %3f' % test_std)"
  818. ]
  819. },
  820. {
  821. "cell_type": "code",
  822. "execution_count": null,
  823. "metadata": {},
  824. "outputs": [],
  825. "source": []
  826. }
  827. ],
  828. "metadata": {
  829. "kernelspec": {
  830. "display_name": "Python 3",
  831. "language": "python",
  832. "name": "python3"
  833. },
  834. "language_info": {
  835. "codemirror_mode": {
  836. "name": "ipython",
  837. "version": 3
  838. },
  839. "file_extension": ".py",
  840. "mimetype": "text/x-python",
  841. "name": "python",
  842. "nbconvert_exporter": "python",
  843. "pygments_lexer": "ipython3",
  844. "version": "3.5.2"
  845. }
  846. },
  847. "nbformat": 4,
  848. "nbformat_minor": 2
  849. }

A Python package for graph kernels, graph edit distances and graph pre-image problem.