
tutorial_6_datasetiter.ipynb 25 kB

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Quick training and testing with Trainer and Tester"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Loading and processing data"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/remote-home/ynzheng/anaconda3/envs/now/lib/python3.8/site-packages/FastNLP-0.5.0-py3.8.egg/fastNLP/io/loader/classification.py:340: UserWarning: SST2's test file has no target.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"In total 3 datasets:\n",
"\ttest has 1821 instances.\n",
"\ttrain has 67349 instances.\n",
"\tdev has 872 instances.\n",
"In total 2 vocabs:\n",
"\twords has 16292 entries.\n",
"\ttarget has 2 entries.\n",
"\n",
"+-----------------------------------+--------+-----------------------------------+---------+\n",
"| raw_words | target | words | seq_len |\n",
"+-----------------------------------+--------+-----------------------------------+---------+\n",
"| hide new secretions from the p... | 1 | [4110, 97, 12009, 39, 2, 6843,... | 7 |\n",
"+-----------------------------------+--------+-----------------------------------+---------+\n",
"Vocabulary(['hide', 'new', 'secretions', 'from', 'the']...)\n"
]
}
],
"source": [
"from fastNLP.io import SST2Pipe\n",
"\n",
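"# SST2Pipe loads the SST-2 dataset, tokenizes raw_words into words, maps them to indices, and builds the 'words' and 'target' vocabularies\n",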
"pipe = SST2Pipe()\n",
"databundle = pipe.process_from_file()\n",
"vocab = databundle.get_vocab('words')\n",
"print(databundle)\n",
"print(databundle.get_dataset('train')[0])\n",
"print(databundle.get_vocab('words'))"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"4925 872 75\n"
]
}
],
"source": [
"train_data = databundle.get_dataset('train')[:5000]\n",
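"# split(0.015) randomly splits off 1.5% of the 5000 instances (75 here) into test_data and keeps the rest as train_data\n",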
"train_data, test_data = train_data.split(0.015)\n",
"dev_data = databundle.get_dataset('dev')\n",
"print(len(train_data),len(dev_data),len(test_data))"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"+-------------+-----------+--------+-------+---------+\n",
"| field_names | raw_words | target | words | seq_len |\n",
"+-------------+-----------+--------+-------+---------+\n",
"| is_input | False | False | True | True |\n",
"| is_target | False | True | False | False |\n",
"| ignore_type | | False | False | False |\n",
"| pad_value | | 0 | 0 | 0 |\n",
"+-------------+-----------+--------+-------+---------+\n"
]
},
{
"data": {
"text/plain": [
"<prettytable.PrettyTable at 0x7f0db03d0640>"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"train_data.print_field_meta()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from fastNLP import AccuracyMetric\n",
"from fastNLP import Const\n",
"\n",
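"# Const.OUTPUT and Const.TARGET are fastNLP's conventional field names ('pred' and 'target')\n",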
"# metrics=AccuracyMetric() is equivalent to the following line in this example\n",
"metrics=AccuracyMetric(pred=Const.OUTPUT, target=Const.TARGET)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## A first look at DataSetIter"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"batch_x: {'words': tensor([[ 13, 830, 7746, 174, 3, 47, 6, 83, 5752, 15,\n",
" 2177, 15, 63, 57, 406, 84, 1009, 4973, 27, 17,\n",
" 13785, 3, 533, 3687, 15623, 39, 375, 8, 15624, 8,\n",
" 1323, 4398, 7],\n",
" [ 1045, 11113, 16, 104, 5, 4, 176, 1824, 1704, 3,\n",
" 2, 18, 11, 4, 1018, 432, 143, 33, 245, 308,\n",
" 7, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0]]), 'seq_len': tensor([33, 21])}\n",
"batch_y: {'target': tensor([1, 0])}\n",
"batch_x: {'words': tensor([[ 14, 10, 4, 311, 5, 154, 1418, 609, 7],\n",
" [ 14, 10, 437, 32, 78, 3, 78, 437, 7]]), 'seq_len': tensor([9, 9])}\n",
"batch_y: {'target': tensor([0, 1])}\n",
"batch_x: {'words': tensor([[ 4, 277, 685, 18, 7],\n",
" [15618, 3204, 5, 1675, 0]]), 'seq_len': tensor([5, 4])}\n",
"batch_y: {'target': tensor([1, 1])}\n",
"batch_x: {'words': tensor([[ 2, 155, 3, 4426, 3, 239, 3, 739, 5, 1136,\n",
" 41, 43, 2427, 736, 2, 648, 10, 15620, 2285, 7],\n",
" [ 24, 95, 28, 46, 8, 336, 38, 239, 8, 2133,\n",
" 2, 18, 10, 15622, 1421, 6, 61, 5, 387, 7]]), 'seq_len': tensor([20, 20])}\n",
"batch_y: {'target': tensor([0, 0])}\n",
"batch_x: {'words': tensor([[ 879, 96, 8, 1026, 12, 8067, 11, 13623, 8, 15619,\n",
" 4, 673, 662, 15, 4, 1154, 240, 639, 417, 7],\n",
" [ 45, 752, 327, 180, 10, 15621, 16, 72, 8904, 9,\n",
" 1217, 7, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([20, 12])}\n",
"batch_y: {'target': tensor([0, 1])}\n"
]
}
],
"source": [
"from fastNLP import BucketSampler\n",
"from fastNLP import DataSetIter\n",
"\n",
"tmp_data = dev_data[:10]\n",
"# Define a batch iterator: pass in the DataSet and specify the batch_size and the rule for forming batches:\n",
"# sequential order (Sequential), random order (Random), or grouping instances of similar length into a batch (Bucket).\n",
"sampler = BucketSampler(batch_size=2, seq_len_field_name='seq_len')\n",
"batch = DataSetIter(batch_size=2, dataset=tmp_data, sampler=sampler)\n",
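"# DataSetIter yields (batch_x, batch_y) dicts: batch_x collects the is_input fields ('words', 'seq_len'), batch_y the is_target field ('target')\n",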
"for batch_x, batch_y in batch:\n",
"    print(\"batch_x: \",batch_x)\n",
"    print(\"batch_y: \", batch_y)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"batch_x: {'words': tensor([[ 13, 830, 7746, 174, 3, 47, 6, 83, 5752, 15,\n",
" 2177, 15, 63, 57, 406, 84, 1009, 4973, 27, 17,\n",
" 13785, 3, 533, 3687, 15623, 39, 375, 8, 15624, 8,\n",
" 1323, 4398, 7],\n",
" [ 1045, 11113, 16, 104, 5, 4, 176, 1824, 1704, 3,\n",
" 2, 18, 11, 4, 1018, 432, 143, 33, 245, 308,\n",
" 7, -1, -1, -1, -1, -1, -1, -1, -1, -1,\n",
" -1, -1, -1]]), 'seq_len': tensor([33, 21])}\n",
"batch_y: {'target': tensor([1, 0])}\n",
"batch_x: {'words': tensor([[ 14, 10, 4, 311, 5, 154, 1418, 609, 7],\n",
" [ 14, 10, 437, 32, 78, 3, 78, 437, 7]]), 'seq_len': tensor([9, 9])}\n",
"batch_y: {'target': tensor([0, 1])}\n",
"batch_x: {'words': tensor([[ 2, 155, 3, 4426, 3, 239, 3, 739, 5, 1136,\n",
" 41, 43, 2427, 736, 2, 648, 10, 15620, 2285, 7],\n",
" [ 24, 95, 28, 46, 8, 336, 38, 239, 8, 2133,\n",
" 2, 18, 10, 15622, 1421, 6, 61, 5, 387, 7]]), 'seq_len': tensor([20, 20])}\n",
"batch_y: {'target': tensor([0, 0])}\n",
"batch_x: {'words': tensor([[ 4, 277, 685, 18, 7],\n",
" [15618, 3204, 5, 1675, -1]]), 'seq_len': tensor([5, 4])}\n",
"batch_y: {'target': tensor([1, 1])}\n",
"batch_x: {'words': tensor([[ 879, 96, 8, 1026, 12, 8067, 11, 13623, 8, 15619,\n",
" 4, 673, 662, 15, 4, 1154, 240, 639, 417, 7],\n",
" [ 45, 752, 327, 180, 10, 15621, 16, 72, 8904, 9,\n",
" 1217, 7, -1, -1, -1, -1, -1, -1, -1, -1]]), 'seq_len': tensor([20, 12])}\n",
"batch_y: {'target': tensor([0, 1])}\n"
]
}
],
"source": [
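"# set_pad_val changes the padding value of the 'words' field, so padded positions show -1 instead of 0 in the batches below\n",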
"tmp_data.set_pad_val('words',-1)\n",
"batch = DataSetIter(batch_size=2, dataset=tmp_data, sampler=sampler)\n",
"for batch_x, batch_y in batch:\n",
"    print(\"batch_x: \",batch_x)\n",
"    print(\"batch_y: \", batch_y)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"batch_x: {'words': tensor([[ 45, 752, 327, 180, 10, 15621, 16, 72, 8904, 9,\n",
" 1217, 7, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
" [ 879, 96, 8, 1026, 12, 8067, 11, 13623, 8, 15619,\n",
" 4, 673, 662, 15, 4, 1154, 240, 639, 417, 7,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([12, 20])}\n",
"batch_y: {'target': tensor([1, 0])}\n",
"batch_x: {'words': tensor([[ 13, 830, 7746, 174, 3, 47, 6, 83, 5752, 15,\n",
" 2177, 15, 63, 57, 406, 84, 1009, 4973, 27, 17,\n",
" 13785, 3, 533, 3687, 15623, 39, 375, 8, 15624, 8,\n",
" 1323, 4398, 7, 0, 0, 0, 0, 0, 0, 0],\n",
" [ 1045, 11113, 16, 104, 5, 4, 176, 1824, 1704, 3,\n",
" 2, 18, 11, 4, 1018, 432, 143, 33, 245, 308,\n",
" 7, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([33, 21])}\n",
"batch_y: {'target': tensor([1, 0])}\n",
"batch_x: {'words': tensor([[ 14, 10, 4, 311, 5, 154, 1418, 609, 7, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0],\n",
" [ 14, 10, 437, 32, 78, 3, 78, 437, 7, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0]]), 'seq_len': tensor([9, 9])}\n",
"batch_y: {'target': tensor([0, 1])}\n",
"batch_x: {'words': tensor([[ 2, 155, 3, 4426, 3, 239, 3, 739, 5, 1136,\n",
" 41, 43, 2427, 736, 2, 648, 10, 15620, 2285, 7,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
" [ 24, 95, 28, 46, 8, 336, 38, 239, 8, 2133,\n",
" 2, 18, 10, 15622, 1421, 6, 61, 5, 387, 7,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([20, 20])}\n",
"batch_y: {'target': tensor([0, 0])}\n",
"batch_x: {'words': tensor([[ 4, 277, 685, 18, 7, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
" [15618, 3204, 5, 1675, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'seq_len': tensor([5, 4])}\n",
"batch_y: {'target': tensor([1, 1])}\n"
]
}
],
"source": [
"from fastNLP.core.field import Padder\n",
"import numpy as np\n",
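"# A custom Padder that always pads a field to a fixed length rather than to the longest instance in the batch\n",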
"class FixLengthPadder(Padder):\n",
"    def __init__(self, pad_val=0, length=None):\n",
"        super().__init__(pad_val=pad_val)\n",
"        self.length = length\n",
"        assert self.length is not None, \"Creating FixLengthPadder with no specific length!\"\n",
"\n",
"    def __call__(self, contents, field_name, field_ele_dtype, dim):\n",
"        # compute the maximum length among the current contents\n",
"        max_len = max(map(len, contents))\n",
"        # raise an error if that maximum length exceeds the padder's fixed length\n",
"        assert max_len <= self.length, \"Fixed padder length smaller than actual length! with length {}\".format(max_len)\n",
"        array = np.full((len(contents), self.length), self.pad_val, dtype=field_ele_dtype)\n",
"        for i, content_i in enumerate(contents):\n",
"            array[i, :len(content_i)] = content_i\n",
"        return array\n",
"\n",
"# set the fixed length of FixLengthPadder to 40\n",
"tmp_padder = FixLengthPadder(pad_val=0,length=40)\n",
"# use the dataset's set_padder function to set the padder of the words field\n",
"tmp_data.set_padder('words',tmp_padder)\n",
"batch = DataSetIter(batch_size=2, dataset=tmp_data, sampler=sampler)\n",
"for batch_x, batch_y in batch:\n",
"    print(\"batch_x: \",batch_x)\n",
"    print(\"batch_y: \", batch_y)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Writing your own training loop with DataSetIter\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-----start training-----\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 2.68 seconds!\n",
"Epoch 0 Avg Loss: 0.66 AccuracyMetric: acc=0.708716 29307ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.38 seconds!\n",
"Epoch 1 Avg Loss: 0.41 AccuracyMetric: acc=0.770642 52200ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.51 seconds!\n",
"Epoch 2 Avg Loss: 0.16 AccuracyMetric: acc=0.747706 70268ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.96 seconds!\n",
"Epoch 3 Avg Loss: 0.06 AccuracyMetric: acc=0.741972 90349ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 1.04 seconds!\n",
"Epoch 4 Avg Loss: 0.03 AccuracyMetric: acc=0.740826 114250ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.8 seconds!\n",
"Epoch 5 Avg Loss: 0.02 AccuracyMetric: acc=0.738532 134742ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.65 seconds!\n",
"Epoch 6 Avg Loss: 0.01 AccuracyMetric: acc=0.731651 154503ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.8 seconds!\n",
"Epoch 7 Avg Loss: 0.01 AccuracyMetric: acc=0.738532 175397ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.36 seconds!\n",
"Epoch 8 Avg Loss: 0.01 AccuracyMetric: acc=0.733945 192384ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=55.0), HTML(value='')), layout=Layout(dis…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.84 seconds!\n",
"Epoch 9 Avg Loss: 0.01 AccuracyMetric: acc=0.744266 214417ms\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=5.0), HTML(value='')), layout=Layout(disp…"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Evaluate data in 0.04 seconds!\n",
"[tester] \n",
"AccuracyMetric: acc=0.786667\n"
]
},
{
"data": {
"text/plain": [
"{'AccuracyMetric': {'acc': 0.786667}}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from fastNLP import BucketSampler\n",
"from fastNLP import DataSetIter\n",
"from fastNLP.models import CNNText\n",
"from fastNLP import Tester\n",
"import torch\n",
"import time\n",
"\n",
"embed_dim = 100\n",
"model = CNNText((len(vocab),embed_dim), num_classes=2, dropout=0.1)\n",
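"# CNNText is fastNLP's simple CNN text classifier; its forward returns a dict whose 'pred' entry is used as the model output below\n",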
"\n",
"def train(epoch, data, devdata):\n",
"    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
"    lossfunc = torch.nn.CrossEntropyLoss()\n",
"    batch_size = 32\n",
"\n",
"    # Define a batch iterator: pass in the DataSet and specify the batch_size and the rule for forming batches:\n",
"    # sequential order (Sequential), random order (Random), or grouping instances of similar length into a batch (Bucket).\n",
"    train_sampler = BucketSampler(batch_size=batch_size, seq_len_field_name='seq_len')\n",
"    train_batch = DataSetIter(batch_size=batch_size, dataset=data, sampler=train_sampler)\n",
"\n",
"    start_time = time.time()\n",
"    print(\"-\"*5+\"start training\"+\"-\"*5)\n",
"    for i in range(epoch):\n",
"        loss_list = []\n",
"        for batch_x, batch_y in train_batch:\n",
"            optimizer.zero_grad()\n",
"            output = model(batch_x['words'])\n",
"            loss = lossfunc(output['pred'], batch_y['target'])\n",
"            loss.backward()\n",
"            optimizer.step()\n",
"            loss_list.append(loss.item())\n",
"\n",
"        # With verbose=0 the Tester's test() prints nothing and only returns the evaluation results; with verbose=1 it also prints them.\n",
"        # After calling test(), _format_eval_results(res) turns the evaluation results into a formatted string.\n",
"        tester_tmp = Tester(devdata, model, metrics=AccuracyMetric(), verbose=0)\n",
"        res=tester_tmp.test()\n",
"\n",
"        print('Epoch {:d} Avg Loss: {:.2f}'.format(i, sum(loss_list) / len(loss_list)),end=\" \")\n",
"        print(tester_tmp._format_eval_results(res),end=\" \")\n",
"        print('{:d}ms'.format(round((time.time()-start_time)*1000)))\n",
"        loss_list.clear()\n",
"\n",
"train(10, train_data, dev_data)\n",
"# use Tester for quick evaluation on the held-out test set\n",
"tester = Tester(test_data, model, metrics=AccuracyMetric())\n",
"tester.test()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python Now",
"language": "python",
"name": "now"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}