You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or number, can include dashes ('-') and can be up to 35 characters long.

序列标注.ipynb 27 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912
  1. {
  2. "cells": [
  3. {
  4. "cell_type": "markdown",
  5. "metadata": {},
  6. "source": [
  7. "# 序列标注\n",
  8. "\n",
  9. "这一部分的内容主要展示如何使用fastNLP实现序列标注(Sequence labeling)任务。您可以使用fastNLP的各个组件快捷,方便地完成序列标注任务,达到出色的效果。 在阅读这篇教程前,希望您已经熟悉了fastNLP的基础使用,尤其是数据的载入以及模型的构建,通过这个小任务能让您进一步熟悉fastNLP的使用。\n",
  10. "\n",
  11. "## 命名实体识别(name entity recognition, NER)\n",
  12. "\n",
  13. "命名实体识别任务是从文本中抽取出具有特殊意义或者指代性非常强的实体,通常包括人名、地名、机构名和时间等。 如下面的例子中\n",
  14. "\n",
  15. "*我来自复旦大学*\n",
  16. "\n",
  17. "其中“复旦大学”就是一个机构名,命名实体识别就是要从中识别出“复旦大学”这四个字是一个整体,且属于机构名这个类别。这个问题在实际做的时候会被转换为序列标注问题\n",
  18. "\n",
  19. "针对\"我来自复旦大学\"这句话,我们的预测目标将是[O, O, O, B-ORG, I-ORG, I-ORG, I-ORG],其中O表示out,即不是一个实体,B-ORG是ORG( organization的缩写)这个类别的开头(Begin),I-ORG是ORG类别的中间(Inside)。\n",
  20. "\n",
  21. "在本tutorial中我们将通过fastNLP尝试写出一个能够执行以上任务的模型。\n",
  22. "\n",
  23. "## 载入数据\n",
  24. "\n",
  25. "fastNLP的数据载入主要是由Loader与Pipe两个基类衔接完成的,您可以通过《使用Loader和Pipe处理数据》了解如何使用fastNLP提供的数据加载函数。下面我们以微博命名实体任务来演示一下在fastNLP进行序列标注任务。"
  26. ]
  27. },
  28. {
  29. "cell_type": "code",
  30. "execution_count": 1,
  31. "metadata": {},
  32. "outputs": [
  33. {
  34. "name": "stdout",
  35. "output_type": "stream",
  36. "text": [
  37. "+-----------------------------------+-----------------------------------+-----------------------------------+---------+\n",
  38. "| raw_chars | target | chars | seq_len |\n",
  39. "+-----------------------------------+-----------------------------------+-----------------------------------+---------+\n",
  40. "| ['科', '技', '全', '方', '位',... | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,... | [792, 1015, 156, 198, 291, 714... | 26 |\n",
  41. "| ['对', ',', '输', '给', '一',... | [0, 0, 0, 0, 0, 0, 3, 1, 0, 0,... | [123, 2, 1205, 115, 8, 24, 101... | 15 |\n",
  42. "+-----------------------------------+-----------------------------------+-----------------------------------+---------+\n"
  43. ]
  44. }
  45. ],
  46. "source": [
  47. "from fastNLP.io import WeiboNERPipe\n",
  48. "data_bundle = WeiboNERPipe().process_from_file()\n",
  49. "print(data_bundle.get_dataset('train')[:2])"
  50. ]
  51. },
  52. {
  53. "cell_type": "markdown",
  54. "metadata": {},
  55. "source": [
  56. "## 模型构建\n",
  57. "\n",
  58. "首先选择需要使用的Embedding类型。关于Embedding的相关说明可以参见《使用Embedding模块将文本转成向量》。 在这里我们使用通过word2vec预训练的中文汉字embedding。"
  59. ]
  60. },
  61. {
  62. "cell_type": "code",
  63. "execution_count": 2,
  64. "metadata": {},
  65. "outputs": [
  66. {
  67. "name": "stdout",
  68. "output_type": "stream",
  69. "text": [
  70. "Found 3321 out of 3471 words in the pre-training embedding.\n"
  71. ]
  72. }
  73. ],
  74. "source": [
  75. "from fastNLP.embeddings import StaticEmbedding\n",
  76. "\n",
  77. "embed = StaticEmbedding(vocab=data_bundle.get_vocab('chars'), model_dir_or_name='cn-char-fastnlp-100d')"
  78. ]
  79. },
  80. {
  81. "cell_type": "markdown",
  82. "metadata": {},
  83. "source": [
  84. "选择好Embedding之后,我们可以使用fastNLP中自带的 fastNLP.models.BiLSTMCRF 作为模型。"
  85. ]
  86. },
  87. {
  88. "cell_type": "code",
  89. "execution_count": 3,
  90. "metadata": {},
  91. "outputs": [],
  92. "source": [
  93. "from fastNLP.models import BiLSTMCRF\n",
  94. "\n",
  95. "data_bundle.rename_field('chars', 'words') # 这是由于BiLSTMCRF模型的forward函数接受的words,而不是chars,所以需要把这一列重新命名\n",
  96. "model = BiLSTMCRF(embed=embed, num_classes=len(data_bundle.get_vocab('target')), num_layers=1, hidden_size=200, dropout=0.5,\n",
  97. " target_vocab=data_bundle.get_vocab('target'))"
  98. ]
  99. },
  100. {
  101. "cell_type": "markdown",
  102. "metadata": {},
  103. "source": [
  104. "## 进行训练\n",
  105. "下面我们选择用来评估模型的metric,以及优化用到的优化函数。"
  106. ]
  107. },
  108. {
  109. "cell_type": "code",
  110. "execution_count": 4,
  111. "metadata": {},
  112. "outputs": [],
  113. "source": [
  114. "from fastNLP import SpanFPreRecMetric\n",
  115. "from torch.optim import Adam\n",
  116. "from fastNLP import LossInForward\n",
  117. "\n",
  118. "metric = SpanFPreRecMetric(tag_vocab=data_bundle.get_vocab('target'))\n",
  119. "optimizer = Adam(model.parameters(), lr=1e-2)\n",
  120. "loss = LossInForward()"
  121. ]
  122. },
  123. {
  124. "cell_type": "markdown",
  125. "metadata": {},
  126. "source": [
  127. "使用Trainer进行训练, 您可以通过修改 device 的值来选择显卡。"
  128. ]
  129. },
  130. {
  131. "cell_type": "code",
  132. "execution_count": 5,
  133. "metadata": {},
  134. "outputs": [
  135. {
  136. "name": "stdout",
  137. "output_type": "stream",
  138. "text": [
  139. "input fields after batch(if batch size is 2):\n",
  140. "\ttarget: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 26]) \n",
  141. "\tseq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
  142. "\twords: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 26]) \n",
  143. "target fields after batch(if batch size is 2):\n",
  144. "\ttarget: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 26]) \n",
  145. "\tseq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
  146. "\n",
  147. "training epochs started 2020-02-27-13-53-24\n"
  148. ]
  149. },
  150. {
  151. "data": {
  152. "application/vnd.jupyter.widget-view+json": {
  153. "model_id": "",
  154. "version_major": 2,
  155. "version_minor": 0
  156. },
  157. "text/plain": [
  158. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=430.0), HTML(value='')), layout=Layout(di…"
  159. ]
  160. },
  161. "metadata": {},
  162. "output_type": "display_data"
  163. },
  164. {
  165. "data": {
  166. "application/vnd.jupyter.widget-view+json": {
  167. "model_id": "",
  168. "version_major": 2,
  169. "version_minor": 0
  170. },
  171. "text/plain": [
  172. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  173. ]
  174. },
  175. "metadata": {},
  176. "output_type": "display_data"
  177. },
  178. {
  179. "name": "stdout",
  180. "output_type": "stream",
  181. "text": [
  182. "\r",
  183. "Evaluate data in 0.89 seconds!\n",
  184. "\r",
  185. "Evaluation on dev at Epoch 1/10. Step:43/430: \n",
  186. "\r",
  187. "SpanFPreRecMetric: f=0.067797, pre=0.192771, rec=0.041131\n",
  188. "\n"
  189. ]
  190. },
  191. {
  192. "data": {
  193. "application/vnd.jupyter.widget-view+json": {
  194. "model_id": "",
  195. "version_major": 2,
  196. "version_minor": 0
  197. },
  198. "text/plain": [
  199. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  200. ]
  201. },
  202. "metadata": {},
  203. "output_type": "display_data"
  204. },
  205. {
  206. "name": "stdout",
  207. "output_type": "stream",
  208. "text": [
  209. "\r",
  210. "Evaluate data in 0.9 seconds!\n",
  211. "\r",
  212. "Evaluation on dev at Epoch 2/10. Step:86/430: \n",
  213. "\r",
  214. "SpanFPreRecMetric: f=0.344086, pre=0.568047, rec=0.246787\n",
  215. "\n"
  216. ]
  217. },
  218. {
  219. "data": {
  220. "application/vnd.jupyter.widget-view+json": {
  221. "model_id": "",
  222. "version_major": 2,
  223. "version_minor": 0
  224. },
  225. "text/plain": [
  226. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  227. ]
  228. },
  229. "metadata": {},
  230. "output_type": "display_data"
  231. },
  232. {
  233. "name": "stdout",
  234. "output_type": "stream",
  235. "text": [
  236. "\r",
  237. "Evaluate data in 0.88 seconds!\n",
  238. "\r",
  239. "Evaluation on dev at Epoch 3/10. Step:129/430: \n",
  240. "\r",
  241. "SpanFPreRecMetric: f=0.446701, pre=0.653465, rec=0.339332\n",
  242. "\n"
  243. ]
  244. },
  245. {
  246. "data": {
  247. "application/vnd.jupyter.widget-view+json": {
  248. "model_id": "",
  249. "version_major": 2,
  250. "version_minor": 0
  251. },
  252. "text/plain": [
  253. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  254. ]
  255. },
  256. "metadata": {},
  257. "output_type": "display_data"
  258. },
  259. {
  260. "name": "stdout",
  261. "output_type": "stream",
  262. "text": [
  263. "\r",
  264. "Evaluate data in 0.81 seconds!\n",
  265. "\r",
  266. "Evaluation on dev at Epoch 4/10. Step:172/430: \n",
  267. "\r",
  268. "SpanFPreRecMetric: f=0.479871, pre=0.642241, rec=0.383033\n",
  269. "\n"
  270. ]
  271. },
  272. {
  273. "data": {
  274. "application/vnd.jupyter.widget-view+json": {
  275. "model_id": "",
  276. "version_major": 2,
  277. "version_minor": 0
  278. },
  279. "text/plain": [
  280. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  281. ]
  282. },
  283. "metadata": {},
  284. "output_type": "display_data"
  285. },
  286. {
  287. "name": "stdout",
  288. "output_type": "stream",
  289. "text": [
  290. "\r",
  291. "Evaluate data in 0.91 seconds!\n",
  292. "\r",
  293. "Evaluation on dev at Epoch 5/10. Step:215/430: \n",
  294. "\r",
  295. "SpanFPreRecMetric: f=0.486312, pre=0.650862, rec=0.388175\n",
  296. "\n"
  297. ]
  298. },
  299. {
  300. "data": {
  301. "application/vnd.jupyter.widget-view+json": {
  302. "model_id": "",
  303. "version_major": 2,
  304. "version_minor": 0
  305. },
  306. "text/plain": [
  307. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  308. ]
  309. },
  310. "metadata": {},
  311. "output_type": "display_data"
  312. },
  313. {
  314. "name": "stdout",
  315. "output_type": "stream",
  316. "text": [
  317. "\r",
  318. "Evaluate data in 0.87 seconds!\n",
  319. "\r",
  320. "Evaluation on dev at Epoch 6/10. Step:258/430: \n",
  321. "\r",
  322. "SpanFPreRecMetric: f=0.541401, pre=0.711297, rec=0.437018\n",
  323. "\n"
  324. ]
  325. },
  326. {
  327. "data": {
  328. "application/vnd.jupyter.widget-view+json": {
  329. "model_id": "",
  330. "version_major": 2,
  331. "version_minor": 0
  332. },
  333. "text/plain": [
  334. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  335. ]
  336. },
  337. "metadata": {},
  338. "output_type": "display_data"
  339. },
  340. {
  341. "name": "stdout",
  342. "output_type": "stream",
  343. "text": [
  344. "\r",
  345. "Evaluate data in 0.86 seconds!\n",
  346. "\r",
  347. "Evaluation on dev at Epoch 7/10. Step:301/430: \n",
  348. "\r",
  349. "SpanFPreRecMetric: f=0.430335, pre=0.685393, rec=0.313625\n",
  350. "\n"
  351. ]
  352. },
  353. {
  354. "data": {
  355. "application/vnd.jupyter.widget-view+json": {
  356. "model_id": "",
  357. "version_major": 2,
  358. "version_minor": 0
  359. },
  360. "text/plain": [
  361. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  362. ]
  363. },
  364. "metadata": {},
  365. "output_type": "display_data"
  366. },
  367. {
  368. "name": "stdout",
  369. "output_type": "stream",
  370. "text": [
  371. "\r",
  372. "Evaluate data in 0.82 seconds!\n",
  373. "\r",
  374. "Evaluation on dev at Epoch 8/10. Step:344/430: \n",
  375. "\r",
  376. "SpanFPreRecMetric: f=0.477759, pre=0.665138, rec=0.372751\n",
  377. "\n"
  378. ]
  379. },
  380. {
  381. "data": {
  382. "application/vnd.jupyter.widget-view+json": {
  383. "model_id": "",
  384. "version_major": 2,
  385. "version_minor": 0
  386. },
  387. "text/plain": [
  388. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  389. ]
  390. },
  391. "metadata": {},
  392. "output_type": "display_data"
  393. },
  394. {
  395. "name": "stdout",
  396. "output_type": "stream",
  397. "text": [
  398. "\r",
  399. "Evaluate data in 0.81 seconds!\n",
  400. "\r",
  401. "Evaluation on dev at Epoch 9/10. Step:387/430: \n",
  402. "\r",
  403. "SpanFPreRecMetric: f=0.500759, pre=0.611111, rec=0.424165\n",
  404. "\n"
  405. ]
  406. },
  407. {
  408. "data": {
  409. "application/vnd.jupyter.widget-view+json": {
  410. "model_id": "",
  411. "version_major": 2,
  412. "version_minor": 0
  413. },
  414. "text/plain": [
  415. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=9.0), HTML(value='')), layout=Layout(disp…"
  416. ]
  417. },
  418. "metadata": {},
  419. "output_type": "display_data"
  420. },
  421. {
  422. "name": "stdout",
  423. "output_type": "stream",
  424. "text": [
  425. "\r",
  426. "Evaluate data in 0.8 seconds!\n",
  427. "\r",
  428. "Evaluation on dev at Epoch 10/10. Step:430/430: \n",
  429. "\r",
  430. "SpanFPreRecMetric: f=0.496025, pre=0.65, rec=0.401028\n",
  431. "\n",
  432. "\r\n",
  433. "In Epoch:6/Step:258, got best dev performance:\n",
  434. "SpanFPreRecMetric: f=0.541401, pre=0.711297, rec=0.437018\n",
  435. "Reloaded the best model.\n"
  436. ]
  437. },
  438. {
  439. "data": {
  440. "text/plain": [
  441. "{'best_eval': {'SpanFPreRecMetric': {'f': 0.541401,\n",
  442. " 'pre': 0.711297,\n",
  443. " 'rec': 0.437018}},\n",
  444. " 'best_epoch': 6,\n",
  445. " 'best_step': 258,\n",
  446. " 'seconds': 121.39}"
  447. ]
  448. },
  449. "execution_count": 5,
  450. "metadata": {},
  451. "output_type": "execute_result"
  452. }
  453. ],
  454. "source": [
  455. "from fastNLP import Trainer\n",
  456. "import torch\n",
  457. "\n",
  458. "device= 0 if torch.cuda.is_available() else 'cpu'\n",
  459. "trainer = Trainer(data_bundle.get_dataset('train'), model, loss=loss, optimizer=optimizer,\n",
  460. " dev_data=data_bundle.get_dataset('dev'), metrics=metric, device=device)\n",
  461. "trainer.train()"
  462. ]
  463. },
  464. {
  465. "cell_type": "markdown",
  466. "metadata": {},
  467. "source": [
  468. "## 进行测试\n",
  469. "训练结束之后,可以通过 Tester 测试其在测试集上的性能"
  470. ]
  471. },
  472. {
  473. "cell_type": "code",
  474. "execution_count": 6,
  475. "metadata": {},
  476. "outputs": [
  477. {
  478. "data": {
  479. "application/vnd.jupyter.widget-view+json": {
  480. "model_id": "",
  481. "version_major": 2,
  482. "version_minor": 0
  483. },
  484. "text/plain": [
  485. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=17.0), HTML(value='')), layout=Layout(dis…"
  486. ]
  487. },
  488. "metadata": {},
  489. "output_type": "display_data"
  490. },
  491. {
  492. "name": "stdout",
  493. "output_type": "stream",
  494. "text": [
  495. "\r",
  496. "Evaluate data in 1.54 seconds!\n",
  497. "[tester] \n",
  498. "SpanFPreRecMetric: f=0.439024, pre=0.685279, rec=0.322967\n"
  499. ]
  500. },
  501. {
  502. "data": {
  503. "text/plain": [
  504. "{'SpanFPreRecMetric': {'f': 0.439024, 'pre': 0.685279, 'rec': 0.322967}}"
  505. ]
  506. },
  507. "execution_count": 6,
  508. "metadata": {},
  509. "output_type": "execute_result"
  510. }
  511. ],
  512. "source": [
  513. "from fastNLP import Tester\n",
  514. "tester = Tester(data_bundle.get_dataset('test'), model, metrics=metric)\n",
  515. "tester.test()"
  516. ]
  517. },
  518. {
  519. "cell_type": "markdown",
  520. "metadata": {},
  521. "source": [
  522. "## 使用更强的Bert做序列标注\n",
  523. "\n",
  524. "在fastNLP中使用Bert进行任务,您只需要把fastNLP.embeddings.StaticEmbedding 切换为 fastNLP.embeddings.BertEmbedding(可修改 device 选择显卡)。"
  525. ]
  526. },
  527. {
  528. "cell_type": "code",
  529. "execution_count": 8,
  530. "metadata": {},
  531. "outputs": [
  532. {
  533. "name": "stdout",
  534. "output_type": "stream",
  535. "text": [
  536. "loading vocabulary file /remote-home/ynzheng/.fastNLP/embedding/bert-chinese-wwm/vocab.txt\n",
  537. "Load pre-trained BERT parameters from file /remote-home/ynzheng/.fastNLP/embedding/bert-chinese-wwm/chinese_wwm_pytorch.bin.\n",
  538. "Start to generate word pieces for word.\n",
  539. "Found(Or segment into word pieces) 3384 words out of 3471.\n",
  540. "input fields after batch(if batch size is 2):\n",
  541. "\ttarget: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 26]) \n",
  542. "\tseq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
  543. "\twords: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 26]) \n",
  544. "target fields after batch(if batch size is 2):\n",
  545. "\ttarget: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 26]) \n",
  546. "\tseq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2]) \n",
  547. "\n",
  548. "training epochs started 2020-02-27-13-58-51\n"
  549. ]
  550. },
  551. {
  552. "data": {
  553. "application/vnd.jupyter.widget-view+json": {
  554. "model_id": "",
  555. "version_major": 2,
  556. "version_minor": 0
  557. },
  558. "text/plain": [
  559. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=1130.0), HTML(value='')), layout=Layout(d…"
  560. ]
  561. },
  562. "metadata": {},
  563. "output_type": "display_data"
  564. },
  565. {
  566. "data": {
  567. "application/vnd.jupyter.widget-view+json": {
  568. "model_id": "",
  569. "version_major": 2,
  570. "version_minor": 0
  571. },
  572. "text/plain": [
  573. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  574. ]
  575. },
  576. "metadata": {},
  577. "output_type": "display_data"
  578. },
  579. {
  580. "name": "stdout",
  581. "output_type": "stream",
  582. "text": [
  583. "Evaluate data in 2.7 seconds!\n",
  584. "Evaluation on dev at Epoch 1/10. Step:113/1130: \n",
  585. "SpanFPreRecMetric: f=0.008114, pre=0.019231, rec=0.005141\n",
  586. "\n"
  587. ]
  588. },
  589. {
  590. "data": {
  591. "application/vnd.jupyter.widget-view+json": {
  592. "model_id": "",
  593. "version_major": 2,
  594. "version_minor": 0
  595. },
  596. "text/plain": [
  597. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  598. ]
  599. },
  600. "metadata": {},
  601. "output_type": "display_data"
  602. },
  603. {
  604. "name": "stdout",
  605. "output_type": "stream",
  606. "text": [
  607. "Evaluate data in 2.49 seconds!\n",
  608. "Evaluation on dev at Epoch 2/10. Step:226/1130: \n",
  609. "SpanFPreRecMetric: f=0.467866, pre=0.467866, rec=0.467866\n",
  610. "\n"
  611. ]
  612. },
  613. {
  614. "data": {
  615. "application/vnd.jupyter.widget-view+json": {
  616. "model_id": "",
  617. "version_major": 2,
  618. "version_minor": 0
  619. },
  620. "text/plain": [
  621. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  622. ]
  623. },
  624. "metadata": {},
  625. "output_type": "display_data"
  626. },
  627. {
  628. "name": "stdout",
  629. "output_type": "stream",
  630. "text": [
  631. "Evaluate data in 2.6 seconds!\n",
  632. "Evaluation on dev at Epoch 3/10. Step:339/1130: \n",
  633. "SpanFPreRecMetric: f=0.566879, pre=0.482821, rec=0.686375\n",
  634. "\n"
  635. ]
  636. },
  637. {
  638. "data": {
  639. "application/vnd.jupyter.widget-view+json": {
  640. "model_id": "",
  641. "version_major": 2,
  642. "version_minor": 0
  643. },
  644. "text/plain": [
  645. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  646. ]
  647. },
  648. "metadata": {},
  649. "output_type": "display_data"
  650. },
  651. {
  652. "name": "stdout",
  653. "output_type": "stream",
  654. "text": [
  655. "Evaluate data in 2.56 seconds!\n",
  656. "Evaluation on dev at Epoch 4/10. Step:452/1130: \n",
  657. "SpanFPreRecMetric: f=0.651972, pre=0.59408, rec=0.722365\n",
  658. "\n"
  659. ]
  660. },
  661. {
  662. "data": {
  663. "application/vnd.jupyter.widget-view+json": {
  664. "model_id": "",
  665. "version_major": 2,
  666. "version_minor": 0
  667. },
  668. "text/plain": [
  669. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  670. ]
  671. },
  672. "metadata": {},
  673. "output_type": "display_data"
  674. },
  675. {
  676. "name": "stdout",
  677. "output_type": "stream",
  678. "text": [
  679. "\r",
  680. "Evaluate data in 2.69 seconds!\n",
  681. "\r",
  682. "Evaluation on dev at Epoch 5/10. Step:565/1130: \n",
  683. "\r",
  684. "SpanFPreRecMetric: f=0.640909, pre=0.574338, rec=0.724936\n",
  685. "\n"
  686. ]
  687. },
  688. {
  689. "data": {
  690. "application/vnd.jupyter.widget-view+json": {
  691. "model_id": "",
  692. "version_major": 2,
  693. "version_minor": 0
  694. },
  695. "text/plain": [
  696. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  697. ]
  698. },
  699. "metadata": {},
  700. "output_type": "display_data"
  701. },
  702. {
  703. "name": "stdout",
  704. "output_type": "stream",
  705. "text": [
  706. "Evaluate data in 2.52 seconds!\n",
  707. "Evaluation on dev at Epoch 6/10. Step:678/1130: \n",
  708. "SpanFPreRecMetric: f=0.661836, pre=0.624146, rec=0.70437\n",
  709. "\n"
  710. ]
  711. },
  712. {
  713. "data": {
  714. "application/vnd.jupyter.widget-view+json": {
  715. "model_id": "",
  716. "version_major": 2,
  717. "version_minor": 0
  718. },
  719. "text/plain": [
  720. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  721. ]
  722. },
  723. "metadata": {},
  724. "output_type": "display_data"
  725. },
  726. {
  727. "name": "stdout",
  728. "output_type": "stream",
  729. "text": [
  730. "Evaluate data in 2.67 seconds!\n",
  731. "Evaluation on dev at Epoch 7/10. Step:791/1130: \n",
  732. "SpanFPreRecMetric: f=0.683429, pre=0.615226, rec=0.768638\n",
  733. "\n"
  734. ]
  735. },
  736. {
  737. "data": {
  738. "application/vnd.jupyter.widget-view+json": {
  739. "model_id": "",
  740. "version_major": 2,
  741. "version_minor": 0
  742. },
  743. "text/plain": [
  744. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  745. ]
  746. },
  747. "metadata": {},
  748. "output_type": "display_data"
  749. },
  750. {
  751. "name": "stdout",
  752. "output_type": "stream",
  753. "text": [
  754. "\r",
  755. "Evaluate data in 2.37 seconds!\n",
  756. "\r",
  757. "Evaluation on dev at Epoch 8/10. Step:904/1130: \n",
  758. "\r",
  759. "SpanFPreRecMetric: f=0.674699, pre=0.634921, rec=0.719794\n",
  760. "\n"
  761. ]
  762. },
  763. {
  764. "data": {
  765. "application/vnd.jupyter.widget-view+json": {
  766. "model_id": "",
  767. "version_major": 2,
  768. "version_minor": 0
  769. },
  770. "text/plain": [
  771. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  772. ]
  773. },
  774. "metadata": {},
  775. "output_type": "display_data"
  776. },
  777. {
  778. "name": "stdout",
  779. "output_type": "stream",
  780. "text": [
  781. "Evaluate data in 2.42 seconds!\n",
  782. "Evaluation on dev at Epoch 9/10. Step:1017/1130: \n",
  783. "SpanFPreRecMetric: f=0.693878, pre=0.650901, rec=0.742931\n",
  784. "\n"
  785. ]
  786. },
  787. {
  788. "data": {
  789. "application/vnd.jupyter.widget-view+json": {
  790. "model_id": "",
  791. "version_major": 2,
  792. "version_minor": 0
  793. },
  794. "text/plain": [
  795. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=23.0), HTML(value='')), layout=Layout(dis…"
  796. ]
  797. },
  798. "metadata": {},
  799. "output_type": "display_data"
  800. },
  801. {
  802. "name": "stdout",
  803. "output_type": "stream",
  804. "text": [
  805. "\r",
  806. "Evaluate data in 2.46 seconds!\n",
  807. "\r",
  808. "Evaluation on dev at Epoch 10/10. Step:1130/1130: \n",
  809. "\r",
  810. "SpanFPreRecMetric: f=0.686845, pre=0.62766, rec=0.758355\n",
  811. "\n",
  812. "\r\n",
  813. "In Epoch:9/Step:1017, got best dev performance:\n",
  814. "SpanFPreRecMetric: f=0.693878, pre=0.650901, rec=0.742931\n",
  815. "Reloaded the best model.\n"
  816. ]
  817. },
  818. {
  819. "data": {
  820. "application/vnd.jupyter.widget-view+json": {
  821. "model_id": "",
  822. "version_major": 2,
  823. "version_minor": 0
  824. },
  825. "text/plain": [
  826. "HBox(children=(FloatProgress(value=0.0, layout=Layout(flex='2'), max=17.0), HTML(value='')), layout=Layout(dis…"
  827. ]
  828. },
  829. "metadata": {},
  830. "output_type": "display_data"
  831. },
  832. {
  833. "name": "stdout",
  834. "output_type": "stream",
  835. "text": [
  836. "\r",
  837. "Evaluate data in 1.96 seconds!\n",
  838. "[tester] \n",
  839. "SpanFPreRecMetric: f=0.626561, pre=0.596112, rec=0.660287\n"
  840. ]
  841. },
  842. {
  843. "data": {
  844. "text/plain": [
  845. "{'SpanFPreRecMetric': {'f': 0.626561, 'pre': 0.596112, 'rec': 0.660287}}"
  846. ]
  847. },
  848. "execution_count": 8,
  849. "metadata": {},
  850. "output_type": "execute_result"
  851. }
  852. ],
  853. "source": [
  854. "\n",
  855. "from fastNLP.io import WeiboNERPipe\n",
  856. "data_bundle = WeiboNERPipe().process_from_file()\n",
  857. "data_bundle.rename_field('chars', 'words')\n",
  858. "\n",
  859. "from fastNLP.embeddings import BertEmbedding\n",
  860. "embed = BertEmbedding(vocab=data_bundle.get_vocab('words'), model_dir_or_name='cn')\n",
  861. "model = BiLSTMCRF(embed=embed, num_classes=len(data_bundle.get_vocab('target')), num_layers=1, hidden_size=200, dropout=0.5,\n",
  862. " target_vocab=data_bundle.get_vocab('target'))\n",
  863. "\n",
  864. "from fastNLP import SpanFPreRecMetric\n",
  865. "from torch.optim import Adam\n",
  866. "from fastNLP import LossInForward\n",
  867. "metric = SpanFPreRecMetric(tag_vocab=data_bundle.get_vocab('target'))\n",
  868. "optimizer = Adam(model.parameters(), lr=2e-5)\n",
  869. "loss = LossInForward()\n",
  870. "\n",
  871. "from fastNLP import Trainer\n",
  872. "import torch\n",
  873. "device= 5 if torch.cuda.is_available() else 'cpu'\n",
  874. "trainer = Trainer(data_bundle.get_dataset('train'), model, loss=loss, optimizer=optimizer, batch_size=12,\n",
  875. " dev_data=data_bundle.get_dataset('dev'), metrics=metric, device=device)\n",
  876. "trainer.train()\n",
  877. "\n",
  878. "from fastNLP import Tester\n",
  879. "tester = Tester(data_bundle.get_dataset('test'), model, metrics=metric)\n",
  880. "tester.test()"
  881. ]
  882. },
  883. {
  884. "cell_type": "code",
  885. "execution_count": null,
  886. "metadata": {},
  887. "outputs": [],
  888. "source": []
  889. }
  890. ],
  891. "metadata": {
  892. "kernelspec": {
  893. "display_name": "Python Now",
  894. "language": "python",
  895. "name": "now"
  896. },
  897. "language_info": {
  898. "codemirror_mode": {
  899. "name": "ipython",
  900. "version": 3
  901. },
  902. "file_extension": ".py",
  903. "mimetype": "text/x-python",
  904. "name": "python",
  905. "nbconvert_exporter": "python",
  906. "pygments_lexer": "ipython3",
  907. "version": "3.8.0"
  908. }
  909. },
  910. "nbformat": 4,
  911. "nbformat_minor": 2
  912. }