{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/yh/miniconda2/envs/python3/lib/python3.6/site-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n", " \" (e.g. in jupyter console)\", TqdmExperimentalWarning)\n" ] }, { "data": { "text/plain": [ "DataSet({'raw_sent': this is a bad idea . type=str,\n", "'label': 0 type=int,\n", "'word_str_lst': ['this', 'is', 'a', 'bad', 'idea', '.'] type=list,\n", "'words': [4, 2, 5, 6, 7, 3] type=list},\n", "{'raw_sent': it is great . type=str,\n", "'label': 1 type=int,\n", "'word_str_lst': ['it', 'is', 'great', '.'] type=list,\n", "'words': [8, 2, 9, 3] type=list})" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 假设有以下的DataSet, 这里只是为了举例所以只选择了两个sample\n", "import sys\n", "import os\n", "sys.path.append('/Users/yh/Desktop/fastNLP/fastNLP')\n", "\n", "from fastNLP import DataSet\n", "from fastNLP import Instance\n", "from fastNLP import Vocabulary\n", "\n", "dataset = DataSet()\n", "dataset.append(Instance(raw_sent='This is a bad idea .', label=0))\n", "dataset.append(Instance(raw_sent='It is great .', label=1))\n", "\n", "# 按照fastNLP_10min_tutorial.ipynb的步骤,对数据进行一些处理。这里为了演示padding操作,把field的名称做了一些改变\n", "dataset.apply(lambda x:x['raw_sent'].lower(), new_field_name='raw_sent')\n", "dataset.apply(lambda x:x['raw_sent'].split(), new_field_name='word_str_lst')\n", "\n", "# 建立Vocabulary\n", "word_vocab = Vocabulary()\n", "dataset.apply(lambda x:word_vocab.update(x['word_str_lst']))\n", "dataset.apply(lambda x:[word_vocab.to_index(word) for word in x['word_str_lst']], new_field_name='words')\n", "\n", "# 检查以下是否得到我们想要的结果了\n", "dataset[:2]" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "batch_x has: {'word_str_lst': array([list(['this', 'is', 'a', 'bad', 'idea', '.']),\n", " list(['it', 'is', 'great', '.'])], dtype=object), 'words': tensor([[4, 2, 5, 6, 7, 3],\n", " [8, 2, 9, 3, 0, 0]])}\n", "batch_y has: {'label': tensor([0, 1])}\n" ] }, { "data": { "text/plain": [ "'\"\\n结果中\\n Batch会对元素类型(元素即最内层的数据,raw_sent为str,word_str_lst为str,words为int, label为int)为int或者float的数据进行默认\\n padding,而非int或float的则不进行padding。但若每个Instance中该field为二维数据,也不进行padding。因为二维数据的padding涉及到\\n 两个维度的padding,不容易自动判断padding的形式。\\n'" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 将field设置为input或者target\n", "dataset.set_input('word_str_lst')\n", "dataset.set_input('words')\n", "dataset.set_target('label')\n", "\n", "# 使用Batch取出batch数据\n", "from fastNLP.core.batch import Batch\n", "from fastNLP.core.sampler import RandomSampler\n", "\n", "batch_iterator = Batch(dataset=dataset, batch_size=2, sampler=RandomSampler())\n", "for batch_x, batch_y in batch_iterator:\n", " print(\"batch_x has: \", batch_x)\n", " print(\"batch_y has: \", batch_y)\n", "\"\"\"\"\n", "结果中\n", " Batch会对元素类型(元素即最内层的数据,raw_sent为str,word_str_lst为str,words为int, label为int)为int或者float的数据进行默认\n", " padding,而非int或float的则不进行padding。但若每个Instance中该field为二维数据,也不进行padding。因为二维数据的padding涉及到\n", " 两个维度的padding,不容易自动判断padding的形式。\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "batch_x has: {'word_str_lst': array([list(['it', 'is', 
'great', '.']),\n", " list(['this', 'is', 'a', 'bad', 'idea', '.'])], dtype=object), 'words': tensor([[ 8, 2, 9, 3, -100, -100],\n", " [ 4, 2, 5, 6, 7, 3]])}\n", "batch_y has: {'label': tensor([1, 0])}\n" ] } ], "source": [ "# 所有的pad_val都默认为0,如果需要修改某一个field的默认pad值,可以通过DataSet.set_pad_val(field_name, pad_val)进行修改\n", "# 若需要将word的padding修改为-100\n", "dataset.set_pad_val('words', pad_val=-100)\n", "batch_iterator = Batch(dataset=dataset, batch_size=2, sampler=RandomSampler())\n", "for batch_x, batch_y in batch_iterator:\n", " print(\"batch_x has: \", batch_x)\n", " print(\"batch_y has: \", batch_y)\n", "# pad的值修改为-100了" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "DataSet({'raw_sent': this is a bad idea . type=str,\n", "'label': 0 type=int,\n", "'word_str_lst': ['this', 'is', 'a', 'bad', 'idea', '.'] type=list,\n", "'words': [4, 2, 5, 6, 7, 3] type=list,\n", "'char_str_lst': [['t', 'h', 'i', 's'], ['i', 's'], ['a'], ['b', 'a', 'd'], ['i', 'd', 'e', 'a'], ['.']] type=list,\n", "'chars': [[4, 9, 2, 5], [2, 5], [3], [10, 3, 6], [2, 6, 7, 3], [8]] type=list},\n", "{'raw_sent': it is great . type=str,\n", "'label': 1 type=int,\n", "'word_str_lst': ['it', 'is', 'great', '.'] type=list,\n", "'words': [8, 2, 9, 3] type=list,\n", "'char_str_lst': [['i', 't'], ['i', 's'], ['g', 'r', 'e', 'a', 't'], ['.']] type=list,\n", "'chars': [[2, 4], [2, 5], [11, 12, 7, 3, 4], [8]] type=list})" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 若需要使用二维padding或指定padding方式,可以通过设置该field的padder实现,下面以英文的character padding为例。在某些场景下,可能想要\n", "# 使用英文word的character作为特征,character的padding为二维padding,fastNLP默认只会进行一维padding。\n", "\n", "dataset.apply(lambda x: [[c for c in word] for word in x['word_str_lst']], new_field_name='char_str_lst')\n", "char_vocab = Vocabulary()\n", "dataset.apply(lambda x:[char_vocab.update(chars) for chars in x['char_str_lst']])\n", "dataset.apply(lambda x:[[char_vocab.to_index(c) for c in chars] for chars in x['char_str_lst']],new_field_name='chars')\n", "dataset[:2]" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "batch_x has: {'word_str_lst': array([list(['this', 'is', 'a', 'bad', 'idea', '.']),\n", " list(['it', 'is', 'great', '.'])], dtype=object), 'words': tensor([[ 4, 2, 5, 6, 7, 3],\n", " [ 8, 2, 9, 3, -100, -100]]), 'chars': array([list([[4, 9, 2, 5], [2, 5], [3], [10, 3, 6], [2, 6, 7, 3], [8]]),\n", " list([[2, 4], [2, 5], [11, 12, 7, 3, 4], [8]])], dtype=object)}\n", "batch_y has: {'label': tensor([0, 1])}\n" ] }, { "data": { "text/plain": [ "'\\n 其它field与之前的是相同的。chars因为存在两个维度需要padding,不能自动决定padding方式,所以直接输出了原始形式。\\n'" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 如果不针对二维的character指定padding方法\n", "dataset.set_input('chars')\n", "batch_iterator = Batch(dataset=dataset, batch_size=2, sampler=RandomSampler())\n", "for batch_x, batch_y in batch_iterator:\n", " print(\"batch_x has: \", batch_x)\n", " print(\"batch_y has: \", batch_y)\n", " \n", "\"\"\"\n", " 其它field与之前的是相同的。chars因为存在两个维度需要padding,不能自动决定padding方式,所以直接输出了原始形式。\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "batch_x has: {'word_str_lst': array([list(['this', 'is', 'a', 'bad', 'idea', '.']),\n", " list(['it', 'is', 'great', '.'])], dtype=object), 'words': tensor([[ 4, 2, 5, 6, 7, 3],\n", " [ 8, 2, 
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}