
test_seq2seq_encoder.py

import pytest

from fastNLP.envs.imports import _NEED_IMPORT_TORCH

if _NEED_IMPORT_TORCH:
    import torch
    from fastNLP.modules.torch.encoder.seq2seq_encoder import TransformerSeq2SeqEncoder, LSTMSeq2SeqEncoder

from fastNLP import Vocabulary
from fastNLP.embeddings.torch import StaticEmbedding


@pytest.mark.torch
class TestTransformerSeq2SeqEncoder:
    def test_case(self):
        # Build a tiny vocabulary and a 5-dim static embedding over it.
        vocab = Vocabulary().add_word_lst("This is a test .".split())
        embed = StaticEmbedding(vocab, embedding_dim=5)
        encoder = TransformerSeq2SeqEncoder(embed, num_layers=2, d_model=10, n_head=2)
        # One sequence of three token indices, plus its length.
        words_idx = torch.LongTensor([0, 1, 2]).unsqueeze(0)
        seq_len = torch.LongTensor([3])
        encoder_output, encoder_mask = encoder(words_idx, seq_len)
        # Encoder output should be (batch_size, seq_len, d_model).
        assert encoder_output.size() == (1, 3, 10)


@pytest.mark.torch
class TestBiLSTMEncoder:
    def test_case(self):
        vocab = Vocabulary().add_word_lst("This is a test .".split())
        embed = StaticEmbedding(vocab, embedding_dim=5)
        encoder = LSTMSeq2SeqEncoder(embed, hidden_size=5, num_layers=1)
        words_idx = torch.LongTensor([0, 1, 2]).unsqueeze(0)
        seq_len = torch.LongTensor([3])
        encoder_output, encoder_mask = encoder(words_idx, seq_len)
        # Mask should be (batch_size, seq_len).
        assert encoder_mask.size() == (1, 3)