Matthias Reso 1 year ago
parent
commit
53fd82355f
3 changed files with 9 additions and 68 deletions
  1. tests/datasets/test_custom_dataset.py (+2 −6)
  2. tests/datasets/test_grammar_datasets.py (+3 −7)
  3. tests/datasets/test_samsum_datasets.py (+4 −55)

tests/datasets/test_custom_dataset.py (+2 −6)

@@ -22,14 +22,10 @@ def check_padded_entry(batch):
 @patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
 @patch('llama_recipes.finetuning.optim.AdamW')
 @patch('llama_recipes.finetuning.StepLR')
-def test_custom_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker):
+def test_custom_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker, setup_tokenizer):
     from llama_recipes.finetuning import main
 
-    #Align with Llama 2 tokenizer
-    tokenizer.from_pretrained.return_value = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
-    tokenizer.from_pretrained.return_value.add_special_tokens({'bos_token': '<s>', 'eos_token': '</s>'})
-    tokenizer.from_pretrained.return_value.bos_token_id = 1
-    tokenizer.from_pretrained.return_value.eos_token_id = 2
+    setup_tokenizer(tokenizer)
 
     kwargs = {
         "dataset": "custom_dataset",

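All three tests now take their tokenizer mock configuration from a shared setup_tokenizer fixture instead of repeating it inline. The fixture itself is not part of this diff (presumably it lives in the tests' conftest.py); a minimal sketch of what it might look like, assuming it simply wraps the inline setup removed above:

# tests/conftest.py -- hypothetical sketch; the real fixture is not shown in this commit
import pytest
from transformers import LlamaTokenizer


@pytest.fixture
def setup_tokenizer():
    def _setup(tokenizer_mock):
        # Align the mocked tokenizer with the Llama 2 tokenizer,
        # mirroring the inline setup removed from each test.
        llama_tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
        llama_tokenizer.add_special_tokens({"bos_token": "<s>", "eos_token": "</s>"})
        tokenizer_mock.from_pretrained.return_value = llama_tokenizer
        tokenizer_mock.from_pretrained.return_value.bos_token_id = 1
        tokenizer_mock.from_pretrained.return_value.eos_token_id = 2

    return _setup

Each test then receives the fixture as an argument and calls setup_tokenizer(tokenizer) on the patched LlamaTokenizer mock, as in the hunks below.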
tests/datasets/test_grammar_datasets.py (+3 −7)

@@ -11,19 +11,15 @@ from transformers import LlamaTokenizer
 @patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
 @patch('llama_recipes.finetuning.optim.AdamW')
 @patch('llama_recipes.finetuning.StepLR')
-def test_grammar_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker):
+def test_grammar_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker, setup_tokenizer):
     from llama_recipes.finetuning import main
 
-    #Align with Llama 2 tokenizer
-    tokenizer.from_pretrained.return_value = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
-    tokenizer.from_pretrained.return_value.add_special_tokens({'bos_token': '<s>', 'eos_token': '</s>'})
-    tokenizer.from_pretrained.return_value.bos_token_id = 1
-    tokenizer.from_pretrained.return_value.eos_token_id = 2
+    setup_tokenizer(tokenizer)
 
     BATCH_SIZE = 8
     kwargs = {
         "model_name": "decapoda-research/llama-7b-hf",
-        "batch_size_training": 8,
+        "batch_size_training": BATCH_SIZE,
         "val_batch_size": 1,
         "use_peft": False,
         "dataset": "grammar_dataset",

tests/datasets/test_samsum_datasets.py (+4 −55)

@@ -1,29 +1,24 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
 # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
 
+from functools import partial
 from unittest.mock import patch
 
-from transformers import LlamaTokenizer
-
 
 @patch('llama_recipes.finetuning.train')
 @patch('llama_recipes.finetuning.LlamaTokenizer')
 @patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
 @patch('llama_recipes.finetuning.optim.AdamW')
 @patch('llama_recipes.finetuning.StepLR')
-def test_samsum_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker):
+def test_samsum_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker, setup_tokenizer):
     from llama_recipes.finetuning import main
 
-    #Align with Llama 2 tokenizer
-    tokenizer.from_pretrained.return_value = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
-    tokenizer.from_pretrained.return_value.add_special_tokens({'bos_token': '<s>', 'eos_token': '</s>'})
-    tokenizer.from_pretrained.return_value.bos_token_id = 1
-    tokenizer.from_pretrained.return_value.eos_token_id = 2
+    setup_tokenizer(tokenizer)
 
     BATCH_SIZE = 8
     kwargs = {
         "model_name": "decapoda-research/llama-7b-hf",
-        "batch_size_training": 8,
+        "batch_size_training": BATCH_SIZE,
         "val_batch_size": 1,
         "use_peft": False,
         "dataset": "samsum_dataset",
@@ -56,49 +51,3 @@ def test_samsum_dataset(step_lr, optimizer, get_model, tokenizer, train, mocker)
     assert batch["input_ids"][0][0] == 1
     assert batch["labels"][0][-1] == 2
     assert batch["input_ids"][0][-1] == 2
-
-
-@patch('llama_recipes.finetuning.train')
-@patch('llama_recipes.finetuning.LlamaTokenizer')
-@patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
-@patch('llama_recipes.finetuning.optim.AdamW')
-@patch('llama_recipes.finetuning.StepLR')
-def test_samsum_dataset_packing(step_lr, optimizer, get_model, tokenizer, train, mocker):
-    from llama_recipes.finetuning import main
-
-    #Align with Llama 2 tokenizer
-    tokenizer.from_pretrained.return_value = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
-    tokenizer.from_pretrained.return_value.add_special_tokens({'bos_token': '<s>', 'eos_token': '</s>'})
-    tokenizer.from_pretrained.return_value.bos_token_id = 1
-    tokenizer.from_pretrained.return_value.eos_token_id = 2
-
-    BATCH_SIZE = 8
-    kwargs = {
-        "model_name": "decapoda-research/llama-7b-hf",
-        "batch_size_training": 8,
-        "val_batch_size": 1,
-        "use_peft": False,
-        "dataset": "samsum_dataset",
-        "batching_strategy": "packing",
-        }
-
-    main(**kwargs)
-
-    assert train.call_count == 1
-
-    args, kwargs = train.call_args
-    train_dataloader = args[1]
-    eval_dataloader = args[2]
-
-    assert len(train_dataloader) == 96
-    assert len(eval_dataloader) == 42
-
-    batch = next(iter(train_dataloader))
-
-    assert "labels" in batch.keys()
-    assert "input_ids" in batch.keys()
-    assert "attention_mask" in batch.keys()
-
-    assert batch["labels"][0].size(0) == 4096
-    assert batch["input_ids"][0].size(0) == 4096
-    assert batch["attention_mask"][0].size(0) == 4096