test_samsum_datasets.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

from unittest.mock import patch


@patch('llama_recipes.finetuning.train')
@patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
@patch('llama_recipes.finetuning.LlamaTokenizer.from_pretrained')
@patch('llama_recipes.finetuning.optim.AdamW')
@patch('llama_recipes.finetuning.StepLR')
def test_custom_dataset(step_lr, optimizer, tokenizer, get_model, train, mocker):
    from llama_recipes.finetuning import main

    # Mock the tokenizer so each call returns input_ids/attention_mask sized to the input text.
    tokenizer.return_value = mocker.MagicMock(side_effect=lambda x: {"input_ids": [len(x)*[0,]], "attention_mask": [len(x)*[0,]]})

    kwargs = {
        "batch_size_training": 1,
        "use_peft": False,
        "dataset": "samsum_dataset",
    }

    main(**kwargs)

    # main() should hand the dataloaders to train() exactly once.
    assert train.call_count == 1

    args, kwargs = train.call_args
    train_dataloader = args[1]
    eval_dataloader = args[2]

    VAL_SAMPLES = 818
    TRAIN_SAMPLES = 14732
    CONCAT_SIZE = 2048

    # Training samples are packed into CONCAT_SIZE-token chunks; eval samples are not packed.
    assert len(train_dataloader) == TRAIN_SAMPLES // CONCAT_SIZE
    assert len(eval_dataloader) == VAL_SAMPLES