
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

from unittest.mock import patch
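
# Mock out the training loop, model/tokenizer loading, optimizer, and LR
# scheduler so main() can run without downloading weights or training;
# only the dataset/dataloader setup is exercised for real.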
@patch('llama_recipes.finetuning.train')
@patch('llama_recipes.finetuning.LlamaForCausalLM.from_pretrained')
@patch('llama_recipes.finetuning.LlamaTokenizer.from_pretrained')
@patch('llama_recipes.finetuning.optim.AdamW')
@patch('llama_recipes.finetuning.StepLR')
def test_custom_dataset(step_lr, optimizer, tokenizer, get_model, train, mocker):
    from llama_recipes.finetuning import main
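
    # Stub tokenizer: return input_ids/attention_mask lists whose length tracks
    # the input string, so preprocessing produces deterministic dataset sizes.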
    tokenizer.return_value = mocker.MagicMock(side_effect=lambda x: {"input_ids": [len(x)*[0,]], "attention_mask": [len(x)*[0,]]})
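
    # Minimal finetuning config: load the custom dataset from the samsum example script.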
    kwargs = {
        "dataset": "custom_dataset",
        "custom_dataset.file": "examples/custom_dataset.py:get_preprocessed_samsum",
        "batch_size_training": 1,
        "use_peft": False,
    }

    main(**kwargs)
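
    # main() should hand off to the (mocked) train loop exactly once.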
    assert train.call_count == 1

    args, kwargs = train.call_args
    train_dataloader = args[1]
    eval_dataloader = args[2]
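
    # Expected dataloader lengths for the samsum splits, given token packing to
    # CONCAT_SIZE for training and a batch size of 1 for evaluation.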
    VAL_SAMPLES = 818
    TRAIN_SAMPLES = 14732
    CONCAT_SIZE = 2048

    assert len(train_dataloader) == TRAIN_SAMPLES // CONCAT_SIZE
    assert len(eval_dataloader) == VAL_SAMPLES