# dataset_utils.py
  1. # Copyright (c) Meta Platforms, Inc. and affiliates.
  2. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import importlib
import importlib.util
from functools import partial
from pathlib import Path

import torch

from llama_recipes.datasets import (
    get_alpaca_dataset,
    get_grammar_dataset,
    get_samsum_dataset,
)
  12. def load_module_from_py_file(py_file: str) -> object:
  13. """
  14. This method loads a module from a py file which is not in the Python path
  15. """
  16. module_name = Path(py_file).name
  17. loader = importlib.machinery.SourceFileLoader(module_name, py_file)
  18. spec = importlib.util.spec_from_loader(module_name, loader)
  19. module = importlib.util.module_from_spec(spec)
  20. loader.exec_module(module)
  21. return module
  22. def get_custom_dataset(dataset_config, tokenizer, split: str):
  23. if ":" in dataset_config.file:
  24. module_path, func_name = dataset_config.file.split(":")
  25. else:
  26. module_path, func_name = dataset_config.file, "get_custom_dataset"
  27. if not module_path.endswith(".py"):
  28. raise ValueError(f"Dataset file {module_path} is not a .py file.")
  29. module_path = Path(module_path)
  30. if not module_path.is_file():
  31. raise FileNotFoundError(f"Dataset py file {module_path.as_posix()} does not exist or is not a file.")
  32. module = load_module_from_py_file(module_path.as_posix())
  33. try:
  34. return getattr(module, func_name)(dataset_config, tokenizer, split)
  35. except AttributeError as e:
  36. print(f"It seems like the given method name ({func_name}) is not present in the dataset .py file ({module_path.as_posix()}).")
  37. raise e
  38. DATASET_PREPROC = {
  39. "alpaca_dataset": partial(get_alpaca_dataset),
  40. "grammar_dataset": get_grammar_dataset,
  41. "samsum_dataset": get_samsum_dataset,
  42. "custom_dataset": get_custom_dataset,
  43. }
  44. def get_preprocessed_dataset(
  45. tokenizer, dataset_config, split: str = "train"
  46. ) -> torch.utils.data.Dataset:
  47. if not dataset_config.dataset in DATASET_PREPROC:
  48. raise NotImplementedError(f"{dataset_config.dataset} is not (yet) implemented")
  49. def get_split():
  50. return (
  51. dataset_config.train_split
  52. if split == "train"
  53. else dataset_config.test_split
  54. )
  55. return DATASET_PREPROC[dataset_config.dataset](
  56. dataset_config,
  57. tokenizer,
  58. get_split(),
  59. )