# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
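#
# Illustrative usage (the checkpoint and task below are examples, not defaults
# shipped with this script):
#
#   python eval.py --model hf \
#       --model_args pretrained=EleutherAI/pythia-160m \
#       --tasks hellaswag --batch_size 8 --output_path ./eval_results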

import argparse
import json
import logging
import os
import re
import sys
from pathlib import Path

import numpy as np

import lm_eval
from lm_eval import evaluator, tasks
from lm_eval.utils import make_table


def _handle_non_serializable(o):
    """Fallback serializer for objects that json.dumps cannot encode natively."""
    if isinstance(o, (np.int64, np.int32)):
        return int(o)
    elif isinstance(o, set):
        return list(o)
    else:
        return str(o)
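
# For example, json.dumps({"n": np.int64(3)}, default=_handle_non_serializable)
# returns '{"n": 3}' instead of raising a TypeError.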


def setup_logging(verbosity):
    """Configure root logging at the requested verbosity and return a module logger."""
    logging.basicConfig(
        level=verbosity.upper(), format="%(asctime)s - %(levelname)s - %(message)s"
    )
    return logging.getLogger(__name__)


def handle_output(args, results, logger):
    """Log results and, if requested, write results and per-sample outputs to disk."""
    if not args.output_path:
        if args.log_samples:
            logger.error("Specify --output_path for logging samples.")
            sys.exit(1)
        logger.info(json.dumps(results, indent=2, default=_handle_non_serializable))
        return

    path = Path(args.output_path)
    if path.is_file() or path.with_name("results.json").is_file():
        logger.warning(f"File already exists at {path}. Results will be overwritten.")

    output_dir = path.parent if path.suffix in (".json", ".jsonl") else path
    output_dir.mkdir(parents=True, exist_ok=True)

    results_str = json.dumps(results, indent=2, default=_handle_non_serializable)
    if args.show_config:
        logger.info(results_str)

    # Write into the resolved output directory rather than joining onto
    # args.output_path, which may itself be a file path.
    file_path = output_dir.joinpath("results.json")
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(results_str)

    if args.log_samples:
        samples = results.pop("samples", {})
        for task_name, _ in results.get("configs", {}).items():
            output_name = re.sub(r"/|=", "__", args.model_args) + "_" + task_name
            sample_file = output_dir.joinpath(f"{output_name}.jsonl")
            sample_data = json.dumps(
                samples.get(task_name, {}), indent=2, default=_handle_non_serializable
            )
            sample_file.write_text(sample_data, encoding="utf-8")

    batch_sizes = ",".join(map(str, results.get("config", {}).get("batch_sizes", [])))
    summary = (
        f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), "
        f"limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
        f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
    )
    logger.info(summary)
    logger.info(make_table(results))
    if "groups" in results:
        logger.info(make_table(results, "groups"))


def load_tasks(args):
    """Return the list of task names to evaluate."""
    tasks.initialize_tasks()
    if args.open_llm_leaderboard_tasks:
        current_dir = os.getcwd()
        config_dir = os.path.join(current_dir, "open_llm_leaderboard")
        lm_eval.tasks.include_path(config_dir)
        return [
            "arc_challenge_25_shot",
            "hellaswag_10_shot",
            "truthfulqa_mc2",
            "winogrande_5_shot",
            "gsm8k",
            "mmlu",
        ]
    return args.tasks.split(",") if args.tasks else []
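
# For example, `--tasks hellaswag,arc_easy` yields ["hellaswag", "arc_easy"],
# while --open_llm_leaderboard_tasks loads the task configs from an
# open_llm_leaderboard/ directory under the current working directory.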


def parse_eval_args():
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "--model", "-m", default="hf", help="Name of model, e.g., `hf`."
    )
    parser.add_argument(
        "--tasks",
        "-t",
        default=None,
        help="Comma-separated list of tasks, or 'list' to display available tasks.",
    )
    parser.add_argument(
        "--model_args",
        "-a",
        default="",
        help="Comma-separated string arguments for the model, e.g., `pretrained=EleutherAI/pythia-160m`.",
    )
    parser.add_argument(
        "--open_llm_leaderboard_tasks",
        "-oplm",
        action="store_true",
        default=False,
        help="Evaluate the set of tasks specified by the HF Open LLM Leaderboard.",
    )
    parser.add_argument(
        "--num_fewshot",
        "-f",
        type=int,
        default=None,
        help="Number of examples in the few-shot context.",
    )
    parser.add_argument(
        "--batch_size",
        "-b",
        default=1,
        help="Batch size; can be 'auto', 'auto:N', or an integer.",
    )
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=None,
        help="Maximal batch size to try with 'auto' batch size.",
    )
    parser.add_argument(
        "--device", default=None, help="Device for evaluation, e.g., 'cuda', 'cpu'."
    )
    parser.add_argument(
        "--output_path", "-o", type=str, default=None, help="Path for saving results."
    )
    parser.add_argument(
        "--limit",
        "-L",
        type=float,
        default=None,
        help="Limit the number of examples per task.",
    )
    parser.add_argument(
        "--use_cache", "-c", default=None, help="Path to a cache db file, if used."
    )
    parser.add_argument(
        "--verbosity",
        "-v",
        default="INFO",
        help="Logging level: CRITICAL, ERROR, WARNING, INFO, DEBUG.",
    )
    parser.add_argument(
        "--gen_kwargs",
        default=None,
        help="Generation kwargs for tasks that support it.",
    )
    parser.add_argument(
        "--check_integrity",
        action="store_true",
        help="Whether to run the relevant part of the test suite for the tasks.",
    )
    parser.add_argument(
        "--write_out",
        "-w",
        action="store_true",
        default=False,
        help="Print the prompt for the first few documents.",
    )
    parser.add_argument(
        "--log_samples",
        "-s",
        action="store_true",
        default=False,
        help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis.",
    )
    parser.add_argument(
        "--show_config",
        action="store_true",
        default=False,
        help="If True, show the full config of all tasks at the end of the evaluation.",
    )
    parser.add_argument(
        "--include_path",
        type=str,
        default=None,
        help="Additional path to include if there are external tasks.",
    )
    parser.add_argument(
        "--decontamination_ngrams_path", default=None
    )  # Not currently used
    return parser.parse_args()


def evaluate_model(args):
    """Run the evaluation harness on the selected tasks and report the results."""
    try:
        task_list = load_tasks(args)
        # Customized models, such as quantized models, can be evaluated as well.
        # To add a custom model, follow this guide:
        # https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage
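        # A minimal sketch of that guide, assuming the lm_eval 0.4-style registry
        # API; `MyCustomLM` and the name "my_custom_lm" are placeholders:
        #
        #   from lm_eval.api.model import LM
        #   from lm_eval.api.registry import register_model
        #
        #   @register_model("my_custom_lm")
        #   class MyCustomLM(LM):
        #       def loglikelihood(self, requests): ...
        #       def loglikelihood_rolling(self, requests): ...
        #       def generate_until(self, requests): ...
        #
        # after which `--model my_custom_lm` would select it.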
        # Evaluate
        results = evaluator.simple_evaluate(
            model=args.model,
            model_args=args.model_args,
            tasks=task_list,
            num_fewshot=args.num_fewshot,
            batch_size=args.batch_size,
            max_batch_size=args.max_batch_size,
            device=args.device,
            use_cache=args.use_cache,
            limit=args.limit,
            decontamination_ngrams_path=args.decontamination_ngrams_path,
            check_integrity=args.check_integrity,
            write_out=args.write_out,
            log_samples=args.log_samples,
            gen_kwargs=args.gen_kwargs,
        )
        handle_output(args, results, logger)
    except Exception as e:
        logger.error(f"An error occurred during evaluation: {e}")
        sys.exit(1)


if __name__ == "__main__":
    args = parse_eval_args()
    logger = setup_logging(args.verbosity)
    evaluate_model(args)