inference.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

# from accelerate import init_empty_weights, load_checkpoint_and_dispatch

import fire
import os
import sys
import time

import torch
from transformers import LlamaTokenizer

from llama_recipes.inference.safety_utils import get_safety_checker
from llama_recipes.inference.model_utils import load_model, load_peft_model

from accelerate.utils import is_xpu_available


def main(
    model_name,
    peft_model: str=None,
    quantization: bool=False,
    max_new_tokens: int=100, # The maximum number of tokens to generate
    prompt_file: str=None,
    seed: int=42, # Seed value for reproducibility
    do_sample: bool=True, # Whether or not to use sampling; use greedy decoding otherwise.
    min_length: int=None, # The minimum length of the sequence to be generated, input prompt + min_new_tokens
    use_cache: bool=True, # [optional] Whether or not the model should use the past key/values attentions (if applicable to the model) to speed up decoding.
    top_p: float=1.0, # [optional] If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
    temperature: float=1.0, # [optional] The value used to modulate the next token probabilities.
    top_k: int=50, # [optional] The number of highest probability vocabulary tokens to keep for top-k filtering.
    repetition_penalty: float=1.0, # The parameter for repetition penalty. 1.0 means no penalty.
    length_penalty: int=1, # [optional] Exponential penalty to the length that is used with beam-based generation.
    enable_azure_content_safety: bool=False, # Enable safety check with Azure content safety API
    enable_sensitive_topics: bool=False, # Enable check for sensitive topics using AuditNLG APIs
    enable_salesforce_content_safety: bool=True, # Enable safety check with Salesforce safety flan t5
    max_padding_length: int=None, # The max padding length to be used with tokenizer padding the prompts.
    use_fast_kernels: bool=False, # Enable using SDPA from PyTorch Accelerated Transformers, making use of Flash Attention and Xformers memory-efficient kernels
    **kwargs
):
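    # The prompt is read either from --prompt_file or, failing that, from stdin
    # (e.g. piped input); with neither source available the script exits.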
    if prompt_file is not None:
        assert os.path.exists(
            prompt_file
        ), f"Provided Prompt file does not exist {prompt_file}"
        with open(prompt_file, "r") as f:
            user_prompt = "\n".join(f.readlines())
    elif not sys.stdin.isatty():
        user_prompt = "\n".join(sys.stdin.readlines())
    else:
        print("No user prompt provided. Exiting.")
        sys.exit(1)

    # Set the seeds for reproducibility
    if is_xpu_available():
        torch.xpu.manual_seed(seed)
    else:
        torch.cuda.manual_seed(seed)
    torch.manual_seed(seed)

    model = load_model(model_name, quantization)
    if peft_model:
        model = load_peft_model(model, peft_model)

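    # Put the model in evaluation mode so dropout and other training-only behaviour is disabled during generation.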
    model.eval()

    if use_fast_kernels:
        """
        Setting 'use_fast_kernels' will enable
        the use of Flash Attention or Xformers memory-efficient kernels
        based on the hardware being used. This would speed up inference when used for batched inputs.
        """
        try:
            from optimum.bettertransformer import BetterTransformer
            model = BetterTransformer.transform(model)
        except ImportError:
            print("Module 'optimum' not found. Please install 'optimum' before proceeding.")

    tokenizer = LlamaTokenizer.from_pretrained(model_name)
    tokenizer.add_special_tokens(
        {
            "pad_token": "<PAD>",
        }
    )
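    # Grow the embedding matrix by one row to account for the newly added <PAD> token.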
    model.resize_token_embeddings(model.config.vocab_size + 1)

    safety_checker = get_safety_checker(enable_azure_content_safety,
                                        enable_sensitive_topics,
                                        enable_salesforce_content_safety,
                                        )

    # Safety check of the user prompt
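    # Each enabled checker returns a (method, is_safe, report) tuple for the given text.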
    safety_results = [check(user_prompt) for check in safety_checker]
    are_safe = all([r[1] for r in safety_results])
    if are_safe:
        print("User prompt deemed safe.")
        print(f"User prompt:\n{user_prompt}")
    else:
        print("User prompt deemed unsafe.")
        for method, is_safe, report in safety_results:
            if not is_safe:
                print(method)
                print(report)
        print("Skipping the inference as the prompt is not safe.")
        sys.exit(1)  # Exit the program with an error status

    batch = tokenizer(user_prompt, padding='max_length', truncation=True, max_length=max_padding_length, return_tensors="pt")
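    # Move the tokenized prompt to the accelerator device (XPU if available, otherwise CUDA).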
    if is_xpu_available():
        batch = {k: v.to("xpu") for k, v in batch.items()}
    else:
        batch = {k: v.to("cuda") for k, v in batch.items()}

    start = time.perf_counter()
    with torch.no_grad():
        outputs = model.generate(
            **batch,
            max_new_tokens=max_new_tokens,
            do_sample=do_sample,
            top_p=top_p,
            temperature=temperature,
            min_length=min_length,
            use_cache=use_cache,
            top_k=top_k,
            repetition_penalty=repetition_penalty,
            length_penalty=length_penalty,
            **kwargs
        )
    e2e_inference_time = (time.perf_counter()-start)*1000
    print(f"the inference time is {e2e_inference_time} ms")
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Safety check of the model output
    safety_results = [check(output_text) for check in safety_checker]
    are_safe = all([r[1] for r in safety_results])
    if are_safe:
        print("User input and model output deemed safe.")
        print(f"Model output:\n{output_text}")
    else:
        print("Model output deemed unsafe.")
        for method, is_safe, report in safety_results:
            if not is_safe:
                print(method)
                print(report)


if __name__ == "__main__":
    fire.Fire(main)
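
# fire.Fire(main) exposes each keyword argument of main() as a command-line --flag of the same name.
# Illustrative invocation (model path and prompt file are placeholders, not prescriptive):
#   python inference.py --model_name <path/to/llama-2-model> --prompt_file prompt.txt --quantization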