code_completion_example.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

# from accelerate import init_empty_weights, load_checkpoint_and_dispatch
import os
import sys
import time

import fire
import torch
from transformers import AutoTokenizer

from llama_recipes.inference.safety_utils import get_safety_checker
from llama_recipes.inference.model_utils import load_model, load_peft_model
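# Note: the llama_recipes helpers above come from the llama-recipes package
# (installable with `pip install llama-recipes`).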

def main(
    model_name,
    peft_model: str=None,
    quantization: bool=False,
    max_new_tokens: int=100, # The maximum number of tokens to generate
    prompt_file: str=None,
    seed: int=42, # Seed value for reproducibility
    do_sample: bool=True, # Whether or not to use sampling; use greedy decoding otherwise.
    min_length: int=None, # The minimum length of the sequence to be generated, input prompt + min_new_tokens
    use_cache: bool=True, # [optional] Whether or not the model should use the past key/values attentions (if applicable to the model) to speed up decoding.
    top_p: float=0.9, # [optional] If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
    temperature: float=0.6, # [optional] The value used to modulate the next token probabilities.
    top_k: int=50, # [optional] The number of highest probability vocabulary tokens to keep for top-k-filtering.
    repetition_penalty: float=1.0, # The parameter for repetition penalty. 1.0 means no penalty.
    length_penalty: int=1, # [optional] Exponential penalty to the length that is used with beam-based generation.
    enable_azure_content_safety: bool=False, # Enable safety check with Azure Content Safety API
    enable_sensitive_topics: bool=False, # Enable check for sensitive topics using AuditNLG APIs
    enable_salesforce_content_safety: bool=True, # Enable safety check with Salesforce Safety Flan-T5
    enable_llamaguard_content_safety: bool=False, # Enable safety check with Llama Guard
    use_fast_kernels: bool=True, # Enable using SDPA from PyTorch Accelerated Transformers, which can use Flash Attention and xFormers memory-efficient kernels
    **kwargs
):
    if prompt_file is not None:
        assert os.path.exists(
            prompt_file
        ), f"Provided prompt file does not exist: {prompt_file}"
        with open(prompt_file, "r") as f:
            user_prompt = f.read()
    else:
        print("No user prompt provided. Exiting.")
        sys.exit(1)

    # Set the seeds for reproducibility
    torch.cuda.manual_seed(seed)
    torch.manual_seed(seed)

    model = load_model(model_name, quantization, use_fast_kernels)
    if peft_model:
        model = load_peft_model(model, peft_model)

    model.eval()

    tokenizer = AutoTokenizer.from_pretrained(model_name)

    safety_checker = get_safety_checker(enable_azure_content_safety,
                                        enable_sensitive_topics,
                                        enable_salesforce_content_safety,
                                        enable_llamaguard_content_safety,
                                        )
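    # Each enabled checker is a callable that, judging by the unpacking
    # below, returns a (method, is_safe, report) tuple for the checked text.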

    # Safety check of the user prompt
    safety_results = [check(user_prompt) for check in safety_checker]
    are_safe = all([r[1] for r in safety_results])
    if are_safe:
        print("User prompt deemed safe.")
        print(f"User prompt:\n{user_prompt}")
    else:
        print("User prompt deemed unsafe.")
        for method, is_safe, report in safety_results:
            if not is_safe:
                print(method)
                print(report)
        print("Skipping the inference as the prompt is not safe.")
        sys.exit(1)  # Exit the program with an error status

    batch = tokenizer(user_prompt, return_tensors="pt")
    batch = {k: v.to("cuda") for k, v in batch.items()}
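    # Note: the .to("cuda") above assumes a CUDA device is available; a
    # CPU-safe alternative sketch would be:
    #   device = "cuda" if torch.cuda.is_available() else "cpu"
    #   batch = {k: v.to(device) for k, v in batch.items()}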

    start = time.perf_counter()
    with torch.no_grad():
        outputs = model.generate(
            **batch,
            max_new_tokens=max_new_tokens,
            do_sample=do_sample,
            top_p=top_p,
            temperature=temperature,
            min_length=min_length,
            use_cache=use_cache,
            top_k=top_k,
            repetition_penalty=repetition_penalty,
            length_penalty=length_penalty,
            **kwargs
        )
    e2e_inference_time = (time.perf_counter() - start) * 1000
    print(f"The inference time is {e2e_inference_time} ms")

    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
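    # Note: for decoder-only models, outputs[0] contains the prompt tokens
    # followed by the generated tokens, so output_text echoes the prompt;
    # slicing off the first batch["input_ids"].shape[1] tokens would keep
    # only the completion, if that is preferred.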

    # Safety check of the model output
    safety_results = [check(output_text) for check in safety_checker]
    are_safe = all([r[1] for r in safety_results])
    if are_safe:
        print("User input and model output deemed safe.")
        print(f"Model output:\n{output_text}")
    else:
        print("Model output deemed unsafe.")
        for method, is_safe, report in safety_results:
            if not is_safe:
                print(method)
                print(report)

if __name__ == "__main__":
    fire.Fire(main)
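
# Example invocation via fire's generated CLI (the model id and prompt file
# below are illustrative placeholders, not files shipped with this script):
#
#   python code_completion_example.py \
#       --model_name meta-llama/Llama-2-7b-hf \
#       --prompt_file my_prompt.txt \
#       --max_new_tokens 200 \
#       --quantization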