chat_completion.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

# from accelerate import init_empty_weights, load_checkpoint_and_dispatch

import fire
import os
import sys

import torch
from transformers import LlamaTokenizer

from llama_recipes.inference.chat_utils import read_dialogs_from_file, format_tokens
from llama_recipes.inference.model_utils import load_model, load_peft_model
from llama_recipes.inference.safety_utils import get_safety_checker
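
# Example invocation via fire (illustrative only; the model id and prompt file are placeholders):
#   python chat_completion.py --model_name meta-llama/Llama-2-7b-chat-hf \
#       --prompt_file chats.json --quantization --max_new_tokens 256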

def main(
    model_name,
    peft_model: str=None,
    quantization: bool=False,
    max_new_tokens: int=256, # The maximum number of tokens to generate
    min_new_tokens: int=0, # The minimum number of tokens to generate
    prompt_file: str=None,
    seed: int=42, # Seed value for reproducibility
    safety_score_threshold: float=0.5,
    do_sample: bool=True, # Whether or not to use sampling; use greedy decoding otherwise.
    use_cache: bool=True, # [optional] Whether or not the model should use the past key/values attentions (if applicable to the model) to speed up decoding.
    top_p: float=1.0, # [optional] If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
    temperature: float=1.0, # [optional] The value used to modulate the next token probabilities.
    top_k: int=50, # [optional] The number of highest probability vocabulary tokens to keep for top-k filtering.
    repetition_penalty: float=1.0, # The parameter for repetition penalty. 1.0 means no penalty.
    length_penalty: int=1, # [optional] Exponential penalty to the length that is used with beam-based generation.
    enable_azure_content_safety: bool=False, # Enable safety check with Azure content safety API
    enable_sensitive_topics: bool=False, # Enable check for sensitive topics using AuditNLG APIs
    enable_saleforce_content_safety: bool=True, # Enable safety check with Salesforce safety flan-t5
    use_fast_kernels: bool = False, # Enable using SDPA from PyTorch Accelerated Transformers; this makes use of Flash Attention and Xformer memory-efficient kernels
    **kwargs
):
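    # Dialogs can be supplied either from a prompt file or piped in on stdin.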
    if prompt_file is not None:
        assert os.path.exists(
            prompt_file
        ), f"Provided Prompt file does not exist {prompt_file}"

        dialogs = read_dialogs_from_file(prompt_file)

    elif not sys.stdin.isatty():
        dialogs = "\n".join(sys.stdin.readlines())
    else:
        print("No user prompt provided. Exiting.")
        sys.exit(1)

    print(f"User dialogs:\n{dialogs}")
    print("\n==================================\n")

    # Set the seeds for reproducibility
    torch.cuda.manual_seed(seed)
    torch.manual_seed(seed)
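
    # Load the base model (optionally quantized) and, if a path is given, wrap it with the PEFT/LoRA adapter weights.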
    model = load_model(model_name, quantization)
    if peft_model:
        model = load_peft_model(model, peft_model)
    if use_fast_kernels:
        """
        Setting 'use_fast_kernels' will enable the use of Flash Attention or
        Xformer memory-efficient kernels based on the hardware being used.
        This speeds up inference for batched inputs.
        """
        try:
            from optimum.bettertransformer import BetterTransformer
            model = BetterTransformer.transform(model)
        except ImportError:
            print("Module 'optimum' not found. Please install 'optimum' before proceeding.")
    tokenizer = LlamaTokenizer.from_pretrained(model_name)
    tokenizer.add_special_tokens(
        {
            "pad_token": "<PAD>",
        }
    )
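
    # format_tokens converts each dialog into the Llama 2 chat prompt format and tokenizes it.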
    chats = format_tokens(dialogs, tokenizer)

    with torch.no_grad():
        for idx, chat in enumerate(chats):
            safety_checker = get_safety_checker(enable_azure_content_safety,
                                                enable_sensitive_topics,
                                                enable_saleforce_content_safety,
                                                )
            # Safety check of the user prompt
            safety_results = [check(dialogs[idx][0]["content"]) for check in safety_checker]
            are_safe = all([r[1] for r in safety_results])
            if are_safe:
                print("User prompt deemed safe.")
                print("User prompt:\n", dialogs[idx][0]["content"])
                print("\n==================================\n")
            else:
                print("User prompt deemed unsafe.")
                for method, is_safe, report in safety_results:
                    if not is_safe:
                        print(method)
                        print(report)
                print("Skipping the inference as the prompt is not safe.")
                sys.exit(1)  # Exit the program with an error status

            tokens = torch.tensor(chat).long()
            tokens = tokens.unsqueeze(0)
            tokens = tokens.to("cuda:0")
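
            # Generate a completion for this chat with the configured decoding parameters.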
            outputs = model.generate(
                input_ids=tokens,
                max_new_tokens=max_new_tokens,
                do_sample=do_sample,
                top_p=top_p,
                temperature=temperature,
                use_cache=use_cache,
                top_k=top_k,
                repetition_penalty=repetition_penalty,
                length_penalty=length_penalty,
                **kwargs
            )

            output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Safety check of the model output
            safety_results = [check(output_text) for check in safety_checker]
            are_safe = all([r[1] for r in safety_results])
            if are_safe:
                print("User input and model output deemed safe.")
                print(f"Model output:\n{output_text}")
                print("\n==================================\n")
            else:
                print("Model output deemed unsafe.")
                for method, is_safe, report in safety_results:
                    if not is_safe:
                        print(method)
                        print(report)


if __name__ == "__main__":
    fire.Fire(main)