# chat_utils.py
  1. # Copyright (c) Meta Platforms, Inc. and affiliates.
  2. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
  3. from typing import List, Literal, Optional, Tuple, TypedDict, Union
  4. import json
# A chat role: either the end user or the model's reply.
Role = Literal["user", "assistant"]


class Message(TypedDict):
    # Who produced this message ("user" or "assistant";
    # a leading "system" message is also accepted by format_tokens).
    role: Role
    # Raw message text.
    content: str


# A dialog is an ordered list of messages, alternating user/assistant.
Dialog = List[Message]
  10. B_INST, E_INST = "[INST]", "[/INST]"
  11. B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
  12. DEFAULT_SYSTEM_PROMPT = """\
  13. You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
  14. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
  15. def format_tokens(dialogs, tokenizer):
  16. prompt_tokens = []
  17. for dialog in dialogs:
  18. if dialog[0]["role"] != "system":
  19. dialog = [
  20. {
  21. "role": "system",
  22. "content": DEFAULT_SYSTEM_PROMPT,
  23. }
  24. ] + dialog
  25. dialog = [
  26. {
  27. "role": dialog[1]["role"],
  28. "content": B_SYS
  29. + dialog[0]["content"]
  30. + E_SYS
  31. + dialog[1]["content"],
  32. }
  33. ] + dialog[2:]
  34. assert all([msg["role"] == "user" for msg in dialog[::2]]) and all(
  35. [msg["role"] == "assistant" for msg in dialog[1::2]]
  36. ), (
  37. "model only supports 'system','user' and 'assistant' roles, "
  38. "starting with user and alternating (u/a/u/a/u...)"
  39. )
  40. """
  41. Please verify that yout tokenizer support adding "[INST]", "[/INST]" to your inputs.
  42. Here, we are adding it manually.
  43. """
  44. dialog_tokens: List[int] = sum(
  45. [
  46. tokenizer.encode(
  47. f"{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} ",
  48. )
  49. for prompt, answer in zip(dialog[::2], dialog[1::2])
  50. ],
  51. [],
  52. )
  53. assert (
  54. dialog[-1]["role"] == "user"
  55. ), f"Last message must be from user, got {dialog[-1]['role']}"
  56. dialog_tokens += tokenizer.encode(
  57. f"{B_INST} {(dialog[-1]['content']).strip()} {E_INST}",
  58. )
  59. prompt_tokens.append(dialog_tokens)
  60. return prompt_tokens
  61. def read_dialogs_from_file(file_path):
  62. with open(file_path, 'r') as file:
  63. dialogs = json.load(file)
  64. return dialogs