Browse Source

The tokenizer will not add eos_token by default

Yuanhao 1 year ago
parent
commit
e554c1c8bf
1 changed file with 2 additions and 2 deletions
  1. 2 2
      src/llama_recipes/inference/chat_utils.py

+ 2 - 2
src/llama_recipes/inference/chat_utils.py

@@ -44,7 +44,7 @@ def format_tokens(dialogs, tokenizer):
             [
                 tokenizer.encode(
                     f"{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} ",
-                )
+                ) + [tokenizer.eos_token_id]
                 for prompt, answer in zip(dialog[::2], dialog[1::2])
             ],
             [],
@@ -62,4 +62,4 @@ def format_tokens(dialogs, tokenizer):
 def read_dialogs_from_file(file_path):
     with open(file_path, 'r') as file:
         dialogs = json.load(file)
-    return dialogs
+    return dialogs