@@ -109,13 +109,11 @@ def main(**kwargs):

     # Load the tokenizer and add special tokens
     tokenizer = LlamaTokenizer.from_pretrained(train_config.model_name)
     tokenizer.add_special_tokens(
-        {
-            "eos_token": "</s>",
-            "bos_token": "</s>",
-            "unk_token": "</s>",
-            "pad_token": '[PAD]',
-        }
-    )
+        {
+
+            "pad_token": "<PAD>",
+        }
+    )
     if train_config.use_peft:
         peft_config = generate_peft_config(train_config, kwargs)
         model = get_peft_model(model, peft_config)