
update due to peft new release (#407)

Hamid Shojanazeri 8 months ago
parent
commit
64e189914f
1 changed file with 2 additions and 2 deletions

+ 2 - 2
src/llama_recipes/finetuning.py

@@ -9,7 +9,7 @@ import fire
 import random
 import torch
 import torch.optim as optim
-from peft import get_peft_model, prepare_model_for_int8_training
+from peft import get_peft_model, prepare_model_for_kbit_training
 from torch.distributed.fsdp import (
     FullyShardedDataParallel as FSDP,
     ShardingStrategy
@@ -144,7 +144,7 @@ def main(**kwargs):
 
     # Prepare the model for int8 training if quantization is enabled
     if train_config.quantization:
-        model = prepare_model_for_int8_training(model)
+        model = prepare_model_for_kbit_training(model)
 
     # Convert the model to bfloat16 if fsdp and pure_bf16 is enabled
     if train_config.enable_fsdp and fsdp_config.pure_bf16: