Sfoglia il codice sorgente

adding the kbit prep in the code

Hamid Shojanazeri 11 mesi fa
parent
commit
11f51db28c
1 ha cambiato i file con 1 aggiunta e 1 eliminazione
  1. 1 1
      src/llama_recipes/finetuning.py

+ 1 - 1
src/llama_recipes/finetuning.py

@@ -144,7 +144,7 @@ def main(**kwargs):
 
     # Prepare the model for int8 training if quantization is enabled
     if train_config.quantization:
-        model = prepare_model_for_int8_training(model)
+        model = prepare_model_for_kbit_training(model)
 
     # Convert the model to bfloat16 if fsdp and pure_bf16 is enabled
     if train_config.enable_fsdp and fsdp_config.pure_bf16: