Explorar o código

moving Bt to the try block

Hamid Shojanazeri hai 1 ano
pai
achega
51269b816f
Modificáronse 3 ficheiros con 3 adicións e 4 borrados
  1. inference/chat_completion.py (1 addition, 2 deletions)
  2. inference/inference.py (1 addition, 1 deletion)
  3. llama_finetuning.py (1 addition, 1 deletion)

+ 1 - 2
inference/chat_completion.py

@@ -68,11 +68,10 @@ def main(
         """
         try:
             from optimum.bettertransformer import BetterTransformer
+            model = BetterTransformer.transform(model)   
         except ImportError:
             print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
 
-        model = BetterTransformer.transform(model)
-   
     tokenizer = LlamaTokenizer.from_pretrained(model_name)
     tokenizer.add_special_tokens(
         {

+ 1 - 1
inference/inference.py

@@ -65,10 +65,10 @@ def main(
         """
         try:
             from optimum.bettertransformer import BetterTransformer
+            model = BetterTransformer.transform(model)    
         except ImportError:
             print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
 
-        model = BetterTransformer.transform(model)
     tokenizer = LlamaTokenizer.from_pretrained(model_name)
     tokenizer.add_special_tokens(
         {

+ 1 - 1
llama_finetuning.py

@@ -102,9 +102,9 @@ def main(**kwargs):
         """
         try:
             from optimum.bettertransformer import BetterTransformer
+            model = BetterTransformer.transform(model) 
         except ImportError:
             print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
-        model = BetterTransformer.transform(model)
     print_model_size(model, train_config, rank if train_config.enable_fsdp else 0)
     
     # Prepare the model for int8 training if quantization is enabled