123456789101112131415161718192021222324252627282930 |
- from peft import PeftModel
- from transformers import LlamaForCausalLM, LlamaConfig
def load_model(model_name, quantization):
    """Load a pretrained Llama causal-LM.

    Args:
        model_name: Hugging Face model id or local checkpoint path.
        quantization: Truthy to load the weights in 8-bit
            (passed straight through as ``load_in_8bit``).

    Returns:
        The loaded ``LlamaForCausalLM`` instance, device-mapped
        automatically across available devices.
    """
    # low_cpu_mem_usage avoids materializing a full fp32 copy on CPU
    # while the checkpoint shards are loaded.
    return LlamaForCausalLM.from_pretrained(
        model_name,
        return_dict=True,
        load_in_8bit=quantization,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
def load_peft_model(model, peft_model):
    """Wrap a base model with PEFT (e.g. LoRA) adapter weights.

    Args:
        model: The base model to attach the adapter to.
        peft_model: Adapter checkpoint path or hub id.

    Returns:
        A ``PeftModel`` combining the base model and the adapter.
    """
    wrapped = PeftModel.from_pretrained(model, peft_model)
    return wrapped
def load_llama_from_config(config_path):
    """Build an *uninitialized* Llama model from a config file.

    No pretrained weights are loaded — only the architecture described
    by the config is instantiated (useful for meta-device / deferred
    weight loading).

    Args:
        config_path: Path or hub id resolving to a ``LlamaConfig``.

    Returns:
        A freshly constructed ``LlamaForCausalLM`` with random weights.
    """
    cfg = LlamaConfig.from_pretrained(config_path)
    return LlamaForCausalLM(config=cfg)
-
-
|