Browse Source

Make code-llama and hf-tgi inference runnable as a module

Matthias Reso 1 year ago
parent
commit
207d2f80e9

+ 2 - 0
src/llama_recipes/inference/__init__.py

@@ -0,0 +1,2 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

+ 9 - 0
src/llama_recipes/inference/__main__.py

@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
+
+import fire
+
+from .inference import main
+
+if __name__ == "__main__":
+    fire.Fire(main)

+ 2 - 0
src/llama_recipes/inference/code_llama/__init__.py

@@ -0,0 +1,2 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

+ 5 - 5
src/llama_recipes/inference/code-llama/code_completion_example.py

@@ -4,16 +4,16 @@
 # from accelerate import init_empty_weights, load_checkpoint_and_dispatch
 
 import fire
-import torch
 import os
 import sys
 import time
-from typing import List
 
+import torch
 from transformers import AutoTokenizer
-sys.path.append("..")
-from safety_utils import get_safety_checker
-from model_utils import load_model, load_peft_model, load_llama_from_config
+
+from ..safety_utils import get_safety_checker
+from ..model_utils import load_model, load_peft_model
+
 
 def main(
     model_name,

src/llama_recipes/inference/code-llama/code_completion_prompt.txt → src/llama_recipes/inference/code_llama/code_completion_prompt.txt


+ 3 - 4
src/llama_recipes/inference/code-llama/code_infilling_example.py

@@ -8,12 +8,11 @@ import torch
 import os
 import sys
 import time
-from typing import List
 
 from transformers import AutoTokenizer
-sys.path.append("..")
-from safety_utils import get_safety_checker
-from model_utils import load_model, load_peft_model, load_llama_from_config
+
+from ..safety_utils import get_safety_checker
+from ..model_utils import load_model, load_peft_model
 
 def main(
     model_name,

src/llama_recipes/inference/code-llama/code_infilling_prompt.txt → src/llama_recipes/inference/code_llama/code_infilling_prompt.txt


src/llama_recipes/inference/hf-text-generation-inference/README.md → src/llama_recipes/inference/hf_text_generation_inference/README.md


+ 2 - 0
src/llama_recipes/inference/hf_text_generation_inference/__init__.py

@@ -0,0 +1,2 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

src/llama_recipes/inference/hf-text-generation-inference/merge_lora_weights.py → src/llama_recipes/inference/hf_text_generation_inference/merge_lora_weights.py