Browse Source

add doc string

Dong Wang 1 year ago
parent
commit
cf0c589dc6

+ 2 - 2
examples/hf_llama_conversion/README.md

@@ -1,11 +1,11 @@
-# Convert huggingface llama weights to official llama consolidated format
+# Convert Hugging Face llama weights to official llama consolidated format
 
 This is the reverse conversion for `convert_llama_weights_to_hf.py` script from the transformers package.
 
 ## Step 0: Convert to consolidated format
 - Create an output directory for the converted weights, such as `test70B`.
 - Copy file params.json from the official llama download into that directory.
-- Run the conversion script. `model-path` can be a huggingface hub model or a local hf model directory.
+- Run the conversion script. `model-path` can be a Hugging Face hub model or a local hf model directory.
 ```
 python -m llama_recipes.tools.convert_hf_weights_to_llama --model-path meta-llama/Llama-2-70b-chat-hf --output-dir test70B --model-size 70B
 ```

+ 10 - 5
src/llama_recipes/tools/convert_hf_weights_to_llama.py

@@ -142,11 +142,16 @@ def write_model(model_path, model_size, output_base_path):
 
 
 def main(
-    model_path: str, # Model name or path to the model directory
-    model_size: str, # llama model size.
-    output_dir: str # Save Llama weights. Should already contains params.json.
-    ):
-    """Convert llama huggingface format to consolidated weights."""
+    model_path: str,
+    model_size: str,
+    output_dir: str,
+):
+    """Convert llama weights from Hugging Face format to consolidated format.
+    params:
+    model_path: model name or path to the model directory.
+    model_size: Llama model size, one of 7B, 13B, 34B, 30B, 65B, 70B.
+    output_dir: directory to save Llama weights, should contain params.json.
+    """
     assert model_size in NUM_SHARDS, f"Unknown model size {model_size}"
     params_path = os.path.join(output_dir, "params.json")
     assert os.path.isfile(params_path), f"{params_path} does not exist"