@@ -241,12 +241,18 @@
    ]
   },
   {
-   "cell_type": "markdown",
-   "metadata": {},
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "vscode": {
+     "languageId": "plaintext"
+    }
+   },
+   "outputs": [],
    "source": [
-    "model = meta-llama/Llama-2-7b-chat-hf \n",
-    "volume = $PWD/data \n",
-    "token = #Your own HF tokens \n",
+    "model = meta-llama/Llama-2-7b-chat-hf\n",
+    "volume = $PWD/data\n",
+    "token = #Your own HF tokens\n",
     "docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.1.0 --model-id $model"
    ]
   },