@@ -73,24 +73,27 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Code Llama is a code-focused LLM built on top of Llama 2 also available in various sizes and finetunes:"
- ]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### Code Llama\n",
+ "#### Code Llama - Code Llama is a code-focused LLM built on top of Llama 2 also available in various sizes and finetunes:\n",
"1. `codellama-7b` - code fine-tuned 7 billion parameter model\n",
"1. `codellama-13b` - code fine-tuned 13 billion parameter model\n",
"1. `codellama-34b` - code fine-tuned 34 billion parameter model\n",
+ "1. `codellama-70b` - code fine-tuned 70 billion parameter model\n",
"1. `codellama-7b-instruct` - code & instruct fine-tuned 7 billion parameter model\n",
"2. `codellama-13b-instruct` - code & instruct fine-tuned 13 billion parameter model\n",
"3. `codellama-34b-instruct` - code & instruct fine-tuned 34 billion parameter model\n",
+ "3. `codellama-70b-instruct` - code & instruct fine-tuned 70 billion parameter model\n",
"1. `codellama-7b-python` - Python fine-tuned 7 billion parameter model\n",
"2. `codellama-13b-python` - Python fine-tuned 13 billion parameter model\n",
- "3. `codellama-34b-python` - Python fine-tuned 34 billion parameter model"
+ "3. `codellama-34b-python` - Python fine-tuned 34 billion parameter model\n",
+ "3. `codellama-70b-python` - Python fine-tuned 70 billion parameter model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Llama Guard\n",
+ "1. `llama-guard-7b` - input and output guardrails model"
]
},
{
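For context (not part of the patch): a minimal sketch of how one of the Code Llama variants listed in the cell above could be run locally. It assumes the short name `codellama-7b-instruct` corresponds to the public Hugging Face checkpoint `codellama/CodeLlama-7b-Instruct-hf`, and that `transformers` (plus `accelerate` for `device_map`) is installed; the notebook itself may target a hosted API instead.

# A minimal sketch, not taken from the notebook: loading a Code Llama variant
# with Hugging Face transformers. The short-name-to-checkpoint mapping below
# is an assumption, not something stated in the patch.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "codellama/CodeLlama-7b-Instruct-hf"  # assumed counterpart of `codellama-7b-instruct`

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

prompt = "Write a Python function that reverses a string."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))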