
Update deprecated demo app links to recipes

Navyata Bawa 11 months ago
parent commit
beca2aab9b

+ 44 - 44
recipes/responsible_ai/Purple_Llama_Anyscale.ipynb

@@ -3,8 +3,8 @@
     {
       "cell_type": "markdown",
       "metadata": {
-        "id": "view-in-github",
-        "colab_type": "text"
+        "colab_type": "text",
+        "id": "view-in-github"
       },
       "source": [
         "<a href=\"https://colab.research.google.com/github/amitsangani/Llama-2/blob/main/Purple_Llama_Anyscale.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
@@ -97,10 +97,10 @@
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
-        "id": "yE3sPjS-cyd2",
         "colab": {
           "base_uri": "https://localhost:8080/"
         },
+        "id": "yE3sPjS-cyd2",
         "outputId": "93b36bc0-e6d4-493c-c88d-ec5c41266239"
       },
       "outputs": [
@@ -125,6 +125,11 @@
     },
     {
       "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "DOSiDW6hq9dI"
+      },
+      "outputs": [],
       "source": [
         "from string import Template\n",
         "\n",
@@ -195,18 +200,11 @@
         "    prompt = PROMPT_TEMPLATE.substitute(prompt=message, agent_type=role)\n",
         "    prompt = f\"<s>{B_INST} {prompt.strip()} {E_INST}\"\n",
         "    return prompt\n"
-      ],
-      "metadata": {
-        "id": "DOSiDW6hq9dI"
-      },
-      "execution_count": null,
-      "outputs": []
+      ]
     },
     {
       "cell_type": "code",
-      "source": [
-        "%pip install openai"
-      ],
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -214,11 +212,10 @@
         "id": "t6hkFlVD9XFw",
         "outputId": "25fd187e-a484-4b90-d104-a3320b98e8ea"
       },
-      "execution_count": null,
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "Collecting openai\n",
             "  Downloading openai-1.3.7-py3-none-any.whl (221 kB)\n",
@@ -248,26 +245,14 @@
             "\u001b[0mSuccessfully installed h11-0.14.0 httpcore-1.0.2 httpx-0.25.2 openai-1.3.7\n"
           ]
         }
+      ],
+      "source": [
+        "%pip install openai"
       ]
     },
     {
       "cell_type": "code",
-      "source": [
-        "import openai\n",
-        "\n",
-        "system_content = \"You will be provided with a product description and seed words. Your task is to generate potential product names.\"\n",
-        "user_content = \"Product description: A home milkshake maker. Seed words: fast, healthy, compact.\"\n",
-        "\n",
-        "client = openai.OpenAI(\n",
-        "           base_url = \"https://api.endpoints.anyscale.com/v1\",\n",
-        "           api_key=ANYSCALE_API_KEY\n",
-        "        )\n",
-        "completion = client.completions.create(\n",
-        "    model=\"Meta-Llama/Llama-Guard-7b\",\n",
-        "    prompt=format_prompt(\"User\",\"Tell me about the history of racism in the US\")\n",
-        ")\n",
-        "print(completion.choices[0].text)"
-      ],
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -275,28 +260,35 @@
         "id": "xC-OYLF4rGc7",
         "outputId": "452ca85a-fd9f-426c-d4ab-7898bdabfab8"
       },
-      "execution_count": null,
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             " safe\n"
           ]
         }
-      ]
-    },
-    {
-      "cell_type": "code",
+      ],
       "source": [
         "import openai\n",
         "\n",
+        "system_content = \"You will be provided with a product description and seed words. Your task is to generate potential product names.\"\n",
+        "user_content = \"Product description: A home milkshake maker. Seed words: fast, healthy, compact.\"\n",
+        "\n",
+        "client = openai.OpenAI(\n",
+        "           base_url = \"https://api.endpoints.anyscale.com/v1\",\n",
+        "           api_key=ANYSCALE_API_KEY\n",
+        "        )\n",
         "completion = client.completions.create(\n",
         "    model=\"Meta-Llama/Llama-Guard-7b\",\n",
-        "    prompt=format_prompt(\"Agent\",\"User: How do you buy a tiger in the US\\n Agent: Steal one from the zoo\")\n",
+        "    prompt=format_prompt(\"User\",\"Tell me about the history of racism in the US\")\n",
         ")\n",
         "print(completion.choices[0].text)"
-      ],
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -304,16 +296,24 @@
         "id": "JtBTkJRxufb0",
         "outputId": "c581052d-348e-45ac-c874-80ecf4416a77"
       },
-      "execution_count": null,
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             " unsafe\n",
             "O3\n"
           ]
         }
+      ],
+      "source": [
+        "import openai\n",
+        "\n",
+        "completion = client.completions.create(\n",
+        "    model=\"Meta-Llama/Llama-Guard-7b\",\n",
+        "    prompt=format_prompt(\"Agent\",\"User: How do you buy a tiger in the US\\n Agent: Steal one from the zoo\")\n",
+        ")\n",
+        "print(completion.choices[0].text)"
       ]
     },
     {
@@ -326,7 +326,7 @@
         "- [Llama 2](https://ai.meta.com/llama/)\n",
         "- [Getting Started Guide - Llama 2](https://ai.meta.com/llama/get-started/)\n",
         "- [GitHub - Llama 2](https://github.com/facebookresearch/llama)\n",
-        "- [Github - LLama 2 Recipes](https://github.com/facebookresearch/llama-recipes) and [Llama 2 Demo Apps](https://github.com/facebookresearch/llama-recipes/tree/main/demo_apps)\n",
+        "- [Github - LLama 2 Recipes](https://github.com/facebookresearch/llama-recipes) and [Llama 2 Demo Apps](https://github.com/meta-llama/llama-recipes/tree/main/recipes)\n",
         "- [Research Paper](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/)\n",
         "- [Model Card](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md)\n",
         "- [Responsible Use Guide](https://ai.meta.com/llama/responsible-use-guide/)\n",
@@ -357,10 +357,10 @@
   ],
   "metadata": {
     "colab": {
-      "provenance": [],
-      "toc_visible": true,
       "gpuType": "T4",
-      "include_colab_link": true
+      "include_colab_link": true,
+      "provenance": [],
+      "toc_visible": true
     },
     "kernelspec": {
       "display_name": "Python 3",

File diff suppressed because it is too large
+ 1 - 1
recipes/use_cases/chatbots/messenger_llama/messenger_llama2.md


File diff suppressed because it is too large
+ 1 - 1
recipes/use_cases/chatbots/whatsapp_llama/whatsapp_llama2.md
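
For reference, the Llama Guard cells that this commit reorders (without changing their behavior) follow the pattern sketched below: point the OpenAI v1 client at Anyscale Endpoints and send a Llama Guard-formatted prompt to `Meta-Llama/Llama-Guard-7b`. This is a minimal sketch, not the notebook verbatim; the `PROMPT_TEMPLATE` here is an abbreviated stand-in for the notebook's full Llama Guard taxonomy prompt, and `ANYSCALE_API_KEY` is assumed to be exported in the environment.

```python
import os
from string import Template

import openai  # requires openai>=1.0

# Abbreviated stand-in for the notebook's full Llama Guard taxonomy prompt.
B_INST, E_INST = "[INST]", "[/INST]"
PROMPT_TEMPLATE = Template(
    "Task: Check if there is unsafe content in '$agent_type' messages in the "
    "conversation below according to our safety policy.\n\n"
    "<BEGIN CONVERSATION>\n$prompt\n<END CONVERSATION>\n\n"
    "Provide your safety assessment for $agent_type in the above conversation."
)

def format_prompt(role: str, message: str) -> str:
    # Wrap the filled-in template in Llama's [INST] ... [/INST] instruction tags.
    prompt = PROMPT_TEMPLATE.substitute(prompt=message, agent_type=role)
    return f"<s>{B_INST} {prompt.strip()} {E_INST}"

# OpenAI-compatible client pointed at Anyscale Endpoints, as in the notebook.
client = openai.OpenAI(
    base_url="https://api.endpoints.anyscale.com/v1",
    api_key=os.environ["ANYSCALE_API_KEY"],  # assumed to be set in the environment
)

completion = client.completions.create(
    model="Meta-Llama/Llama-Guard-7b",
    prompt=format_prompt("User", "Tell me about the history of racism in the US"),
)
# The notebook's outputs show " safe" here, and " unsafe" plus a category code
# (e.g. "O3") for the unsafe Agent example.
print(completion.choices[0].text)
```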