Prechádzať zdrojové kódy

updated README.md for llama-demo-apps

Jeff Tang 1 rok pred
rodič
commit
78d6071399

+ 104 - 0
llama-demo-apps/Llama2_Gradio.ipynb

@@ -0,0 +1,104 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "928041cc",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Init param `input` is deprecated, please use `model_kwargs` instead.\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Running on local URL:  http://127.0.0.1:7860\n",
+      "\n",
+      "To create a public link, set `share=True` in `launch()`.\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
+       "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": []
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from langchain.schema import AIMessage, HumanMessage\n",
+    "import gradio as gr\n",
+    "from langchain.llms import Replicate\n",
+    "import os\n",
+    "\n",
+    "os.environ[\"REPLICATE_API_TOKEN\"] = \"<your replicate api token>\"\n",
+    "\n",
+    "llama2_13b_chat = \"meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d\"\n",
+    "\n",
+    "llm = Replicate(\n",
+    "    model=llama2_13b_chat,\n",
+    "    input={\"temperature\": 0.01, \"max_length\": 2000, \"top_p\": 1},\n",
+    ")\n",
+    "\n",
+    "\n",
+    "def predict(message, history):\n",
+    "    history_langchain_format = []\n",
+    "    for human, ai in history:\n",
+    "        history_langchain_format.append(HumanMessage(content=human))\n",
+    "        history_langchain_format.append(AIMessage(content=ai))\n",
+    "    history_langchain_format.append(HumanMessage(content=message))\n",
+    "    gpt_response = llm(message) #history_langchain_format)\n",
+    "    return gpt_response#.content\n",
+    "\n",
+    "gr.ChatInterface(predict).launch()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5f21f5a4",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.18"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

Rozdielové dáta súboru neboli zobrazené, pretože súbor je príliš veľký
+ 83 - 0
llama-demo-apps/README.md


BIN
llama-demo-apps/llama2-streamlit.png


BIN
llama-demo-apps/llama2-streamlit2.png


BIN
llama-demo-apps/llm_log1.png


+ 22 - 0
llama-demo-apps/streamlit_llama2.py

@@ -0,0 +1,22 @@
import streamlit as st
from langchain.llms import Replicate
import os

st.title("Llama2-powered Streamlit App")

with st.sidebar:
    # NOTE(review): for a deployed app, prefer st.secrets or a sidebar
    # text_input over hard-coding the token in source.
    os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"


def generate_response(input_text):
    """Send `input_text` to the Llama 2 13B chat model on Replicate and
    render the model's reply in the app via st.info.

    Args:
        input_text: The user's prompt string.
    """
    llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"

    llm = Replicate(
        model=llama2_13b_chat,
        input={"temperature": 0.01, "max_length": 2000, "top_p": 1},
    )
    st.info(llm(input_text))


with st.form("my_form"):
    text = st.text_area("Enter text:", "What is Generative AI?")
    submitted = st.form_submit_button("Submit")
    # Bug fix: only query the model after the user presses Submit.
    # The original called generate_response(text) unconditionally, which
    # fired a (paid) Replicate API call on every Streamlit rerun,
    # including the initial page load before any submission.
    if submitted:
        generate_response(text)