streamlit_llama2.py
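This script builds a small Streamlit front end that sends the text entered in the form to the Llama 2 13B chat model hosted on Replicate, using LangChain's Replicate wrapper, and displays the model's response in the app.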

import streamlit as st
from langchain.llms import Replicate  # in newer LangChain releases this wrapper may live in langchain_community.llms
import os

st.title("Llama2-powered Streamlit App")

with st.sidebar:
    # Replace the placeholder with your own Replicate API token
    os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"

def generate_response(input_text):
    # Llama 2 13B chat model hosted on Replicate (model name + version hash)
    llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
    llm = Replicate(
        model=llama2_13b_chat,
        model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens": 500},
    )
    # Show the model's completion in the app
    st.info(llm(input_text))

with st.form("my_form"):
    text = st.text_area("Enter text:", "What is Generative AI?")
    submitted = st.form_submit_button("Submit")
    # Only call the model once the form has been submitted
    if submitted:
        generate_response(text)
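Assuming the dependencies are installed (for example via pip install streamlit langchain replicate) and a valid Replicate API token has been set, the app can typically be launched locally with:

    streamlit run streamlit_llama2.py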