### app.py code is taken from https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup/blob/main/app.py
### https://medium.com/@nigelgebodh/large-language-models-chatting-with-ai-chatbots-from-google-mistral-ai-and-hugging-face-b33efedea38d
""" Simple Chatbot
@author: Sagar Padhiyar
@email: [email protected]
"""
import os

import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from openai import OpenAI  # only used by the commented-out OpenAI-compatible client below
load_dotenv()
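# Expects a Hugging Face token in the environment (e.g. loaded from a .env file):
#   HUGGINGFACE_API=hf_xxx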
# # Alternative: an OpenAI-compatible client pointed at the Hugging Face
# # Inference API, instead of InferenceClient.
# client = OpenAI(
#     base_url="https://api-inference.huggingface.co/v1",
#     api_key=os.environ.get('HUGGINGFACE_API'),  # "hf_xxx" -- replace with your token
# )
base_url = "https://api-inference.huggingface.co/v1"  # only needed for the OpenAI-compatible client above
API_KEY = os.environ.get('HUGGINGFACE_API')
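# Optional guard: stop early with a clear message if the token is missing,
# rather than sending "Authorization: Bearer None" to the API.
if API_KEY is None:
    st.error("HUGGINGFACE_API is not set. Add it to your environment or a .env file.")
    st.stop()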
# Supported models, mapped to their Hugging Face repo IDs.
# InferenceClient accepts a repo ID directly; concatenating base_url with
# the repo ID (as before) produced an invalid URL such as
# ".../v1mistralai/Mistral-7B-Instruct-v0.2".
model_links = {
    "Mistral": "mistralai/Mistral-7B-Instruct-v0.2",
    "Gemma-7B": "google/gemma-7b-it",
    "Gemma-2B": "google/gemma-2b-it",
    "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
    # "Llama-2": "meta-llama/Llama-2-7b-chat-hf",
}
headers = {"Authorization": f"Bearer {API_KEY}"}
# Info about each model, displayed in the sidebar
model_info = {
    "Mistral":
        {'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
    "Gemma-7B":
        {'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
        \nIt was created by [**Google's AI team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.** \n""",
         'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    "Gemma-2B":
        {'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
        \nIt was created by [**Google's AI team**](https://blog.google/technology/developers/gemma-open-models/) and has over **2 billion parameters.** \n""",
         'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    # Note: "Zephyr-7B" (the Gemma-based variant) has no entry in model_links,
    # so it is never selectable; it is kept here for reference.
    "Zephyr-7B":
        {'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
        \nFrom Hugging Face: \n\
        Zephyr is a series of language models that are trained to act as helpful assistants. \
        [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) \
        is the third model in the series, and is a fine-tuned version of google/gemma-7b \
        that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO).\n""",
         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
    "Zephyr-7B-β":
        {'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
        \nFrom Hugging Face: \n\
        Zephyr is a series of language models that are trained to act as helpful assistants. \
        [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) \
        is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
        that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO).\n""",
         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
}
def reset_conversation():
    '''
    Resets the conversation history.
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
# Define the available models
models = list(model_links.keys())
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)
# Temperature slider (lower values make the output more deterministic)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

# Reset button to clear the conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)
# Show the selected model's description in the sidebar
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
# st.write(f"Changed to {selected_model}")
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the repo ID of the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'AI - {selected_model}')
# st.title(f'ChatBot Using {selected_model}')
# Set a default model for the session
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
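# Streamlit reruns this script top to bottom on every interaction;
# st.session_state is what keeps the chat history alive across reruns.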
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input(f"Hi! I'm {selected_model}, ask me a question"):

    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display the assistant response in a chat message container
    with st.chat_message("assistant"):
        client = InferenceClient(
            model=repo_id,
            headers=headers,
        )
        # text_generation() expects a plain string prompt, so use
        # chat_completion() for message-formatted input and stream the reply.
        stream = client.chat_completion(
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            temperature=temp_values,
            max_tokens=3000,
            stream=True,
        )
        # st.write_stream expects an iterable of text chunks, so pull the
        # text delta out of each streamed chunk.
        response = st.write_stream(
            chunk.choices[0].delta.content or "" for chunk in stream
        )
    st.session_state.messages.append({"role": "assistant", "content": response})