import os
import string
from typing import Any, Dict, List

import chromadb
import numpy as np
import openai
import pandas as pd
import requests
import streamlit as st
from datasets import load_dataset

# The langchain imports below are only needed if "rag strategy 1"
# (commented out further down) is re-enabled.
from langchain.document_loaders import TextLoader
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma

openai.api_key = os.environ["OPENAI_API_KEY"]


def call_chatgpt(prompt: str) -> str:
    """
    Uses the OpenAI API to generate an AI response to a prompt.

    Args:
        prompt: A string representing the prompt to send to the OpenAI API.

    Returns:
        A string representing the AI's generated response.
    """
    # Use the OpenAI API (legacy completions endpoint, openai<1.0 SDK)
    # to generate a response based on the input prompt.
    response = openai.Completion.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        temperature=0.5,
        max_tokens=500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )

    # Extract the text from the first (and only) choice in the response output.
    ans = response.choices[0]["text"]

    # Return the generated AI response.
    return ans
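
# A quick sanity check for `call_chatgpt` (hypothetical prompt; left commented
# out so the app does not spend an API call on every rerun). The legacy
# completions endpoint returns a `choices` list, which the function unwraps:
#
#   print(call_chatgpt("Say hello in one short sentence."))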


def ai_judge(sentence1: str, sentence2: str) -> List[float]:
    """
    Rates how well one sentence matches another using a Hugging Face
    sentence-similarity model (msmarco-distilbert-base-tas-b).

    Args:
        sentence1: The source sentence (here, the user's question).
        sentence2: The candidate sentence to compare against.

    Returns:
        A list with one similarity score per candidate sentence (a single
        candidate is passed here, so the list has one element).
    """
    HF_TOKEN = os.environ["HF_TOKEN"]
    API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b"
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}

    def helper(payload):
        response = requests.post(API_URL, headers=headers, json=payload)
        return response.json()

    data = helper(
        {
            "inputs": {
                "source_sentence": sentence1,
                "sentences": [sentence2],
            }
        }
    )
    return data
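
# For reference, the inference API above responds with a bare list of
# similarity scores, one per candidate sentence. A hedged, offline sketch of
# the payload shape (values are hypothetical):
#
#   {"inputs": {"source_sentence": "How do I apply for shelter?",
#               "sentences": ["You can apply through the YSA intake form."]}}
#   # -> a response like [0.62]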


def query(payload: Dict[str, Any]) -> Dict[str, Any]:
    """
    Sends a JSON payload to a predefined API URL and returns the JSON response.

    Args:
        payload (Dict[str, Any]): The JSON payload to be sent to the API.

    Returns:
        Dict[str, Any]: The JSON response received from the API.
    """
    # API endpoint URL
    API_URL = "https://sks7h7h5qkhoxwxo.us-east-1.aws.endpoints.huggingface.cloud"

    # Headers to indicate both the request and response formats are JSON
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }

    # Sending a POST request with the JSON payload and headers
    response = requests.post(API_URL, headers=headers, json=payload)

    # Returning the JSON response
    return response.json()
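
# A minimal sketch of how the endpoint is called, assuming it is a
# text-generation endpoint (commented out to avoid a network call at startup):
#
#   query({"inputs": "What services does YSA provide?", "parameters": {}})
#   # -> a list like [{"generated_text": "..."}]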


def llama2_7b_ysa(prompt: str) -> str:
    """
    Queries the hosted model and retrieves the generated text for a prompt.

    This function sends a prompt to a hosted Llama-2 7B model (via the `query`
    helper above) and extracts the generated text from the model's response.
    It is tailored for a response structure that is a list of dictionaries,
    with at least one dictionary containing the key 'generated_text'.

    Parameters:
    - prompt (str): The text prompt to send to the model.

    Returns:
    - str: The generated text response from the model.

    Note:
    - The 'parameters' dictionary is passed empty but can be customized to
      include specific request parameters as needed by the model API.
    """
    # Define the query payload with the prompt and any additional parameters
    query_payload: Dict[str, Any] = {
        "inputs": prompt,
        "parameters": {},
    }

    # Send the query to the model and store the output response
    output = query(query_payload)

    # Extract the 'generated_text' from the first item in the response list
    response: str = output[0]["generated_text"]

    return response
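
# Hypothetical usage (commented out so the endpoint is not hit at import time):
#
#   llama2_7b_ysa("What is YSA?")  # -> a generated answer string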


## rag strategy 1
# file_names = [f"output_files/file_{i}.txt" for i in range(131)]
# # file_names = [f"output_files_large/file_{i}.txt" for i in range(1310)]

# # Initialize an empty list to hold all documents
# all_documents = []  # this is just a copy; you don't have to use this

# # Iterate over each file and load its contents
# for file_name in file_names:
#     loader = TextLoader(file_name)
#     documents = loader.load()
#     all_documents.extend(documents)

# # Split the loaded documents into chunks
# text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
# docs = text_splitter.split_documents(all_documents)

# # Create the open-source embedding function
# embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
# # embedding_function = SentenceTransformer("all-MiniLM-L6-v2")
# # embedding_function = openai_text_embedding

# # Load the documents into Chroma
# db = Chroma.from_documents(docs, embedding_function)

## rag strategy 2
dataset = load_dataset("eagle0504/youthless-homeless-shelter-web-scrape-dataset-qa-formatted")
client = chromadb.Client()
# Build a random collection name so reruns don't collide with an existing collection
# (dtype=np.int64 avoids an int32 overflow on platforms where that's the default)
random_number = np.random.randint(low=1_000_000_000, high=10_000_000_000, dtype=np.int64)
random_string = "".join(np.random.choice(list(string.ascii_uppercase + string.digits), size=10))
combined_string = f"{random_number}{random_string}"
collection = client.create_collection(combined_string)

# Embed and store every question from the dataset for this demo
L = len(dataset["train"]["questions"])
collection.add(
    ids=[str(i) for i in range(0, L)],  # IDs are just strings
    documents=dataset["train"]["questions"],  # embed the questions
    metadatas=[{"type": "support"} for _ in range(0, L)],
)
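
# For reference, `collection.query` (used below) returns parallel lists per
# query text, e.g. (hypothetical values):
#
#   collection.query(query_texts=["How do I volunteer?"], n_results=2)
#   # -> {"ids": [["12", "45"]], "distances": [[0.18, 0.27]], ...}
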
st.title("Youth Homelessness Chatbot")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
st.sidebar.markdown(
"""
### Instructions:
This app guides you through YSA's website, utilizing a RAG-ready Q&A dataset [here](https://huggingface.co/datasets/eagle0504/youthless-homeless-shelter-web-scrape-dataset-qa-formatted) for chatbot assistance. 🤖 Enter a question, and it finds similar ones in the database, offering answers with a distance score to gauge relevance—the lower the score, the closer the match. 🎯 For better accuracy and to reduce errors, user feedback helps refine the database. ✨
""")
special_threshold = st.sidebar.slider(
    "Choose a distance threshold (we generally advise 0.2 to 0.3)",
    min_value=0.0,  # bounds, step, and value must all be floats,
    max_value=1.0,  # or Streamlit raises a StreamlitAPIException
    step=0.05,
    value=0.2,
)
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button:
    st.session_state.messages = []
# React to user input
if prompt := st.chat_input("Tell me about YSA"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)

    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    question = prompt

    with st.spinner("Wait for it..."):
        # strategy 1
        # docs = db.similarity_search(question)
        # docs_2 = db.similarity_search_with_score(question)
        # docs_2_table = pd.DataFrame(
        #     {
        #         "source": [docs_2[i][0].metadata["source"] for i in range(len(docs))],
        #         "content": [docs_2[i][0].page_content for i in range(len(docs))],
        #         "distances": [docs_2[i][1] for i in range(len(docs))],
        #     }
        # )
        # ref_from_db_search = docs_2_table["content"]

        # strategy 2
        results = collection.query(
            query_texts=[question],  # chromadb accepts one or many; a list is explicit
            n_results=5,
        )
        idx = results["ids"][0]
        idx = [int(i) for i in idx]
        ref = pd.DataFrame(
            {
                "idx": idx,
                "questions": [dataset["train"]["questions"][i] for i in idx],
                "answers": [dataset["train"]["answers"][i] for i in idx],
                "distances": results["distances"][0],
            }
        )

        filtered_ref = ref[ref["distances"] < special_threshold]
        if filtered_ref.shape[0] > 0:
            st.success("There is highly relevant information in our database.")
            ref_from_db_search = filtered_ref["answers"]
            final_ref = filtered_ref
        else:
            st.warning(
                "The database may not have information relevant to your question, so please be aware of possible hallucinations."
            )
            ref_from_db_search = ref["answers"]
            final_ref = ref

        try:
            llm_response = llama2_7b_ysa(question)
        except Exception:  # a bare `except` would also swallow KeyboardInterrupt
            llm_response = "Sorry, the inference endpoint is temporarily down. 😔"

        # Append the fine-tuned LLM's own answer as an extra reference row
        finetuned_llm_guess = ["from_llm", question, llm_response, 0]
        final_ref = final_ref.copy()  # work on a copy, not a view of `ref`
        final_ref.loc[-1] = finetuned_llm_guess
        # Re-label rows 0..n so the positional lookups below stay safe even
        # when the threshold filter dropped some of the original row labels
        final_ref = final_ref.reset_index(drop=True)

        # Add the AI judge's similarity score as an additional rating column
        independent_ai_judge_score = []
        for i in range(final_ref.shape[0]):
            this_content = final_ref["answers"][i]
            this_score = ai_judge(question, this_content)
            independent_ai_judge_score.append(this_score[0])

        final_ref["ai_judge"] = independent_ai_judge_score
engineered_prompt = f"""
Based on the context: {ref_from_db_search},
answer the user question: {question}.
Answer the question directly (don't say "based on the context, ...")
"""
answer = call_chatgpt(engineered_prompt)
response = answer

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        with st.spinner("Wait for it..."):
            st.markdown(response)
            with st.expander("See reference:"):
                st.table(final_ref)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})