import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch

# Load the recipe knowledge base and split it into one chunk per non-empty line
with open("recipesplease.txt", "r", encoding="utf-8") as file:
    knowledge = file.read()
cleaned_chunks = [chunk.strip() for chunk in knowledge.strip().split("\n") if chunk.strip()]
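
# Embed every chunk once at startup so each incoming query only needs a single encode + matrix multiply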
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
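
# Retrieve the five chunks most similar to a query, ranked by cosine similarity against the precomputed embeddings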
def get_top_chunks(query):
    query_embedding = model.encode(query, convert_to_tensor=True)
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize the chunk embeddings as well so the dot product is a true cosine similarity
    # and chunk length does not skew the ranking
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    top_indices = torch.topk(similarities, k=5).indices.tolist()
    return [cleaned_chunks[i] for i in top_indices]
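
# Remote chat model accessed through huggingface_hub's InferenceClient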
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
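
# Chat callback for gr.ChatInterface: retrieve relevant recipe chunks, build the prompt, and stream the reply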
def respond(message, history, cuisine, dietary_restrictions, allergies):
response = ""
top_chunks = get_top_chunks(message)
context = "\n".join(top_chunks)
messages = [
{
"role": "system",
"content": f"You are a friendly recipe chatbot named BiteBot that responds to the user with any recipe from this: {context}. Find a recipe that is {cuisine} cuisine. They have the dietary restrictions,{dietary_restrictions} and are allergic to {allergies}. For example, you can say Based on your preference for something sweet and given the recipes you provided, let me suggest a recipe that might be of interest to you. Do you want to try Elizabeth's Sweet Potato Casserole? Return the title to the user and ask if this is the recipe they want. If they say yes return the recipe to the user, and if they say no ask if they want another recipe."
}
]
if history:
messages.extend(history)
messages.append({"role": "user", "content": message})
stream = client.chat_completion(
messages,
max_tokens=300,
temperature=1.2,
stream=True,
)
for message in stream:
token = message.choices[0].delta.content
if token is not None:
response += token
yield response
logo="banner.png"
theme = gr.themes.Monochrome(
    primary_hue="orange",
    secondary_hue="zinc",
    neutral_hue=gr.themes.Color(
        c50="#f3d1bbff",
        c100="rgba(255, 227.4411088400613, 206.9078947368421, 1)",
        c200="rgba(255, 229.53334184977007, 218.0921052631579, 1)",
        c300="rgba(255, 234.91658150229947, 213.6184210526316, 1)",
        c400="rgba(189.603125, 154.41663986650488, 133.88641721491229, 1)",
        c500="rgba(170.2125, 139.18781968574348, 118.70082236842106, 1)",
        c600="rgba(193.32187499999998, 129.35648241888094, 111.07528782894737, 1)",
        c700="rgba(184.13125000000002, 141.9707339039346, 106.60230263157897, 1)",
        c800="rgba(156.06796875, 104.12209005333418, 69.81988075657894, 1)",
        c900="rgba(156.39999999999998, 117.22008175779253, 80.2578947368421, 1)",
        c950="rgba(158.43203125, 125.1788770279765, 97.28282620614036, 1)",
    ),
    text_size="sm",
    spacing_size="md",
    radius_size="sm",
).set(
    body_background_fill='*primary_50',
    body_background_fill_dark='*primary_50',
)
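
# UI layout: banner image, preference inputs, and the chat interface wired to respond() via additional_inputs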
with gr.Blocks(theme=theme) as chatbot:
    gr.Image(
        value=logo,
        show_label=False,
        show_share_button=False,
        show_download_button=False,
    )
    cuisine = gr.Textbox(label="Cuisine")
    dietary_restrictions = gr.Dropdown(
        ["Gluten-Free", "Dairy-Free", "Vegan", "Vegetarian", "Keto", "Kosher", "No Soy", "No Seafood", "No Pork", "No Beef"],
        label="Dietary restrictions",
        multiselect=True,
        info="You can select multiple!",
    )
    allergies = gr.Textbox(label="Allergies")
    gr.ChatInterface(
        fn=respond,
        type="messages",
        additional_inputs=[cuisine, dietary_restrictions, allergies],
    )

chatbot.launch()