aashnaj commited on
Commit
4777736
·
verified ·
1 Parent(s): 6f8ca02
Files changed (1) hide show
  1. app.py +26 -122
app.py CHANGED
@@ -1,99 +1,30 @@
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- import requests
4
  from sentence_transformers import SentenceTransformer
5
  import torch
6
 
7
-
8
- SPOONACULAR_API_KEY = "71259036cfb3405aa5d49c1220a988c5"
9
- recipe_id_map = {}
10
-
11
- client=InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
12
-
13
- # # Search recipes
14
-
15
- # global recipe_id_map
16
- url = "https://api.spoonacular.com/recipes/complexSearch"
17
- params = {
18
- # "query": ingredient,
19
- # "cuisine": cuisine,
20
- # "diet": dietary,
21
- "number": 3,
22
- "apiKey": SPOONACULAR_API_KEY
23
- }
24
- res = requests.get(url, params=params)
25
- data = res.json()
26
-
27
-
28
- # if "results" not in data or not data["results"]:
29
- # recipe_id_map = {}
30
- # return gr.update(choices=[], visible=True, label="No recipes found"), gr.update(value="No recipes found.")
31
-
32
- # recipe_id_map = {r["title"]: r["id"] for r in data["results"]}
33
- # return gr.update(choices=list(recipe_id_map.keys()), visible=True), gr.update(value="Select a recipe from the dropdown.")
34
-
35
- # # Get recipe details
36
- # def get_recipe_details(selected_title):
37
- # if not selected_title or selected_title not in recipe_id_map:
38
- # return "Please select a valid recipe."
39
-
40
- # recipe_id = recipe_id_map[selected_title]
41
- # url = f"https://api.spoonacular.com/recipes/{recipe_id}/information"
42
- # params = {"apiKey": SPOONACULAR_API_KEY}
43
- # res = requests.get(url, params=params)
44
- # data = res.json()
45
-
46
- # title = data.get("title", "Unknown Title")
47
- # time = data.get("readyInMinutes", "N/A")
48
- # instructions = data.get("instructions") or "No instructions available."
49
- # ingredients_list = data.get("extendedIngredients", [])
50
- # ingredients = "\n".join([f"- {item.get('original')}" for item in ingredients_list])
51
-
52
- # return f"### 🍽️ {title}\n**⏱️ Cook Time:** {time} minutes\n\n**📋 Instructions:**\n{instructions}"
53
- # gr.Markdown("💬 Go to the next tab to ask our chatbot your questions on the recipe!")
54
- # # Handle chatbot questions
55
- # def ask_recipe_bot(message, history):
56
- # # Try to find a recipe ID from previous dropdown results
57
- # if not recipe_id_map:
58
- # return "Please use the dropdown tab first to search for a recipe."
59
-
60
- # # Use the first recipe ID from the map
61
- # recipe_id = list(recipe_id_map.values())[0]
62
- # url = f"https://api.spoonacular.com/recipes/{recipe_id}/nutritionWidget.json"
63
- # params = {"apiKey": SPOONACULAR_API_KEY}
64
- # res = requests.get(url, params=params)
65
-
66
- # if res.status_code != 200:
67
- # return "Sorry, I couldn't retrieve nutrition info."
68
-
69
- # data = res.json()
70
- # calories = data.get("calories", "N/A")
71
- # carbs = data.get("carbs", "N/A")
72
- # protein = data.get("protein", "N/A")
73
- # fat = data.get("fat", "N/A")
74
-
75
- # if "calorie" in message.lower():
76
- # return f"This recipe has {calories}."
77
- # elif "protein" in message.lower():
78
- # return f"It contains {protein}."
79
- # elif "carb" in message.lower():
80
- # return f"It has {carbs}."
81
- # elif "fat" in message.lower():
82
- # return f"The fat content is {fat}."
83
- # elif "scale" in message.lower() or "double" in message.lower():
84
- # return "You can scale ingredients by multiplying each quantity. For example, to double the recipe, multiply every amount by 2."
85
- # elif "substitute" in message.lower():
86
- # return "Let me know the ingredient you'd like to substitute, and I’ll try to help!"
87
- # else:
88
- # return "You can ask about calories, protein, carbs, fat, substitutes, or scaling tips."
89
-
90
- def respond(message, history, ingredient, cuisine, diet):
91
-
92
- context = search_recipes(ingredient, cuisine, diet)
93
  messages = [
94
  {
95
  "role": "system",
96
- "content": f"You give recipes from {context}."
97
  }
98
  ]
99
  if history:
@@ -109,38 +40,11 @@ def respond(message, history, ingredient, cuisine, diet):
109
  token = message.choices[0].delta.content
110
  if token is not None:
111
  response += token
112
- yield response
113
-
114
-
115
- # # Gradio layout
116
- with gr.Blocks() as demo:
117
- gr.Markdown("## 🧠🍴 The BiteBot")
118
-
119
- with gr.Tabs():
120
- # with gr.Tab("Search Recipes"):
121
- with gr.Row():
122
- ingredient = gr.Textbox(label="Preferred Ingredient")
123
- cuisine = gr.Textbox(label="Preferred Cuisine")
124
- diet = gr.Textbox(label="Dietary Restrictions")
125
-
126
- # search_button = gr.Button("Search Recipes")
127
- # recipe_dropdown = gr.Dropdown(label="Select a recipe", visible=False)
128
- # recipe_output = gr.Markdown()
129
-
130
- # search_button.click(
131
- # fn=search_recipes,
132
- # inputs=[ingredient, cuisine, diet],
133
- # outputs=[recipe_dropdown, recipe_output]
134
- # )
135
-
136
- # recipe_dropdown.change(
137
- # fn=get_recipe_details,
138
- # inputs=recipe_dropdown,
139
- # outputs=recipe_output
140
- # )
141
-
142
- # with gr.Tab("Ask BiteBot"):
143
- chatbot = gr.ChatInterface(fn=respond, additional_inputs = [ingredient, cuisine, diet], type="messages")
144
- gr.Markdown("💬 Ask about calories, macros, scaling, or substitutions. (Run a recipe search first!)")
145
 
146
- demo.launch()
 
1
+
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
 
4
  from sentence_transformers import SentenceTransformer
5
  import torch
6
 
7
# Build the retrieval corpus once at import time: read the raw recipe text,
# split it into one chunk per non-empty line, and pre-embed every chunk so
# queries only have to embed themselves at request time.
with open("recipedataset.txt", "r", encoding="utf-8") as fh:
    knowledge = fh.read()

cleaned_chunks = []
for chunk in knowledge.strip().split("\n"):
    stripped = chunk.strip()
    if stripped:
        cleaned_chunks.append(stripped)

# Shared sentence-embedding model; also used per-query in get_top_chunks.
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
13
def get_top_chunks(query, k=5):
    """Return the corpus chunks most similar to *query*, best first.

    Embeds the query with the shared SentenceTransformer ``model``, ranks
    every row of ``chunk_embeddings`` by cosine similarity, and returns up
    to *k* matching strings from ``cleaned_chunks``.

    Parameters:
        query: free-text question to retrieve context for.
        k: maximum number of chunks to return (default 5; clamped to the
           corpus size so small corpora don't crash ``torch.topk``).
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize BOTH sides so the dot product is a true cosine similarity.
    # The stored chunk embeddings are not unit-length (encode() was called
    # without normalization), and normalizing only the query merely scales
    # every score equally without changing the ranking.
    query_embedding = query_embedding / query_embedding.norm()
    corpus = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(corpus, query_embedding)
    top_k = min(k, similarities.shape[0])
    top_indices = torch.topk(similarities, k=top_k).indices.tolist()
    return [cleaned_chunks[i] for i in top_indices]
19
+ client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
20
+ def respond(message, history):
21
+ response = ""
22
+ top_chunks = get_top_chunks(message)
23
+ context = "\n".join(top_chunks)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  messages = [
25
  {
26
  "role": "system",
27
+ "content": f"You are a friendly chatbot that responds to the user with this context {context}"
28
  }
29
  ]
30
  if history:
 
40
  token = message.choices[0].delta.content
41
  if token is not None:
42
  response += token
43
+ yield response
44
# Assemble the UI: a single chat pane wired to the streaming `respond`
# handler, using the openai-style "messages" history format.
with gr.Blocks() as chatbot:
    gr.ChatInterface(fn=respond, type="messages")

# Start the Gradio server (blocks until the app is stopped).
chatbot.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50