import gradio as gr
import requests
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Step 1 - Semantic Search
# NOTE: the semantic-search exercise below (Steps 1-6) is wrapped in a triple-quoted
# string so it stays disabled; the recipe app further down does not depend on it.
"""
from sentence_transformers import SentenceTransformer
import torch
# Step 2 - Semantic Search
# Open the water_cycle.txt file in read mode with UTF-8 encoding
with open("water_cycle.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    water_cycle_text = file.read()

# Print the text below
print(water_cycle_text)
# Step 3 - Semantic Search
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned_text by every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Strip each chunk and keep only the non-empty ones
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        if len(stripped_chunk) > 0:
            cleaned_chunks.append(stripped_chunk)
    # Print cleaned_chunks and its length
    print(cleaned_chunks)
    print(len(cleaned_chunks))
    # Return the cleaned_chunks
    return cleaned_chunks
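# A quick sanity check (hypothetical input), showing how blank lines are dropped:
# preprocess_text("  Evaporation lifts water.\n\n  Clouds form.  ")
# -> ["Evaporation lifts water.", "Clouds form."]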
# Step 4 - Semantic Search
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings and their shape
    print(chunk_embeddings)
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings
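# Note: all-MiniLM-L6-v2 produces 384-dimensional embeddings, so for N chunks
# the printed shape is torch.Size([N, 384]).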
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(water_cycle_text)
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
# Step 5 - Semantic Search
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for i in top_indices:
        chunk = text_chunks[i]
        top_chunks.append(chunk)
    # Return the list of most relevant chunks
    return top_chunks
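# Equivalent check (a sketch): because both sides are unit-normalized, the matrix
# multiplication above is exactly cosine similarity, so the same scores come from:
# torch.nn.functional.cosine_similarity(chunk_embeddings, query_embedding.unsqueeze(0), dim=1)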
# Step 6 - Semantic Search
# Call the get_top_chunks function with the original query
top_results = get_top_chunks("How does water get into the sky", chunk_embeddings, cleaned_chunks)
# Print the top results
print(top_results)
"""
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SPOONACULAR_API_KEY = "71259036cfb3405aa5d49c1220a988c5"  # our API key (consider loading it from an environment variable instead of hardcoding)
recipe_id_map = {}  # maps each recipe title to its recipe id
# search for recipes
def search_recipes(ingredient, cuisine, dietary):
    # Query the Spoonacular search endpoint, filtering by ingredient, cuisine, and diet
    global recipe_id_map
    url = "https://api.spoonacular.com/recipes/complexSearch"
    params = {
        "query": ingredient,
        "cuisine": cuisine,
        "diet": dietary,
        "number": 3,
        "apiKey": SPOONACULAR_API_KEY
    }
    res = requests.get(url, params=params)
    data = res.json()
    # If no recipes were found, clear the map and tell the user
    if "results" not in data or not data["results"]:
        recipe_id_map = {}
        return gr.update(choices=[], visible=True, label="No recipes found"), gr.update(value="No recipes found.")
    # Remember which title maps to which id, then fill the dropdown with the titles
    recipe_id_map = {r["title"]: r["id"] for r in data["results"]}
    # Ask the user to choose a recipe from the dropdown
    return gr.update(choices=list(recipe_id_map.keys()), visible=True, label="Select a recipe"), gr.update(value="Select a recipe from the dropdown above.")
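# A more defensive variant of the request call (a sketch, not wired in above):
# adding a timeout and raising on HTTP errors avoids hanging on a slow network
# or silently parsing an error body.
def fetch_json(url, params):
    res = requests.get(url, params=params, timeout=10)
    res.raise_for_status()
    return res.json()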
# get recipe details from the recipe the user selects
def get_recipe_details(selected_title):
    if not selected_title or selected_title not in recipe_id_map:
        return "Please select a valid recipe."
    recipe_id = recipe_id_map[selected_title]
    url = f"https://api.spoonacular.com/recipes/{recipe_id}/information"
    params = {"apiKey": SPOONACULAR_API_KEY}
    res = requests.get(url, params=params)
    data = res.json()
    title = data.get("title", "Unknown Title")
    time = data.get("readyInMinutes", "N/A")
    instructions = data.get("instructions") or "No instructions available."
    return f"### 🍽️ {title}\n**⏱️ Cook Time:** {time} minutes\n\n**📋 Instructions:**\n{instructions}"
# UI
with gr.Blocks() as demo:
    gr.Markdown("## 🥗 The BiteBot")
    # Lay out three input fields side by side: ingredient, cuisine, and restrictions
    with gr.Row():
        ingredient = gr.Textbox(label="Preferred Ingredient", placeholder="e.g., chicken")
        cuisine = gr.Textbox(label="Preferred Cuisine", placeholder="e.g., Indian")
        diet = gr.Textbox(label="Dietary Restrictions", placeholder="e.g., vegetarian")
    search_button = gr.Button("Search Recipes")
    recipe_dropdown = gr.Dropdown(label="Select a recipe", visible=False)
    recipe_output = gr.Markdown()
    # When the user clicks Search, run search_recipes and fill the dropdown
    # with up to 3 recipe titles
    search_button.click(
        fn=search_recipes,
        inputs=[ingredient, cuisine, diet],
        outputs=[recipe_dropdown, recipe_output]
    )
    # When the user picks an option, run get_recipe_details and display the full recipe
    recipe_dropdown.change(
        fn=get_recipe_details,
        inputs=recipe_dropdown,
        outputs=recipe_output
    )

demo.launch()