Update app.py
app.py CHANGED
--- a/app.py
@@ -1,13 +1,14 @@
 from transformers import FlaxAutoModelForSeq2SeqLM, AutoTokenizer, AutoModel
 import torch
 import numpy as np
 import random
 import json
-from fastapi import FastAPI
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel

-# Load the RecipeBERT model
 bert_model_name = "alexdseo/RecipeBERT"
 bert_tokenizer = AutoTokenizer.from_pretrained(bert_model_name)
 bert_model = AutoModel.from_pretrained(bert_model_name)
@@ -16,7 +17,7 @@ bert_model.eval()  # put the model into evaluation mode
 # Load the T5 recipe generation model
 MODEL_NAME_OR_PATH = "flax-community/t5-recipe-generation"
 t5_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
-t5_model = FlaxAutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME_OR_PATH)

 # Token mapping for the T5 model output
 special_tokens = t5_tokenizer.all_special_tokens
@@ -25,83 +26,138 @@ tokens_map = {
     "<section>": "\n"
 }

-# --- RecipeBERT-specific functions (unchanged) ---
 def get_embedding(text):
-    """Computes the embedding for a text using mean pooling over all tokens
     inputs = bert_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
         outputs = bert_model(**inputs)
     attention_mask = inputs['attention_mask']
     token_embeddings = outputs.last_hidden_state
     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
     sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
     sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
     return (sum_embeddings / sum_mask).squeeze(0)

 def average_embedding(embedding_list):
-    """Computes the average of a list of embeddings
-    [removed line not shown in this view]
     return tensors.mean(dim=0)

 def get_cosine_similarity(vec1, vec2):
-    """Computes the cosine similarity between two vectors
-    if torch.is_tensor(vec1):
-    [removed line not shown in this view]
     vec1 = vec1.flatten()
     vec2 = vec2.flatten()
     dot_product = np.dot(vec1, vec2)
     norm_a = np.linalg.norm(vec1)
     norm_b = np.linalg.norm(vec2)
-    [removed line not shown in this view]
     return dot_product / (norm_a * norm_b)

-[removed lines not shown in this view]
     """
-    Finds the best ingredients
     """
     required_ingredients = list(set(required_ingredients))
     available_ingredients = list(set([i for i in available_ingredients if i not in required_ingredients]))
-    [removed lines not shown in this view]

-    [removed lines not shown in this view]

-    return final_ingredients[:max_ingredients]
-    [removed lines not shown in this view]
-# skip_special_tokens (unchanged, used by generate_recipe_with_t5)
 def skip_special_tokens(text, special_tokens):
-    """
     for token in special_tokens:
         text = text.replace(token, "")
     return text

-# target_postprocessing (unchanged, used by generate_recipe_with_t5)
 def target_postprocessing(texts, special_tokens):
-    """Post-
     if not isinstance(texts, list):
         texts = [texts]

@@ -116,17 +172,14 @@ def target_postprocessing(texts, special_tokens):

     return new_texts

-# validate_recipe_ingredients (unchanged, used by generate_recipe_with_t5)
 def validate_recipe_ingredients(recipe_ingredients, expected_ingredients, tolerance=0):
     """
-    [removed line not shown in this view]
     """
     recipe_count = len([ing for ing in recipe_ingredients if ing and ing.strip()])
     expected_count = len(expected_ingredients)
     return abs(recipe_count - expected_count) == tolerance

-[removed line not shown in this view]
-# generate_recipe_with_t5 (now ACTIVATED)
 def generate_recipe_with_t5(ingredients_list, max_retries=5):
     """Generates a recipe with the T5 recipe generation model, with validation."""
     original_ingredients = ingredients_list.copy()
@@ -227,8 +280,8 @@ def generate_recipe_with_t5(ingredients_list, max_retries=5):
         "directions": ["Fehler beim Generieren der Rezeptanweisungen"]
     }

-[removed line not shown in this view]
-#
 def process_recipe_request_logic(required_ingredients, available_ingredients, max_ingredients, max_retries):
     """
     Core logic for processing a recipe generation request.
@@ -238,14 +291,14 @@ def process_recipe_request_logic(required_ingredients, available_ingredients, max_ingredients, max_retries):
         return {"error": "Keine Zutaten angegeben"}

     try:
-        # Find the optimal ingredients
         optimized_ingredients = find_best_ingredients(
             required_ingredients,
             available_ingredients,
             max_ingredients
         )

-        # Generate a recipe with the optimized ingredients
         recipe = generate_recipe_with_t5(optimized_ingredients, max_retries)

         # Format the result
@@ -260,33 +313,161 @@ def process_recipe_request_logic(required_ingredients, available_ingredients, max_ingredients, max_retries):
     except Exception as e:
         return {"error": f"Fehler bei der Rezeptgenerierung: {str(e)}"}


-[removed lines not shown in this view]

 class RecipeRequest(BaseModel):
     required_ingredients: list[str] = []
     available_ingredients: list[str] = []
     max_ingredients: int = 7
     max_retries: int = 5
-    ingredients: list[str] = []  # for backwards compatibility

-@app.post("/generate_recipe")  # Der
-async def
-[removed lines not shown in this view]

     result_dict = process_recipe_request_logic(
-        [removed line not shown in this view]
-        request_data.available_ingredients,
-        request_data.max_ingredients,
-        request_data.max_retries
     )
     return JSONResponse(content=result_dict)

-[removed lines not shown in this view]

-[removed lines not shown in this view]
+++ b/app.py
@@ -1,13 +1,14 @@
+import gradio as gr
 from transformers import FlaxAutoModelForSeq2SeqLM, AutoTokenizer, AutoModel
 import torch
 import numpy as np
 import random
 import json
+from fastapi import FastAPI, Request
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel

+# Load the RecipeBERT model (for semantic ingredient combination)
 bert_model_name = "alexdseo/RecipeBERT"
 bert_tokenizer = AutoTokenizer.from_pretrained(bert_model_name)
 bert_model = AutoModel.from_pretrained(bert_model_name)
@@ -16,7 +17,7 @@
 # Load the T5 recipe generation model
 MODEL_NAME_OR_PATH = "flax-community/t5-recipe-generation"
 t5_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
+t5_model = FlaxAutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME_OR_PATH)

 # Token mapping for the T5 model output
 special_tokens = t5_tokenizer.all_special_tokens
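The body of generate_recipe_with_t5 is mostly elided in this diff, so purely for orientation: a minimal, hypothetical sketch of how the Flax T5 model loaded above is typically driven. The "items: ..." prompt format and the sampling parameters are assumptions, not taken from this file:

    # Hypothetical sketch only - not the file's actual generate_recipe_with_t5 implementation.
    # Assumes the usual prompt format of flax-community/t5-recipe-generation ("items: ...").
    prompt = "items: " + ", ".join(["chicken", "rice", "onion"])
    inputs = t5_tokenizer([prompt], return_tensors="jax", padding=True, truncation=True)
    output = t5_model.generate(inputs["input_ids"], max_length=512, do_sample=True, top_p=0.95)
    generated = t5_tokenizer.batch_decode(output.sequences, skip_special_tokens=False)
    # skip_special_tokens / target_postprocessing (defined further down) would then clean
    # this raw text and split it into title, ingredients and directions.
    print(generated[0])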
@@ -25,83 +26,138 @@
     "<section>": "\n"
 }

 def get_embedding(text):
+    """Computes the embedding for a text using mean pooling over all tokens"""
     inputs = bert_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
         outputs = bert_model(**inputs)
+
+    # Mean pooling - average of all token embeddings
     attention_mask = inputs['attention_mask']
     token_embeddings = outputs.last_hidden_state
     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
     sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
     sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
     return (sum_embeddings / sum_mask).squeeze(0)

 def average_embedding(embedding_list):
+    """Computes the average of a list of embeddings"""
+    # Make sure embedding_list contains (name, embedding) tuples
+    tensors = torch.stack([emb for _, emb in embedding_list])
     return tensors.mean(dim=0)

 def get_cosine_similarity(vec1, vec2):
+    """Computes the cosine similarity between two vectors"""
+    if torch.is_tensor(vec1):
+        vec1 = vec1.detach().numpy()
+    if torch.is_tensor(vec2):
+        vec2 = vec2.detach().numpy()
+
+    # Make sure the vectors have the right shape (flatten them if necessary)
     vec1 = vec1.flatten()
     vec2 = vec2.flatten()
+
     dot_product = np.dot(vec1, vec2)
     norm_a = np.linalg.norm(vec1)
     norm_b = np.linalg.norm(vec2)
+
+    # Avoid division by zero
+    if norm_a == 0 or norm_b == 0:
+        return 0
+
     return dot_product / (norm_a * norm_b)

+def get_combined_scores(query_vector, embedding_list, all_good_embeddings, avg_weight=0.6):
+    """Computes a combined score from the similarity to the average and to the individual ingredients"""
+    results = []
+
+    for name, emb in embedding_list:
+        # Similarity to the average vector
+        avg_similarity = get_cosine_similarity(query_vector, emb)
+
+        # Average similarity to the individual ingredients
+        individual_similarities = [get_cosine_similarity(good_emb, emb)
+                                   for _, good_emb in all_good_embeddings]
+        # Avoid division by zero if all_good_embeddings is empty
+        avg_individual_similarity = sum(individual_similarities) / len(individual_similarities) if individual_similarities else 0
+
+        # Combined score (weighted average)
+        combined_score = avg_weight * avg_similarity + (1 - avg_weight) * avg_individual_similarity
+
+        results.append((name, emb, combined_score))
+
+    # Sort by combined score (descending)
+    results.sort(key=lambda x: x[2], reverse=True)
+    return results
+
+# The corrected find_best_ingredients function (as provided)
+def find_best_ingredients(required_ingredients, available_ingredients, max_ingredients=6, avg_weight=0.6):
     """
+    Finds the best ingredients based on RecipeBERT embeddings.
     """
+    # Ensure no duplicates in lists
     required_ingredients = list(set(required_ingredients))
     available_ingredients = list(set([i for i in available_ingredients if i not in required_ingredients]))
+
+    # Special case: If no required ingredients, randomly select one from available ingredients
+    if not required_ingredients and available_ingredients:
+        # Randomly select 1 ingredient as starting point
+        random_ingredient = random.choice(available_ingredients)
+        required_ingredients = [random_ingredient]
+        available_ingredients = [i for i in available_ingredients if i != random_ingredient]
+        print(f"No required ingredients provided. Randomly selected: {random_ingredient}")
+
+    # If still no ingredients or already at max capacity
+    if not required_ingredients or len(required_ingredients) >= max_ingredients:
+        return required_ingredients[:max_ingredients]
+
+    # If no additional ingredients available
+    if not available_ingredients:
+        return required_ingredients
+
+    # Calculate embeddings for all ingredients
+    embed_required = [(e, get_embedding(e)) for e in required_ingredients]
+    embed_available = [(e, get_embedding(e)) for e in available_ingredients]
+
+    # Number of ingredients to add
+    num_to_add = min(max_ingredients - len(required_ingredients), len(available_ingredients))
+
+    # Copy required ingredients to final list
+    final_ingredients = embed_required.copy()
+
+    # Add best ingredients
+    for _ in range(num_to_add):
+        # Calculate average vector of current combination
+        avg = average_embedding(final_ingredients)
+
+        # Calculate combined scores for all candidates
+        candidates = get_combined_scores(avg, embed_available, final_ingredients, avg_weight)
+
+        # If no candidates left, break
+        if not candidates:
+            break

+        # Choose best ingredient
+        best_name, best_embedding, _ = candidates[0]
+
+        # Add best ingredient to final list
+        final_ingredients.append((best_name, best_embedding))
+
+        # Remove ingredient from available ingredients
+        embed_available = [item for item in embed_available if item[0] != best_name]
+
+    # Extract only ingredient names
+    return [name for name, _ in final_ingredients]

 def skip_special_tokens(text, special_tokens):
+    """Removes special tokens from text"""
     for token in special_tokens:
         text = text.replace(token, "")
     return text

 def target_postprocessing(texts, special_tokens):
+    """Post-processes generated text"""
     if not isinstance(texts, list):
         texts = [texts]

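To illustrate the selection logic introduced in this hunk, a small usage sketch; the ingredient lists are made up, while the function and its defaults are exactly the ones defined above:

    # Hypothetical input: two fixed ingredients plus a pantry to draw from.
    required = ["chicken", "rice"]
    available = ["onion", "garlic", "tomato", "basil", "chocolate"]
    # Greedily adds up to four more ingredients whose RecipeBERT embeddings best match
    # the running average of the current combination.
    print(find_best_ingredients(required, available, max_ingredients=6))
    # e.g. ['rice', 'chicken', 'onion', 'garlic', 'tomato', 'basil'] (order may vary,
    # since both input lists are passed through set())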
@@ -116,17 +172,14 @@

     return new_texts

 def validate_recipe_ingredients(recipe_ingredients, expected_ingredients, tolerance=0):
     """
+    Validates if the recipe contains approximately the expected ingredients.
     """
     recipe_count = len([ing for ing in recipe_ingredients if ing and ing.strip()])
     expected_count = len(expected_ingredients)
     return abs(recipe_count - expected_count) == tolerance

 def generate_recipe_with_t5(ingredients_list, max_retries=5):
     """Generates a recipe with the T5 recipe generation model, with validation."""
     original_ingredients = ingredients_list.copy()
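A quick note on validate_recipe_ingredients as defined above: with the default tolerance=0 the check passes only when the generated and the expected ingredient counts differ by exactly the tolerance. A small, hypothetical example:

    recipe_ingredients = ["2 cups rice", "1 chicken breast", "1 onion"]
    expected = ["rice", "chicken", "onion"]
    print(validate_recipe_ingredients(recipe_ingredients, expected, tolerance=0))  # True  (3 vs. 3)
    print(validate_recipe_ingredients(recipe_ingredients, expected + ["garlic"]))  # False (3 vs. 4)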
@@ -227,8 +280,8 @@
         "directions": ["Fehler beim Generieren der Rezeptanweisungen"]
     }

+# This function is called by the Gradio UI and by the FastAPI route.
+# It is responsible for the core logic.
 def process_recipe_request_logic(required_ingredients, available_ingredients, max_ingredients, max_retries):
     """
     Core logic for processing a recipe generation request.
@@ -238,14 +291,14 @@
         return {"error": "Keine Zutaten angegeben"}

     try:
+        # Find the optimal ingredients
         optimized_ingredients = find_best_ingredients(
             required_ingredients,
             available_ingredients,
             max_ingredients
         )

+        # Generate a recipe with the optimized ingredients
         recipe = generate_recipe_with_t5(optimized_ingredients, max_retries)

         # Format the result
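For orientation, a hypothetical direct call to the core logic. The exact keys of the returned dictionary are not visible in this hunk; the names used below are inferred from the Gradio UI function further down, which reads title, ingredients, directions and used_ingredients from the result:

    result = process_recipe_request_logic(
        required_ingredients=["chicken", "rice"],
        available_ingredients=["onion", "garlic"],
        max_ingredients=6,
        max_retries=5,
    )
    if "error" not in result:
        print(result["title"])             # recipe title generated by the T5 model
        print(result["used_ingredients"])  # selection produced by find_best_ingredients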
@@ -260,33 +313,161 @@
     except Exception as e:
         return {"error": f"Fehler bei der Rezeptgenerierung: {str(e)}"}

+# This function is intended for the internal Gradio "API-Test" tab,
+# which expects a JSON string as input and returns a JSON string.
+# It is NOT called directly by the Flutter app, because the Flutter app
+# uses the /api/generate_recipe_rest FastAPI route directly.
+def flutter_api_generate_recipe(ingredients_data: str):  # type hint for clarity
+    """
+    Flutter-friendly API function for the Gradio API test tab.
+    Processes JSON string input and returns JSON string output.
+    """
+    try:
+        data = json.loads(ingredients_data)  # must be a JSON string

+        required_ingredients = data.get('required_ingredients', [])
+        available_ingredients = data.get('available_ingredients', [])
+        max_ingredients = data.get('max_ingredients', 7)
+        max_retries = data.get('max_retries', 5)
+
+        # Call the core logic
+        result_dict = process_recipe_request_logic(
+            required_ingredients, available_ingredients, max_ingredients, max_retries
+        )
+        return json.dumps(result_dict)  # returns a JSON STRING
+
+    except Exception as e:
+        # Log the error for debugging in the Space log
+        print(f"Error in flutter_api_generate_recipe: {str(e)}")
+        return json.dumps({"error": f"Internal API Error: {str(e)}"})
+
+def gradio_ui_generate_recipe(required_ingredients_text, available_ingredients_text, max_ingredients_val, max_retries_val):
+    """Gradio UI function for the web interface"""
+    try:
+        required_ingredients = [ing.strip() for ing in required_ingredients_text.split(',') if ing.strip()]
+        available_ingredients = [ing.strip() for ing in available_ingredients_text.split(',') if ing.strip()]
+
+        # Call the core logic
+        result = process_recipe_request_logic(
+            required_ingredients, available_ingredients, max_ingredients_val, max_retries_val
+        )
+
+        if 'error' in result:
+            return result['error'], "", "", ""
+
+        ingredients_list = '\n'.join([f"• {ing}" for ing in result['ingredients']])
+        directions_list = '\n'.join([f"{i+1}. {dir}" for i, dir in enumerate(result['directions'])])
+        used_ingredients = ', '.join(result['used_ingredients'])
+
+        return (
+            result['title'],
+            ingredients_list,
+            directions_list,
+            used_ingredients
+        )
+
+    except Exception as e:
+        # Error message for the Gradio UI
+        return f"Fehler: {str(e)}", "", "", ""
+
+# Build the Gradio interface
+with gr.Blocks(title="AI Rezept Generator") as demo:
+    gr.Markdown("# 🍳 AI Rezept Generator")
+    gr.Markdown("Generiere Rezepte mit KI und intelligenter Zutat-Kombination!")
+
+    with gr.Tab("Web-Oberfläche"):
+        with gr.Row():
+            with gr.Column():
+                required_ing = gr.Textbox(
+                    label="Benötigte Zutaten (kommasepariert)",
+                    placeholder="Hähnchen, Reis, Zwiebel",
+                    lines=2
+                )
+                available_ing = gr.Textbox(
+                    label="Verfügbare Zutaten (kommasepariert, optional)",
+                    placeholder="Knoblauch, Tomate, Pfeffer, Kräuter",
+                    lines=2
+                )
+                max_ing = gr.Slider(3, 10, value=7, step=1, label="Maximale Zutaten")
+                max_retries = gr.Slider(1, 10, value=5, step=1, label="Max. Wiederholungsversuche")
+
+                generate_btn = gr.Button("Rezept generieren", variant="primary")
+
+            with gr.Column():
+                title_output = gr.Textbox(label="Rezepttitel", interactive=False)
+                ingredients_output = gr.Textbox(label="Zutaten", lines=8, interactive=False)
+                directions_output = gr.Textbox(label="Anweisungen", lines=10, interactive=False)
+                used_ingredients_output = gr.Textbox(label="Verwendete Zutaten", interactive=False)
+
+        generate_btn.click(
+            fn=gradio_ui_generate_recipe,
+            inputs=[required_ing, available_ing, max_ing, max_retries],
+            outputs=[title_output, ingredients_output, directions_output, used_ingredients_output]
+        )
+
+    with gr.Tab("API-Test"):
+        gr.Markdown("### Teste die Flutter API (via 'hugging_face_chat_gradio' Client)")
+        gr.Markdown("Dieser Tab zeigt, wie die Eingabe für die 'generate_recipe_for_flutter'-API aussehen sollte.")
+
+        api_input = gr.Textbox(
+            label="JSON-Eingabe (für API-Aufruf)",
+            placeholder='{"required_ingredients": ["chicken", "rice"], "available_ingredients": ["onion", "garlic"], "max_ingredients": 6}',
+            lines=4
+        )
+        api_output = gr.Textbox(label="JSON-Ausgabe", lines=15, interactive=False)
+        api_test_btn = gr.Button("API testen", variant="secondary")
+
+        # The function is still used here for Gradio's own API test tab.
+        api_test_btn.click(
+            fn=flutter_api_generate_recipe,
+            inputs=[api_input],
+            outputs=[api_output],
+            api_name="generate_recipe_for_flutter"  # this is the api_name used by the Flutter package
+        )
+
+        gr.Examples(
+            examples=[
+                ['{"required_ingredients": ["chicken", "rice"], "available_ingredients": ["onion", "garlic", "tomato"], "max_ingredients": 6}'],
+                ['{"ingredients": ["pasta"], "available_ingredients": ["cheese", "mushrooms", "cream"], "max_ingredients": 5}']
+            ],
+            inputs=[api_input]
+        )
+
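The api_name registered above can also be called programmatically. A minimal sketch using the gradio_client package; the Space id is a placeholder and the payload mirrors the examples above:

    from gradio_client import Client
    import json

    client = Client("YOUR-USERNAME/YOUR-SPACE")  # placeholder Space id
    payload = json.dumps({
        "required_ingredients": ["chicken", "rice"],
        "available_ingredients": ["onion", "garlic"],
        "max_ingredients": 6
    })
    # flutter_api_generate_recipe returns a JSON string, so decode it again on the client side.
    result = json.loads(client.predict(payload, api_name="/generate_recipe_for_flutter"))
    print(result.get("title"))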
+# --- FastAPI integration ---
+app = FastAPI()

 class RecipeRequest(BaseModel):
     required_ingredients: list[str] = []
     available_ingredients: list[str] = []
     max_ingredients: int = 7
     max_retries: int = 5

+@app.post("/generate_recipe")  # CORRECTED: the endpoint is now /generate_recipe
+async def generate_recipe_rest_api(request_data: RecipeRequest):
+    """
+    Standard REST API endpoint for the Flutter app.
+    Accepts JSON data directly and returns JSON directly.
+    """
+    required_ingredients = request_data.required_ingredients
+    available_ingredients = request_data.available_ingredients
+    max_ingredients = request_data.max_ingredients
+    max_retries = request_data.max_retries

     result_dict = process_recipe_request_logic(
+        required_ingredients, available_ingredients, max_ingredients, max_retries
     )
+
     return JSONResponse(content=result_dict)

+# Mount the Gradio app as a sub-app of the FastAPI app.
+# This is the standard way to embed Gradio in a FastAPI application.
+# The Gradio part is then served under the root path '/'.
+app = gr.mount_gradio_app(app, demo, path="/")  # mount Gradio at the root path
+
+# If you run the app locally, you can start FastAPI with Uvicorn:
+# if __name__ == "__main__":
+#     import uvicorn
+#     uvicorn.run(app, host="0.0.0.0", port=8000)

+# For Hugging Face Spaces the if __name__ == "__main__": block is not needed,
+# because Spaces starts Uvicorn automatically and looks for the "app" variable.