Phoenix21 committed
Commit 0b73312 · verified · 1 Parent(s): 5e885e5

Create app.py

Files changed (1)
  1. app.py +279 -0
app.py ADDED
@@ -0,0 +1,279 @@
+ import os
+ import logging
+ import json
+ from typing import Dict
+ import gradio as gr
+ import requests
+ from langchain.prompts import PromptTemplate
+ from langchain.chains import LLMChain
+ from langchain_groq import ChatGroq
+ from google import genai  # For Gemini API usage
+
+ # ============================ #
+ # Logging Setup                #
+ # ============================ #
+ logging.basicConfig(level=logging.WARNING)
+ logger = logging.getLogger(__name__)
+
+ # ============================ #
+ # API Key Handling             #
+ # ============================ #
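+ # Keys pasted into Space secrets can pick up stray non-ASCII characters
+ # (e.g. smart quotes); those would break the HTTP auth headers, so they
+ # are stripped here.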
+ def clean_api_key(key: str) -> str:
+     return ''.join(c for c in key if ord(c) < 128).strip()
+
+ for key in ["GEMINI_API_KEY", "GROQ_API_KEY", "DID_API_KEY"]:
+     if not os.environ.get(key):
+         raise ValueError(f"Environment variable {key} is not set. Please set it in the Hugging Face Space secrets.")
+
+ gemini_api_key = clean_api_key(os.environ["GEMINI_API_KEY"])
+ groq_api_key = clean_api_key(os.environ["GROQ_API_KEY"])
+ did_api_key = clean_api_key(os.environ["DID_API_KEY"])
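+ # NOTE: did_api_key (and the `requests` import above) appear to be reserved
+ # for a D-ID video-generation call that is not yet implemented in this file.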
+
+ # Initialize Gemini client for calculations
+ client = genai.Client(api_key=gemini_api_key)
+ model_name = "gemini-2.0-flash-exp"  # Adjust based on your model
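+ # NOTE: the Gemini client and model_name are set up but never invoked below;
+ # every chain in this file currently runs on the Groq model.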
+
+ # ============================ #
+ # Initialize LangChain         #
+ # ============================ #
+ chat_model = ChatGroq(model="Gemma2-9b-It", groq_api_key=groq_api_key)
+
+ # ============================== #
+ # Chain Definitions and Helpers  #
+ # ============================== #
+
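+ # The five chains below run as a sequential pipeline:
+ # answers -> wellness report -> problem severities -> package
+ # recommendation -> narration summary -> shortened narration summary.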
+ # ============================== #
+ # Chain 1: Report Generation     #
+ # ============================== #
+ report_prompt_template = PromptTemplate(
+     input_variables=["qa_summary"],
+     template=(
+         "You are a wellness assistant. The user provided the following answers:\n\n"
+         "{qa_summary}\n\n"
+         "Based on these answers, provide a brief, actionable wellness report. "
+         "Include simple suggestions to improve their sleep, exercise, stress management, and diet. "
+         "Be concise and helpful.\n\n"
+         "Report:"
+     )
+ )
+ report_chain = LLMChain(llm=chat_model, prompt=report_prompt_template)
+
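+ # Summarizes the Q&A pairs, runs the report chain, and strips Markdown
+ # asterisks so the report reads as plain text.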
+ def generate_short_report_for_session(responses: Dict[str, str]) -> str:
+     qa_summary = "\n".join([f"{q}: {a}" for q, a in responses.items()])
+     raw_report = report_chain.run(qa_summary=qa_summary)
+     clean_report = raw_report.replace("*", "")  # removing "*" also removes "**"
+     return clean_report
+
+ # ============================== #
+ # Chain 2: Problem Severity Analysis #
+ # ============================== #
+ problem_prompt_template = PromptTemplate(
+     input_variables=["responses", "internal_report"],
+     template=(
+         "You are a wellness analyst. You have the following user responses to health-related questions:\n"
+         "{responses}\n\n"
+         "You also have an internal analysis report:\n"
+         "{internal_report}\n\n"
+         "From these inputs, determine a 'problem severity percentage' for the user in the following areas: "
+         "sleep, exercise, stress, and diet. "
+         "Return your answer in JSON format with keys: sleep_problem, exercise_problem, stress_problem, diet_problem.\n"
+         "Ensure severity percentages are numbers from 0 to 100.\n\n"
+         "JSON Output:"
+     )
+ )
+ problem_chain = LLMChain(llm=chat_model, prompt=problem_prompt_template)
+
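+ # The LLM is asked for JSON but may wrap it in prose, so the parser slices
+ # from the first '{' to the last '}' before calling json.loads; any parse
+ # failure falls back to zeroed severities.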
+ def analyze_problems_with_chain(responses: Dict[str, str], internal_report: str) -> Dict[str, float]:
+     responses_str = "\n".join([f"{q}: {a}" for q, a in responses.items()])
+     raw_text = problem_chain.run(responses=responses_str, internal_report=internal_report)
+     try:
+         start_idx = raw_text.find('{')
+         end_idx = raw_text.rfind('}') + 1
+         json_str = raw_text[start_idx:end_idx]
+         problems = json.loads(json_str)
+         for key in ["sleep_problem", "exercise_problem", "stress_problem", "diet_problem"]:
+             if key not in problems:
+                 problems[key] = 0.0
+         problems = {k: float(v) for k, v in problems.items()}
+         return problems
+     except Exception as e:
+         logger.error(f"Error parsing problem percentages from LLM: {e}")
+         return {
+             "sleep_problem": 0.0,
+             "exercise_problem": 0.0,
+             "stress_problem": 0.0,
+             "diet_problem": 0.0
+         }
+
+ # ============================== #
+ # Chain 3: Package Recommendation #
+ # ============================== #
+ recommend_prompt_template = PromptTemplate(
+     input_variables=["problems"],
+     template=(
+         "Given the following problem severity percentages:\n"
+         "{problems}\n\n"
+         "Using these rules:\n"
+         "- If sleep_problem > 70: Recommend Sleep Improvement Package\n"
+         "- If stress_problem > 70: Recommend Stress Reduction Package\n"
+         "- If exercise_problem > 70: Recommend Exercise Enhancement Package\n"
+         "- If all problems are between 30 and 70: Recommend Balanced Wellness Package\n"
+         "- If no severe problems: Recommend General Wellness Package\n\n"
+         "What are the recommended wellness packages?"
+     )
+ )
+ recommend_chain = LLMChain(llm=chat_model, prompt=recommend_prompt_template)
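+ # (These threshold rules are applied by the LLM itself rather than in
+ # Python code; note that no explicit rule is given for diet_problem.)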
+
+ def generate_recommendations(problems: Dict[str, float]) -> str:
+     recommendations = recommend_chain.run(problems=json.dumps(problems))
+     return recommendations.strip()
+
+ # ============================== #
+ # Chain 4: Final Summary Generation #
+ # ============================== #
+ final_prompt_template = PromptTemplate(
+     input_variables=["report", "problems", "recommendation"],
+     template=(
+         "Based on the following information:\n"
+         "Report:\n{report}\n\n"
+         "Problem Severity Percentages:\n{problems}\n\n"
+         "Recommended Packages:\n{recommendation}\n\n"
+         "Generate a short summary suitable for video narration that synthesizes this information."
+     )
+ )
+ final_chain = LLMChain(llm=chat_model, prompt=final_prompt_template)
+
+ def generate_final_summary(report: str, problems: Dict[str, float], recommendation: str) -> str:
+     summary = final_chain.run(
+         report=report,
+         problems=json.dumps(problems),
+         recommendation=recommendation
+     )
+     return summary.strip()
+
+ # ============================== #
+ # Chain 5: Shorten Final Summary #
+ # ============================== #
+ shorten_prompt_template = PromptTemplate(
+     input_variables=["final_summary"],
+     template=(
+         "Shorten the following summary to make it concise and engaging for video narration. "
+         "Ensure all key points remain intact:\n\n"
+         "{final_summary}\n\n"
+         "Shortened Summary:"
+     )
+ )
+ shorten_chain = LLMChain(llm=chat_model, prompt=shorten_prompt_template)
+
+ def shorten_summary(final_summary: str) -> str:
+     shortened = shorten_chain.run(final_summary=final_summary)
+     return shortened.strip()
+
+ # ============================== #
+ # Questions and Gradio           #
+ # ============================== #
+ questions = [
+     "How many hours of sleep do you get each night?",
+     "How often do you exercise in a week?",
+     "What is your current stress level on a scale from 1 to 10?",
+     "What are your primary wellness goals?",
+     "Do you follow any specific diet or have any dietary restrictions?",
+     "How would you describe your current eating habits?",
+     "How much time do you spend on relaxation or mindfulness activities daily?",
+     "Do you experience any recurring health issues or pain?",
+     "How do you manage stress on a daily basis?",
+     "What does your typical daily routine look like?"
+ ]
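+ # The order of this list must match both the parameter order of
+ # process_answers and the order of the Gradio inputs below.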
+
+ def process_answers(
+     sleep: str,
+     exercise: str,
+     stress: str,
+     goals: str,
+     diet: str,
+     eating: str,
+     relaxation: str,
+     health_issues: str,
+     manage_stress: str,
+     routine: str
+ ):
+     # Map user inputs to questions
+     responses = {
+         questions[0]: sleep,
+         questions[1]: exercise,
+         questions[2]: stress,
+         questions[3]: goals,
+         questions[4]: diet,
+         questions[5]: eating,
+         questions[6]: relaxation,
+         questions[7]: health_issues,
+         questions[8]: manage_stress,
+         questions[9]: routine
+     }
+
+     # Execute chains sequentially using the collected responses
+     report = generate_short_report_for_session(responses)
+     problems = analyze_problems_with_chain(responses, report)
+     recommendation = generate_recommendations(problems)
+     final_summary = generate_final_summary(report, problems, recommendation)  # for user
+     shortened_summary = shorten_summary(final_summary)  # for video
+
+     # Prepare individual outputs for each section
+     wellness_report = f"**Wellness Report**\n------------------\n{report.strip()}"
+
+     identified_problems = (
+         "**Identified Problems**\n"
+         "-----------------------\n"
+         f"Sleep Problem: {problems.get('sleep_problem', 'N/A')}%\n"
+         f"Exercise Problem: {problems.get('exercise_problem', 'N/A')}%\n"
+         f"Stress Problem: {problems.get('stress_problem', 'N/A')}%\n"
+         f"Diet Problem: {problems.get('diet_problem', 'N/A')}%"
+     )
+
+     recommendations = (
+         "**Recommendations**\n"
+         "--------------------\n"
+         f"{recommendation.strip()}"
+     )
+
+     summary_shown = (
+         "**Summary (SHOWN TO USER)**\n"
+         "-----------------\n"
+         f"{final_summary.strip()}"
+     )
+
+     final_summary_video = (
+         "**Final Summary (FOR VIDEO CREATION)**\n"
+         "-----------------\n"
+         f"{shortened_summary.strip()}"
+     )
+
+     # Return each section as a separate output
+     return wellness_report, identified_problems, recommendations, summary_shown, final_summary_video
+
+ # Create Gradio interface with separate textboxes for each question
+ iface = gr.Interface(
+     fn=process_answers,
+     inputs=[
+         gr.Textbox(label=questions[0]),
+         gr.Textbox(label=questions[1]),
+         gr.Textbox(label=questions[2]),
+         gr.Textbox(label=questions[3]),
+         gr.Textbox(label=questions[4]),
+         gr.Textbox(label=questions[5]),
+         gr.Textbox(label=questions[6]),
+         gr.Textbox(label=questions[7]),
+         gr.Textbox(label=questions[8]),
+         gr.Textbox(label=questions[9])
+     ],
+     outputs=[
+         gr.Markdown(label="Wellness Report"),
+         gr.Markdown(label="Identified Problems"),
+         gr.Markdown(label="Recommendations"),
+         gr.Markdown(label="Summary (SHOWN TO USER)"),
+         gr.Markdown(label="Final Summary (FOR VIDEO CREATION)")
+     ],
+     title="Wellness Report Generator",
+     description="Answer the questions to generate a wellness report, problem analysis, recommendations, and a final summary."
+ )
+
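+ # launch() blocks and serves the app; on Hugging Face Spaces no extra
+ # arguments are needed (share=True would only matter for a local run).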
+ iface.launch()