import os
import logging
import json
from typing import Dict
import gradio as gr
import requests  # unused here; presumably for the disabled D-ID integration below
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_groq import ChatGroq
from google import genai # For Gemini API usage
# ============================ #
# Logging Setup #
# ============================ #
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
# ============================ #
# API Key Handling #
# ============================ #
def clean_api_key(key: str) -> str:
    """Strip non-ASCII characters and surrounding whitespace from an API key."""
    return ''.join(c for c in key if ord(c) < 128).strip()
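# Hypothetical example: clean_api_key(" sk-abc\u2011123 ") -> "sk-abc123";
# the non-ASCII hyphen (U+2011) and the padding are stripped before use.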
for key in ["GEMINI_API_KEY", "GROQ_API_KEY"]:
    if not os.environ.get(key):
        raise ValueError(f"Environment variable {key} is not set. Please set it in the Hugging Face Space secrets.")
gemini_api_key = clean_api_key(os.environ["GEMINI_API_KEY"])
groq_api_key = clean_api_key(os.environ["GROQ_API_KEY"])
# did_api_key = clean_api_key(os.environ["DID_API_KEY"])
# Initialize Gemini client for calculations
client = genai.Client(api_key=gemini_api_key)
model_name = "gemini-2.0-flash-exp" # Adjust based on your model
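# Note: this Gemini client is not referenced elsewhere in this file. A minimal
# sketch of how it could be called with the google-genai SDK (kept commented out):
# response = client.models.generate_content(model=model_name, contents="2 + 2 = ?")
# print(response.text)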
# ============================ #
# Initialize LangChain #
# ============================ #
chat_model = ChatGroq(model="Gemma2-9b-It", groq_api_key=groq_api_key)
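# Model names on Groq change over time; if "Gemma2-9b-It" is unavailable on your
# account, substitute any chat model from Groq's current catalog.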
# ============================== #
# Chain Definitions and Helpers #
# ============================== #
# ============================== #
# Chain 1: Report Generation #
# ============================== #
report_prompt_template = PromptTemplate(
    input_variables=["qa_summary"],
    template=(
        "You are a wellness assistant. The user provided the following answers:\n\n"
        "{qa_summary}\n\n"
        "Based on these answers, provide a brief, actionable wellness report. "
        "Include simple suggestions to improve their sleep, exercise, stress management, and diet. "
        "Be concise and helpful.\n\n"
        "Report:"
    )
)
report_chain = LLMChain(llm=chat_model, prompt=report_prompt_template)
def generate_short_report_for_session(responses: Dict[str, str]) -> str:
    """Flatten the Q&A pairs into a summary and run the report chain."""
    qa_summary = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_report = report_chain.run(qa_summary=qa_summary)
    # Strip Markdown emphasis; replacing "*" also removes every "**".
    return raw_report.replace("*", "")
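# The qa_summary sent to the chain looks like this (hypothetical answers):
#   How many hours of sleep do you get each night?: about 6
#   How often do you exercise in a week?: twice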
# ============================== #
# Chain 2: Problem Severity Analysis #
# ============================== #
problem_prompt_template = PromptTemplate(
    input_variables=["responses", "internal_report"],
    template=(
        "You are a wellness analyst. You have the following user responses to health-related questions:\n"
        "{responses}\n\n"
        "You also have an internal analysis report:\n"
        "{internal_report}\n\n"
        "From these inputs, determine a 'problem severity percentage' for the user in the following areas: "
        "sleep, exercise, stress, and diet. "
        "Return your answer in JSON format with keys: sleep_problem, exercise_problem, stress_problem, diet_problem.\n"
        "Ensure severity percentages are numbers from 0 to 100.\n\n"
        "JSON Output:"
    )
)
problem_chain = LLMChain(llm=chat_model, prompt=problem_prompt_template)
def analyze_problems_with_chain(responses: Dict[str, str], internal_report: str) -> Dict[str, float]:
    """Ask the LLM for per-area severity percentages and parse its JSON reply."""
    responses_str = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_text = problem_chain.run(responses=responses_str, internal_report=internal_report)
    try:
        # Extract the first {...} span in case the model wraps the JSON in prose.
        start_idx = raw_text.find('{')
        end_idx = raw_text.rfind('}') + 1
        if start_idx == -1 or end_idx == 0:
            raise ValueError("no JSON object found in LLM output")
        problems = json.loads(raw_text[start_idx:end_idx])
        # Guarantee all four keys exist, then coerce every value to float.
        for key in ["sleep_problem", "exercise_problem", "stress_problem", "diet_problem"]:
            problems.setdefault(key, 0.0)
        return {k: float(v) for k, v in problems.items()}
    except Exception as e:
        logger.error(f"Error parsing problem percentages from LLM: {e}")
        return {
            "sleep_problem": 0.0,
            "exercise_problem": 0.0,
            "stress_problem": 0.0,
            "diet_problem": 0.0
        }
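# A well-formed reply embeds JSON such as (illustrative values):
#   {"sleep_problem": 80, "exercise_problem": 40, "stress_problem": 65, "diet_problem": 20}
# which the parser above turns into a Dict[str, float].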
# ============================== #
# Chain 3: Package Recommendation #
# ============================== #
recommend_prompt_template = PromptTemplate(
    input_variables=["problems"],
    template=(
        "Given the following problem severity percentages:\n"
        "{problems}\n\n"
        "Using these rules:\n"
        "- If sleep_problem > 70: Recommend Sleep Improvement Package\n"
        "- If stress_problem > 70: Recommend Stress Reduction Package\n"
        "- If exercise_problem > 70: Recommend Exercise Enhancement Package\n"
        "- If all problems are between 30 and 70: Recommend Balanced Wellness Package\n"
        "- If no severe problems: Recommend General Wellness Package\n\n"
        "What are the recommended wellness packages?"
    )
)
recommend_chain = LLMChain(llm=chat_model, prompt=recommend_prompt_template)
def generate_recommendations(problems: Dict[str, float]) -> str:
    recommendations = recommend_chain.run(problems=json.dumps(problems))
    return recommendations.strip()
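# For example, an input like {"sleep_problem": 80.0, "exercise_problem": 40.0,
# "stress_problem": 65.0, "diet_problem": 20.0} should trigger the Sleep
# Improvement Package rule; the exact wording is left to the LLM (illustrative).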
# ============================== #
# Chain 4: Final Summary Generation #
# ============================== #
final_prompt_template = PromptTemplate(
    input_variables=["report", "problems", "recommendation"],
    template=(
        "Based on the following information:\n"
        "Report:\n{report}\n\n"
        "Problem Severity Percentages:\n{problems}\n\n"
        "Recommended Packages:\n{recommendation}\n\n"
        "Generate a short summary suitable for video narration that synthesizes this information."
    )
)
final_chain = LLMChain(llm=chat_model, prompt=final_prompt_template)
def generate_final_summary(report: str, problems: Dict[str, float], recommendation: str) -> str:
    summary = final_chain.run(
        report=report,
        problems=json.dumps(problems),
        recommendation=recommendation
    )
    return summary.strip()
# ============================== #
# Chain 5: Shorten Final Summary #
# ============================== #
shorten_prompt_template = PromptTemplate(
    input_variables=["final_summary"],
    template=(
        "Shorten the following summary to make it concise and engaging for video narration. "
        "Ensure all key points remain intact:\n\n"
        "{final_summary}\n\n"
        "Shortened Summary:"
    )
)
shorten_chain = LLMChain(llm=chat_model, prompt=shorten_prompt_template)
def shorten_summary(final_summary: str) -> str:
    shortened = shorten_chain.run(final_summary=final_summary)
    return shortened.strip()
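# Chains 1-5 form a simple sequential pipeline: report -> severities ->
# recommendation -> narration summary -> shortened narration (see process_answers).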
# ============================== #
# Questions and Gradio #
# ============================== #
questions = [
    "How many hours of sleep do you get each night?",
    "How often do you exercise in a week?",
    "What is your current stress level on a scale from 1 to 10?",
    "What are your primary wellness goals?",
    "Do you follow any specific diet or have any dietary restrictions?",
    "How would you describe your current eating habits?",
    "How much time do you spend on relaxation or mindfulness activities daily?",
    "Do you experience any recurring health issues or pain?",
    "How do you manage stress on a daily basis?",
    "What does your typical daily routine look like?"
]
def process_answers(
    sleep: str,
    exercise: str,
    stress: str,
    goals: str,
    diet: str,
    eating: str,
    relaxation: str,
    health_issues: str,
    manage_stress: str,
    routine: str
):
    # Map user inputs to questions
    responses = {
        questions[0]: sleep,
        questions[1]: exercise,
        questions[2]: stress,
        questions[3]: goals,
        questions[4]: diet,
        questions[5]: eating,
        questions[6]: relaxation,
        questions[7]: health_issues,
        questions[8]: manage_stress,
        questions[9]: routine
    }
    # Execute chains sequentially using the collected responses
    report = generate_short_report_for_session(responses)
    problems = analyze_problems_with_chain(responses, report)
    recommendation = generate_recommendations(problems)
    final_summary = generate_final_summary(report, problems, recommendation)  # for user
    shortened_summary = shorten_summary(final_summary)  # for video
    # Prepare individual outputs for each section
    wellness_report = f"**Wellness Report**\n------------------\n{report.strip()}"
    identified_problems = (
        "**Identified Problems**\n"
        "-----------------------\n"
        f"Sleep Problem: {problems.get('sleep_problem', 'N/A')}%\n"
        f"Exercise Problem: {problems.get('exercise_problem', 'N/A')}%\n"
        f"Stress Problem: {problems.get('stress_problem', 'N/A')}%\n"
        f"Diet Problem: {problems.get('diet_problem', 'N/A')}%"
    )
    recommendations = (
        "**Recommendations**\n"
        "--------------------\n"
        f"{recommendation.strip()}"
    )
    summary_shown = (
        "**Summary (SHOWN TO USER)**\n"
        "-----------------\n"
        f"{final_summary.strip()}"
    )
    final_summary_video = (
        "**Final Summary (FOR VIDEO CREATION)**\n"
        "-----------------\n"
        f"{shortened_summary.strip()}"
    )
    # Return each section as a separate output
    return wellness_report, identified_problems, recommendations, summary_shown, final_summary_video
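# Quick local smoke test (commented out; hypothetical answers, needs valid API keys):
# if __name__ == "__main__":
#     outputs = process_answers("6", "twice", "7", "lose weight", "none", "irregular",
#                               "10 minutes", "back pain", "walking", "desk job")
#     for section in outputs:
#         print(section, "\n")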
# Create Gradio interface with separate textboxes for each question
iface = gr.Interface(
    fn=process_answers,
    # One textbox per question, in the same order process_answers expects.
    inputs=[gr.Textbox(label=q) for q in questions],
    outputs=[
        gr.Markdown(label="Wellness Report"),
        gr.Markdown(label="Identified Problems"),
        gr.Markdown(label="Recommendations"),
        gr.Markdown(label="Summary (SHOWN TO USER)"),
        gr.Markdown(label="Final Summary (FOR VIDEO CREATION)")
    ],
    title="Wellness Report Generator",
    description="Answer the questions to generate a wellness report, problem analysis, recommendations, and a final summary."
)
iface.launch()
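# For local testing, iface.launch(share=True) also prints a temporary public URL;
# on Hugging Face Spaces the plain launch() above is sufficient.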