import os
import logging
import json
from typing import Dict

import gradio as gr
import requests  # kept for the (currently disabled) D-ID integration below
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_groq import ChatGroq
from google import genai  # For Gemini API usage
# ============================ #
#        Logging Setup         #
# ============================ #
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
# ============================ #
#       API Key Handling       #
# ============================ #
def clean_api_key(key: str) -> str:
    # Strip non-ASCII characters (e.g. zero-width spaces from copy/paste)
    # and surrounding whitespace, which would otherwise break HTTP headers.
    return ''.join(c for c in key if ord(c) < 128).strip()

for key in ["GEMINI_API_KEY", "GROQ_API_KEY"]:
    if not os.environ.get(key):
        raise ValueError(f"Environment variable {key} is not set. Please set it in the Hugging Face Space secrets.")

gemini_api_key = clean_api_key(os.environ["GEMINI_API_KEY"])
groq_api_key = clean_api_key(os.environ["GROQ_API_KEY"])
# did_api_key = clean_api_key(os.environ["DID_API_KEY"])

# Initialize Gemini client for calculations
client = genai.Client(api_key=gemini_api_key)
model_name = "gemini-2.0-flash-exp"  # Adjust based on your model
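
# NOTE: `client` is initialized above but not used by the chains below. A
# minimal sketch of how it could be invoked for ad-hoc calculations, assuming
# the google-genai SDK's `client.models.generate_content` call (hypothetical
# helper, not wired into the flow):
def gemini_generate(prompt: str) -> str:
    response = client.models.generate_content(model=model_name, contents=prompt)
    return response.text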
# ============================ #
#     Initialize LangChain     #
# ============================ #
chat_model = ChatGroq(model="Gemma2-9b-It", groq_api_key=groq_api_key)
# ============================== #
# Chain Definitions and Helpers  #
# ============================== #

# ============================== #
#   Chain 1: Report Generation   #
# ============================== #
report_prompt_template = PromptTemplate(
    input_variables=["qa_summary"],
    template=(
        "You are a wellness assistant. The user provided the following answers:\n\n"
        "{qa_summary}\n\n"
        "Based on these answers, provide a brief, actionable wellness report. "
        "Include simple suggestions to improve their sleep, exercise, stress management, and diet. "
        "Be concise and helpful.\n\n"
        "Report:"
    )
)
report_chain = LLMChain(llm=chat_model, prompt=report_prompt_template)
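
# Note: in newer LangChain releases LLMChain is deprecated in favor of LCEL
# piping. A minimal sketch of the equivalent composition (assuming a
# langchain-core version that supports the `|` operator):
#
#     report_runnable = report_prompt_template | chat_model
#     report_text = report_runnable.invoke({"qa_summary": qa_summary}).content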
def generate_short_report_for_session(responses: Dict[str, str]) -> str:
    qa_summary = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_report = report_chain.run(qa_summary=qa_summary)
    # Strip Markdown emphasis markers; replacing "*" also removes "**".
    return raw_report.replace("*", "")
# ================================== #
# Chain 2: Problem Severity Analysis #
# ================================== #
problem_prompt_template = PromptTemplate(
    input_variables=["responses", "internal_report"],
    template=(
        "You are a wellness analyst. You have the following user responses to health-related questions:\n"
        "{responses}\n\n"
        "You also have an internal analysis report:\n"
        "{internal_report}\n\n"
        "From these inputs, determine a 'problem severity percentage' for the user in the following areas: "
        "sleep, exercise, stress, and diet. "
        "Return your answer in JSON format with keys: sleep_problem, exercise_problem, stress_problem, diet_problem.\n"
        "Ensure severity percentages are numbers from 0 to 100.\n\n"
        "JSON Output:"
    )
)
problem_chain = LLMChain(llm=chat_model, prompt=problem_prompt_template)
def analyze_problems_with_chain(responses: Dict[str, str], internal_report: str) -> Dict[str, float]:
    responses_str = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_text = problem_chain.run(responses=responses_str, internal_report=internal_report)
    expected_keys = ["sleep_problem", "exercise_problem", "stress_problem", "diet_problem"]
    try:
        # Extract the JSON object from the model output (it may be wrapped in prose).
        start_idx = raw_text.find('{')
        end_idx = raw_text.rfind('}') + 1
        problems = json.loads(raw_text[start_idx:end_idx])
        # Keep only the expected keys, defaulting any missing ones to 0.0.
        return {key: float(problems.get(key, 0.0)) for key in expected_keys}
    except Exception as e:
        logger.error(f"Error parsing problem percentages from LLM: {e}")
        return {key: 0.0 for key in expected_keys}
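
# The prompt asks for values in the 0-100 range, but model output can drift
# outside it. A defensive clamp (hypothetical helper, not wired into the
# flow above) keeps downstream percentages sane:
def clamp_severity(problems: Dict[str, float]) -> Dict[str, float]:
    return {k: max(0.0, min(100.0, v)) for k, v in problems.items()}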
# =============================== #
# Chain 3: Package Recommendation #
# =============================== #
recommend_prompt_template = PromptTemplate(
    input_variables=["problems"],
    template=(
        "Given the following problem severity percentages:\n"
        "{problems}\n\n"
        "Using these rules:\n"
        "- If sleep_problem > 70: Recommend Sleep Improvement Package\n"
        "- If stress_problem > 70: Recommend Stress Reduction Package\n"
        "- If exercise_problem > 70: Recommend Exercise Enhancement Package\n"
        "- If all problems are between 30 and 70: Recommend Balanced Wellness Package\n"
        "- If no severe problems: Recommend General Wellness Package\n\n"
        "What are the recommended wellness packages?"
    )
)
recommend_chain = LLMChain(llm=chat_model, prompt=recommend_prompt_template)
def generate_recommendations(problems: Dict[str, float]) -> str:
    recommendations = recommend_chain.run(problems=json.dumps(problems))
    return recommendations.strip()
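
# The package rules in the prompt above are deterministic, so they could also
# be applied directly in Python, reserving the LLM for phrasing only. A
# hypothetical fallback sketching those same thresholds:
def rule_based_packages(problems: Dict[str, float]) -> list:
    packages = []
    if problems.get("sleep_problem", 0) > 70:
        packages.append("Sleep Improvement Package")
    if problems.get("stress_problem", 0) > 70:
        packages.append("Stress Reduction Package")
    if problems.get("exercise_problem", 0) > 70:
        packages.append("Exercise Enhancement Package")
    if not packages:
        if all(30 <= v <= 70 for v in problems.values()):
            packages.append("Balanced Wellness Package")
        else:
            packages.append("General Wellness Package")
    return packages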
# ================================= #
# Chain 4: Final Summary Generation #
# ================================= #
final_prompt_template = PromptTemplate(
    input_variables=["report", "problems", "recommendation"],
    template=(
        "Based on the following information:\n"
        "Report:\n{report}\n\n"
        "Problem Severity Percentages:\n{problems}\n\n"
        "Recommended Packages:\n{recommendation}\n\n"
        "Generate a short summary suitable for video narration that synthesizes this information."
    )
)
final_chain = LLMChain(llm=chat_model, prompt=final_prompt_template)

def generate_final_summary(report: str, problems: Dict[str, float], recommendation: str) -> str:
    summary = final_chain.run(
        report=report,
        problems=json.dumps(problems),
        recommendation=recommendation
    )
    return summary.strip()
# ============================== #
# Chain 5: Shorten Final Summary #
# ============================== #
shorten_prompt_template = PromptTemplate(
    input_variables=["final_summary"],
    template=(
        "Shorten the following summary to make it concise and engaging for video narration. "
        "Ensure all key points remain intact:\n\n"
        "{final_summary}\n\n"
        "Shortened Summary:"
    )
)
shorten_chain = LLMChain(llm=chat_model, prompt=shorten_prompt_template)

def shorten_summary(final_summary: str) -> str:
    shortened = shorten_chain.run(final_summary=final_summary)
    return shortened.strip()
# ============================== #
#     Questions and Gradio       #
# ============================== #
questions = [
    "How many hours of sleep do you get each night?",
    "How often do you exercise in a week?",
    "What is your current stress level on a scale from 1 to 10?",
    "What are your primary wellness goals?",
    "Do you follow any specific diet or have any dietary restrictions?",
    "How would you describe your current eating habits?",
    "How much time do you spend on relaxation or mindfulness activities daily?",
    "Do you experience any recurring health issues or pain?",
    "How do you manage stress on a daily basis?",
    "What does your typical daily routine look like?"
]
def process_answers(
    sleep: str,
    exercise: str,
    stress: str,
    goals: str,
    diet: str,
    eating: str,
    relaxation: str,
    health_issues: str,
    manage_stress: str,
    routine: str
):
    # Map user inputs to questions (answers arrive in question order)
    answers = (sleep, exercise, stress, goals, diet, eating,
               relaxation, health_issues, manage_stress, routine)
    responses = dict(zip(questions, answers))

    # Execute chains sequentially using the collected responses
    report = generate_short_report_for_session(responses)
    problems = analyze_problems_with_chain(responses, report)
    recommendation = generate_recommendations(problems)
    final_summary = generate_final_summary(report, problems, recommendation)  # for user
    shortened_summary = shorten_summary(final_summary)  # for video

    # Prepare individual outputs for each section
    wellness_report = f"**Wellness Report**\n------------------\n{report.strip()}"
    identified_problems = (
        "**Identified Problems**\n"
        "-----------------------\n"
        f"Sleep Problem: {problems.get('sleep_problem', 'N/A')}%\n"
        f"Exercise Problem: {problems.get('exercise_problem', 'N/A')}%\n"
        f"Stress Problem: {problems.get('stress_problem', 'N/A')}%\n"
        f"Diet Problem: {problems.get('diet_problem', 'N/A')}%"
    )
    recommendations = (
        "**Recommendations**\n"
        "--------------------\n"
        f"{recommendation.strip()}"
    )
    summary_shown = (
        "**Summary (SHOWN TO USER)**\n"
        "-----------------\n"
        f"{final_summary.strip()}"
    )
    final_summary_video = (
        "**Final Summary (FOR VIDEO CREATION)**\n"
        "-----------------\n"
        f"{shortened_summary.strip()}"
    )

    # Return each section as a separate output
    return wellness_report, identified_problems, recommendations, summary_shown, final_summary_video
# Create Gradio interface with one textbox per question
iface = gr.Interface(
    fn=process_answers,
    inputs=[gr.Textbox(label=q) for q in questions],
    outputs=[
        gr.Markdown(label="Wellness Report"),
        gr.Markdown(label="Identified Problems"),
        gr.Markdown(label="Recommendations"),
        gr.Markdown(label="Summary (SHOWN TO USER)"),
        gr.Markdown(label="Final Summary (FOR VIDEO CREATION)")
    ],
    title="Wellness Report Generator",
    description="Answer the questions to generate a wellness report, problem analysis, recommendations, and a final summary."
)

if __name__ == "__main__":
    iface.launch()