Create chain_reports.py
chain_reports.py (ADDED)
# chain_reports.py
from typing import Dict

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

from models import chat_model

# Prompt that turns the user's question/answer summary into a short wellness report.
report_prompt_template = PromptTemplate(
    input_variables=["qa_summary"],
    template=(
        "You are a wellness assistant. The user provided the following answers:\n\n"
        "{qa_summary}\n\n"
        "Based on these answers, provide a brief, actionable wellness report. "
        "Include simple suggestions to improve their sleep, exercise, stress management, and diet. "
        "Be concise and helpful.\n\n"
        "Report:"
    )
)

report_chain = LLMChain(llm=chat_model, prompt=report_prompt_template)


def generate_short_report_for_session(responses: Dict[str, str]) -> str:
    # Flatten the {question: answer} mapping into a "question: answer" block.
    qa_summary = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_report = report_chain.run(qa_summary=qa_summary)
    # Strip Markdown emphasis; removing every "*" also removes "**" pairs,
    # so a second .replace("**", "") would be redundant.
    return raw_report.replace("*", "")
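A minimal usage sketch (assumes `models.chat_model` is a configured LangChain chat model; the sample questions and answers below are hypothetical):

# Illustrative only: feed a session's collected answers to the report chain.
from chain_reports import generate_short_report_for_session

sample_responses = {  # hypothetical session data
    "How many hours do you sleep per night?": "About 5",
    "How often do you exercise?": "Once or twice a week",
    "How stressed do you feel day to day?": "Fairly stressed",
    "How would you describe your diet?": "Mostly takeout",
}
print(generate_short_report_for_session(sample_responses))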