Update app.py
app.py
CHANGED
@@ -1,5 +1,5 @@
 import streamlit as st
-import requests
+from huggingface_hub import InferenceClient
 from datetime import datetime
 import pandas as pd
 import os
@@ -72,20 +72,27 @@ def get_hf_api_token():
     # For local development, check environment variable
     return os.environ.get("HF_API_TOKEN")

-#
-
+# Initialize the InferenceClient
+@st.cache_resource
+def get_inference_client():
     api_token = get_hf_api_token()
     if not api_token:
-
-
-    API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
+        st.error("No Hugging Face API token found. Please configure it in your Spaces secrets.")
+        return None

-
-"
-
-
+    return InferenceClient(
+        provider="nebius",
+        api_key=api_token,
+    )
+
+# Function to analyze journal entries
+def analyze_journal(entry, model_name="deepseek-ai/DeepSeek-R1"):
+    client = get_inference_client()
+    if not client:
+        return "Error: Could not initialize the Inference Client."

-
+    try:
+        prompt = f"""You are a compassionate mental wellness assistant. Analyze the following journal entry with empathy and provide helpful insights.

 Journal entry: "{entry}"

@@ -96,27 +103,20 @@ Please provide:

 Format your response clearly with these exact headings (Emotional tone, Recurring themes, Advice) but without using JSON.
 """
-
-    payload = {
-        "inputs": prompt,
-        "parameters": {
-            "max_new_tokens": 300,
-            "temperature": 0.7,
-            "top_p": 0.95,
-            "do_sample": True
-        }
-    }
-
-    try:
-        response = requests.post(API_URL, headers=headers, json=payload)
-        response.raise_for_status()
-        result = response.json()

-
-
-
-
-
+        completion = client.chat.completions.create(
+            model=model_name,
+            messages=[
+                {
+                    "role": "user",
+                    "content": prompt
+                }
+            ],
+            max_tokens=512,
+            temperature=0.7,
+        )
+
+        output = completion.choices[0].message.content

         # Format the output nicely
         formatted_output = ""
@@ -135,13 +135,9 @@ Format your response clearly with these exact headings (Emotional tone, Recurring themes, Advice) but without using JSON.

         return formatted_output if formatted_output else output

-    except requests.exceptions.RequestException as e:
-        st.error(f"API Error: {str(e)}")
-        if hasattr(e, 'response') and e.response is not None:
-            st.error(f"Response: {e.response.text}")
-        return f"Error connecting to the language model: {str(e)}"
     except Exception as e:
-
+        st.error(f"API Error: {str(e)}")
+        return f"Error analyzing journal entry: {str(e)}"

 # Function to save an entry
 def save_entry(entry, analysis):
@@ -186,7 +182,7 @@ with st.sidebar:
     st.header("Settings")
     model_choice = st.selectbox(
         "Language Model",
-        ["deepseek-ai/
+        ["deepseek-ai/DeepSeek-R1", "meta-llama/Llama-3-8b-chat", "microsoft/phi-3-mini-4k-instruct"],
         index=0,
         help="Select which AI model to use for analysis"
     )
@@ -348,4 +344,7 @@ st.markdown("---")
 st.caption("""
 **Disclaimer**: This app is for educational purposes only and is not a substitute for professional mental health advice, diagnosis, or treatment.
 Always seek the advice of your physician or other qualified health provider with any questions regarding a medical condition.
-""")
+""")
+
+
+
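
For quick local testing of the call path this change introduces, the following is a minimal sketch outside Streamlit. It assumes HF_API_TOKEN is exported in the environment and reuses the "nebius" provider, the deepseek-ai/DeepSeek-R1 model, and the prompt format from the diff; the sample journal entry is illustrative only.

# Minimal local sketch of the new InferenceClient flow (not part of app.py).
# Assumes HF_API_TOKEN is set; the sample entry below is made up for testing.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="nebius",
    api_key=os.environ["HF_API_TOKEN"],
)

entry = "Slept badly, but a walk with a friend helped me reset."
prompt = f"""You are a compassionate mental wellness assistant. Analyze the following journal entry with empathy and provide helpful insights.

Journal entry: "{entry}"

Format your response clearly with these exact headings (Emotional tone, Recurring themes, Advice) but without using JSON.
"""

completion = client.chat.completions.create(
    model="deepseek-ai/DeepSeek-R1",
    messages=[{"role": "user", "content": prompt}],
    max_tokens=512,
    temperature=0.7,
)

print(completion.choices[0].message.content)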