Update app.py
app.py CHANGED
@@ -23,17 +23,18 @@ CORS(app)
 system_instruction = """
 You are a helpful AI assistant named Athspi. When responding:
 1. Never mention "audio" or technical terms
-2. For audio
+2. For responses that would benefit from audio (like stories, explanations, or content meant to be heard), include the audio version between these markers:
 [AUDIO]content here[/AUDIO]
 3. Keep responses natural and friendly
-4.
+4. Decide automatically when to include audio based on the content type
+5. For stories, always include audio version
 Example good response:
-Here's
+Here's a story for you!
 [AUDIO]Once upon a time...[/AUDIO]
 """

 genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
-model = genai.GenerativeModel('gemini-
+model = genai.GenerativeModel('gemini-1.5-pro', system_instruction=system_instruction)

 def convert_markdown_to_html(text):
     html = markdown2.markdown(text, extras=["fenced-code-blocks", "tables"])
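The reworked prompt tells the model to wrap anything meant to be spoken in [AUDIO]...[/AUDIO] markers, and the new GenerativeModel call passes that prompt in as a system instruction, so the model itself now decides when audio is appropriate. A quick sanity check of that behaviour might look like the snippet below; the test prompt and the assertion are illustrative only, and system_instruction refers to the multi-line string defined above in app.py.

import os
import google.generativeai as genai

# system_instruction is the prompt string defined earlier in app.py.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel('gemini-1.5-pro',
                              system_instruction=system_instruction)

# Rule 5 of the prompt says stories should always carry an audio version,
# so a story request is a reasonable probe for the [AUDIO] markers.
reply = model.generate_content("Tell me a short bedtime story").text
assert "[AUDIO]" in reply and "[/AUDIO]" in reply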
@@ -47,13 +48,6 @@ def process_response(full_response):
     visible_text = re.sub(r'\[/?AUDIO\]', '', full_response).strip()
     return visible_text, audio_content

-def detect_audio_request(text):
-    audio_triggers = [
-        'audio', 'speak', 'say it', 'read aloud',
-        'hear', 'listen', 'tell me out loud'
-    ]
-    return any(trigger in text.lower() for trigger in audio_triggers)
-
 def generate_audio(text):
     """Generate audio file from text"""
     text = re.sub(r'[^\w\s.,!?\-]', '', text)
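With detect_audio_request gone, the only remaining signal for audio is the [AUDIO] block that process_response extracts from the model's reply. Only the last two lines of process_response appear in this hunk, so the following is a minimal sketch of how the extraction could work, assuming at most one [AUDIO]...[/AUDIO] block per reply; the re.search call and the match handling are assumptions.

import re

def process_response(full_response):
    """Split the model reply into user-visible text and optional audio content."""
    # Pull out whatever the model wrapped in [AUDIO]...[/AUDIO]; None when absent.
    match = re.search(r'\[AUDIO\](.*?)\[/AUDIO\]', full_response, re.DOTALL)
    audio_content = match.group(1).strip() if match else None
    # Strip the markers so the user never sees them (the two lines shown in the hunk above).
    visible_text = re.sub(r'\[/?AUDIO\]', '', full_response).strip()
    return visible_text, audio_content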
@@ -72,17 +66,22 @@ def chat():
     if not user_message:
         return jsonify({"error": "Message required"}), 400

-
+    # Get AI response - the AI will automatically decide when to include audio
     response = model.generate_content(user_message)
     visible_text, audio_content = process_response(response.text)

+    # Generate HTML response
+    html_response = convert_markdown_to_html(visible_text)
+
+    # Prepare result
     result = {
         "response_text": visible_text,
-        "response_html":
+        "response_html": html_response,
         "has_audio": False
     }

-    if
+    # Generate audio if AI included audio content
+    if audio_content:
         audio_filename = generate_audio(audio_content)
         result["audio_filename"] = audio_filename
         result["has_audio"] = True
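The chat handler now always fills response_text, response_html and has_audio, and only adds audio_filename when the model actually produced an [AUDIO] block. As a rough illustration, for a story request the result dict presumably handed to jsonify could end up holding something like this; the concrete values, and the filename in particular, are made-up placeholders.

# Hypothetical contents of `result` just before it is returned for a story request.
result = {
    "response_text": "Here's a story for you! Once upon a time...",
    "response_html": "<p>Here's a story for you! Once upon a time...</p>",
    "has_audio": True,
    "audio_filename": "athspi_audio_1234.mp3",  # placeholder; the real name comes from generate_audio()
}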
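On the client side, nothing beyond those JSON keys is needed to decide whether to offer playback. A minimal front-end probe, purely as a sketch: the route path /chat, the host and port, and the "message" request key are assumptions, since only the handler body is visible in this diff.

import requests

# Assumed endpoint and request shape; only the response keys come from the diff above.
resp = requests.post("http://localhost:5000/chat",
                     json={"message": "Tell me a story"}).json()

print(resp["response_text"])                    # markers already stripped server-side
if resp["has_audio"]:
    print("Audio file generated:", resp["audio_filename"])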