Update app.py
app.py
CHANGED
@@ -8,6 +8,7 @@ import markdown2
 import re
 from gtts import gTTS
 import uuid
+from collections import deque

 # Load environment variables
 load_dotenv()
@@ -19,18 +20,18 @@ os.makedirs(AUDIO_FOLDER, exist_ok=True)
 app = Flask(__name__, static_folder='static')
 CORS(app)

+# Conversation history storage
+conversation_histories = {}
+MAX_HISTORY_LENGTH = 10  # Number of messages to remember
+
 # AI Configuration
 system_instruction = """
 You are a helpful AI assistant named Athspi. When responding:
-1.
-2. For responses that would benefit from audio (like stories
+1. Maintain conversation context naturally
+2. For responses that would benefit from audio (like stories), include between:
 [AUDIO]content here[/AUDIO]
-3. Keep responses
-4.
-5. For stories, always include audio version
-Example good response:
-Here's a story for you!
-[AUDIO]Once upon a time...[/AUDIO]
+3. Keep responses conversational and friendly
+4. Remember previous interactions in this conversation
 """

 genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
@@ -42,14 +43,12 @@ def convert_markdown_to_html(text):
     return html

 def process_response(full_response):
-    """Extract visible text and audio content"""
     audio_match = re.search(r'\[AUDIO\](.*?)\[/AUDIO\]', full_response, re.DOTALL)
     audio_content = audio_match.group(1).strip() if audio_match else None
     visible_text = re.sub(r'\[/?AUDIO\]', '', full_response).strip()
     return visible_text, audio_content

 def generate_audio(text):
-    """Generate audio file from text"""
     text = re.sub(r'[^\w\s.,!?\-]', '', text)
     filename = f"audio_{uuid.uuid4()}.mp3"
     filepath = os.path.join(AUDIO_FOLDER, filename)
@@ -57,30 +56,51 @@ def generate_audio(text):
     tts.save(filepath)
     return filename

+def get_conversation_history(session_id):
+    if session_id not in conversation_histories:
+        conversation_histories[session_id] = deque(maxlen=MAX_HISTORY_LENGTH)
+    return conversation_histories[session_id]
+
+@app.route('/start_session', methods=['POST'])
+def start_session():
+    session_id = str(uuid.uuid4())
+    conversation_histories[session_id] = deque(maxlen=MAX_HISTORY_LENGTH)
+    return jsonify({"session_id": session_id})
+
 @app.route('/chat', methods=['POST'])
 def chat():
     try:
         data = request.json
         user_message = data.get('message', '').strip()
+        session_id = data.get('session_id')

         if not user_message:
             return jsonify({"error": "Message required"}), 400
+        if not session_id:
+            return jsonify({"error": "Session ID required"}), 400

-
-        response = model.generate_content(user_message)
-        visible_text, audio_content = process_response(response.text)
+        history = get_conversation_history(session_id)

-        #
-
+        # Build conversation context
+        chat_session = model.start_chat(history=list(history))
+
+        # Get AI response with context
+        response = chat_session.send_message(user_message)
+
+        # Update history
+        history.extend([
+            {"role": "user", "parts": [user_message]},
+            {"role": "model", "parts": [response.text]}
+        ])
+
+        visible_text, audio_content = process_response(response.text)

-        # Prepare result
         result = {
             "response_text": visible_text,
-            "response_html":
+            "response_html": convert_markdown_to_html(visible_text),
             "has_audio": False
         }

-        # Generate audio if AI included audio content
         if audio_content:
             audio_filename = generate_audio(audio_content)
             result["audio_filename"] = audio_filename
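Two details of the new history mechanism are easy to miss. The {"role": ..., "parts": [...]} dicts appended to the deque are the same shape the diff passes to start_chat() as list(history), so the deque contents flow straight through as conversation context. And MAX_HISTORY_LENGTH = 10 counts individual messages rather than exchanges: each turn appends one user entry and one model entry, so the cap works out to the last five exchanges. A minimal standard-library sketch of the trimming behavior (the small maxlen=4 is purely for illustration):

from collections import deque

history = deque(maxlen=4)  # small cap so the trimming is visible
for turn in range(3):
    history.append({"role": "user", "parts": [f"question {turn}"]})
    history.append({"role": "model", "parts": [f"answer {turn}"]})

# The oldest pair (turn 0) has been discarded automatically.
print([msg["parts"][0] for msg in history])
# ['question 1', 'answer 1', 'question 2', 'answer 2']

Because entries arrive in user/model pairs, an even maxlen always keeps whole exchanges; an odd value would strand a model reply at the head of the history without its prompt.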
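End to end, a client now has to create a session before chatting. A hypothetical exchange against a local dev server (the base URL is an assumption, since Flask defaults to 127.0.0.1:5000, and requests is used only for brevity):

import requests

BASE = "http://127.0.0.1:5000"  # assumed local dev address

# 1. Obtain a session ID; the server allocates an empty history deque.
session_id = requests.post(f"{BASE}/start_session").json()["session_id"]

# 2. Every /chat call must carry that ID, or the server returns a 400.
reply = requests.post(f"{BASE}/chat", json={
    "message": "Tell me a short story",
    "session_id": session_id,
}).json()

print(reply["response_text"])
# The server attaches "audio_filename" when the model emitted an [AUDIO] block.
if "audio_filename" in reply:
    print("audio saved as:", reply["audio_filename"])

One design consequence worth knowing: conversation_histories is an in-process dict, so sessions do not survive a server restart and are not shared across multiple workers.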