Athspi committed
Commit 018463a · verified · 1 Parent(s): 26c1fb4

Update app.py

Files changed (1)
  1. app.py +47 -48
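In short, this commit drops the explicit /start_session endpoint and the client-supplied session_id in favor of a server-set session_id cookie, with per-session Gemini chat objects held in an in-memory chat_sessions dict. As a rough illustration of the new flow (not part of the commit), the sketch below assumes the app is served at Flask's default local address and uses the requests library to carry the cookie between calls; only the endpoints and JSON fields come from app.py itself.

# Hypothetical client sketch for the reworked /chat endpoint.
# Assumptions: base URL and the requests library; endpoints and response
# fields ("response_html", "has_audio", "audio_filename") are from app.py.
import requests

BASE_URL = "http://localhost:5000"  # assumed local dev address

client = requests.Session()  # keeps the 'session_id' cookie set by /chat

# Each POST to /chat reuses the same cookie, so the server continues the
# same Gemini chat session (its accumulated history) for this client.
reply = client.post(f"{BASE_URL}/chat", json={"message": "Tell me a short story"}).json()
print(reply["response_html"])

# If the model wrapped part of its answer in [AUDIO]...[/AUDIO], the server
# returns has_audio=True and a filename that the /download/<filename> route serves.
if reply.get("has_audio"):
    audio = client.get(f"{BASE_URL}/download/{reply['audio_filename']}")
    with open("story.mp3", "wb") as f:
        f.write(audio.content)

# Starting over is just a POST to /new-chat, which issues a fresh cookie.
client.post(f"{BASE_URL}/new-chat")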
app.py CHANGED
@@ -1,5 +1,5 @@
- # app.py - Flask Backend
- from flask import Flask, request, jsonify, send_from_directory
+ # app.py - Flask Backend with Chat Memory
+ from flask import Flask, request, jsonify, send_from_directory, make_response
  import google.generativeai as genai
  from dotenv import load_dotenv
  import os
@@ -8,7 +8,6 @@ import markdown2
  import re
  from gtts import gTTS
  import uuid
- from collections import deque

  # Load environment variables
  load_dotenv()
@@ -20,35 +19,40 @@ os.makedirs(AUDIO_FOLDER, exist_ok=True)
  app = Flask(__name__, static_folder='static')
  CORS(app)

- # Conversation history storage
- conversation_histories = {}
- MAX_HISTORY_LENGTH = 10 # Number of messages to remember
-
  # AI Configuration
  system_instruction = """
  You are a helpful AI assistant named Athspi. When responding:
- 1. Maintain conversation context naturally
- 2. For responses that would benefit from audio (like stories), include between:
+ 1. Never mention "audio" or technical terms
+ 2. For responses that would benefit from audio (like stories, explanations, or content meant to be heard), include the audio version between these markers:
  [AUDIO]content here[/AUDIO]
- 3. Keep responses conversational and friendly
- 4. Remember previous interactions in this conversation
+ 3. Keep responses natural and friendly
+ 4. Decide automatically when to include audio based on the content type
+ 5. For stories, always include audio version
+ Example good response:
+ Here's a story for you!
+ [AUDIO]Once upon a time...[/AUDIO]
  """

  genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
  model = genai.GenerativeModel('gemini-2.5-flash', system_instruction=system_instruction)

+ # In-memory storage for chat sessions (use Redis in production)
+ chat_sessions = {}
+
  def convert_markdown_to_html(text):
      html = markdown2.markdown(text, extras=["fenced-code-blocks", "tables"])
      html = re.sub(r'<pre><code(.*?)>', r'<pre class="code-block"><code\1>', html)
      return html

  def process_response(full_response):
+     """Extract visible text and audio content"""
      audio_match = re.search(r'\[AUDIO\](.*?)\[/AUDIO\]', full_response, re.DOTALL)
      audio_content = audio_match.group(1).strip() if audio_match else None
      visible_text = re.sub(r'\[/?AUDIO\]', '', full_response).strip()
      return visible_text, audio_content

  def generate_audio(text):
+     """Generate audio file from text"""
      text = re.sub(r'[^\w\s.,!?\-]', '', text)
      filename = f"audio_{uuid.uuid4()}.mp3"
      filepath = os.path.join(AUDIO_FOLDER, filename)
@@ -56,60 +60,55 @@ def generate_audio(text):
      tts.save(filepath)
      return filename

- def get_conversation_history(session_id):
-     if session_id not in conversation_histories:
-         conversation_histories[session_id] = deque(maxlen=MAX_HISTORY_LENGTH)
-     return conversation_histories[session_id]
-
- @app.route('/start_session', methods=['POST'])
- def start_session():
-     session_id = str(uuid.uuid4())
-     conversation_histories[session_id] = deque(maxlen=MAX_HISTORY_LENGTH)
-     return jsonify({"session_id": session_id})
-
  @app.route('/chat', methods=['POST'])
  def chat():
      try:
          data = request.json
          user_message = data.get('message', '').strip()
-         session_id = data.get('session_id')
-
+         session_id = request.cookies.get('session_id') or str(uuid.uuid4())
+
          if not user_message:
              return jsonify({"error": "Message required"}), 400
-         if not session_id:
-             return jsonify({"error": "Session ID required"}), 400
-
-         history = get_conversation_history(session_id)
-
-         # Build conversation context
-         chat_session = model.start_chat(history=list(history))
-
-         # Get AI response with context
+
+         # Retrieve or create chat session
+         if session_id not in chat_sessions:
+             chat_sessions[session_id] = model.start_chat(history=[])
+         chat_session = chat_sessions[session_id]
+
+         # Send message to Gemini with full history
          response = chat_session.send_message(user_message)
-
-         # Update history
-         history.extend([
-             {"role": "user", "parts": [user_message]},
-             {"role": "model", "parts": [response.text]}
-         ])
-
          visible_text, audio_content = process_response(response.text)
-
+
+         # Convert to HTML
+         html_response = convert_markdown_to_html(visible_text)
+
          result = {
-             "response_text": visible_text,
-             "response_html": convert_markdown_to_html(visible_text),
+             "response_html": html_response,
              "has_audio": False
          }
-
+
+         # Generate audio if needed
          if audio_content:
              audio_filename = generate_audio(audio_content)
             result["audio_filename"] = audio_filename
             result["has_audio"] = True
-
-         return jsonify(result)
-
+
+         # Send response and set session cookie
+         resp = make_response(jsonify(result))
+         resp.set_cookie('session_id', session_id, max_age=3600, httponly=True, samesite='Lax')
+         return resp
+
      except Exception as e:
-         return jsonify({"error": str(e)}), 500
+         print("Error:", str(e))
+         return jsonify({"error": "Something went wrong. Please try again."}), 500
+
+ @app.route('/new-chat', methods=['POST'])
+ def new_chat():
+     """Start a new chat session (clears memory)"""
+     session_id = str(uuid.uuid4())
+     resp = make_response(jsonify({"status": "new chat started"}))
+     resp.set_cookie('session_id', session_id, max_age=3600, httponly=True, samesite='Lax')
+     return resp

  @app.route('/download/<filename>')
  def download_audio(filename):