import asyncio
import os
import logging
import base64
import numpy as np
from dotenv import load_dotenv
import speech_recognition as sr
import soundfile as sf
import torch
from kokoro import KPipeline
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import JsonOutputParser
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydub import AudioSegment
import requests
from langchain.docstore.document import Document
from pydantic import BaseModel
from typing import List
from .profile_data import profile_str, REPOS, YOUR_NAME, YOUR_GITHUB_USERNAME, YOUR_VERCEL_URL
# Set up logging
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
# Load environment variables from .env file
load_dotenv()
# Initialize FastAPI app
app = FastAPI()
# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=[YOUR_VERCEL_URL],
    allow_credentials=True,
    allow_methods=["GET", "POST", "OPTIONS"],
    allow_headers=["Content-Type", "Authorization", "Accept", "X-Requested-With"],
)
# Initialize Whisper for STT
recognizer = sr.Recognizer()
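# Note: recognize_whisper (used in process_audio below) runs Whisper locally and
# requires the openai-whisper package alongside SpeechRecognition.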
# Initialize Kokoro-82M for TTS; pass the detected device so the model runs on GPU when available
device = 'cuda' if torch.cuda.is_available() else 'cpu'
kokoro_pipeline = KPipeline(lang_code='a', repo_id='hexgrad/Kokoro-82M', device=device)  # 'a' = American English
voice = 'af_heart'  # American-English voice bundled with Kokoro-82M
# Initialize Gemini LLM and embeddings
try:
    llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0.7)
    embeddings = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
except Exception as e:
    logger.error(f"Failed to initialize LLM or embeddings: {e}", exc_info=True)
    raise RuntimeError("Initialization of language model or embeddings failed") from e
# GitHub API setup
GITHUB_USERNAME = YOUR_GITHUB_USERNAME
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") # Add your GitHub Personal Access Token in .env
if not GITHUB_TOKEN:
    logger.warning("GITHUB_TOKEN not found in .env. API requests may be rate-limited.")
# Pydantic model for response structure
class AssistantResponse(BaseModel):
    response: str
    links: List[dict]
    media_links: List[str]
    personal_info: List[dict]

# Pydantic model for text query input
class TextQuery(BaseModel):
    query: str
# Fetch README content from GitHub
def fetch_readme(repo_name):
    logger.debug(f"Fetching README for {repo_name}")
    try:
        url = f"https://api.github.com/repos/{GITHUB_USERNAME}/{repo_name}/readme"
        headers = {"Accept": "application/vnd.github.v3+json"}
        if GITHUB_TOKEN:
            headers["Authorization"] = f"token {GITHUB_TOKEN}"
        response = requests.get(url, headers=headers, timeout=15)  # timeout avoids hanging on network issues
        if response.status_code == 200:
            content = base64.b64decode(response.json()["content"]).decode("utf-8")
            return Document(page_content=content, metadata={"source": "github", "repo_name": repo_name})
        logger.error(f"Failed to fetch README for {repo_name}: HTTP {response.status_code} - {response.text}")
        return None
    except Exception as e:
        logger.error(f"Error fetching README for {repo_name}: {e}", exc_info=True)
        return None
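# Build the FAISS index once at import time if it does not exist yet; subsequent
# queries load it from disk via load_documents() below.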
directory = "knowledge/indexes/repos"
logger.debug("Loading documents from GitHub")
if not os.path.exists(directory):
logger.info(f"Directory {directory} does not exist, creating and populating with documents")
os.makedirs(directory, exist_ok=True)
documents = []
for repo in REPOS:
doc = fetch_readme(repo)
if doc:
documents.append(doc)
else:
logger.warning(f"Skipping repository {repo} due to fetch failure")
if not documents:
logger.warning("No documents loaded from GitHub. Proceeding with empty retriever.")
vectorstore = FAISS.from_texts(texts=["No GitHub READMEs available"], embedding=embeddings).as_retriever()
if documents:
text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=300)
splits = text_splitter.split_documents(documents)
vectorstore = FAISS.from_documents(documents=splits, embedding=embeddings)
vectorstore.save_local(directory)
logger.info(f"Saved FAISS index to {directory}")
# Retrieve the top-k GitHub README chunks most relevant to the query from the saved FAISS index
def load_documents(query):
    try:
        directory = "knowledge/indexes/repos"
        vectorstore = FAISS.load_local(directory, embeddings, allow_dangerous_deserialization=True)
        results = vectorstore.similarity_search(query, k=5)
        # Structure the results without metadata
        structured_results = [
            {"result_number": i + 1, "content": doc.page_content}
            for i, doc in enumerate(results)
        ]
        logger.info("✅ FAISS index loaded successfully.")
        return structured_results
    except Exception as e:
        logger.error(f"Failed to load FAISS index: {e}", exc_info=True)
        return None
async def process_text(query, websocket: WebSocket = None):
    output_file = "output.wav"
    try:
        if not query or not isinstance(query, str) or query.strip() == "":
            logger.error("Invalid or empty query provided")
            raise ValueError("Query cannot be empty or invalid")
        # Detect repository name in query
        repo_name = None
        for repo in REPOS:
            if repo.lower() in query.lower():
                repo_name = repo
                break
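        # The system message pins the exact JSON schema the model must return;
        # JsonOutputParser at the end of the chain relies on the reply conforming to it.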
        prompt = ChatPromptTemplate.from_messages([
            ("system",
             """
You are a professional and courteous AI secretary for {name}. Your role is to provide clear, concise, and polished responses about {name}'s GitHub projects or his professional profile in JSON format. Structure the response as follows:
{{
    "response": "Details about the project, or a general response if no project is mentioned",
    "links": [
        {{"platform": "Platform name", "url": "URL"}},
        ...
    ],
    "media_links": [
        "media_url_1",
        "media_url_2",
        ...
    ],
    "personal_info": [
        {{"type": "Contact type (e.g., Gmail, Phone)", "value": "Contact value"}},
        ...
    ]
}}

Base your answer on the following contexts:
=== {name} Profile Information ===
{profile}
=== GitHub Project Context ===
{context}
=== GitHub Repos' Names ===
{repos}

Important: my GitHub username is {github_username}.
If a media path (image or video) does not start with https, build its URL like this:
https://raw.githubusercontent.com/{github_username}/repo_name/main/the_path_without_https
Generate the response based on the user query. If the query mentions a specific project, include details from the corresponding GitHub README in `response` and any media URLs (images or videos) from the README in `media_links`. For queries about {name}'s skills, experience, education, certifications, or contact info, use the profile information in `response`.
For the `links` array, include relevant social or platform links (e.g., LinkedIn, Kaggle, HackerRank, LeetCode, Microsoft Learn, Streamlit, Coursera, 365DataScience, DataCamp) only if the query explicitly asks for social media, platforms, or specific platform names (e.g., "LinkedIn", "Kaggle"). For the `personal_info` array, include Gmail and/or Phone details only if the query explicitly asks for contact information (e.g., "email", "phone", "Gmail", "WhatsApp", "personal information"). The `media_links` array should include any media URLs (images or videos) from the GitHub READMEs if relevant to the query; otherwise, keep it empty.
Answer in a professional, friendly, and articulate manner, as if representing {name} to colleagues, clients, or stakeholders. If the context lacks relevant information, respond based on your knowledge, maintaining a professional tone, **and never answer unrelated questions such as "translate to English", "how can I travel", "what is the weather in Cairo", or "who is Mohamed Salah"**. Ensure the response is a valid JSON object conforming to the structure above.
"""),
            # Use a template variable for the query (not an f-string) so that braces
            # in user input cannot break the prompt template
            ("user", "{query}, with media links and the project link if available")
        ])
        # Get context
        context = load_documents(query)
        if context is None:
            logger.error("Failed to load documents for query")
            raise ValueError("Failed to load document context")
        logger.info(f"context: {context}")
        # Create RAG chain with JSON output parser
        rag_chain = (
            RunnablePassthrough()
            | prompt
            | llm
            | JsonOutputParser()
        )
        # Process with RAG chain
        response = rag_chain.invoke({
            "query": query,
            "context": context,
            "profile": profile_str,
            "repos": REPOS,
            "github_username": YOUR_GITHUB_USERNAME,
            "name": YOUR_NAME,
        })
logger.info(f"Raw response from LLM: {response}")
# Ensure response is a valid JSON object and conforms to Pydantic model
if not isinstance(response, dict):
logger.warning("Response is not a valid JSON object. Converting to default structure.")
response = AssistantResponse(
response=str(response),
links=[],
media_links=[],
personal_info=[]
).model_dump()
else:
response = AssistantResponse(
response=response.get("response", "No relevant information found."),
links=response.get("links", []),
media_links=response.get("media_links", []),
personal_info=response.get("personal_info", [])
).model_dump()
logger.info(f"Processed response: {response}")
        if websocket:
            # Convert the response field to speech for WebSocket clients
            generator = kokoro_pipeline(response["response"], voice=voice)
            audio_chunks = []
            for i, (gs, ps, audio) in enumerate(generator):
                logger.debug(f"Segment {i}: gs={gs}, ps={ps}")
                audio_chunks.append(audio)
                segment_file = f"segment_{i}.wav"
                sf.write(segment_file, audio, 24000)
                with open(segment_file, "rb") as f:
                    audio_base64 = base64.b64encode(f.read()).decode('utf-8')
                await websocket.send_json({
                    "transcript": query,
                    "response": response,
                    "audio_segment": audio_base64,
                    "segment_index": i,
                    "is_last_segment": False,
                    "repo_name": repo_name or ""
                })
                os.remove(segment_file)
            # Combine audio chunks for final storage
            combined_audio = np.concatenate(audio_chunks)
            sf.write(output_file, combined_audio, 24000)
            logger.info(f"Generated audio saved as {output_file}")
            # Send final segment confirmation
            with open(output_file, "rb") as f:
                audio_base64 = base64.b64encode(f.read()).decode('utf-8')
            await websocket.send_json({
                "transcript": query,
                "response": response,
                "audio_segment": audio_base64,
                "segment_index": len(audio_chunks),
                "is_last_segment": True,
                "repo_name": repo_name or ""
            })
        return response  # Return the response for the HTTP endpoint
    except Exception as e:
        logger.error(f"Error in processing or TTS: {e}", exc_info=True)
        error_response = AssistantResponse(
            response=f"Error: {str(e)}",
            links=[],
            media_links=[],
            personal_info=[]
        ).model_dump()
        if websocket:
            await websocket.send_json({
                "transcript": "",
                "response": error_response,
                "audio_segment": "",
                "segment_index": -1,
                "is_last_segment": True,
                "repo_name": ""
            })
        raise HTTPException(status_code=500, detail=f"Error processing query: {str(e)}")
    finally:
        if websocket and os.path.exists(output_file):
            os.remove(output_file)
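# Audio path: base64 payload -> raw file -> 16 kHz mono PCM WAV (pydub) ->
# local Whisper transcription -> process_text() for RAG and TTS.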
async def process_audio(audio_data, websocket: WebSocket):
    temp_input_file = "temp_audio_input.wav"
    temp_output_file = "temp_audio_converted.wav"
    output_file = "output.wav"
    try:
        # Decode the base64 audio payload to a file
        audio_bytes = base64.b64decode(audio_data)
        with open(temp_input_file, "wb") as f:
            f.write(audio_bytes)
        # Convert to 16 kHz mono PCM WAV using pydub
        segment = AudioSegment.from_file(temp_input_file)
        segment = segment.set_channels(1).set_frame_rate(16000)
        segment.export(temp_output_file, format="wav")
        # Speech recognition
        with sr.AudioFile(temp_output_file) as source:
            audio = recognizer.record(source)
            logger.debug("Recognizing audio...")
            query = recognizer.recognize_whisper(audio, model="base.en")
            logger.info(f"Transcribed text: {query}")
        await process_text(query, websocket)
    except Exception as e:
        logger.error(f"Error in processing or TTS: {e}", exc_info=True)
        await websocket.send_json({
            "transcript": "",
            "response": AssistantResponse(
                response=f"Error: {str(e)}",
                links=[],
                media_links=[],
                personal_info=[]
            ).model_dump(),
            "audio_segment": "",
            "segment_index": -1,
            "is_last_segment": True,
            "repo_name": ""
        })
    finally:
        for file in [temp_input_file, temp_output_file, output_file]:
            if os.path.exists(file):
                os.remove(file)
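# HTTP variant: runs the same RAG pipeline but skips TTS, since process_text
# only synthesizes audio when a WebSocket is supplied.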
@app.post("/text_query", response_model=AssistantResponse)
async def text_query_endpoint(query: TextQuery):
    logger.info(f"Received text query: {query.query}")
    response = await process_text(query.query)
    return response
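# WebSocket protocol: the client sends each utterance as a single base64-encoded
# audio blob in a text frame; responses stream back as JSON messages.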
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    logger.info("WebSocket connection established")
    try:
        while True:
            data = await websocket.receive_text()
            await process_audio(data, websocket)
            await asyncio.sleep(0.1)
    except WebSocketDisconnect:
        # FastAPI raises WebSocketDisconnect on client disconnect; closing again here would error
        logger.info("WebSocket connection closed")
    except Exception as e:
        logger.error(f"WebSocket error: {e}", exc_info=True)
        await websocket.close()
async def main():
    logger.info("Starting AI Voice Agent with GitHub RAG and Profile Context...")