n0v33n committed
Commit ddbcbee · 1 Parent(s): 11a2bf7

Updated app.py and gradioapp.py

Files changed (3)
  1. Note.txt +6 -0
  2. app.py +412 -283
  3. gradioapp.py +246 -346
Note.txt ADDED
@@ -0,0 +1,6 @@
+ Download the Excel sheet - done
+ Remove exception block for download - done
+
+ In mock interview, ask for the tech stack and then run a Tavily search (see the sketch below)
+
+ Make it conversational and save history (previous query) in the output list
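The third note above (ask for the tech stack, then run a Tavily search) is only partly implemented in app.py below, which collects the tech stack but generates questions with Gemini. A minimal sketch of feeding the captured stack into the same TavilySearch wrapper this commit already uses in fetch_interview_questions; the helper name tavily_questions_for_stack is illustrative, not part of the committed code:

import os
from langchain_tavily import TavilySearch

def tavily_questions_for_stack(tech_stack: str, max_results: int = 3):
    """Search Tavily for interview questions matching the candidate's tech stack (hypothetical helper)."""
    tavily = TavilySearch(api_key=os.getenv("TAVILY_API_KEY"), max_results=max_results)
    response = tavily.invoke(f"{tech_stack} interview questions")
    # The wrapper may return a dict with a "results" key or a plain list,
    # mirroring the handling in fetch_interview_questions below.
    return response.get("results", []) if isinstance(response, dict) else response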
app.py CHANGED
@@ -3,339 +3,468 @@ import os
3
  import re
4
  import pandas as pd
5
  import random
6
- import warnings
7
  from fastapi import FastAPI, HTTPException
8
  from pydantic import BaseModel
9
  from dotenv import load_dotenv
10
  from langchain_tavily import TavilySearch
11
  import google.generativeai as genai
12
- import gdown
13
-
14
- warnings.filterwarnings("ignore")
15
 
 
16
  load_dotenv()
17
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
18
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
19
 
20
- user_sessions = {}
21
- if not GOOGLE_API_KEY:
22
- raise ValueError("GOOGLE_API_KEY environment variable is required.")
23
-
24
  genai.configure(api_key=GOOGLE_API_KEY)
25
 
26
- # β€”β€”β€” Load or fallback LeetCode data β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
27
  OUTPUT_FILE = "leetcode_downloaded.xlsx"
28
- GOOGLE_SHEET_URL = "https://docs.google.com/spreadsheets/d/1KK9Mnm15hV3ALJo-quJndftWfaujJ7K2_zHMCTo5mGE/"
29
- FILE_ID = GOOGLE_SHEET_URL.split("/d/")[1].split("/")[0]
30
- DOWNLOAD_URL = f"https://drive.google.com/uc?export=download&id={FILE_ID}"
31
-
32
  try:
33
- if os.path.exists(OUTPUT_FILE):
34
- print(f"Loading LeetCode data from local file: {OUTPUT_FILE}")
35
- LEETCODE_DATA = pd.read_excel(OUTPUT_FILE)
36
- else:
37
- print("Local LeetCode file not found. Attempting to download...")
38
- print("Downloading LeetCode data...")
39
- gdown.download(DOWNLOAD_URL, OUTPUT_FILE, quiet=False)
40
- LEETCODE_DATA = pd.read_excel(OUTPUT_FILE)
41
- print(f"Loaded {len(LEETCODE_DATA)} problems")
42
- except Exception as e:
43
- print(f"Failed to load or download LeetCode data: {str(e)}")
44
- print("Using fallback dataset.")
45
- LEETCODE_DATA = pd.DataFrame([
46
- {"problem_no": 3151, "problem_level": "Easy", "problem_statement": "special array",
47
- "problem_link": "https://leetcode.com/problems/special-array-i/?envType=daily-question&envId=2025-06-01"},
48
- {"problem_no": 1752, "problem_level": "Easy", "problem_statement": "check if array is sorted and rotated",
49
- "problem_link": "https://leetcode.com/problems/check-if-array-is-sorted-and-rotated/?envType=daily-question&envId=2025-06-01"},
50
- {"problem_no": 3105, "problem_level": "Easy", "problem_statement": "longest strictly increasing or strictly decreasing subarray",
51
- "problem_link": "https://leetcode.com/problems/longest-strictly-increasing-or-strictly-decreasing-subarray/?envType=daily-question&envId=2025-06-01"},
52
- {"problem_no": 1, "problem_level": "Easy", "problem_statement": "two sum",
53
- "problem_link": "https://leetcode.com/problems/two-sum/"},
54
- {"problem_no": 2, "problem_level": "Medium", "problem_statement": "add two numbers",
55
- "problem_link": "https://leetcode.com/problems/add-two-numbers/"},
56
- {"problem_no": 3, "problem_level": "Medium", "problem_statement": "longest substring without repeating characters",
57
- "problem_link": "https://leetcode.com/problems/longest-substring-without-repeating-characters/"},
58
- {"problem_no": 4, "problem_level": "Hard", "problem_statement": "median of two sorted arrays",
59
- "problem_link": "https://leetcode.com/problems/median-of-two-sorted-arrays/"},
60
- {"problem_no": 5, "problem_level": "Medium", "problem_statement": "longest palindromic substring",
61
- "problem_link": "https://leetcode.com/problems/longest-palindromic-substring/"}
62
- ])
63
-
64
- # β€”β€”β€” Helpers & Tools β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
65
-
66
- QUESTION_TYPE_MAPPING = {
67
- "easy": "Easy", "Easy": "Easy",
68
- "medium": "Medium", "Medium": "Medium",
69
- "hard": "Hard", "Hard": "Hard"
70
- }
71
 
 
72
  def preprocess_query(query: str) -> str:
73
- for k, v in QUESTION_TYPE_MAPPING.items():
74
- query = re.sub(rf'\b{k}\b', v, query, flags=re.IGNORECASE)
75
- query = re.sub(r'\bproblem\s*(\d+)', r'Problem_\1', query, flags=re.IGNORECASE)
76
- query = re.sub(r'\bquestion\s*(\d+)', r'Problem_\1', query, flags=re.IGNORECASE)
77
- query = re.sub(r'\b(find|search)\s+interview\s+questions\s+for\s+', '', query, flags=re.IGNORECASE)
78
- query = re.sub(r'\binterview\s+questions\b', '', query, flags=re.IGNORECASE).strip()
79
- return query
80
-
81
- def get_daily_coding_question(query: str = "") -> dict:
82
- try:
83
- response = "**Daily Coding Questions**\n\n"
84
-
85
- m = re.search(r'Problem_(\d+)', query, re.IGNORECASE)
86
- if m:
87
- df = LEETCODE_DATA[LEETCODE_DATA['problem_no'] == int(m.group(1))]
88
- if not df.empty:
89
- p = df.iloc[0]
90
- response += (
91
- f"**Problem {p['problem_no']}**\n"
92
- f"Level: {p['problem_level']}\n"
93
- f"Statement: {p['problem_statement']}\n"
94
- f"Link: {p['problem_link']}\n\n"
95
- )
96
- return {"status": "success", "response": response}
97
- else:
98
- return {"status": "error", "response": "Problem not found"}
99
-
100
- if query.strip():
101
- df = LEETCODE_DATA[LEETCODE_DATA['problem_statement'].str.contains(query, case=False, na=False)]
102
  else:
103
- df = LEETCODE_DATA
104
-
105
- easy_questions = df[df['problem_level'] == 'Easy'].sample(min(3, len(df[df['problem_level'] == 'Easy'])))
106
- medium_questions = df[df['problem_level'] == 'Medium'].sample(min(1, len(df[df['problem_level'] == 'Medium'])))
107
- hard_questions = df[df['problem_level'] == 'Hard'].sample(min(1, len(df[df['problem_level'] == 'Hard'])))
108
-
109
- response += "**Easy Questions**\n"
110
- for i, p in enumerate(easy_questions.itertuples(), 1):
111
- response += (
112
- f"{i}. Problem {p.problem_no}: {p.problem_statement}\n"
113
- f" Level: {p.problem_level}\n"
114
- f" Link: {p.problem_link}\n\n"
115
- )
116
-
117
- response += "**Medium Question**\n"
118
- for p in medium_questions.itertuples():
119
- response += (
120
- f"Problem {p.problem_no}: {p.problem_statement}\n"
121
- f"Level: {p.problem_level}\n"
122
- f"Link: {p.problem_link}\n\n"
123
- )
124
-
125
- response += "**Hard Question**\n"
126
- for p in hard_questions.itertuples():
127
- response += (
128
- f"Problem {p.problem_no}: {p.problem_statement}\n"
129
- f"Level: {p.problem_level}\n"
130
- f"Link: {p.problem_link}\n"
131
- )
132
-
133
- return {"status": "success", "response": response}
134
- except Exception as e:
135
- return {"status": "error", "response": f"Error: {e}"}
136
 
137
- def fetch_interview_questions(query: str) -> dict:
 
 
138
  if not TAVILY_API_KEY:
139
- return {"status": "error", "response": "Tavily API key not configured"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
 
141
- if not query.strip() or query.lower() in ["a", "interview", "question", "questions"]:
142
- return {"status": "error", "response": "Please provide a specific topic for interview questions (e.g., 'Python', 'data structures', 'system design')."}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
 
144
  try:
145
- tavily = TavilySearch(api_key=TAVILY_API_KEY, max_results=5)
146
- search_query = f"{query} interview questions -inurl:(signup | login)"
147
- print(f"Executing Tavily search for: {search_query}")
148
-
149
- results = tavily.invoke(search_query)
150
- print(f"Raw Tavily results: {results}")
151
-
152
- if not results or not isinstance(results, list) or len(results) == 0:
153
- return {"status": "success", "response": "No relevant interview questions found. Try a more specific topic or different keywords."}
154
-
155
- resp = "**Interview Questions Search Results for '{}':**\n\n".format(query)
156
- for i, r in enumerate(results, 1):
157
- if isinstance(r, dict):
158
- title = r.get('title', 'No title')
159
- url = r.get('url', 'No URL')
160
- content = r.get('content', '')
161
- content = content[:200] + '…' if len(content) > 200 else content or "No preview available"
162
- resp += f"{i}. **{title}**\n URL: {url}\n Preview: {content}\n\n"
163
- else:
164
- resp += f"{i}. {str(r)[:200]}{'…' if len(str(r)) > 200 else ''}\n\n"
165
-
166
- return {"status": "success", "response": resp}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
 
168
  except Exception as e:
169
- print(f"Tavily search failed: {str(e)}")
170
- return {"status": "error", "response": f"Search failed: {str(e)}"}
171
 
172
- def simulate_mock_interview(query: str, user_id: str = "default") -> dict:
173
- qtype = "mixed"
174
- if re.search(r'HR|Behavioral|hr|behavioral', query, re.IGNORECASE): qtype = "HR"
175
- if re.search(r'Technical|System Design|technical|coding', query, re.IGNORECASE): qtype = "Technical"
176
-
177
- if "interview question" in query.lower() and qtype == "mixed":
178
- qtype = "HR"
179
 
180
- if qtype == "HR":
181
- hr_questions = [
182
- "Tell me about yourself.",
183
- "What is your greatest weakness?",
184
- "Describe a challenge you overcame.",
185
- "Why do you want to work here?",
186
- "Where do you see yourself in 5 years?",
187
- "Why are you leaving your current job?",
188
- "Describe a time when you had to work with a difficult team member.",
189
- "What are your salary expectations?",
190
- "Tell me about a time you failed.",
191
- "What motivates you?",
192
- "How do you handle stress and pressure?",
193
- "Describe your leadership style."
194
- ]
195
- q = random.choice(hr_questions)
196
- return {"status": "success", "response": (
197
- f"**Mock Interview (HR/Behavioral)**\n\n**Question:** {q}\n\nπŸ’‘ **Tips:**\n"
198
- f"- Use the STAR method (Situation, Task, Action, Result)\n"
199
- f"- Provide specific examples from your experience\n"
200
- f"- Keep your answer concise but detailed\n\n**Your turn to answer!**"
201
- )}
202
- else:
203
- p = LEETCODE_DATA.sample(1).iloc[0]
 
204
  return {"status": "success", "response": (
205
- f"**Mock Interview (Technical)**\n\n**Problem:** {p['problem_statement'].title()}\n"
206
- f"**Difficulty:** {p['problem_level']}\n**Link:** {p['problem_link']}\n\nπŸ’‘ **Tips:**\n"
207
- f"- Think out loud as you solve\n"
208
- f"- Ask clarifying questions\n"
209
- f"- Discuss time/space complexity\n\n**Explain your approach!**"
 
 
210
  )}
 
 
 
211
 
212
- # β€”β€”β€” The Enhanced InterviewPrepAgent β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
213
-
214
  class InterviewPrepAgent:
215
  def __init__(self):
216
- self.model = genai.GenerativeModel('gemini-1.5-flash')
 
 
 
217
  self.tools = {
218
  "get_daily_coding_question": get_daily_coding_question,
219
  "fetch_interview_questions": fetch_interview_questions,
220
  "simulate_mock_interview": simulate_mock_interview
221
  }
222
- self.instruction_text = """
223
- You are an interview preparation assistant. Analyze the user's query and determine which tool to use.
224
-
225
- Available tools:
226
- 1. get_daily_coding_question - For coding practice, LeetCode problems, daily questions
227
- 2. fetch_interview_questions - For searching interview questions on specific topics
228
- 3. simulate_mock_interview - For mock interview practice (HR/behavioral or technical)
229
-
230
- Instructions:
231
- - If user asks for coding questions, daily questions, LeetCode problems, practice problems -> use get_daily_coding_question
232
- - If user asks for interview questions on specific topics (e.g., Python, data structures) without "mock" or "simulate" -> use fetch_interview_questions
233
- - If user asks for mock interview, interview simulation, practice interview, or HR/behavioral questions -> use simulate_mock_interview
234
- - If user explicitly mentions "HR" or "behavioral" -> use simulate_mock_interview with HR focus
235
-
236
- Respond ONLY with valid JSON in this exact format:
237
- {"tool": "tool_name", "args": {"param1": "value1", "param2": "value2"}}
238
 
239
- User Query: {query}
240
- """
241
-
242
- def _classify_intent(self, query: str) -> tuple[str, dict]:
243
- query_lower = query.lower()
244
-
245
- # Prioritize HR/behavioral for explicit mentions
246
- if any(keyword in query_lower for keyword in ["hr", "behavioral", "give hr questions", "give behavioral questions"]):
247
- return "simulate_mock_interview", {"query": query, "user_id": "default"}
248
-
249
- # Handle mock interview or simulation requests
250
- if any(keyword in query_lower for keyword in ["mock interview", "practice interview", "interview simulation", "simulate_mock_interview"]):
251
- return "simulate_mock_interview", {"query": query, "user_id": "default"}
252
-
253
- # Handle coding-related queries
254
- if any(keyword in query_lower for keyword in ["daily", "coding question", "leetcode", "practice problem", "coding practice"]):
255
- problem_match = re.search(r'problem\s*(\d+)', query_lower)
256
- if problem_match:
257
- return "get_daily_coding_question", {"query": f"Problem_{problem_match.group(1)}"}
258
-
259
- if "easy" in query_lower:
260
- return "get_daily_coding_question", {"query": "Easy"}
261
- elif "medium" in query_lower:
262
- return "get_daily_coding_question", {"query": "Medium"}
263
- elif "hard" in query_lower:
264
- return "get_daily_coding_question", {"query": "Hard"}
265
-
266
- return "get_daily_coding_question", {"query": ""}
267
-
268
- # Handle topic-specific interview questions
269
- if any(keyword in query_lower for keyword in ["search interview questions", "find interview questions", "interview prep resources"]) or \
270
- "interview" in query_lower:
271
- return "fetch_interview_questions", {"query": query}
272
 
273
- # Fallback to LLM classification
274
  try:
275
- prompt = self.instruction_text.format(query=query)
276
- response = self.model.generate_content(prompt)
277
- result = json.loads(response.text.strip())
278
- tool_name = result.get("tool")
279
- args = result.get("args", {})
280
- return tool_name, args
 
 
281
  except Exception as e:
282
- print(f"LLM classification failed: {e}")
283
- return "get_daily_coding_question", {"query": ""}
284
 
285
- def process_query(self, query: str, user_id: str, session_id: str) -> str:
286
- if not GOOGLE_API_KEY:
287
- return "Error: Google API not configured."
288
-
289
- session_key = f"{user_id}_{session_id}"
290
- user_sessions.setdefault(session_key, {"history": []})
291
 
292
- tool_name, args = self._classify_intent(query)
293
- print(f"Selected tool: {tool_name}, args: {args}") # Debug log
294
-
295
- if tool_name not in self.tools:
296
- return f"I couldn't understand your request. Please try asking for:\n- Daily coding question\n- Mock interview\n- Interview questions for a specific topic"
297
-
298
- result = self.tools[tool_name](**args)
299
-
300
- user_sessions[session_key]["history"].append({
301
- "query": query,
302
- "response": result["response"]
303
- })
304
 
305
- return result["response"]
306
 
307
- # β€”β€”β€” FastAPI Setup β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
 
308
 
309
- app = FastAPI(title="Interview Prep API", version="2.0.0")
310
  agent = InterviewPrepAgent()
311
 
312
- class ChatRequest(BaseModel):
313
- user_id: str
314
- session_id: str
315
- question: str
316
-
317
- class ChatResponse(BaseModel):
318
- session_id: str
319
- answer: str
320
-
321
  @app.post("/chat", response_model=ChatResponse)
322
- async def chat(req: ChatRequest):
323
- q = preprocess_query(req.question)
324
- print(f"Preprocessed query: {q}") # Debug log
325
- ans = agent.process_query(q, req.user_id, req.session_id)
326
- return ChatResponse(session_id=req.session_id, answer=ans)
327
-
328
- @app.get("/healthz")
329
- def health():
330
- status = {"status": "ok", "google_api": bool(GOOGLE_API_KEY),
331
- "leetcode_count": len(LEETCODE_DATA),
332
- "tavily": bool(TAVILY_API_KEY)}
333
- return status
 
 
334
 
335
  @app.get("/")
336
- def root():
337
- return {"message": "Interview Prep API v2", "endpoints": ["/chat", "/healthz"]}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
338
 
339
  if __name__ == "__main__":
340
  import uvicorn
341
- uvicorn.run(app, host="0.0.0.0", port=8000)
 
 
 
 
 
3
  import re
4
  import pandas as pd
5
  import random
6
+ from typing import Dict, Optional, Any
7
  from fastapi import FastAPI, HTTPException
8
  from pydantic import BaseModel
9
  from dotenv import load_dotenv
10
  from langchain_tavily import TavilySearch
11
  import google.generativeai as genai
 
 
 
12
 
13
+ # Load environment variables
14
  load_dotenv()
15
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
16
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
17
 
18
+ # Configure Google AI
 
 
 
19
  genai.configure(api_key=GOOGLE_API_KEY)
20
 
21
+ # Load LeetCode data
22
  OUTPUT_FILE = "leetcode_downloaded.xlsx"
 
 
 
 
23
  try:
24
+ LEETCODE_DATA = pd.read_excel(OUTPUT_FILE)
25
+ print(f"Loaded {len(LEETCODE_DATA)} LeetCode problems from local file.")
26
+ except FileNotFoundError:
27
+ print("Warning: LeetCode data file not found. Some features may not work.")
28
+ LEETCODE_DATA = pd.DataFrame()
 
 
29
 
30
+ # User sessions for mock interviews
31
+ user_sessions = {}
32
+
33
+ # β€”β€”β€” Pydantic Models β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
34
+ class ChatRequest(BaseModel):
35
+ user_id: str = "default"
36
+ session_id: str = "default"
37
+ message: str
38
+
39
+ class ChatResponse(BaseModel):
40
+ status: str
41
+ response: str
42
+ session_id: str
43
+
44
+ class HealthResponse(BaseModel):
45
+ status: str
46
+ google_api_configured: bool
47
+ leetcode_problems_loaded: int
48
+ tavily_search_available: bool
49
+
50
+ # β€”β€”β€” Utility Functions β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
51
  def preprocess_query(query: str) -> str:
52
+ """Preprocess user query for better understanding"""
53
+ return query.strip()
54
+
55
+ # β€”β€”β€” Tool 1: Get Daily Coding Question β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
56
+ def get_daily_coding_question(query=""):
57
+ """Get 3 random coding questions (one from each difficulty level)"""
58
+ if LEETCODE_DATA.empty:
59
+ return {"status": "error", "response": "LeetCode data not available. Please check the data file."}
60
+
61
+ response = "Here are your coding challenges for today:\n\n"
62
+
63
+ problem_match = re.search(r'problem[\s_]*(\d+)', query, re.IGNORECASE)
64
+ if problem_match:
65
+ problem_no = int(problem_match.group(1))
66
+ specific_problem = LEETCODE_DATA[LEETCODE_DATA['problem_no'] == problem_no]
67
+ if not specific_problem.empty:
68
+ p = specific_problem.iloc[0]
69
+ response = f"**Problem {p['problem_no']}: {p['problem_statement']}**\n"
70
+ response += f"**Difficulty**: {p['problem_level']}\n"
71
+ response += f"**Link**: {p['problem_link']}\n\n"
72
+ response += "Good luck with this problem!"
73
+ return {"status": "success", "response": response}
 
 
 
 
 
 
 
74
  else:
75
+ return {"status": "error", "response": "Problem not found. Try a different number!"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
+ easy = LEETCODE_DATA[LEETCODE_DATA['problem_level'] == 'Easy']
78
+ medium = LEETCODE_DATA[LEETCODE_DATA['problem_level'] == 'Medium']
79
+ hard = LEETCODE_DATA[LEETCODE_DATA['problem_level'] == 'Hard']
80
+
81
+ for label, df in [("🟒 Easy", easy), ("🟑 Medium", medium), ("πŸ”΄ Hard", hard)]:
82
+ if not df.empty:
83
+ q = df.sample(1).iloc[0]
84
+ response += f"**{label} Challenge**\n"
85
+ response += f"Problem {q['problem_no']}: {q['problem_statement']}\n"
86
+ response += f"Link: {q['problem_link']}\n\n"
87
+
88
+ response += "Choose one that matches your skill level and start coding!"
89
+ return {"status": "success", "response": response}
90
+
91
+ # β€”β€”β€” Tool 2: Fetch Interview Questions β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
92
+ def fetch_interview_questions(query):
93
  if not TAVILY_API_KEY:
94
+ return {"status": "error", "response": "Tavily API key not configured."}
95
+
96
+ try:
97
+ tavily = TavilySearch(api_key=TAVILY_API_KEY, max_results=3)
98
+ search_response = tavily.invoke(f"{query} interview questions")
99
+
100
+ # Extract the results list from the response dictionary
101
+ results = search_response.get("results", []) if isinstance(search_response, dict) else search_response
102
+
103
+ if not results:
104
+ return {"status": "success", "response": f"No results found for '{query}' interview questions."}
105
+
106
+ search_results = f"Here are the top 3 resources for {query} interview questions:\n\n"
107
+ for i, res in enumerate(results[:3], 1):
108
+ t = res.get('title', 'No title')
109
+ u = res.get('url', 'No URL')
110
+ c = res.get('content', '')
111
+ snippet = c[:200] + '...' if len(c) > 200 else c
112
+ search_results += f"**{i}. {t}**\nURL: {u}\nPreview: {snippet}\n\n"
113
+
114
+ model = genai.GenerativeModel('gemini-1.5-flash')
115
+ guidance = model.generate_content(f"""
116
+ Based on the topic '{query}', provide practical advice on how to prepare for and tackle interview questions in this area.
117
+ Include:
118
+ 1. Key concepts to focus on
119
+ 2. Common question types
120
+ 3. How to structure answers
121
+ 4. Tips for success
122
+
123
+ Keep it concise and actionable.
124
+ """).text
125
+
126
+ final = search_results + "\n**πŸ’‘ How to Tackle These Interviews:**\n\n" + guidance
127
+ return {"status": "success", "response": final}
128
 
129
+ except Exception as e:
130
+ return {"status": "error", "response": f"Error fetching interview questions: {str(e)}"}
131
+
132
+ # β€”β€”β€” Tool 3: Simulate Mock Interview β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
133
+ def simulate_mock_interview(query, user_id="default", session_id="default"):
134
+ session_key = f"mock_{user_id}_{session_id}"
135
+ if session_key not in user_sessions:
136
+ user_sessions[session_key] = {
137
+ "stage": "tech_stack",
138
+ "tech_stack": "",
139
+ "questions_asked": [],
140
+ "answers_given": [],
141
+ "current_question": "",
142
+ "question_count": 0,
143
+ "difficulty": "medium",
144
+ "feedback_history": []
145
+ }
146
+ session = user_sessions[session_key]
147
 
148
  try:
149
+ model = genai.GenerativeModel('gemini-1.5-flash')
150
+
151
+ # Tech stack collection stage
152
+ if session["stage"] == "tech_stack":
153
+ session["stage"] = "waiting_tech_stack"
154
+ return {"status": "success", "response": (
155
+ "Welcome to your mock interview! 🎯\n\n"
156
+ "Please tell me about your tech stack (e.g., Python, React, multi-agent systems) "
157
+ "or the role you're preparing for (e.g., software engineer, ML engineer)."
158
+ )}
159
+
160
+ elif session["stage"] == "waiting_tech_stack":
161
+ session["tech_stack"] = query
162
+ session["stage"] = "interviewing"
163
+ difficulty_options = " (easy/medium/hard)"
164
+ q = model.generate_content(f"""
165
+ Generate a relevant interview question for tech stack: {query}
166
+ Ensure it tests technical knowledge and problem-solving.
167
+ Keep it concise and return only the question.
168
+ """).text.strip()
169
+
170
+ session.update({
171
+ "current_question": q,
172
+ "questions_asked": [q],
173
+ "question_count": 1
174
+ })
175
+
176
+ return {"status": "success", "response": (
177
+ f"Great! Based on your tech stack ({query}), let's start your mock interview.\n\n"
178
+ f"**Question 1:** {q}\n"
179
+ f"Set difficulty level{difficulty_options} or proceed. Type 'quit' to end and get your summary."
180
+ )}
181
+
182
+ elif session["stage"] == "interviewing":
183
+ if query.lower().strip() in ["easy", "medium", "hard"]:
184
+ session["difficulty"] = query.lower().strip()
185
+ return {"status": "success", "response": (
186
+ f"Difficulty set to {session['difficulty']}. Let's continue!\n\n"
187
+ f"**Question {session['question_count']}:** {session['current_question']}\n\n"
188
+ "Take your time to answer. Type 'quit' to end and get your summary."
189
+ )}
190
+
191
+ if query.lower().strip() == "quit":
192
+ return end_mock_interview(session_key)
193
+
194
+ # Store answer and provide feedback
195
+ session["answers_given"].append(query)
196
+ feedback = model.generate_content(f"""
197
+ Question: {session['current_question']}
198
+ Answer: {query}
199
+ Tech Stack: {session['tech_stack']}
200
+ Difficulty: {session['difficulty']}
201
+
202
+ Provide concise, constructive feedback:
203
+ - What went well
204
+ - Areas to improve
205
+ - Missing points or better approach
206
+ - Suggested follow-up topic
207
+ """).text.strip()
208
+ session["feedback_history"].append(feedback)
209
+
210
+ # Generate next question with context
211
+ next_q = model.generate_content(f"""
212
+ Tech stack: {session['tech_stack']}
213
+ Difficulty: {session['difficulty']}
214
+ Previous questions: {session['questions_asked']}
215
+ Follow-up topic suggestion: {feedback.split('\n')[-1] if feedback else ''}
216
+
217
+ Generate a new, relevant interview question unseen before.
218
+ Ensure it aligns with the tech stack and difficulty.
219
+ Return only the question.
220
+ """).text.strip()
221
+
222
+ session["questions_asked"].append(next_q)
223
+ session["current_question"] = next_q
224
+ session["question_count"] += 1
225
+
226
+ return {"status": "success", "response": (
227
+ f"**Feedback on your previous answer:**\n{feedback}\n\n"
228
+ f"**Question {session['question_count']}:** {next_q}\n\n"
229
+ "Type 'quit' to end the interview and get your summary, or set a new difficulty (easy/medium/hard)."
230
+ )}
231
 
232
  except Exception as e:
233
+ return {"status": "error", "response": f"Error in mock interview: {str(e)}"}
 
234
 
235
+ def end_mock_interview(session_key):
236
+ session = user_sessions[session_key]
 
 
 
 
 
237
 
238
+ try:
239
+ model = genai.GenerativeModel('gemini-1.5-flash')
240
+
241
+ summary = model.generate_content(f"""
242
+ Mock Interview Summary:
243
+ Tech Stack: {session['tech_stack']}
244
+ Difficulty: {session['difficulty']}
245
+ Questions Asked: {session['questions_asked']}
246
+ Answers Given: {session['answers_given']}
247
+ Feedback History: {session['feedback_history']}
248
+
249
+ Provide a concise overall assessment:
250
+ - Strengths
251
+ - Areas for improvement
252
+ - Key recommendations
253
+ - Common mistakes to avoid
254
+ """).text.strip()
255
+
256
+ # Store session data before deletion for response
257
+ tech_stack = session['tech_stack']
258
+ difficulty = session['difficulty']
259
+ questions_count = len(session['questions_asked'])
260
+
261
+ del user_sessions[session_key]
262
+
263
  return {"status": "success", "response": (
264
+ "🎯 **Mock Interview Complete!**\n\n"
265
+ f"**Interview Summary:**\n"
266
+ f"- Tech Stack: {tech_stack}\n"
267
+ f"- Difficulty: {difficulty}\n"
268
+ f"- Questions Asked: {questions_count}\n\n"
269
+ "**Overall Assessment:**\n" + summary + "\n\n"
270
+ "Great jobβ€”use this feedback to level up! πŸ’ͺ"
271
  )}
272
+
273
+ except Exception as e:
274
+ return {"status": "error", "response": f"Error generating interview summary: {str(e)}"}
275
 
276
+ # β€”β€”β€” Main Agent Class β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
 
277
  class InterviewPrepAgent:
278
  def __init__(self):
279
+ if GOOGLE_API_KEY:
280
+ self.model = genai.GenerativeModel('gemini-1.5-flash')
281
+ else:
282
+ self.model = None
283
  self.tools = {
284
  "get_daily_coding_question": get_daily_coding_question,
285
  "fetch_interview_questions": fetch_interview_questions,
286
  "simulate_mock_interview": simulate_mock_interview
287
  }
 
 
288
 
289
+ def classify_query(self, query):
290
+ if not self.model:
291
+ # Fallback classification without AI
292
+ query_lower = query.lower()
293
+ if any(keyword in query_lower for keyword in ['mock', 'interview', 'simulate', 'practice']):
294
+ return "simulate_mock_interview", {"query": query}
295
+ elif any(keyword in query_lower for keyword in ['coding', 'leetcode', 'daily', 'problem']):
296
+ return "get_daily_coding_question", {"query": query}
297
+ else:
298
+ return "fetch_interview_questions", {"query": query}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
299
 
 
300
  try:
301
+ prompt = f"""
302
+ Analyze this user query and determine which tool to use:
303
+
304
+ Query: "{query}"
305
+
306
+ Tools:
307
+ 1. get_daily_coding_question – for coding problems, leetcode, daily challenges
308
+ 2. fetch_interview_questions – for topic-specific interview question resources
309
+ 3. simulate_mock_interview – for mock interview practice or behavioral interviews
310
+
311
+ Rules:
312
+ - If query mentions 'mock', 'interview', 'simulate', or 'practice', choose simulate_mock_interview
313
+ - If query mentions 'coding', 'leetcode', 'daily', 'problem', choose get_daily_coding_question
314
+ - If query asks for interview questions on a specific technology (like 'Python interview questions'), choose fetch_interview_questions
315
+ - If unclear, default to simulate_mock_interview
316
+
317
+ Respond with JSON: {{"tool": "tool_name", "args": {{"query": "query_text"}}}}
318
+ """
319
+ resp = self.model.generate_content(prompt).text.strip()
320
+ if resp.startswith("```json"):
321
+ resp = resp.replace("```json", "").replace("```", "").strip()
322
+ j = json.loads(resp)
323
+ return j.get("tool"), j.get("args", {})
324
  except Exception as e:
325
+ # Fallback to simple classification
326
+ return "simulate_mock_interview", {"query": query}
327
 
328
+ def process_query(self, query, user_id="default", session_id="default"):
329
+ tool, args = self.classify_query(query)
330
+ if tool not in self.tools:
331
+ return {"status": "error", "response": "Sorry, I didn't get that. Ask for coding practice, interview questions, or mock interview!"}
 
 
332
 
333
+ if tool == "simulate_mock_interview":
334
+ result = self.tools[tool](args.get("query", query), user_id, session_id)
335
+ else:
336
+ result = self.tools[tool](args.get("query", query))
 
 
 
 
 
 
 
 
337
 
338
+ return result.get("response", "Something went wrong, try again.")
339
 
340
+ # β€”β€”β€” FastAPI Application β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
341
+ app = FastAPI(title="Interview Prep API", version="2.0.0", description="AI-powered interview practice companion")
342
 
343
+ # Initialize the agent
344
  agent = InterviewPrepAgent()
345
 
 
 
 
 
 
 
 
 
 
346
  @app.post("/chat", response_model=ChatResponse)
347
+ async def chat(request: ChatRequest):
348
+ """
349
+ Process a chat message and return a response
350
+ """
351
+ try:
352
+ query = preprocess_query(request.message)
353
+ response = agent.process_query(query, request.user_id, request.session_id)
354
+
355
+ return ChatResponse(
356
+ status="success",
357
+ response=response,
358
+ session_id=request.session_id
359
+ )
360
+ except Exception as e:
361
+ raise HTTPException(status_code=500, detail=f"Error processing chat: {str(e)}")
362
+
363
+ @app.get("/health", response_model=HealthResponse)
364
+ async def health_check():
365
+ """
366
+ Health check endpoint
367
+ """
368
+ return HealthResponse(
369
+ status="healthy",
370
+ google_api_configured=bool(GOOGLE_API_KEY),
371
+ leetcode_problems_loaded=len(LEETCODE_DATA),
372
+ tavily_search_available=bool(TAVILY_API_KEY)
373
+ )
374
 
375
  @app.get("/")
376
+ async def root():
377
+ """
378
+ Root endpoint with API information
379
+ """
380
+ return {
381
+ "message": "Interview Prep API v2.0.0",
382
+ "description": "AI-powered interview practice companion",
383
+ "endpoints": {
384
+ "/chat": "POST - Send chat messages",
385
+ "/health": "GET - Health check",
386
+ "/docs": "GET - API documentation",
387
+ "/examples": "GET - Example requests"
388
+ }
389
+ }
390
+
391
+ @app.get("/examples")
392
+ async def get_examples():
393
+ """
394
+ Get example requests for the API
395
+ """
396
+ return {
397
+ "examples": [
398
+ {
399
+ "description": "Get daily coding questions",
400
+ "request": {
401
+ "user_id": "user123",
402
+ "session_id": "session456",
403
+ "message": "Give me daily coding questions"
404
+ }
405
+ },
406
+ {
407
+ "description": "Start a mock interview",
408
+ "request": {
409
+ "user_id": "user123",
410
+ "session_id": "session456",
411
+ "message": "Start a mock interview"
412
+ }
413
+ },
414
+ {
415
+ "description": "Get Python interview questions",
416
+ "request": {
417
+ "user_id": "user123",
418
+ "session_id": "session456",
419
+ "message": "Python interview questions"
420
+ }
421
+ },
422
+ {
423
+ "description": "Get specific LeetCode problem",
424
+ "request": {
425
+ "user_id": "user123",
426
+ "session_id": "session456",
427
+ "message": "Show me problem 1"
428
+ }
429
+ }
430
+ ]
431
+ }
432
+
433
+ @app.delete("/session/{user_id}/{session_id}")
434
+ async def clear_session(user_id: str, session_id: str):
435
+ """
436
+ Clear a specific user session
437
+ """
438
+ session_key = f"mock_{user_id}_{session_id}"
439
+ if session_key in user_sessions:
440
+ del user_sessions[session_key]
441
+ return {"message": f"Session {session_id} for user {user_id} cleared successfully"}
442
+ else:
443
+ raise HTTPException(status_code=404, detail="Session not found")
444
+
445
+ @app.get("/sessions/{user_id}")
446
+ async def get_user_sessions(user_id: str):
447
+ """
448
+ Get all sessions for a specific user
449
+ """
450
+ user_session_keys = [key for key in user_sessions.keys() if key.startswith(f"mock_{user_id}_")]
451
+ sessions = []
452
+ for key in user_session_keys:
453
+ session_id = key.split("_")[-1]
454
+ session_data = user_sessions[key]
455
+ sessions.append({
456
+ "session_id": session_id,
457
+ "stage": session_data.get("stage"),
458
+ "tech_stack": session_data.get("tech_stack"),
459
+ "question_count": session_data.get("question_count", 0),
460
+ "difficulty": session_data.get("difficulty")
461
+ })
462
+ return {"user_id": user_id, "sessions": sessions}
463
 
464
  if __name__ == "__main__":
465
  import uvicorn
466
+ print("Starting Interview Prep FastAPI server...")
467
+ print(f"Google API configured: {bool(GOOGLE_API_KEY)}")
468
+ print(f"LeetCode problems loaded: {len(LEETCODE_DATA)}")
469
+ print(f"Tavily search available: {bool(TAVILY_API_KEY)}")
470
+ uvicorn.run(app, host="0.0.0.0", port=8000, reload=True)
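For reference, a quick way to exercise the /chat endpoint defined above once the server is running locally on port 8000; this snippet assumes the requests package is installed, and the field names follow the ChatRequest and ChatResponse models in this commit:

import requests

payload = {
    "user_id": "user123",
    "session_id": "session456",
    "message": "Start a mock interview",
}
resp = requests.post("http://localhost:8000/chat", json=payload, timeout=30)
resp.raise_for_status()
data = resp.json()  # keys: status, response, session_id
print(data["response"])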
gradioapp.py CHANGED
@@ -3,207 +3,228 @@ import os
3
  import re
4
  import pandas as pd
5
  import random
6
- import warnings
7
  from dotenv import load_dotenv
8
  from langchain_tavily import TavilySearch
9
  import google.generativeai as genai
10
- import gdown
11
  import gradio as gr
12
 
13
- warnings.filterwarnings("ignore")
14
-
15
  load_dotenv()
16
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
17
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
18
 
19
- user_sessions = {}
20
- if not GOOGLE_API_KEY:
21
- raise ValueError("GOOGLE_API_KEY environment variable is required.")
22
 
23
  genai.configure(api_key=GOOGLE_API_KEY)
24
 
25
- # β€”β€”β€” Load or fallback LeetCode data β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
26
- GOOGLE_SHEET_URL = "https://docs.google.com/spreadsheets/d/1KK9Mnm15hV3ALJo-quJndftWfaujJ7K2_zHMCTo5mGE/"
27
- FILE_ID = GOOGLE_SHEET_URL.split("/d/")[1].split("/")[0]
28
- DOWNLOAD_URL = f"https://drive.google.com/uc?export=download&id={FILE_ID}"
29
- OUTPUT_FILE = "leetcode_downloaded.xlsx"
30
-
31
- try:
32
- print("Downloading LeetCode data...")
33
- gdown.download(DOWNLOAD_URL, OUTPUT_FILE, quiet=False)
34
- LEETCODE_DATA = pd.read_excel(OUTPUT_FILE)
35
- print(f"Loaded {len(LEETCODE_DATA)} problems")
36
- except Exception:
37
- print("Failed to download/read. Using fallback.")
38
- LEETCODE_DATA = pd.DataFrame([
39
- {"problem_no": 3151, "problem_level": "Easy", "problem_statement": "special array",
40
- "problem_link": "https://leetcode.com/problems/special-array-i/?envType=daily-question&envId=2025-06-01"},
41
- {"problem_no": 1752, "problem_level": "Easy", "problem_statement": "check if array is sorted and rotated",
42
- "problem_link": "https://leetcode.com/problems/check-if-array-is-sorted-and-rotated/?envType=daily-question&envId=2025-06-01"},
43
- {"problem_no": 3105, "problem_level": "Easy", "problem_statement": "longest strictly increasing or strictly decreasing subarray",
44
- "problem_link": "https://leetcode.com/problems/longest-strictly-increasing-or-strictly-decreasing-subarray/?envType=daily-question&envId=2025-06-01"},
45
- {"problem_no": 1, "problem_level": "Easy", "problem_statement": "two sum",
46
- "problem_link": "https://leetcode.com/problems/two-sum/"},
47
- {"problem_no": 2, "problem_level": "Medium", "problem_statement": "add two numbers",
48
- "problem_link": "https://leetcode.com/problems/add-two-numbers/"},
49
- {"problem_no": 3, "problem_level": "Medium", "problem_statement": "longest substring without repeating characters",
50
- "problem_link": "https://leetcode.com/problems/longest-substring-without-repeating-characters/"},
51
- {"problem_no": 4, "problem_level": "Hard", "problem_statement": "median of two sorted arrays",
52
- "problem_link": "https://leetcode.com/problems/median-of-two-sorted-arrays/"},
53
- {"problem_no": 5, "problem_level": "Medium", "problem_statement": "longest palindromic substring",
54
- "problem_link": "https://leetcode.com/problems/longest-palindromic-substring/"}
55
- ])
56
-
57
- # β€”β€”β€” Helpers & Tools β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
58
-
59
- QUESTION_TYPE_MAPPING = {
60
- "easy": "Easy", "Easy": "Easy",
61
- "medium": "Medium", "Medium": "Medium",
62
- "hard": "Hard", "Hard": "Hard"
63
- }
64
-
65
- def preprocess_query(query: str) -> str:
66
- for k, v in QUESTION_TYPE_MAPPING.items():
67
- query = re.sub(rf'\b{k}\b', v, query, flags=re.IGNORECASE)
68
- query = re.sub(r'\bproblem\s*(\d+)', r'Problem_\1', query, flags=re.IGNORECASE)
69
- query = re.sub(r'\bquestion\s*(\d+)', r'Problem_\1', query, flags=re.IGNORECASE)
70
- query = re.sub(r'\b(find|search)\s+interview\s+questions\s+for\s+', '', query, flags=re.IGNORECASE)
71
- query = re.sub(r'\binterview\s+questions\b', '', query, flags=re.IGNORECASE).strip()
72
- return query
73
-
74
- def get_daily_coding_question(query: str = "") -> dict:
75
- try:
76
- response = "**Daily Coding Questions**\n\n"
77
-
78
- m = re.search(r'Problem_(\d+)', query, re.IGNORECASE)
79
- if m:
80
- df = LEETCODE_DATA[LEETCODE_DATA['problem_no'] == int(m.group(1))]
81
- if not df.empty:
82
- p = df.iloc[0]
83
- response += (
84
- f"**Problem {p['problem_no']}**\n"
85
- f"Level: {p['problem_level']}\n"
86
- f"Statement: {p['problem_statement']}\n"
87
- f"Link: {p['problem_link']}\n\n"
88
- )
89
- return {"status": "success", "response": response}
90
- else:
91
- return {"status": "error", "response": "Problem not found"}
92
-
93
- if query.strip():
94
- df = LEETCODE_DATA[LEETCODE_DATA['problem_statement'].str.contains(query, case=False, na=False)]
95
  else:
96
- df = LEETCODE_DATA
97
-
98
- easy_questions = df[df['problem_level'] == 'Easy'].sample(min(3, len(df[df['problem_level'] == 'Easy'])))
99
- medium_questions = df[df['problem_level'] == 'Medium'].sample(min(1, len(df[df['problem_level'] == 'Medium'])))
100
- hard_questions = df[df['problem_level'] == 'Hard'].sample(min(1, len(df[df['problem_level'] == 'Hard'])))
101
-
102
- response += "**Easy Questions**\n"
103
- for i, p in enumerate(easy_questions.itertuples(), 1):
104
- response += (
105
- f"{i}. Problem {p.problem_no}: {p.problem_statement}\n"
106
- f" Level: {p.problem_level}\n"
107
- f" Link: {p.problem_link}\n\n"
108
- )
109
-
110
- response += "**Medium Question**\n"
111
- for p in medium_questions.itertuples():
112
- response += (
113
- f"Problem {p.problem_no}: {p.problem_statement}\n"
114
- f"Level: {p.problem_level}\n"
115
- f"Link: {p.problem_link}\n\n"
116
- )
117
-
118
- response += "**Hard Question**\n"
119
- for p in hard_questions.itertuples():
120
- response += (
121
- f"Problem {p.problem_no}: {p.problem_statement}\n"
122
- f"Level: {p.problem_level}\n"
123
- f"Link: {p.problem_link}\n"
124
- )
125
-
126
- return {"status": "success", "response": response}
127
- except Exception as e:
128
- return {"status": "error", "response": f"Error: {e}"}
129
-
130
- def fetch_interview_questions(query: str) -> dict:
131
  if not TAVILY_API_KEY:
132
- return {"status": "error", "response": "Tavily API key not configured"}
133
-
134
- if not query.strip() or query.lower() in ["a", "interview", "question", "questions"]:
135
- return {"status": "error", "response": "Please provide a specific topic for interview questions (e.g., 'Python', 'data structures', 'system design')."}
136
-
137
- try:
138
- tavily = TavilySearch(api_key=TAVILY_API_KEY, max_results=5)
139
- search_query = f"{query} interview questions -inurl:(signup | login)"
140
- print(f"Executing Tavily search for: {search_query}")
141
-
142
- results = tavily.invoke(search_query)
143
- print(f"Raw Tavily results: {results}")
144
-
145
- if not results or not isinstance(results, list) or len(results) == 0:
146
- return {"status": "success", "response": "No relevant interview questions found. Try a more specific topic or different keywords."}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
- resp = "**Interview Questions Search Results for '{}':**\n\n".format(query)
149
- for i, r in enumerate(results, 1):
150
- if isinstance(r, dict):
151
- title = r.get('title', 'No title')
152
- url = r.get('url', 'No URL')
153
- content = r.get('content', '')
154
- content = content[:200] + '…' if len(content) > 200 else content or "No preview available"
155
- resp += f"{i}. **{title}**\n URL: {url}\n Preview: {content}\n\n"
156
- else:
157
- resp += f"{i}. {str(r)[:200]}{'…' if len(str(r)) > 200 else ''}\n\n"
158
 
159
- return {"status": "success", "response": resp}
160
-
161
- except Exception as e:
162
- print(f"Tavily search failed: {str(e)}")
163
- return {"status": "error", "response": f"Search failed: {str(e)}"}
164
-
165
- def simulate_mock_interview(query: str, user_id: str = "default") -> dict:
166
- qtype = "mixed"
167
- if re.search(r'HR|Behavioral|hr|behavioral', query, re.IGNORECASE): qtype = "HR"
168
- if re.search(r'Technical|System Design|technical|coding', query, re.IGNORECASE): qtype = "Technical"
169
-
170
- if "interview question" in query.lower() and qtype == "mixed":
171
- qtype = "HR"
172
-
173
- if qtype == "HR":
174
- hr_questions = [
175
- "Tell me about yourself.",
176
- "What is your greatest weakness?",
177
- "Describe a challenge you overcame.",
178
- "Why do you want to work here?",
179
- "Where do you see yourself in 5 years?",
180
- "Why are you leaving your current job?",
181
- "Describe a time when you had to work with a difficult team member.",
182
- "What are your salary expectations?",
183
- "Tell me about a time you failed.",
184
- "What motivates you?",
185
- "How do you handle stress and pressure?",
186
- "Describe your leadership style."
187
- ]
188
- q = random.choice(hr_questions)
189
  return {"status": "success", "response": (
190
- f"**Mock Interview (HR/Behavioral)**\n\n**Question:** {q}\n\nπŸ’‘ **Tips:**\n"
191
- f"- Use the STAR method (Situation, Task, Action, Result)\n"
192
- f"- Provide specific examples from your experience\n"
193
- f"- Keep your answer concise but detailed\n\n**Your turn to answer!**"
194
  )}
195
- else:
196
- p = LEETCODE_DATA.sample(1).iloc[0]
 
 
197
  return {"status": "success", "response": (
198
- f"**Mock Interview (Technical)**\n\n**Problem:** {p['problem_statement'].title()}\n"
199
- f"**Difficulty:** {p['problem_level']}\n**Link:** {p['problem_link']}\n\nπŸ’‘ **Tips:**\n"
200
- f"- Think out loud as you solve\n"
201
- f"- Ask clarifying questions\n"
202
- f"- Discuss time/space complexity\n\n**Explain your approach!**"
203
  )}
204
 
205
- # β€”β€”β€” The Enhanced InterviewPrepAgent β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
206
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
  class InterviewPrepAgent:
208
  def __init__(self):
209
  self.model = genai.GenerativeModel('gemini-1.5-flash')
@@ -212,190 +233,69 @@ class InterviewPrepAgent:
212
  "fetch_interview_questions": fetch_interview_questions,
213
  "simulate_mock_interview": simulate_mock_interview
214
  }
215
- self.instruction_text = """
216
- You are an interview preparation assistant. Analyze the user's query and determine which tool to use.
217
 
218
- Available tools:
219
- 1. get_daily_coding_question - For coding practice, LeetCode problems, daily questions
220
- 2. fetch_interview_questions - For searching interview questions on specific topics
221
- 3. simulate_mock_interview - For mock interview practice (HR/behavioral or technical)
222
 
223
- Instructions:
224
- - If user asks for coding questions, daily questions, LeetCode problems, practice problems -> use get_daily_coding_question
225
- - If user asks for interview questions on specific topics (e.g., Python, data structures) without "mock" or "simulate" -> use fetch_interview_questions
226
- - If user asks for mock interview, interview simulation, practice interview, or HR/behavioral questions -> use simulate_mock_interview
227
- - If user explicitly mentions "HR" or "behavioral" -> use simulate_mock_interview with HR focus
228
 
229
- Respond ONLY with valid JSON in this exact format:
230
- {"tool": "tool_name", "args": {"param1": "value1", "param2": "value2"}}
231
-
232
- User Query: {query}
233
- """
234
-
235
- def _classify_intent(self, query: str) -> tuple[str, dict]:
236
- query_lower = query.lower()
237
-
238
- # Prioritize HR/behavioral for explicit mentions
239
- if any(keyword in query_lower for keyword in ["hr", "behavioral", "give hr questions", "give behavioral questions"]):
240
- return "simulate_mock_interview", {"query": query, "user_id": "default"}
241
-
242
- # Handle mock interview or simulation requests
243
- if any(keyword in query_lower for keyword in ["mock interview", "practice interview", "interview simulation", "simulate_mock_interview"]):
244
- return "simulate_mock_interview", {"query": query, "user_id": "default"}
245
-
246
- # Handle coding-related queries
247
- if any(keyword in query_lower for keyword in ["daily", "coding question", "leetcode", "practice problem", "coding practice"]):
248
- problem_match = re.search(r'problem\s*(\d+)', query_lower)
249
- if problem_match:
250
- return "get_daily_coding_question", {"query": f"Problem_{problem_match.group(1)}"}
251
-
252
- if "easy" in query_lower:
253
- return "get_daily_coding_question", {"query": "Easy"}
254
- elif "medium" in query_lower:
255
- return "get_daily_coding_question", {"query": "Medium"}
256
- elif "hard" in query_lower:
257
- return "get_daily_coding_question", {"query": "Hard"}
258
-
259
- return "get_daily_coding_question", {"query": ""}
260
-
261
- # Handle topic-specific interview questions
262
- if any(keyword in query_lower for keyword in ["search interview questions", "find interview questions", "interview prep resources"]) or \
263
- "interview" in query_lower:
264
- return "fetch_interview_questions", {"query": query}
265
-
266
- # Fallback to LLM classification
267
- try:
268
- prompt = self.instruction_text.format(query=query)
269
- response = self.model.generate_content(prompt)
270
- result = json.loads(response.text.strip())
271
- tool_name = result.get("tool")
272
- args = result.get("args", {})
273
- return tool_name, args
274
- except Exception as e:
275
- print(f"LLM classification failed: {e}")
276
- return "get_daily_coding_question", {"query": ""}
277
-
278
- def process_query(self, query: str, user_id: str = "default", session_id: str = "default") -> str:
279
- if not GOOGLE_API_KEY:
280
- return "Error: Google API not configured."
281
-
282
- session_key = f"{user_id}_{session_id}"
283
- user_sessions.setdefault(session_key, {"history": []})
284
 
285
- tool_name, args = self._classify_intent(query)
286
-
287
- if tool_name not in self.tools:
288
- return f"I couldn't understand your request. Please try asking for:\n- Daily coding question\n- Mock interview\n- Interview questions for a specific topic"
 
289
 
290
- result = self.tools[tool_name](**args)
291
-
292
- user_sessions[session_key]["history"].append({
293
- "query": query,
294
- "response": result["response"]
295
- })
296
-
297
- return result["response"]
 
 
 
 
 
 
 
 
 
 
298
 
299
  # β€”β€”β€” Gradio Interface β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
300
-
301
  agent = InterviewPrepAgent()
302
 
303
  def chat_interface(message, history):
304
- """Handle chat messages and return response"""
305
- try:
306
- # Preprocess the query
307
- processed_query = preprocess_query(message)
308
-
309
- # Get response from agent
310
- response = agent.process_query(processed_query, user_id="gradio_user", session_id="session_1")
311
-
312
- return response
313
- except Exception as e:
314
- return f"Sorry, I encountered an error: {str(e)}"
315
 
316
  def create_examples():
317
- """Create example messages for the interface"""
318
  return [
319
- ["Give me a daily coding question"],
320
- ["I want to practice mock interview"],
321
- ["Find interview questions for Python"],
322
- ["Give me HR interview questions"],
323
- ["Technical mock interview"],
324
- ["Search interview questions for data structures"],
325
  ]
326
 
327
- # Create the Gradio interface
328
- with gr.Blocks(
329
- title="Interview Prep Assistant",
330
- theme=gr.themes.Soft(),
331
- css="""
332
- .gradio-container {
333
- max-width: 900px !important;
334
- }
335
- .chat-message {
336
- font-size: 14px !important;
337
- }
338
- """
339
- ) as interface:
340
-
341
- gr.Markdown(
342
- """
343
- # 🎯 Interview Prep Assistant
344
-
345
- Your AI-powered interview preparation companion! I can help you with:
346
-
347
- - **Daily Coding Questions** - Get LeetCode problems for practice
348
- - **Mock Interviews** - Practice HR/behavioral or technical interviews
349
- - **Interview Questions** - Search for specific topic-based interview questions
350
-
351
- Just type your request below and I'll help you prepare for your next interview!
352
- """
353
- )
354
-
355
- # Create the chat interface
356
  chatbot = gr.ChatInterface(
357
  fn=chat_interface,
358
- title="Chat with Interview Prep Assistant",
359
- description="Ask me for coding questions, mock interviews, or interview preparation resources!",
360
  examples=create_examples(),
361
- textbox=gr.Textbox(
362
- placeholder="Type your message here... (e.g., 'Give me a daily coding question')",
363
- container=False,
364
- scale=7
365
- ),
366
- chatbot=gr.Chatbot(
367
- height=500,
368
- show_label=False,
369
- container=True
370
- )
371
- )
372
-
373
- # Add footer with information
374
- gr.Markdown(
375
- """
376
- ---
377
- ### πŸ’‘ Tips for using the Interview Prep Assistant:
378
-
379
- - **For coding practice**: "daily coding question", "easy coding problem", "leetcode problem 1"
380
- - **For mock interviews**: "mock interview", "HR interview", "technical interview"
381
- - **For topic research**: "Python interview questions", "system design interview questions"
382
-
383
- ### πŸ“Š System Status:
384
- - Google API: βœ… Configured
385
- - LeetCode Problems: {} loaded
386
- - Tavily Search: {} Available
387
- """.format(
388
- len(LEETCODE_DATA),
389
- "βœ…" if TAVILY_API_KEY else "❌"
390
- )
391
  )
 
392
 
393
- # Launch the interface
394
  if __name__ == "__main__":
395
- interface.launch(
396
- # server_name="0.0.0.0",
397
- server_port=8000,
398
- share=False,
399
- show_error=True,
400
- quiet=False
401
- )
 
3
  import re
4
  import pandas as pd
5
  import random
 
6
  from dotenv import load_dotenv
7
  from langchain_tavily import TavilySearch
8
  import google.generativeai as genai
 
9
  import gradio as gr
10
 
 
 
11
  load_dotenv()
12
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
13
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
14
 
15
+ # User sessions for mock interviews
16
+ # user_sessions = {}
 
17
 
18
  genai.configure(api_key=GOOGLE_API_KEY)
19
 
20
+ # Load LeetCode data
21
+ OUTPUT_FILE = "Interview-QA-Practice-Bot/leetcode_downloaded.xlsx"
22
+ LEETCODE_DATA = pd.read_excel(OUTPUT_FILE)
23
+ print(f"Loaded {len(LEETCODE_DATA)} LeetCode problems from local file.")
24
+
25
+ # β€”β€”β€” Tool 1: Get Daily Coding Question β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
26
+ def get_daily_coding_question(query=""):
27
+ """Get 3 random coding questions (one from each difficulty level)"""
28
+ response = "Here are your coding challenges for today:\n\n"
29
+
30
+ problem_match = re.search(r'problem[\s_]*(\d+)', query, re.IGNORECASE)
31
+ if problem_match:
32
+ problem_no = int(problem_match.group(1))
33
+ specific_problem = LEETCODE_DATA[LEETCODE_DATA['problem_no'] == problem_no]
34
+ if not specific_problem.empty:
35
+ p = specific_problem.iloc[0]
36
+ response = f"**Problem {p['problem_no']}: {p['problem_statement']}**\n"
37
+ response += f"**Difficulty**: {p['problem_level']}\n"
38
+ response += f"**Link**: {p['problem_link']}\n\n"
39
+ response += "Good luck with this problem!"
40
+ return {"status": "success", "response": response}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  else:
42
+ return {"status": "error", "response": "Problem not found. Try a different number!"}
43
+
44
+ easy = LEETCODE_DATA[LEETCODE_DATA['problem_level'] == 'Easy']
45
+ medium = LEETCODE_DATA[LEETCODE_DATA['problem_level'] == 'Medium']
46
+ hard = LEETCODE_DATA[LEETCODE_DATA['problem_level'] == 'Hard']
47
+
48
+ for label, df in [("🟒 Easy", easy), ("🟑 Medium", medium), ("πŸ”΄ Hard", hard)]:
49
+ if not df.empty:
50
+ q = df.sample(1).iloc[0]
51
+ response += f"**{label} Challenge**\n"
52
+ response += f"Problem {q['problem_no']}: {q['problem_statement']}\n"
53
+ response += f"Link: {q['problem_link']}\n\n"
54
+
55
+ response += "Choose one that matches your skill level and start coding!"
56
+ return {"status": "success", "response": response}
57
+
58
+ # β€”β€”β€” Tool 2: Fetch Interview Questions β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
59
+ def fetch_interview_questions(query):
 
 
60
     if not TAVILY_API_KEY:
+        return {"status": "error", "response": "Tavily API key not configured."}
+
+    tavily = TavilySearch(api_key=TAVILY_API_KEY, max_results=3)
+    search_response = tavily.invoke(f"{query} interview questions")
+
+    # Extract the results list from the response dictionary
+    results = search_response.get("results", []) if isinstance(search_response, dict) else search_response
+
+    if not results:
+        return {"status": "success", "response": f"No results found for '{query}' interview questions."}
+
+    search_results = f"Here are the top 3 resources for {query} interview questions:\n\n"
+    for i, res in enumerate(results[:3], 1):
+        t = res.get('title', 'No title')
+        u = res.get('url', 'No URL')
+        c = res.get('content', '')
+        snippet = c[:200] + '...' if len(c) > 200 else c
+        search_results += f"**{i}. {t}**\nURL: {u}\nPreview: {snippet}\n\n"
+
+    model = genai.GenerativeModel('gemini-1.5-flash')
+    guidance = model.generate_content(f"""
+    Based on the topic '{query}', provide practical advice on how to prepare for and tackle interview questions in this area.
+    Include:
+    1. Key concepts to focus on
+    2. Common question types
+    3. How to structure answers
+    4. Tips for success
+
+    Keep it concise and actionable.
+    """).text
+
+    final = search_results + "\n**πŸ’‘ How to Tackle These Interviews:**\n\n" + guidance
+    return {"status": "success", "response": final}
+
+# β€”β€”β€” Tool 3: Simulate Mock Interview β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
+# Enhanced user session management
+user_sessions = {}
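+# Note: sessions live only in process memory, so a restart clears any in-progress interviews.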
+
+def simulate_mock_interview(query, user_id="default"):
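+    """Run a staged mock interview: collect the tech stack first, then loop question β†’ answer β†’ feedback until the user types 'quit'."""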
+    session_key = f"mock_{user_id}"
+    if session_key not in user_sessions:
+        user_sessions[session_key] = {
+            "stage": "tech_stack",
+            "tech_stack": "",
+            "questions_asked": [],
+            "answers_given": [],
+            "current_question": "",
+            "question_count": 0,
+            "difficulty": "medium",  # Added difficulty level
+            "feedback_history": []  # Added feedback tracking
+        }
+    session = user_sessions[session_key]
+    model = genai.GenerativeModel('gemini-1.5-flash')
+
+    # Tech stack collection stage
+    if session["stage"] == "tech_stack":
+        session["stage"] = "waiting_tech_stack"
+        return {"status": "success", "response": (
+            "Welcome to your mock interview! 🎯\n\n"
+            "Please tell me about your tech stack (e.g., Python, React, multi-agent systems) "
+            "or the role you're preparing for (e.g., software engineer, ML engineer)."
+        )}
+
+    elif session["stage"] == "waiting_tech_stack":
+        session["tech_stack"] = query
+        session["stage"] = "interviewing"
+        difficulty_options = " (easy/medium/hard)"
+        q = model.generate_content(f"""
+        Generate a relevant interview question for tech stack: {query}
+        Ensure it tests technical knowledge and problem-solving.
+        Keep it concise and return only the question.
+        """).text.strip()
+
+        session.update({
+            "current_question": q,
+            "questions_asked": [q],
+            "question_count": 1
+        })
+
         return {"status": "success", "response": (
+            f"Great! Based on your tech stack ({query}), let's start your mock interview.\n\n"
+            f"**Question 1:** {q}\n"
+            f"Set difficulty level{difficulty_options} or proceed. Type 'quit' to end and get your summary."
         )}
+
+    elif session["stage"] == "interviewing":
+        if query.lower().strip() in ["easy", "medium", "hard"]:
+            session["difficulty"] = query.lower().strip()
+            return {"status": "success", "response": (
+                f"Difficulty set to {session['difficulty']}. Let's continue!\n\n"
+                f"**Question {session['question_count']}:** {session['current_question']}\n\n"
+                "Take your time to answer. Type 'quit' to end and get your summary."
+            )}
+
+        if query.lower().strip() == "quit":
+            return end_mock_interview(session_key)
+
+        # Store answer and provide feedback
+        session["answers_given"].append(query)
+        feedback = model.generate_content(f"""
+        Question: {session['current_question']}
+        Answer: {query}
+        Tech Stack: {session['tech_stack']}
+        Difficulty: {session['difficulty']}
+
+        Provide concise, constructive feedback:
+        - What went well
+        - Areas to improve
+        - Missing points or better approach
+        - Suggested follow-up topic
+        """).text.strip()
+        session["feedback_history"].append(feedback)
+
+        # Generate next question with context.
+        # The follow-up hint is computed outside the f-string because backslashes
+        # inside f-string expressions are a SyntaxError on Python versions before 3.12.
+        followup_hint = feedback.split('\n')[-1] if feedback else ''
+        next_q = model.generate_content(f"""
+        Tech stack: {session['tech_stack']}
+        Difficulty: {session['difficulty']}
+        Previous questions: {session['questions_asked']}
+        Follow-up topic suggestion: {followup_hint}
+
+        Generate a new, relevant interview question that has not been asked before.
+        Ensure it aligns with the tech stack and difficulty.
+        Return only the question.
+        """).text.strip()
+
+        session["questions_asked"].append(next_q)
+        session["current_question"] = next_q
+        session["question_count"] += 1
+
         return {"status": "success", "response": (
+            f"**Feedback on your previous answer:**\n{feedback}\n\n"
+            f"**Question {session['question_count']}:** {next_q}\n\n"
+            "Type 'quit' to end the interview and get your summary, or set a new difficulty (easy/medium/hard)."
         )}

+def end_mock_interview(session_key):
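+    """Generate an overall assessment of the finished mock interview with Gemini, clear the session, and return the wrap-up message."""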
+    session = user_sessions[session_key]
+    model = genai.GenerativeModel('gemini-1.5-flash')
+
+    summary = model.generate_content(f"""
+    Mock Interview Summary:
+    Tech Stack: {session['tech_stack']}
+    Difficulty: {session['difficulty']}
+    Questions Asked: {session['questions_asked']}
+    Answers Given: {session['answers_given']}
+    Feedback History: {session['feedback_history']}
+
+    Provide a concise overall assessment:
+    - Strengths
+    - Areas for improvement
+    - Key recommendations
+    - Common mistakes to avoid
+    """).text.strip()
+
+    del user_sessions[session_key]
+
+    return {"status": "success", "response": (
+        "🎯 **Mock Interview Complete!**\n\n"
+        f"**Interview Summary:**\n"
+        f"- Tech Stack: {session['tech_stack']}\n"
+        f"- Difficulty: {session['difficulty']}\n"
+        f"- Questions Asked: {len(session['questions_asked'])}\n\n"
+        "**Overall Assessment:**\n" + summary + "\n\n"
+        "Great jobβ€”use this feedback to level up! πŸ’ͺ"
+    )}
+
+# β€”β€”β€” Main Agent Class β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
 class InterviewPrepAgent:
     def __init__(self):
         self.model = genai.GenerativeModel('gemini-1.5-flash')
         self.tools = {
             "get_daily_coding_question": get_daily_coding_question,
             "fetch_interview_questions": fetch_interview_questions,
             "simulate_mock_interview": simulate_mock_interview
         }

+    def classify_query(self, query):
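+        """Ask Gemini which of the three tools fits the query and return (tool_name, args) parsed from its JSON reply."""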
+        prompt = f"""
+        Analyze this user query and determine which tool to use:
+
+        Query: "{query}"
+
+        Tools:
+        1. get_daily_coding_question – for coding problems, leetcode, daily challenges
+        2. fetch_interview_questions – for topic-specific interview question resources
+        3. simulate_mock_interview – for mock interview practice or behavioral interviews
+
+        Rules:
+        - If query mentions 'mock', 'interview', 'simulate', or 'practice', choose simulate_mock_interview
+        - If query mentions 'coding', 'leetcode', 'daily', 'problem', choose get_daily_coding_question
+        - If query asks for interview questions on a specific technology (like 'Python interview questions'), choose fetch_interview_questions
+        - If unclear, default to simulate_mock_interview
+
+        Respond with JSON only, in the form {{"tool": "<tool_name>", "args": {{"query": "<the user's query>"}}}}.
+        """
+        resp = self.model.generate_content(prompt).text.strip()
+        if resp.startswith("```json"):
+            resp = resp.replace("```json", "").replace("```", "").strip()
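+        # Assumes the model returned well-formed JSON; json.loads raises json.JSONDecodeError otherwise.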
+        j = json.loads(resp)
+        return j.get("tool"), j.get("args", {})
+
+    def process_query(self, query, user_id="default"):
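+        """Route the query to the chosen tool (passing user_id through to the mock interview) and return its response text."""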
+        tool, args = self.classify_query(query)
+        if tool not in self.tools:
+            return {"text": "Sorry, I didn't get that. Ask for coding practice, interview questions, or a mock interview!"}
+
+        if tool == "simulate_mock_interview":
+            result = self.tools[tool](args.get("query", query), user_id)
+        else:
+            result = self.tools[tool](args.get("query", query))
+        return {"text": result["response"]}

 # β€”β€”β€” Gradio Interface β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
 agent = InterviewPrepAgent()

 def chat_interface(message, history):
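+    # The Gradio-supplied history is not used here; per-user state lives in user_sessions.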
+    resp = agent.process_query(message, user_id="gradio_user")
+    return resp.get("text", "Something went wrong, try again.")

 def create_examples():
     return [
+        ["Give me daily coding questions"],
+        ["Start a mock interview"],
+        ["Python interview questions"],
+        ["React interview questions"],
+        ["Show me problem 1"],
+        ["Data structures interview questions"],
     ]

+with gr.Blocks(title="Interview Prep Assistant", theme=gr.themes.Soft()) as interface:
+    gr.Markdown("# 🎯 Interview Prep Assistant\nYour AI-powered interview practice companion!")

     chatbot = gr.ChatInterface(
         fn=chat_interface,
         examples=create_examples(),
+        chatbot=gr.Chatbot(height=500, show_label=False, container=True, type="messages"),
+        textbox=gr.Textbox(placeholder="Type your message here...")
     )
+    gr.Markdown(f"\n---\n**System Status:**\n- βœ… Google API Configured\n- βœ… {len(LEETCODE_DATA)} LeetCode Problems Loaded\n- {'βœ…' if TAVILY_API_KEY else '❌'} Tavily Search Available")

 if __name__ == "__main__":
+    interface.launch(server_port=8000, share=True, show_error=True, quiet=False)