panchadip committed on
Commit 13e41f5 · verified · 1 Parent(s): 536e93b

Update app.py

Files changed (1)
  1. app.py +325 -322
app.py CHANGED
@@ -1,322 +1,325 @@
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel, EmailStr
import tempfile
import os
import shutil
from typing import List, Dict, Any
import json
import numpy as np
from pathlib import Path
import asyncio
from sentence_transformers import SentenceTransformer
import sqlite3

from jd_embedding_utils import generate_jd_embedding, extract_sections
from resume_embedding_utils import pdf_to_text, extract_resume_sections, generate_resume_embedding
from matcher import calculate_match_score, match_all_resumes
from email_utils import send_email
from agent_framework import AgentCoordinator

app = FastAPI()

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ DB_PATH = os.path.join(BASE_DIR, "recruitly.db")
+
# Initialize SQLite database
def init_db():
    conn = sqlite3.connect("recruitly.db")
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS job_descriptions (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            title TEXT,
            embedding TEXT,
            sections TEXT,
            summary TEXT
        )
    """)
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS resumes (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            filename TEXT,
            embedding TEXT,
            parsed TEXT,
            summary TEXT
        )
    """)
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS matches (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            resume_id INTEGER,
            jd_id INTEGER,
            score REAL,
            reasoning TEXT,
            FOREIGN KEY (resume_id) REFERENCES resumes (id),
            FOREIGN KEY (jd_id) REFERENCES job_descriptions (id)
        )
    """)
    conn.commit()
    conn.close()

# Call init_db on startup
init_db()

# Classes for request/response models
class JDRequest(BaseModel):
    text: str

class MatchRequest(BaseModel):
    jd_sections: Dict[str, List[str]]
    resume_data: Dict[str, Dict[str, Any]]

class EmailRequest(BaseModel):
    email: str
    name: str
    subject: str
    body: str

class ScheduleRequest(BaseModel):
    candidate_id: str
    name: str
    email: str

class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)

# Store processed JD and resumes in memory for matching
current_session = {
    "jd": None,
    "resumes": {},
    "agent_coordinator": AgentCoordinator()
}

# Create a single model instance for reuse
model = SentenceTransformer("all-MiniLM-L6-v2")

@app.post("/embed")
def get_embedding(request: JDRequest):
    """Process a job description and generate its embedding"""
    coordinator = current_session["agent_coordinator"]
    result = coordinator.process_job_description(request.text)

    # Store in current session
    current_session["jd"] = result

    # Convert embedding dictionary properly for JSON response
    serializable_embedding = json.loads(
        json.dumps(result["embedding"], cls=NumpyEncoder)
    )

    response_data = {
        "title": result["title"],
        "embedding": serializable_embedding,
        "sections": result["sections"],
        "summary": result.get("summary", "")
    }

    return response_data

@app.post("/upload-resumes")
async def upload_resumes(files: List[UploadFile] = File(...)):
    """Process multiple resume PDFs and generate embeddings for each"""
    if not files:
        raise HTTPException(status_code=400, detail="No files provided")

    # Create temp directory for saving uploaded files
    with tempfile.TemporaryDirectory() as temp_dir:
        resume_results = {}

        # First save all files to disk to avoid keeping file handles open too long
        file_paths = []
        for file in files:
            file_path = os.path.join(temp_dir, file.filename)
            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)
            file_paths.append((file.filename, file_path))

        # Process files in batches to avoid memory issues
        batch_size = 3
        for i in range(0, len(file_paths), batch_size):
            batch = file_paths[i:i+batch_size]
            batch_tasks = []

            for filename, file_path in batch:
                batch_tasks.append(process_resume(filename, file_path))

            # Process each batch concurrently
            batch_results = await asyncio.gather(*batch_tasks)

            # Combine results
            for filename, result in batch_results:
                resume_results[filename] = result
                # Add to current session
                if "error" not in result:
                    current_session["resumes"][filename] = result

    # Convert NumPy arrays to lists for JSON response
    serializable_results = json.loads(
        json.dumps(resume_results, cls=NumpyEncoder)
    )

    return JSONResponse(content=serializable_results)

async def process_resume(filename, file_path):
    """Process a single resume PDF file"""
    try:
        coordinator = current_session["agent_coordinator"]
        result = coordinator.cv_agent.process_cv(file_path, filename)
        return filename, result

    except Exception as e:
        print(f"Error processing {filename}: {str(e)}")
        return filename, {"error": str(e)}

@app.post("/match")
def match_resumes():
    """Match the current JD with all processed resumes"""
    jd = current_session["jd"]
    resumes = current_session["resumes"]

    if not jd or not resumes:
        raise HTTPException(status_code=400, detail="Job description or resumes missing")

    jd_title = jd["title"]
    jd_embeddings = jd["embedding"]

    # Match all resumes
    all_candidates = match_all_resumes(jd_title, jd_embeddings, resumes, threshold=0.8)

    # Save all candidates to the database
    conn = sqlite3.connect("recruitly.db")
    cursor = conn.cursor()
    for candidate in all_candidates:
        cursor.execute("""
            INSERT INTO matches (resume_id, jd_id, score, reasoning)
            VALUES (?, ?, ?, ?)
        """, (candidate.get("resume_id"), jd.get("id"), candidate["score"], json.dumps(candidate["reasoning"])))
    conn.commit()
    conn.close()

    # Include all candidates in the response
    return {"candidates": all_candidates}

@app.post("/generate-interview-slots")
def generate_interview_slots():
    """Generate potential interview time slots"""
    if not current_session["agent_coordinator"]:
        raise HTTPException(status_code=400, detail="Agent coordinator not initialized")

    slots = current_session["agent_coordinator"].scheduler_agent.generate_interview_slots()

    return {"slots": slots}

@app.post("/prepare-interview-email/{candidate_id}")
def prepare_interview_email(candidate_id: str):
    """Prepare an interview email for a specific candidate"""
    if not current_session["jd"]:
        raise HTTPException(status_code=400, detail="No job description processed")

    # Find the candidate in the matches
    matched_candidates = []
    if "matches" in current_session:
        matched_candidates = current_session["matches"]["matches"]

    candidate = None
    for match in matched_candidates:
        if match["name"] == candidate_id or str(match.get("id", "")) == candidate_id:
            candidate = match
            break

    if not candidate:
        raise HTTPException(status_code=404, detail=f"Candidate {candidate_id} not found")

    # Generate email content
    email_data = current_session["agent_coordinator"].scheduler_agent.prepare_email_for_candidate(
        candidate,
        current_session["jd"]["title"]
    )

    return email_data

@app.post("/send-email")
def send_candidate_email(request: EmailRequest):
    """Send an email to a candidate"""
    try:
        result = send_email(
            to_email=request.email,
            subject=request.subject,
            body=request.body
        )

        if result["success"]:
            return {"success": True, "message": f"Email sent to {request.name}"}
        else:
            raise HTTPException(status_code=500, detail=result["message"])
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/suggest-interview-times/{candidate_id}")
def suggest_interview_times(candidate_id: str):
    """Suggest available interview time slots for a candidate"""
    coordinator = current_session["agent_coordinator"]
    slots = coordinator.scheduler_agent.generate_interview_slots(days_ahead=7, slots_per_day=3)

    return {"candidate_id": candidate_id, "slots": slots}

# Helper function to extract name from parsed resume
def _extract_name(parsed, fallback):
    if "name" in parsed and parsed["name"] and len(parsed["name"]) > 0:
        return parsed["name"][0]
    return Path(fallback).stem

@app.get("/clear-session")
def clear_session():
    """Clear the current session data"""
    current_session["jd"] = None
    current_session["resumes"] = {}
    return {"message": "Session cleared"}

@app.get("/test-match")
def test_match():
    """Test endpoint to diagnose matching issues"""
    test_jd = """We are seeking an innovative and strategic Product Manager to lead the development and execution of new products. The ideal candidate will collaborate with cross-functional teams to define product roadmaps, analyze market trends, and ensure successful product launches. Responsibilities: Define product vision and strategy based on market research and customer needs. Work closely with engineering, design, and marketing teams to develop and launch products. Prioritize features, create roadmaps, and manage product lifecycle. Analyze user feedback and data to optimize product performance. Ensure alignment between business goals and product development. Qualifications: Bachelor's degree in Business, Computer Science, or a related field. Experience in product management, agile methodologies, and market research. Strong analytical, leadership, and communication skills. Familiarity with project management tools and data-driven decision-making."""

    # Process the test JD
    title, embedding = generate_jd_embedding(test_jd)
    sections = extract_sections(test_jd)

    # Create a simple test resume with matching sections
    test_resume = {
        "skills": ["Product management", "Agile methodologies", "Leadership"],
        "experience": ["5 years experience in product management", "Led cross-functional teams"],
        "education": ["Bachelor's degree in Computer Science"],
        "qualifications": ["Strong analytical skills", "Communication skills"]
    }

    # Run the matcher with debug output
    score, reasoning = calculate_match_score(sections, test_resume)

    return {
        "jd_sections": sections,
        "resume_sections": test_resume,
        "score": score,
        "reasoning": reasoning
    }

# Run the app locally
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)
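A minimal client-side sketch, not part of the commit, of how the endpoints defined above might be exercised once the app is running locally on port 8000 (as in the __main__ block). It uses the third-party requests library, and the job-description text and resume.pdf path are placeholders:

import requests

BASE_URL = "http://127.0.0.1:8000"  # assumes the server was started locally via uvicorn

# /embed expects a JSON body matching JDRequest, i.e. {"text": "..."}
jd = requests.post(f"{BASE_URL}/embed", json={"text": "We are seeking a Product Manager ..."}).json()
print(jd["title"])

# /upload-resumes accepts one or more PDFs in a multipart field named "files"
with open("resume.pdf", "rb") as fh:  # placeholder path to a resume PDF
    uploaded = requests.post(
        f"{BASE_URL}/upload-resumes",
        files=[("files", ("resume.pdf", fh, "application/pdf"))],
    ).json()
print(list(uploaded.keys()))

# /match scores the stored JD against every resume in the current in-memory session
matches = requests.post(f"{BASE_URL}/match").json()
print(matches["candidates"])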