Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,643 +1,680 @@
|
|
1 |
-
import os
|
2 |
-
import json
|
3 |
-
import re
|
4 |
-
import time
|
5 |
-
from typing import Dict, Any, List, Optional, Annotated
|
6 |
-
from chatbot_model import (
|
7 |
-
UserMemory,
|
8 |
-
ChatbotState,
|
9 |
-
ProfileAnalysisModel,
|
10 |
-
JobFitModel,
|
11 |
-
ContentGenerationModel,
|
12 |
-
|
13 |
-
)
|
14 |
-
from llm_utils import call_llm_and_parse
|
15 |
-
from profile_preprocessing import (
|
16 |
-
preprocess_profile,
|
17 |
-
initialize_state,
|
18 |
-
normalize_url
|
19 |
-
)
|
20 |
-
from openai import OpenAI
|
21 |
-
import streamlit as st
|
22 |
-
import hashlib
|
23 |
-
from dotenv import load_dotenv
|
24 |
-
from pydantic import BaseModel, Field,ValidationError
|
25 |
-
# import pdb; pdb.set_trace()
|
26 |
-
from scraping_profile import scrape_linkedin_profile
|
27 |
-
from langchain_openai import ChatOpenAI
|
28 |
-
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage,BaseMessage,ToolMessage
|
29 |
-
from langchain_core.tools import tool
|
30 |
-
from langgraph.graph import StateGraph, END,START
|
31 |
-
from langgraph.checkpoint.memory import MemorySaver
|
32 |
-
from langgraph.graph import add_messages # if your framework exposes this
|
33 |
-
from langgraph.prebuilt import ToolNode,tools_condition,InjectedState
|
34 |
-
import dirtyjson
|
35 |
-
import sqlite3
|
36 |
-
try:
|
37 |
-
from langgraph.checkpoint.sqlite import SqliteSaver
|
38 |
-
SQLITE_AVAILABLE = True
|
39 |
-
except ImportError:
|
40 |
-
SQLITE_AVAILABLE = False
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
# ========== 1. ENVIRONMENT & LLM SETUP ==========
|
45 |
-
load_dotenv()
|
46 |
-
groq_key = os.getenv("GROQ_API_KEY")
|
47 |
-
assert groq_key, "GROQ_API_KEY not found in environment!"
|
48 |
-
groq_client=OpenAI(
|
49 |
-
api_key=os.getenv("GROQ_API_KEY"),
|
50 |
-
base_url="https://api.groq.com/openai/v1"
|
51 |
-
)
|
52 |
-
|
53 |
-
def normalize_url(url):
|
54 |
-
return url.strip().rstrip('/')
|
55 |
-
|
56 |
-
def validate_state(state: dict) -> None:
|
57 |
-
"""
|
58 |
-
Validate given state dict against ChatbotState schema.
|
59 |
-
Displays result in Streamlit instead of printing.
|
60 |
-
"""
|
61 |
-
# st.write("=== Validating chatbot state ===")
|
62 |
-
try:
|
63 |
-
ChatbotState.model_validate(state)
|
64 |
-
# st.success("β
State is valid!")
|
65 |
-
except ValidationError as e:
|
66 |
-
st.error("β Validation failed!")
|
67 |
-
errors_list = []
|
68 |
-
for error in e.errors():
|
69 |
-
loc = " β ".join(str(item) for item in error['loc'])
|
70 |
-
msg = error['msg']
|
71 |
-
errors_list.append(f"- At: {loc}\n Error: {msg}")
|
72 |
-
st.write("\n".join(errors_list))
|
73 |
-
# Optionally show raw validation error too:
|
74 |
-
st.expander("See raw validation error").write(str(e))
|
75 |
-
st.stop()
|
76 |
-
|
77 |
-
|
78 |
-
user_memory = UserMemory()
|
79 |
-
|
80 |
-
# ========== 7. AGENT FUNCTIONS ==========
|
81 |
-
|
82 |
-
def profile_analysis_prompt(profile: Dict[str, str]) -> str:
|
83 |
-
return f"""
|
84 |
-
You are a top-tier LinkedIn career coach and AI analyst.
|
85 |
-
|
86 |
-
Analyze the following candidate profile carefully.
|
87 |
-
|
88 |
-
Candidate profile data:
|
89 |
-
FullName: {profile.get("FullName", "")}
|
90 |
-
Headline: {profile.get("Headline", "")}
|
91 |
-
JobTitle: {profile.get("JobTitle", "")}
|
92 |
-
CompanyName: {profile.get("CompanyName", "")}
|
93 |
-
CompanyIndustry: {profile.get("CompanyIndustry", "")}
|
94 |
-
CurrentJobDuration: {profile.get("CurrentJobDuration", "")}
|
95 |
-
About: {profile.get("About", "")}
|
96 |
-
Experiences: {profile.get("Experiences", "")}
|
97 |
-
Skills: {profile.get("Skills", "")}
|
98 |
-
Educations: {profile.get("Educations", "")}
|
99 |
-
Certifications: {profile.get("Certifications", "")}
|
100 |
-
HonorsAndAwards: {profile.get("HonorsAndAwards", "")}
|
101 |
-
Verifications: {profile.get("Verifications", "")}
|
102 |
-
Highlights: {profile.get("Highlights", "")}
|
103 |
-
Projects: {profile.get("Projects", "")}
|
104 |
-
Publications: {profile.get("Publications", "")}
|
105 |
-
Patents: {profile.get("Patents", "")}
|
106 |
-
Courses: {profile.get("Courses", "")}
|
107 |
-
TestScores: {profile.get("TestScores", "")}
|
108 |
-
|
109 |
-
|
110 |
-
Identify and summarize:
|
111 |
-
1. strengths:
|
112 |
-
- technical strengths (skills, tools, frameworks)
|
113 |
-
- project strengths (impactful projects, innovation)
|
114 |
-
- educational strengths (degrees, certifications, awards)
|
115 |
-
- soft skills and personality traits (teamwork, leadership)
|
116 |
-
2. weaknesses:
|
117 |
-
- missing or weak technical skills
|
118 |
-
- gaps in projects, experience, or education
|
119 |
-
- unclear profile sections or missing context
|
120 |
-
3. actionable suggestions:
|
121 |
-
- concrete ways to improve profile headline, about section, or add projects
|
122 |
-
- suggestions to learn or highlight new skills
|
123 |
-
- ideas to make the profile more attractive for recruiters
|
124 |
-
|
125 |
-
Important instructions:
|
126 |
-
- Respond ONLY with valid JSON.
|
127 |
-
- Do NOT include text before or after JSON.
|
128 |
-
- Be concise but detailed.
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
Example JSON format:
|
133 |
-
{{
|
134 |
-
"strengths": {{
|
135 |
-
"technical": ["...", "..."],
|
136 |
-
"projects": ["...", "..."],
|
137 |
-
"education": ["...", "..."],
|
138 |
-
"soft_skills": ["...", "..."]
|
139 |
-
}},
|
140 |
-
"weaknesses": {{
|
141 |
-
"technical_gaps": ["...", "..."],
|
142 |
-
"project_or_experience_gaps": ["...", "..."],
|
143 |
-
"missing_context": ["...", "..."]
|
144 |
-
}},
|
145 |
-
"suggestions": [
|
146 |
-
"...",
|
147 |
-
"...",
|
148 |
-
"..."
|
149 |
-
]
|
150 |
-
}}
|
151 |
-
""".strip()
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
def job_fit_prompt(sections: Dict[str, str], target_role: str) -> str:
|
157 |
-
return f"""
|
158 |
-
You are an expert career coach and recruiter.
|
159 |
-
|
160 |
-
Compare the following candidate profile against the typical requirements for the role of "{target_role}".
|
161 |
-
|
162 |
-
Candidate Profile:
|
163 |
-
- Headline: {sections.get('headline', '')}
|
164 |
-
- About: {sections.get('about', '')}
|
165 |
-
- Job Title: {sections.get('job_title', '')}
|
166 |
-
- Company: {sections.get('company_name', '')}
|
167 |
-
- Industry: {sections.get('company_industry', '')}
|
168 |
-
- Current Job Duration: {sections.get('current_job_duration', '')}
|
169 |
-
- Skills: {sections.get('skills', '')}
|
170 |
-
- Projects: {sections.get('projects', '')}
|
171 |
-
- Educations: {sections.get('educations', '')}
|
172 |
-
- Certifications: {sections.get('certifications', '')}
|
173 |
-
- Honors & Awards: {sections.get('honors_and_awards', '')}
|
174 |
-
- Experiences: {sections.get('experiences', '')}
|
175 |
-
|
176 |
-
**Instructions:**
|
177 |
-
- Respond ONLY with valid JSON.
|
178 |
-
- Your JSON must exactly match the following schema:
|
179 |
-
{{
|
180 |
-
"match_score": 85,
|
181 |
-
"missing_skills": ["Skill1", "Skill2"],
|
182 |
-
"suggestions": ["...", "...", "..."]
|
183 |
-
}}
|
184 |
-
- "match_score": integer from 0β100 estimating how well the profile fits the target role.
|
185 |
-
- "missing_skills": key missing or weakly mentioned skills.
|
186 |
-
- "suggestions": 3 actionable recommendations to improve fit (e.g., learn tools, rewrite headline).
|
187 |
-
|
188 |
-
Do NOT include explanations, text outside JSON, or markdown.
|
189 |
-
Start with '{{' and end with '}}'.
|
190 |
-
The JSON must be directly parseable.
|
191 |
-
""".strip()
|
192 |
-
|
193 |
-
|
194 |
-
# --- Tool: Profile Analyzer ---
|
195 |
-
@tool
|
196 |
-
def profile_analyzer(state: Annotated[ChatbotState, InjectedState]) -> dict:
|
197 |
-
"""
|
198 |
-
Tool: Analyze the overall full user's profile to give strengths, weaknesses, suggestions.
|
199 |
-
This is needed only if full analysis of profile is needed.
|
200 |
-
Returns the full analysis in the form of a json.
|
201 |
-
|
202 |
-
- It takes no arguments
|
203 |
-
"""
|
204 |
-
|
205 |
-
|
206 |
-
# Get summarized profile (dictionary of strings)
|
207 |
-
profile = getattr(state, "profile", {}) or {}
|
208 |
-
|
209 |
-
# Build prompt
|
210 |
-
prompt = profile_analysis_prompt(profile)
|
211 |
-
|
212 |
-
# Call the LLM & parse structured result
|
213 |
-
analysis_model = call_llm_and_parse(groq_client,prompt, ProfileAnalysisModel)
|
214 |
-
analysis_dict = analysis_model.model_dump()
|
215 |
-
|
216 |
-
# Save to state and user memory
|
217 |
-
state.profile_analysis = analysis_dict
|
218 |
-
user_memory.save("profile_analysis", analysis_dict)
|
219 |
-
|
220 |
-
print("πΎ [DEBUG] Saved analysis to user memory.")
|
221 |
-
print("π¦ [DEBUG] Updated state.profile_analysis with analysis.")
|
222 |
-
|
223 |
-
return analysis_dict
|
224 |
-
|
225 |
-
# --- Tool: Job Matcher ---
|
226 |
-
|
227 |
-
|
228 |
-
@tool
|
229 |
-
def job_matcher(
|
230 |
-
state: Annotated[ChatbotState, InjectedState],
|
231 |
-
target_role: str = None
|
232 |
-
) -> dict:
|
233 |
-
"""
|
234 |
-
Tool: Analyze how well the user's profile fits the target role.
|
235 |
-
- If user is asking if he is a good fit for a certain role, or needs to see if his profile is compatible with a certain role, call this.
|
236 |
-
- Takes target_role as an argument.
|
237 |
-
- this tool is needed when match score, missing skills, suggestions are needed based on a job name given.
|
238 |
-
"""
|
239 |
-
print(f"target role is {target_role}")
|
240 |
-
# Update state.target_role if provided
|
241 |
-
|
242 |
-
sections = getattr(state, "sections", {})
|
243 |
-
|
244 |
-
# Build prompt
|
245 |
-
prompt = job_fit_prompt(sections, target_role)
|
246 |
-
|
247 |
-
# Call LLM and parse
|
248 |
-
try:
|
249 |
-
job_fit_model = call_llm_and_parse(groq_client,prompt, JobFitModel)
|
250 |
-
job_fit_dict = job_fit_model.model_dump()
|
251 |
-
job_fit_dict["target_role"] = target_role
|
252 |
-
except Exception as e:
|
253 |
-
job_fit_dict = {
|
254 |
-
"target_role":target_role,
|
255 |
-
"match_score": 0,
|
256 |
-
"missing_skills": [],
|
257 |
-
"suggestions": ["Parsing failed or incomplete response."]
|
258 |
-
}
|
259 |
-
|
260 |
-
# Save to state and user memory
|
261 |
-
state.job_fit = job_fit_dict
|
262 |
-
user_memory.save("job_fit", job_fit_dict)
|
263 |
-
|
264 |
-
return job_fit_dict
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
@tool
|
272 |
-
def extract_from_state_tool(
|
273 |
-
state: Annotated[ChatbotState, InjectedState],
|
274 |
-
key: str
|
275 |
-
) -> dict:
|
276 |
-
"""
|
277 |
-
This tool is used if user wants to ask about any particular part of this profile. Use this if a singe section is targeted. It expects key as an arguement, that represents what
|
278 |
-
the user is wanting to look at, from his profile.
|
279 |
-
Argument:
|
280 |
-
key: only pass one from the below list, identify one thing the user wants to look into and choose that:
|
281 |
-
"sections.about", "sections.headline", "sections.skills", "sections.projects",
|
282 |
-
"sections.educations", "sections.certifications", "sections.honors_and_awards",
|
283 |
-
"sections.experiences", "sections.publications", "sections.patents",
|
284 |
-
"sections.courses", "sections.test_scores", "sections.verifications",
|
285 |
-
"sections.highlights", "sections.job_title", "sections.company_name",
|
286 |
-
"sections.company_industry", "sections.current_job_duration", "sections.full_name",
|
287 |
-
"enhanced_content,"profile_analysis", "job_fit", "target_role", "editing_section"
|
288 |
-
"""
|
289 |
-
value = state
|
290 |
-
try:
|
291 |
-
for part in key.split('.'):
|
292 |
-
# Support both dict and Pydantic model
|
293 |
-
if isinstance(value, dict):
|
294 |
-
value = value.get(part)
|
295 |
-
elif hasattr(value, part):
|
296 |
-
value = getattr(value, part)
|
297 |
-
else:
|
298 |
-
value = None
|
299 |
-
if value is None:
|
300 |
-
break
|
301 |
-
except Exception:
|
302 |
-
value = None
|
303 |
-
return {"result": value}
|
304 |
-
|
305 |
-
|
306 |
-
tools = [
|
307 |
-
profile_analyzer,
|
308 |
-
job_matcher,
|
309 |
-
extract_from_state_tool
|
310 |
-
]
|
311 |
-
llm = ChatOpenAI(
|
312 |
-
api_key=groq_key,
|
313 |
-
base_url="https://api.groq.com/openai/v1",
|
314 |
-
model="llama3-8b-8192",
|
315 |
-
temperature=0
|
316 |
-
)
|
317 |
-
llm_with_tools = llm.bind_tools(tools)
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
# ========== 8. LANGGRAPH PIPELINE ==========
|
322 |
-
|
323 |
-
|
324 |
-
def chatbot_node(state: ChatbotState) -> ChatbotState:
|
325 |
-
validate_state(state)
|
326 |
-
|
327 |
-
messages = state.get("messages", [])
|
328 |
-
|
329 |
-
system_prompt = """
|
330 |
-
You are a helpful AI assistant specialized in LinkedIn profile coaching.
|
331 |
-
|
332 |
-
|
333 |
-
-
|
334 |
-
-
|
335 |
-
-
|
336 |
-
|
337 |
-
|
338 |
-
-
|
339 |
-
- If user asks to
|
340 |
-
- If
|
341 |
-
-
|
342 |
-
-
|
343 |
-
-
|
344 |
-
|
345 |
-
|
346 |
-
|
347 |
-
|
348 |
-
|
349 |
-
|
350 |
-
|
351 |
-
|
352 |
-
|
353 |
-
|
354 |
-
|
355 |
-
|
356 |
-
|
357 |
-
|
358 |
-
|
359 |
-
|
360 |
-
|
361 |
-
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
|
399 |
-
|
400 |
-
|
401 |
-
|
402 |
-
|
403 |
-
|
404 |
-
|
405 |
-
|
406 |
-
|
407 |
-
|
408 |
-
|
409 |
-
|
410 |
-
|
411 |
-
|
412 |
-
|
413 |
-
|
414 |
-
|
415 |
-
|
416 |
-
|
417 |
-
|
418 |
-
|
419 |
-
|
420 |
-
|
421 |
-
|
422 |
-
|
423 |
-
|
424 |
-
|
425 |
-
|
426 |
-
|
427 |
-
|
428 |
-
|
429 |
-
|
430 |
-
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
435 |
-
|
436 |
-
|
437 |
-
|
438 |
-
|
439 |
-
|
440 |
-
|
441 |
-
|
442 |
-
|
443 |
-
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
449 |
-
|
450 |
-
|
451 |
-
|
452 |
-
if
|
453 |
-
|
454 |
-
|
455 |
-
|
456 |
-
|
457 |
-
|
458 |
-
|
459 |
-
|
460 |
-
|
461 |
-
|
462 |
-
|
463 |
-
|
464 |
-
|
465 |
-
|
466 |
-
|
467 |
-
|
468 |
-
|
469 |
-
|
470 |
-
|
471 |
-
|
472 |
-
|
473 |
-
|
474 |
-
st.
|
475 |
-
|
476 |
-
|
477 |
-
|
478 |
-
st.
|
479 |
-
|
480 |
-
|
481 |
-
|
482 |
-
|
483 |
-
|
484 |
-
st.
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
|
489 |
-
|
490 |
-
|
491 |
-
|
492 |
-
|
493 |
-
|
494 |
-
|
495 |
-
|
496 |
-
.
|
497 |
-
|
498 |
-
.
|
499 |
-
|
500 |
-
|
501 |
-
|
502 |
-
|
503 |
-
|
504 |
-
|
505 |
-
|
506 |
-
|
507 |
-
|
508 |
-
|
509 |
-
|
510 |
-
|
511 |
-
|
512 |
-
|
513 |
-
|
514 |
-
|
515 |
-
|
516 |
-
|
517 |
-
|
518 |
-
|
519 |
-
|
520 |
-
|
521 |
-
|
522 |
-
|
523 |
-
|
524 |
-
|
525 |
-
|
526 |
-
|
527 |
-
|
528 |
-
|
529 |
-
|
530 |
-
|
531 |
-
|
532 |
-
|
533 |
-
|
534 |
-
|
535 |
-
|
536 |
-
|
537 |
-
|
538 |
-
|
539 |
-
|
540 |
-
|
541 |
-
|
542 |
-
|
543 |
-
|
544 |
-
|
545 |
-
|
546 |
-
|
547 |
-
|
548 |
-
|
549 |
-
|
550 |
-
|
551 |
-
|
552 |
-
|
553 |
-
|
554 |
-
|
555 |
-
|
556 |
-
|
557 |
-
|
558 |
-
|
559 |
-
|
560 |
-
|
561 |
-
|
562 |
-
|
563 |
-
|
564 |
-
|
565 |
-
|
566 |
-
|
567 |
-
|
568 |
-
|
569 |
-
|
570 |
-
|
571 |
-
|
572 |
-
|
573 |
-
|
574 |
-
|
575 |
-
|
576 |
-
|
577 |
-
|
578 |
-
|
579 |
-
|
580 |
-
|
581 |
-
|
582 |
-
|
583 |
-
|
584 |
-
|
585 |
-
|
586 |
-
|
587 |
-
|
588 |
-
|
589 |
-
|
590 |
-
|
591 |
-
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
"""
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
-
|
606 |
-
|
607 |
-
|
608 |
-
|
609 |
-
|
610 |
-
|
611 |
-
|
612 |
-
|
613 |
-
|
614 |
-
""
|
615 |
-
|
616 |
-
|
617 |
-
|
618 |
-
|
619 |
-
|
620 |
-
|
621 |
-
|
622 |
-
|
623 |
-
|
624 |
-
|
625 |
-
|
626 |
-
|
627 |
-
|
628 |
-
|
629 |
-
|
630 |
-
|
631 |
-
|
632 |
-
|
633 |
-
|
634 |
-
|
635 |
-
|
636 |
-
|
637 |
-
|
638 |
-
|
639 |
-
|
640 |
-
|
641 |
-
|
642 |
-
|
643 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import re
|
4 |
+
import time
|
5 |
+
from typing import Dict, Any, List, Optional, Annotated
|
6 |
+
from chatbot_model import (
|
7 |
+
UserMemory,
|
8 |
+
ChatbotState,
|
9 |
+
ProfileAnalysisModel,
|
10 |
+
JobFitModel,
|
11 |
+
ContentGenerationModel,
|
12 |
+
|
13 |
+
)
|
14 |
+
from llm_utils import call_llm_and_parse
|
15 |
+
from profile_preprocessing import (
|
16 |
+
preprocess_profile,
|
17 |
+
initialize_state,
|
18 |
+
normalize_url
|
19 |
+
)
|
20 |
+
from openai import OpenAI
|
21 |
+
import streamlit as st
|
22 |
+
import hashlib
|
23 |
+
from dotenv import load_dotenv
|
24 |
+
from pydantic import BaseModel, Field,ValidationError
|
25 |
+
# import pdb; pdb.set_trace()
|
26 |
+
from scraping_profile import scrape_linkedin_profile
|
27 |
+
from langchain_openai import ChatOpenAI
|
28 |
+
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage,BaseMessage,ToolMessage
|
29 |
+
from langchain_core.tools import tool
|
30 |
+
from langgraph.graph import StateGraph, END,START
|
31 |
+
from langgraph.checkpoint.memory import MemorySaver
|
32 |
+
from langgraph.graph import add_messages # if your framework exposes this
|
33 |
+
from langgraph.prebuilt import ToolNode,tools_condition,InjectedState
|
34 |
+
import dirtyjson
|
35 |
+
import sqlite3
|
36 |
+
try:
|
37 |
+
from langgraph.checkpoint.sqlite import SqliteSaver
|
38 |
+
SQLITE_AVAILABLE = True
|
39 |
+
except ImportError:
|
40 |
+
SQLITE_AVAILABLE = False
|
41 |
+
|
42 |
+
|
43 |
+
|
44 |
+
# ========== 1. ENVIRONMENT & LLM SETUP ==========
|
45 |
+
load_dotenv()
|
46 |
+
groq_key = os.getenv("GROQ_API_KEY")
|
47 |
+
assert groq_key, "GROQ_API_KEY not found in environment!"
|
48 |
+
groq_client=OpenAI(
|
49 |
+
api_key=os.getenv("GROQ_API_KEY"),
|
50 |
+
base_url="https://api.groq.com/openai/v1"
|
51 |
+
)
|
52 |
+
|
53 |
+
def normalize_url(url):
|
54 |
+
return url.strip().rstrip('/')
|
55 |
+
|
56 |
+
def validate_state(state: dict) -> None:
|
57 |
+
"""
|
58 |
+
Validate given state dict against ChatbotState schema.
|
59 |
+
Displays result in Streamlit instead of printing.
|
60 |
+
"""
|
61 |
+
# st.write("=== Validating chatbot state ===")
|
62 |
+
try:
|
63 |
+
ChatbotState.model_validate(state)
|
64 |
+
# st.success("β
State is valid!")
|
65 |
+
except ValidationError as e:
|
66 |
+
st.error("β Validation failed!")
|
67 |
+
errors_list = []
|
68 |
+
for error in e.errors():
|
69 |
+
loc = " β ".join(str(item) for item in error['loc'])
|
70 |
+
msg = error['msg']
|
71 |
+
errors_list.append(f"- At: {loc}\n Error: {msg}")
|
72 |
+
st.write("\n".join(errors_list))
|
73 |
+
# Optionally show raw validation error too:
|
74 |
+
st.expander("See raw validation error").write(str(e))
|
75 |
+
st.stop()
|
76 |
+
|
77 |
+
|
78 |
+
user_memory = UserMemory()
|
79 |
+
|
80 |
+
# ========== 7. AGENT FUNCTIONS ==========
|
81 |
+
|
82 |
+
def profile_analysis_prompt(profile: Dict[str, str]) -> str:
|
83 |
+
return f"""
|
84 |
+
You are a top-tier LinkedIn career coach and AI analyst.
|
85 |
+
|
86 |
+
Analyze the following candidate profile carefully.
|
87 |
+
|
88 |
+
Candidate profile data:
|
89 |
+
FullName: {profile.get("FullName", "")}
|
90 |
+
Headline: {profile.get("Headline", "")}
|
91 |
+
JobTitle: {profile.get("JobTitle", "")}
|
92 |
+
CompanyName: {profile.get("CompanyName", "")}
|
93 |
+
CompanyIndustry: {profile.get("CompanyIndustry", "")}
|
94 |
+
CurrentJobDuration: {profile.get("CurrentJobDuration", "")}
|
95 |
+
About: {profile.get("About", "")}
|
96 |
+
Experiences: {profile.get("Experiences", "")}
|
97 |
+
Skills: {profile.get("Skills", "")}
|
98 |
+
Educations: {profile.get("Educations", "")}
|
99 |
+
Certifications: {profile.get("Certifications", "")}
|
100 |
+
HonorsAndAwards: {profile.get("HonorsAndAwards", "")}
|
101 |
+
Verifications: {profile.get("Verifications", "")}
|
102 |
+
Highlights: {profile.get("Highlights", "")}
|
103 |
+
Projects: {profile.get("Projects", "")}
|
104 |
+
Publications: {profile.get("Publications", "")}
|
105 |
+
Patents: {profile.get("Patents", "")}
|
106 |
+
Courses: {profile.get("Courses", "")}
|
107 |
+
TestScores: {profile.get("TestScores", "")}
|
108 |
+
|
109 |
+
|
110 |
+
Identify and summarize:
|
111 |
+
1. strengths:
|
112 |
+
- technical strengths (skills, tools, frameworks)
|
113 |
+
- project strengths (impactful projects, innovation)
|
114 |
+
- educational strengths (degrees, certifications, awards)
|
115 |
+
- soft skills and personality traits (teamwork, leadership)
|
116 |
+
2. weaknesses:
|
117 |
+
- missing or weak technical skills
|
118 |
+
- gaps in projects, experience, or education
|
119 |
+
- unclear profile sections or missing context
|
120 |
+
3. actionable suggestions:
|
121 |
+
- concrete ways to improve profile headline, about section, or add projects
|
122 |
+
- suggestions to learn or highlight new skills
|
123 |
+
- ideas to make the profile more attractive for recruiters
|
124 |
+
|
125 |
+
Important instructions:
|
126 |
+
- Respond ONLY with valid JSON.
|
127 |
+
- Do NOT include text before or after JSON.
|
128 |
+
- Be concise but detailed.
|
129 |
+
|
130 |
+
|
131 |
+
|
132 |
+
Example JSON format:
|
133 |
+
{{
|
134 |
+
"strengths": {{
|
135 |
+
"technical": ["...", "..."],
|
136 |
+
"projects": ["...", "..."],
|
137 |
+
"education": ["...", "..."],
|
138 |
+
"soft_skills": ["...", "..."]
|
139 |
+
}},
|
140 |
+
"weaknesses": {{
|
141 |
+
"technical_gaps": ["...", "..."],
|
142 |
+
"project_or_experience_gaps": ["...", "..."],
|
143 |
+
"missing_context": ["...", "..."]
|
144 |
+
}},
|
145 |
+
"suggestions": [
|
146 |
+
"...",
|
147 |
+
"...",
|
148 |
+
"..."
|
149 |
+
]
|
150 |
+
}}
|
151 |
+
""".strip()
|
152 |
+
|
153 |
+
|
154 |
+
|
155 |
+
|
156 |
+
def job_fit_prompt(sections: Dict[str, str], target_role: str) -> str:
|
157 |
+
return f"""
|
158 |
+
You are an expert career coach and recruiter.
|
159 |
+
|
160 |
+
Compare the following candidate profile against the typical requirements for the role of "{target_role}".
|
161 |
+
|
162 |
+
Candidate Profile:
|
163 |
+
- Headline: {sections.get('headline', '')}
|
164 |
+
- About: {sections.get('about', '')}
|
165 |
+
- Job Title: {sections.get('job_title', '')}
|
166 |
+
- Company: {sections.get('company_name', '')}
|
167 |
+
- Industry: {sections.get('company_industry', '')}
|
168 |
+
- Current Job Duration: {sections.get('current_job_duration', '')}
|
169 |
+
- Skills: {sections.get('skills', '')}
|
170 |
+
- Projects: {sections.get('projects', '')}
|
171 |
+
- Educations: {sections.get('educations', '')}
|
172 |
+
- Certifications: {sections.get('certifications', '')}
|
173 |
+
- Honors & Awards: {sections.get('honors_and_awards', '')}
|
174 |
+
- Experiences: {sections.get('experiences', '')}
|
175 |
+
|
176 |
+
**Instructions:**
|
177 |
+
- Respond ONLY with valid JSON.
|
178 |
+
- Your JSON must exactly match the following schema:
|
179 |
+
{{
|
180 |
+
"match_score": 85,
|
181 |
+
"missing_skills": ["Skill1", "Skill2"],
|
182 |
+
"suggestions": ["...", "...", "..."]
|
183 |
+
}}
|
184 |
+
- "match_score": integer from 0β100 estimating how well the profile fits the target role.
|
185 |
+
- "missing_skills": key missing or weakly mentioned skills.
|
186 |
+
- "suggestions": 3 actionable recommendations to improve fit (e.g., learn tools, rewrite headline).
|
187 |
+
|
188 |
+
Do NOT include explanations, text outside JSON, or markdown.
|
189 |
+
Start with '{{' and end with '}}'.
|
190 |
+
The JSON must be directly parseable.
|
191 |
+
""".strip()
|
192 |
+
|
193 |
+
|
194 |
+
# --- Tool: Profile Analyzer ---
|
195 |
+
@tool
|
196 |
+
def profile_analyzer(state: Annotated[ChatbotState, InjectedState]) -> dict:
|
197 |
+
"""
|
198 |
+
Tool: Analyze the overall full user's profile to give strengths, weaknesses, suggestions.
|
199 |
+
This is needed only if full analysis of profile is needed.
|
200 |
+
Returns the full analysis in the form of a json.
|
201 |
+
|
202 |
+
- It takes no arguments
|
203 |
+
"""
|
204 |
+
|
205 |
+
|
206 |
+
# Get summarized profile (dictionary of strings)
|
207 |
+
profile = getattr(state, "profile", {}) or {}
|
208 |
+
|
209 |
+
# Build prompt
|
210 |
+
prompt = profile_analysis_prompt(profile)
|
211 |
+
|
212 |
+
# Call the LLM & parse structured result
|
213 |
+
analysis_model = call_llm_and_parse(groq_client,prompt, ProfileAnalysisModel)
|
214 |
+
analysis_dict = analysis_model.model_dump()
|
215 |
+
|
216 |
+
# Save to state and user memory
|
217 |
+
state.profile_analysis = analysis_dict
|
218 |
+
user_memory.save("profile_analysis", analysis_dict)
|
219 |
+
|
220 |
+
print("πΎ [DEBUG] Saved analysis to user memory.")
|
221 |
+
print("π¦ [DEBUG] Updated state.profile_analysis with analysis.")
|
222 |
+
|
223 |
+
return analysis_dict
|
224 |
+
|
225 |
+
# --- Tool: Job Matcher ---
|
226 |
+
|
227 |
+
|
228 |
+
@tool
|
229 |
+
def job_matcher(
|
230 |
+
state: Annotated[ChatbotState, InjectedState],
|
231 |
+
target_role: str = None
|
232 |
+
) -> dict:
|
233 |
+
"""
|
234 |
+
Tool: Analyze how well the user's profile fits the target role.
|
235 |
+
- If user is asking if he is a good fit for a certain role, or needs to see if his profile is compatible with a certain role, call this.
|
236 |
+
- Takes target_role as an argument.
|
237 |
+
- this tool is needed when match score, missing skills, suggestions are needed based on a job name given.
|
238 |
+
"""
|
239 |
+
print(f"target role is {target_role}")
|
240 |
+
# Update state.target_role if provided
|
241 |
+
|
242 |
+
sections = getattr(state, "sections", {})
|
243 |
+
|
244 |
+
# Build prompt
|
245 |
+
prompt = job_fit_prompt(sections, target_role)
|
246 |
+
|
247 |
+
# Call LLM and parse
|
248 |
+
try:
|
249 |
+
job_fit_model = call_llm_and_parse(groq_client,prompt, JobFitModel)
|
250 |
+
job_fit_dict = job_fit_model.model_dump()
|
251 |
+
job_fit_dict["target_role"] = target_role
|
252 |
+
except Exception as e:
|
253 |
+
job_fit_dict = {
|
254 |
+
"target_role":target_role,
|
255 |
+
"match_score": 0,
|
256 |
+
"missing_skills": [],
|
257 |
+
"suggestions": ["Parsing failed or incomplete response."]
|
258 |
+
}
|
259 |
+
|
260 |
+
# Save to state and user memory
|
261 |
+
state.job_fit = job_fit_dict
|
262 |
+
user_memory.save("job_fit", job_fit_dict)
|
263 |
+
|
264 |
+
return job_fit_dict
|
265 |
+
|
266 |
+
|
267 |
+
|
268 |
+
|
269 |
+
|
270 |
+
|
271 |
+
@tool
|
272 |
+
def extract_from_state_tool(
|
273 |
+
state: Annotated[ChatbotState, InjectedState],
|
274 |
+
key: str
|
275 |
+
) -> dict:
|
276 |
+
"""
|
277 |
+
This tool is used if user wants to ask about any particular part of this profile. Use this if a singe section is targeted. It expects key as an arguement, that represents what
|
278 |
+
the user is wanting to look at, from his profile.
|
279 |
+
Argument:
|
280 |
+
key: only pass one from the below list, identify one thing the user wants to look into and choose that:
|
281 |
+
"sections.about", "sections.headline", "sections.skills", "sections.projects",
|
282 |
+
"sections.educations", "sections.certifications", "sections.honors_and_awards",
|
283 |
+
"sections.experiences", "sections.publications", "sections.patents",
|
284 |
+
"sections.courses", "sections.test_scores", "sections.verifications",
|
285 |
+
"sections.highlights", "sections.job_title", "sections.company_name",
|
286 |
+
"sections.company_industry", "sections.current_job_duration", "sections.full_name",
|
287 |
+
"enhanced_content,"profile_analysis", "job_fit", "target_role", "editing_section"
|
288 |
+
"""
|
289 |
+
value = state
|
290 |
+
try:
|
291 |
+
for part in key.split('.'):
|
292 |
+
# Support both dict and Pydantic model
|
293 |
+
if isinstance(value, dict):
|
294 |
+
value = value.get(part)
|
295 |
+
elif hasattr(value, part):
|
296 |
+
value = getattr(value, part)
|
297 |
+
else:
|
298 |
+
value = None
|
299 |
+
if value is None:
|
300 |
+
break
|
301 |
+
except Exception:
|
302 |
+
value = None
|
303 |
+
return {"result": value}
|
304 |
+
|
305 |
+
|
306 |
+
tools = [
|
307 |
+
profile_analyzer,
|
308 |
+
job_matcher,
|
309 |
+
extract_from_state_tool
|
310 |
+
]
|
311 |
+
llm = ChatOpenAI(
|
312 |
+
api_key=groq_key,
|
313 |
+
base_url="https://api.groq.com/openai/v1",
|
314 |
+
model="llama3-8b-8192",
|
315 |
+
temperature=0
|
316 |
+
)
|
317 |
+
llm_with_tools = llm.bind_tools(tools)
|
318 |
+
|
319 |
+
|
320 |
+
|
321 |
+
# ========== 8. LANGGRAPH PIPELINE ==========
|
322 |
+
|
323 |
+
|
324 |
+
def chatbot_node(state: ChatbotState) -> ChatbotState:
|
325 |
+
validate_state(state)
|
326 |
+
|
327 |
+
messages = state.get("messages", [])
|
328 |
+
|
329 |
+
system_prompt = """
|
330 |
+
You are a helpful AI assistant specialized in LinkedIn profile coaching.
|
331 |
+
|
332 |
+
Guidelines:
|
333 |
+
- Greet the user if they greet you, and explain you can help analyze, enhance, and improve their LinkedIn profile.
|
334 |
+
- Prefer using tools instead of answering directly whenever this can give better, data-backed answers.
|
335 |
+
- Call only one tool at a time. Never call multiple tools together.
|
336 |
+
|
337 |
+
When to use tools:
|
338 |
+
- If the user asks to show a section (like About, Projects, etc.): call extract_from_state_tool, unless you already have that section stored.
|
339 |
+
- If the user asks to enhance a section: use extract_from_state_tool first if you donβt already have that section, then enhance it.
|
340 |
+
- If the user requests a full profile analysis: use profile_analyzer.
|
341 |
+
- If the user wants to know how well they fit a target job role: use job_matcher with the given role.
|
342 |
+
- Use tools to check strengths, weaknesses, missing skills, or improvement suggestions.
|
343 |
+
- If the tool was just called recently and info is still fresh, you may answer directly.
|
344 |
+
|
345 |
+
Important:
|
346 |
+
- Never describe or print JSON of a tool call.
|
347 |
+
- Never say "I'm about to call a tool" β just call the tool properly.
|
348 |
+
- Keep answers clear, helpful, and actionable.
|
349 |
+
|
350 |
+
Your goal: help the user see, improve, and analyze their LinkedIn profile.
|
351 |
+
|
352 |
+
"""
|
353 |
+
recent_messages = []
|
354 |
+
for msg in messages[-6:]: # last few, e.g., 6
|
355 |
+
if isinstance(msg, HumanMessage):
|
356 |
+
recent_messages.append({
|
357 |
+
"role": "user",
|
358 |
+
"content": f"User asked: {msg.content}"
|
359 |
+
})
|
360 |
+
elif isinstance(msg, AIMessage):
|
361 |
+
# keep only non-empty AI replies (actual answers)
|
362 |
+
if msg.content.strip():
|
363 |
+
recent_messages.append({
|
364 |
+
"role": "assistant",
|
365 |
+
"content": msg.content
|
366 |
+
})
|
367 |
+
elif isinstance(msg, ToolMessage):
|
368 |
+
recent_messages.append({
|
369 |
+
"role": "assistant",
|
370 |
+
"content": f"[Tool: {msg.name}] {msg.content}"
|
371 |
+
})
|
372 |
+
|
373 |
+
|
374 |
+
# Build messages & invoke LLM
|
375 |
+
messages = [SystemMessage(content=system_prompt)] + recent_messages
|
376 |
+
# messages = [SystemMessage(content=system_prompt)]
|
377 |
+
response = llm_with_tools.invoke(messages)
|
378 |
+
if hasattr(response, "tool_calls") and response.tool_calls:
|
379 |
+
first_tool = response.tool_calls[0]
|
380 |
+
tool_name = first_tool.get("name") if isinstance(first_tool, dict) else getattr(first_tool, "name", None)
|
381 |
+
tool_args = first_tool.get("args") if isinstance(first_tool, dict) else getattr(first_tool, "args", {})
|
382 |
+
print(f"[DEBBBBUUUUGGG] using tool {tool_name}")
|
383 |
+
|
384 |
+
# DEBUG
|
385 |
+
print("[DEBUG] LLM response:", response)
|
386 |
+
state.setdefault("messages", []).append(response)
|
387 |
+
|
388 |
+
return state
|
389 |
+
|
390 |
+
|
391 |
+
|
392 |
+
|
393 |
+
|
394 |
+
# --- Graph definition ---
|
395 |
+
graph = StateGraph(state_schema=ChatbotState)
|
396 |
+
graph.add_node("chatbot", chatbot_node)
|
397 |
+
graph.add_node("tools", ToolNode(tools))
|
398 |
+
graph.add_edge(START, "chatbot")
|
399 |
+
graph.add_conditional_edges("chatbot", tools_condition)
|
400 |
+
graph.add_edge("tools","chatbot")
|
401 |
+
graph.set_entry_point("chatbot")
|
402 |
+
|
403 |
+
# --- Streamlit UI ---
|
404 |
+
st.set_page_config(page_title="πΌ LinkedIn AI Career Assistant", page_icon="π€", layout="wide")
|
405 |
+
st.title("π§βπΌ LinkedIn AI Career Assistant")
|
406 |
+
|
407 |
+
# --- Checkpointer and graph initialization ---
# The checkpointer persists LangGraph conversation state across Streamlit
# reruns. Prefer the SQLite-backed saver (survives process restarts); fall
# back to an in-memory saver when langgraph's sqlite extra is unavailable.
# (Removed leftover debug prints of the working directory and a redundant
# function-local `import os` — os is already imported at the top of the file.)
if "checkpointer" not in st.session_state:
    if SQLITE_AVAILABLE:
        # check_same_thread=False: Streamlit may service reruns from
        # different threads, and they all share this one connection.
        conn = sqlite3.connect("checkpoints1.db", check_same_thread=False)
        st.session_state["checkpointer"] = SqliteSaver(conn)
    else:
        st.session_state["checkpointer"] = MemorySaver()
checkpointer = st.session_state["checkpointer"]
|
419 |
+
|
420 |
+
# Compile the LangGraph app once per browser session and cache it in
# session_state so subsequent reruns reuse the compiled graph.
if "app_graph" not in st.session_state:
    st.session_state["app_graph"] = graph.compile(checkpointer=checkpointer)
app_graph = st.session_state["app_graph"]
|
423 |
+
# Locate an existing thread for a profile URL (or report that none exists).
def find_thread_id_for_url(checkpointer, url, max_threads=100):
    """Scan checkpoint threads 0..max_threads-1 for one whose stored
    ``profile_url`` matches *url* after normalization.

    Returns ``(thread_id, channel_values)`` on a hit, else ``(None, None)``.
    """
    target = normalize_url(url)
    for idx in range(max_threads):
        cfg = {"configurable": {"thread_id": str(idx), "checkpoint_ns": ""}}
        checkpoint = checkpointer.get(cfg)
        # Skip empty slots and checkpoints without user channel data.
        if not checkpoint or "channel_values" not in checkpoint:
            continue
        values = checkpoint["channel_values"]
        if normalize_url(values.get("profile_url", "")) == target:
            return str(idx), values
    return None, None
|
435 |
+
|
436 |
+
def delete_thread_checkpoint(checkpointer, thread_id):
    """Remove all stored checkpoints for *thread_id*, when supported.

    SqliteSaver exposes ``delete_thread``; checkpointers without that
    method (e.g. MemorySaver) are treated as a silent no-op.
    """
    deleter = getattr(checkpointer, "delete_thread", None)
    if deleter is not None:
        deleter(thread_id)
|
443 |
+
|
444 |
+
|
445 |
+
def get_next_thread_id(checkpointer, max_threads=100):
    """Return the lowest thread id in ``[0, max_threads)`` that has no
    stored checkpoint.

    Raises:
        RuntimeError: when every candidate id is already occupied.
    """
    # First pass: collect every id that already holds a checkpoint.
    occupied = {
        tid
        for tid in range(max_threads)
        if checkpointer.get({"configurable": {"thread_id": str(tid), "checkpoint_ns": ""}})
    }
    # Second pass: pick the smallest free id, if any.
    free = next((tid for tid in range(max_threads) if tid not in occupied), None)
    if free is None:
        raise RuntimeError("No available thread_id")
    return str(free)
|
455 |
+
|
456 |
+
# --- Session selection and state initialization ---

def _start_new_session(url, thread_id):
    """Scrape the profile, initialize a fresh chat session under *thread_id*,
    then rerun the app so the main chat UI renders.

    Shared by the "start new chat over an existing thread" and "first visit
    for this URL" flows below (the original duplicated these statements in
    both branches).
    """
    with st.spinner("Fetching and processing profile... β³"):
        raw = scrape_linkedin_profile(url)
    st.session_state["chat_mode"] = "new"
    st.session_state["thread_id"] = thread_id
    st.session_state.state = initialize_state(raw)
    st.session_state.state["profile_url"] = normalize_url(url)
    st.session_state.state["messages"] = []
    st.rerun()


# Until chat_mode is chosen we only show the URL prompt / resume choice.
if "chat_mode" not in st.session_state:
    profile_url = st.text_input("Profile URL (e.g., https://www.linkedin.com/in/username/)")
    if not profile_url:
        st.info("Please enter a valid LinkedIn profile URL above to start.")
        st.stop()

    # Accept only canonical public-profile URLs: https://www.linkedin.com/in/<handle>/
    valid_pattern = r"^https://www\.linkedin\.com/in/[^/]+/?$"
    if not re.match(valid_pattern, profile_url.strip()):
        st.error("β Invalid LinkedIn profile URL. Make sure it matches the format.")
        st.stop()
    url = profile_url.strip()

    existing_thread_id, previous_state = find_thread_id_for_url(checkpointer, url)
    # Defensive: a resumable session must carry these keys with truthy values.
    required_fields = ["profile", "sections"]
    if previous_state and not all(f in previous_state and previous_state[f] for f in required_fields):
        st.warning("Previous session is missing required data. Please start a new chat.")
        previous_state = None

    if previous_state:
        st.info("A previous session found. Choose:")
        col1, col2 = st.columns(2)
        if col1.button("Continue previous chat"):
            st.session_state["chat_mode"] = "continue"
            st.session_state["thread_id"] = existing_thread_id
            st.session_state.state = previous_state
            st.rerun()
        elif col2.button("Start new chat"):
            # Drop the stale thread, then rebuild it under the same id.
            delete_thread_checkpoint(checkpointer, existing_thread_id)
            _start_new_session(url, existing_thread_id)
        st.stop()
    else:
        # No prior session for this URL: allocate a fresh thread id.
        _start_new_session(url, get_next_thread_id(checkpointer))
|
507 |
+
|
508 |
+
# --- Main chat UI (only after chat_mode is set) ---
state = st.session_state.state
thread_id = st.session_state.get("thread_id")

st.subheader("π¬ Chat with your AI Assistant")
messages = state.get("messages", [])
chat_container = st.container()

with chat_container:
    # Inject the chat-bubble stylesheet once; every bubble below depends on
    # these classes (chat-row/user/ai, bubble-user/ai/unknown, avatar, ...).
    st.markdown(
        """
        <style>
        .chat-row { display: flex; width: 100%; margin-bottom: 12px; animation: fadeIn 0.5s; }
        .chat-row.user { justify-content: flex-end; }
        .chat-row.ai { justify-content: flex-start; }
        .chat-bubble { font-family: 'Segoe UI', 'Roboto', 'Arial', sans-serif; font-size: 1.08rem; line-height: 1.65; padding: 14px 22px; border-radius: 20px; min-width: 60px; max-width: 75vw; box-shadow: 0 2px 12px rgba(0,0,0,0.10); word-break: break-word; display: inline-block; position: relative; margin-bottom: 2px; }
        .bubble-user { background: linear-gradient(90deg, #43e97b 0%, #38f9d7 100%); color: #fff; border-bottom-right-radius: 6px; border-top-right-radius: 22px; text-align: right; box-shadow: 0 4px 16px rgba(67,233,123,0.13); }
        .bubble-ai { background: linear-gradient(90deg, #e3f0ff 0%, #c9eaff 100%); color: #1a237e; border-bottom-left-radius: 6px; border-top-left-radius: 22px; text-align: left; border: 1.5px solid #b3e0fc; box-shadow: 0 4px 16px rgba(44, 62, 80, 0.08); }
        .bubble-unknown { background: #fffbe6; color: #8a6d3b; border-radius: 14px; text-align: center; border: 1px solid #ffe082; display: inline-block; }
        .sender-label { font-size: 0.93em; font-weight: 600; opacity: 0.7; margin-bottom: 4px; display: block; }
        .avatar { width: 38px; height: 38px; border-radius: 50%; margin-right: 10px; margin-top: 2px; background: #e0e0e0; object-fit: cover; box-shadow: 0 2px 6px rgba(0,0,0,0.07); }
        @keyframes fadeIn { from { opacity: 0; transform: translateY(12px);} to { opacity: 1; transform: translateY(0);} }
        </style>
        """,
        unsafe_allow_html=True,
    )

    # NOTE(review): job_fit is read here but never used below — candidate for
    # removal; confirm nothing later in the file depends on it.
    job_fit = state.get("job_fit")
    # Render the transcript. The message class picks the bubble:
    #   HumanMessage -> green right-aligned bubble
    #   AIMessage    -> blue left-aligned bubble (blank contents skipped)
    #   ToolMessage  -> structured card chosen by the tool's JSON payload
    # NOTE(review): msg.content is interpolated into HTML with
    # unsafe_allow_html=True without escaping — potential markup/XSS issue
    # if content ever contains HTML; consider html.escape().
    for msg in messages:
        if isinstance(msg, HumanMessage):
            st.markdown(
                f"""
                <div class="chat-row user">
                    <div class="chat-bubble bubble-user">
                        <span class="sender-label">π§βπ» You</span>
                        {msg.content}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif isinstance(msg, AIMessage):
            # Tool-call-only AI turns have empty content; don't render them.
            if not msg.content or not msg.content.strip():
                continue
            st.markdown(
                f"""
                <div class="chat-row ai">
                    <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="AI"/>
                    <div class="chat-bubble bubble-ai">
                        <span class="sender-label">π€ AI</span>
                        {msg.content}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif isinstance(msg, ToolMessage):
            raw_content = msg.content or "(no content)"
            # Tool output is expected to be JSON; fall back to None (and thus
            # no card) when it isn't parseable.
            try:
                parsed = json.loads(raw_content)
            except Exception:
                parsed = None

            if parsed and isinstance(parsed, dict):
                # --- Profile analysis format ---
                # assumes strengths/weaknesses are dicts of string lists and
                # suggestions is a list of strings (ProfileAnalysisModel) —
                # TODO confirm against the tool's schema.
                if all(k in parsed for k in ("strengths", "weaknesses", "suggestions")):
                    strengths = parsed["strengths"]
                    weaknesses = parsed["weaknesses"]
                    suggestions = parsed["suggestions"]
                    formatted_html = f"""
                    <h3>πͺ <b>Strengths</b></h3>
                    <ul>
                    <li><b>Technical:</b> {', '.join(strengths.get('technical', []) or ['None'])}</li>
                    <li><b>Projects:</b> {', '.join(strengths.get('projects', []) or ['None'])}</li>
                    <li><b>Education:</b> {', '.join(strengths.get('education', []) or ['None'])}</li>
                    <li><b>Soft Skills:</b> {', '.join(strengths.get('soft_skills', []) or ['None'])}</li>
                    </ul>

                    <h3>β οΈ <b>Weaknesses</b></h3>
                    <ul>
                    <li><b>Technical Gaps:</b> {', '.join(weaknesses.get('technical_gaps', []) or ['None'])}</li>
                    <li><b>Project/Experience Gaps:</b> {', '.join(weaknesses.get('project_or_experience_gaps', []) or ['None'])}</li>
                    <li><b>Missing Context:</b> {', '.join(weaknesses.get('missing_context', []) or ['None'])}</li>
                    </ul>

                    <h3>π <b>Suggestions to improve</b></h3>
                    <ul>
                    {''.join(f'<li>{s}</li>' for s in suggestions)}
                    </ul>
                    """

                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">π Profile Analysis</span>
                            {formatted_html}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

                # --- Job fit format ---
                # Renders a conic-gradient "score ring" plus missing skills
                # and suggestions. assumes match_score is a 0-100 number —
                # TODO confirm (values >100 overflow the 360° gradient).
                elif "match_score" in parsed:
                    percent = parsed["match_score"]
                    suggestions = parsed.get("suggestions", [])
                    missing = parsed.get("missing_skills", [])
                    target_role = parsed.get('target_role', 'unspecified')
                    # NOTE(review): rendering mutates session state here
                    # (persists target_role) — a side effect inside the
                    # render loop; confirm this is intentional.
                    state["target_role"]=target_role
                    suggestions_html = "<br>".join(f"β’ {s}" for s in suggestions)
                    missing_html = "<br>".join(f"β’ {s}" for s in missing)

                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">π Job Fit</span>
                            <b>π― Target Role:</b> {target_role}<br>
                            <div style="
                                width: 120px; height: 120px; border-radius: 50%;
                                background: conic-gradient(#25D366 {percent * 3.6}deg, #e0e0e0 0deg);
                                display: flex; align-items: center; justify-content: center;
                                font-size: 1.8rem; color: #333; margin: 10px auto;">
                                {percent}%
                            </div>
                            <b>Missing Skills:</b><br>{missing_html}<br><br>
                            <b>Suggestions:</b><br>{suggestions_html}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

                # --- Section text format ---
                # Generic single-string tool result ({"result": "..."}).
                elif "result" in parsed:
                    text = parsed["result"]
                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">π Section Content</span>
                            {text}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

        else:
            # Unknown message class: show its raw content in a warning bubble.
            st.markdown(
                f"""
                <div class="chat-row">
                    <div class="chat-bubble bubble-unknown">
                        <span class="sender-label">β οΈ Unknown</span>
                        {getattr(msg, 'content', str(msg))}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        # Clear the float/flex context after each row.
        st.markdown('<div style="clear:both"></div>', unsafe_allow_html=True)
|
666 |
+
|
667 |
+
st.markdown("---")

# Single chat input pinned to the bottom of the page.
user_input = st.chat_input(
    placeholder="Ask about your LinkedIn profile, e.g., 'Analyze my profile, how do I fit for AI role, how is my about section?'"
)

if user_input and user_input.strip():
    # Append the user's turn, then run one LangGraph step under this
    # session's thread id so the checkpointer persists the updated state.
    state.setdefault("messages", []).append(HumanMessage(content=user_input.strip()))
    # NOTE(review): validate_state is defined elsewhere in this file —
    # presumably raises or repairs a malformed state dict; confirm.
    validate_state(state)
    thread_id = st.session_state.get("thread_id")
    config = {"configurable": {"thread_id": thread_id}}
    with st.spinner("Processing your request..."):
        st.session_state.state = app_graph.invoke(state, config)
    # Rerun so the transcript above re-renders with the new messages.
    st.rerun()
|