tsrivallabh commited on
Commit
0d03aec
·
verified ·
1 Parent(s): 5318b09

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +680 -643
app.py CHANGED
@@ -1,643 +1,680 @@
1
- import os
2
- import json
3
- import re
4
- import time
5
- from typing import Dict, Any, List, Optional, Annotated
6
- from chatbot_model import (
7
- UserMemory,
8
- ChatbotState,
9
- ProfileAnalysisModel,
10
- JobFitModel,
11
- ContentGenerationModel,
12
-
13
- )
14
- from llm_utils import call_llm_and_parse
15
- from profile_preprocessing import (
16
- preprocess_profile,
17
- initialize_state,
18
- normalize_url
19
- )
20
- from openai import OpenAI
21
- import streamlit as st
22
- import hashlib
23
- from dotenv import load_dotenv
24
- from pydantic import BaseModel, Field,ValidationError
25
- # import pdb; pdb.set_trace()
26
- from scraping_profile import scrape_linkedin_profile
27
- from langchain_openai import ChatOpenAI
28
- from langchain_core.messages import SystemMessage, HumanMessage, AIMessage,BaseMessage,ToolMessage
29
- from langchain_core.tools import tool
30
- from langgraph.graph import StateGraph, END,START
31
- from langgraph.checkpoint.memory import MemorySaver
32
- from langgraph.graph import add_messages # if your framework exposes this
33
- from langgraph.prebuilt import ToolNode,tools_condition,InjectedState
34
- import dirtyjson
35
- import sqlite3
36
- try:
37
- from langgraph.checkpoint.sqlite import SqliteSaver
38
- SQLITE_AVAILABLE = True
39
- except ImportError:
40
- SQLITE_AVAILABLE = False
41
-
42
-
43
-
44
# ========== 1. ENVIRONMENT & LLM SETUP ==========
load_dotenv()
groq_key = os.getenv("GROQ_API_KEY")
assert groq_key, "GROQ_API_KEY not found in environment!"
# Raw OpenAI-protocol client pointed at Groq's compatible endpoint; used for the
# direct structured-output calls in call_llm_and_parse (the LangChain ChatOpenAI
# wrapper defined later handles the tool-calling chat loop).
groq_client=OpenAI(
    api_key=os.getenv("GROQ_API_KEY"),
    base_url="https://api.groq.com/openai/v1"
)
52
-
53
def normalize_url(url):
    """Canonicalize a profile URL: trim surrounding whitespace, drop trailing slashes.

    NOTE(review): this shadows the ``normalize_url`` imported from
    ``profile_preprocessing`` at the top of the file — confirm the two
    implementations agree.
    """
    cleaned = url.strip()
    return cleaned.rstrip('/')
55
-
56
def validate_state(state: dict) -> None:
    """
    Validate given state dict against ChatbotState schema.
    Displays result in Streamlit instead of printing.
    """
    try:
        ChatbotState.model_validate(state)
    except ValidationError as exc:
        # Surface a readable per-field error report in the UI, then halt the run.
        st.error("❌ Validation failed!")
        report = [
            f"- At: {' β†’ '.join(str(part) for part in err['loc'])}\n Error: {err['msg']}"
            for err in exc.errors()
        ]
        st.write("\n".join(report))
        # Raw pydantic error available behind an expander for debugging.
        st.expander("See raw validation error").write(str(exc))
        st.stop()
76
-
77
-
78
# Shared per-user memory store; the tools below persist their results into it
# so later turns can reuse prior analyses.
user_memory = UserMemory()

# ========== 7. AGENT FUNCTIONS ==========
81
-
82
def profile_analysis_prompt(profile: Dict[str, str]) -> str:
    """Build the LLM prompt that requests a JSON strengths/weaknesses/suggestions
    analysis of the full candidate *profile* (a flat section-name -> text dict).

    Missing sections render as empty strings; the trailing .strip() removes the
    leading/trailing newlines of the template.
    """
    return f"""
You are a top-tier LinkedIn career coach and AI analyst.

Analyze the following candidate profile carefully.

Candidate profile data:
FullName: {profile.get("FullName", "")}
Headline: {profile.get("Headline", "")}
JobTitle: {profile.get("JobTitle", "")}
CompanyName: {profile.get("CompanyName", "")}
CompanyIndustry: {profile.get("CompanyIndustry", "")}
CurrentJobDuration: {profile.get("CurrentJobDuration", "")}
About: {profile.get("About", "")}
Experiences: {profile.get("Experiences", "")}
Skills: {profile.get("Skills", "")}
Educations: {profile.get("Educations", "")}
Certifications: {profile.get("Certifications", "")}
HonorsAndAwards: {profile.get("HonorsAndAwards", "")}
Verifications: {profile.get("Verifications", "")}
Highlights: {profile.get("Highlights", "")}
Projects: {profile.get("Projects", "")}
Publications: {profile.get("Publications", "")}
Patents: {profile.get("Patents", "")}
Courses: {profile.get("Courses", "")}
TestScores: {profile.get("TestScores", "")}


Identify and summarize:
1. strengths:
- technical strengths (skills, tools, frameworks)
- project strengths (impactful projects, innovation)
- educational strengths (degrees, certifications, awards)
- soft skills and personality traits (teamwork, leadership)
2. weaknesses:
- missing or weak technical skills
- gaps in projects, experience, or education
- unclear profile sections or missing context
3. actionable suggestions:
- concrete ways to improve profile headline, about section, or add projects
- suggestions to learn or highlight new skills
- ideas to make the profile more attractive for recruiters

Important instructions:
- Respond ONLY with valid JSON.
- Do NOT include text before or after JSON.
- Be concise but detailed.



Example JSON format:
{{
"strengths": {{
"technical": ["...", "..."],
"projects": ["...", "..."],
"education": ["...", "..."],
"soft_skills": ["...", "..."]
}},
"weaknesses": {{
"technical_gaps": ["...", "..."],
"project_or_experience_gaps": ["...", "..."],
"missing_context": ["...", "..."]
}},
"suggestions": [
"...",
"...",
"..."
]
}}
""".strip()
152
-
153
-
154
-
155
-
156
def job_fit_prompt(sections: Dict[str, str], target_role: str) -> str:
    """Build the LLM prompt that scores the candidate's *sections* against
    *target_role*, requesting strict JSON: match_score / missing_skills / suggestions.

    Missing sections render as empty strings; .strip() trims the template edges.
    """
    return f"""
You are an expert career coach and recruiter.

Compare the following candidate profile against the typical requirements for the role of "{target_role}".

Candidate Profile:
- Headline: {sections.get('headline', '')}
- About: {sections.get('about', '')}
- Job Title: {sections.get('job_title', '')}
- Company: {sections.get('company_name', '')}
- Industry: {sections.get('company_industry', '')}
- Current Job Duration: {sections.get('current_job_duration', '')}
- Skills: {sections.get('skills', '')}
- Projects: {sections.get('projects', '')}
- Educations: {sections.get('educations', '')}
- Certifications: {sections.get('certifications', '')}
- Honors & Awards: {sections.get('honors_and_awards', '')}
- Experiences: {sections.get('experiences', '')}

**Instructions:**
- Respond ONLY with valid JSON.
- Your JSON must exactly match the following schema:
{{
"match_score": 85,
"missing_skills": ["Skill1", "Skill2"],
"suggestions": ["...", "...", "..."]
}}
- "match_score": integer from 0–100 estimating how well the profile fits the target role.
- "missing_skills": key missing or weakly mentioned skills.
- "suggestions": 3 actionable recommendations to improve fit (e.g., learn tools, rewrite headline).

Do NOT include explanations, text outside JSON, or markdown.
Start with '{{' and end with '}}'.
The JSON must be directly parseable.
""".strip()
192
-
193
-
194
# --- Tool: Profile Analyzer ---
@tool
def profile_analyzer(state: Annotated[ChatbotState, InjectedState]) -> dict:
    """
    Tool: Analyze the overall full user's profile to give strengths, weaknesses, suggestions.
    This is needed only if full analysis of profile is needed.
    Returns the full analysis in the form of a json.

    - It takes no arguments
    """
    # The summarized profile is a flat dict of section-name -> text; fall back
    # to an empty dict if the state carries none.
    profile = getattr(state, "profile", {}) or {}

    # Ask the LLM for a schema-validated full-profile analysis.
    parsed = call_llm_and_parse(groq_client, profile_analysis_prompt(profile), ProfileAnalysisModel)
    analysis_dict = parsed.model_dump()

    # Persist on both the graph state and the cross-turn user memory.
    state.profile_analysis = analysis_dict
    user_memory.save("profile_analysis", analysis_dict)

    print("πŸ’Ύ [DEBUG] Saved analysis to user memory.")
    print("πŸ“¦ [DEBUG] Updated state.profile_analysis with analysis.")

    return analysis_dict
224
-
225
# --- Tool: Job Matcher ---
@tool
def job_matcher(
    state: Annotated[ChatbotState, InjectedState],
    target_role: Optional[str] = None
) -> dict:
    """
    Tool: Analyze how well the user's profile fits the target role.
    - If user is asking if he is a good fit for a certain role, or needs to see if his profile is compatible with a certain role, call this.
    - Takes target_role as an argument.
    - this tool is needed when match score, missing skills, suggestions are needed based on a job name given.
    """
    # Fixes vs. original: the default-None parameter is annotated Optional[str]
    # instead of a bare `str`, and parse failures are logged instead of being
    # silently bound to an unused variable.
    print(f"target role is {target_role}")

    sections = getattr(state, "sections", {})
    prompt = job_fit_prompt(sections, target_role)

    try:
        # LLM call parsed/validated against the JobFitModel schema.
        job_fit_model = call_llm_and_parse(groq_client, prompt, JobFitModel)
        job_fit_dict = job_fit_model.model_dump()
        job_fit_dict["target_role"] = target_role
    except Exception as e:
        # Keep the chat usable on a malformed LLM response, but surface why.
        print(f"[job_matcher] LLM call/parse failed: {e}")
        job_fit_dict = {
            "target_role": target_role,
            "match_score": 0,
            "missing_skills": [],
            "suggestions": ["Parsing failed or incomplete response."]
        }

    # Persist on both the graph state and the cross-turn user memory.
    state.job_fit = job_fit_dict
    user_memory.save("job_fit", job_fit_dict)

    return job_fit_dict
265
-
266
-
267
-
268
-
269
-
270
-
271
@tool
def extract_from_state_tool(
    state: Annotated[ChatbotState, InjectedState],
    key: str
) -> dict:
    """
    This tool is used if user wants to ask about any particular part of this profile. Use this if a single section is targeted. It expects key as an argument, that represents what
    the user is wanting to look at, from his profile.
    Argument:
    key: only pass one from the below list, identify one thing the user wants to look into and choose that:
    "sections.about", "sections.headline", "sections.skills", "sections.projects",
    "sections.educations", "sections.certifications", "sections.honors_and_awards",
    "sections.experiences", "sections.publications", "sections.patents",
    "sections.courses", "sections.test_scores", "sections.verifications",
    "sections.highlights", "sections.job_title", "sections.company_name",
    "sections.company_industry", "sections.current_job_duration", "sections.full_name",
    "enhanced_content", "profile_analysis", "job_fit", "target_role", "editing_section"
    """
    # Fix: the docstring (which the LLM reads as the tool spec) had a broken
    # quote in the key list ('"enhanced_content,"profile_analysis"') plus
    # "singe"/"arguement" typos; the malformed list could mislead the model.
    #
    # Walk the dotted path, supporting both dict-style and attribute-style
    # containers at each hop; any miss yields {"result": None}.
    value = state
    try:
        for part in key.split('.'):
            if isinstance(value, dict):
                value = value.get(part)
            elif hasattr(value, part):
                value = getattr(value, part)
            else:
                value = None
            if value is None:
                break
    except Exception:
        # Defensive: never let a bad key crash the tool loop.
        value = None
    return {"result": value}
304
-
305
-
306
# All tools the chat LLM may invoke; the graph's ToolNode executes whichever
# one the model selects.
tools = [
    profile_analyzer,
    job_matcher,
    extract_from_state_tool
]
# Groq's OpenAI-compatible endpoint serving Llama 3 8B; temperature 0 keeps
# tool-call decisions deterministic.
llm = ChatOpenAI(
    api_key=groq_key,
    base_url="https://api.groq.com/openai/v1",
    model="llama3-8b-8192",
    temperature=0
)
# Expose the tool schemas to the model so it can emit structured tool calls.
llm_with_tools = llm.bind_tools(tools)
318
-
319
-
320
-
321
- # ========== 8. LANGGRAPH PIPELINE ==========
322
-
323
-
324
def chatbot_node(state: ChatbotState) -> ChatbotState:
    """LangGraph node: run the tool-enabled LLM over the recent conversation
    and append its reply (possibly a tool call) to state["messages"]."""
    validate_state(state)

    history = state.get("messages", [])

    system_prompt = """
    You are a helpful AI assistant specialized in LinkedIn profile coaching.

    You can:
    - Answer user questions.
    - If user is greeting , greet him back also telling how you can help him.
    - You should proactively use specialized tools whenever possible to give richer, data-driven answers.
    IMPORTANT RULES:
    - You must call at most one tool at a time.
    - Never call multiple tools together in the same step.
    - If user asks to show any section, use extract_from_state_tool, and after that, show the exact result from it.
    - If information about that section is already known, dont call extract_from_state_tool, directly answer the user query.
    - call profile_analyzer function only when full profile analysis is needed, otherwise rely on extract_from_state_tool.
    - If user asks to enhance any section, check if it is there in history, otherwise call extract_from_state_tool first.
    - Prefer to call a tool when answering instead of directly replying, especially if it can add new, useful insights or up-to-date data.
    - If a tool has been recently used and new info isn’t needed, you may answer directly.
    - Use tools to verify assumptions, enrich answers, or when the user asks about strengths, weaknesses, job fit, or wants improvements.

    Always respond helpfully, clearly, and with actionable advice to guide the user in improving their LinkedIn profile.
    """

    # Keep the context window small: system prompt plus the two newest messages.
    convo = [SystemMessage(content=system_prompt)] + history[-2:]
    response = llm_with_tools.invoke(convo)

    # Debug-log the first requested tool call, if any (tool calls may arrive
    # as dicts or as objects depending on the client version).
    if getattr(response, "tool_calls", None):
        first = response.tool_calls[0]
        if isinstance(first, dict):
            tool_name = first.get("name")
        else:
            tool_name = getattr(first, "name", None)
        print(f"[DEBBBBUUUUGGG] using tool {tool_name}")

    print("[DEBUG] LLM response:", response)
    state.setdefault("messages", []).append(response)

    return state
365
-
366
-
367
-
368
-
369
-
370
# --- Graph definition ---
# Two-node loop: the chatbot decides, the ToolNode executes the requested tool,
# and control returns to the chatbot until no further tool call is made.
graph = StateGraph(state_schema=ChatbotState)
graph.add_node("chatbot", chatbot_node)
graph.add_node("tools", ToolNode(tools))
graph.add_edge(START, "chatbot")
# tools_condition routes to "tools" when the last AI message holds tool calls,
# otherwise to END.
graph.add_conditional_edges("chatbot", tools_condition)
graph.add_edge("tools","chatbot")
# NOTE(review): set_entry_point duplicates the START edge above — confirm one
# can be dropped.
graph.set_entry_point("chatbot")

# --- Streamlit UI ---
st.set_page_config(page_title="πŸ’Ό LinkedIn AI Career Assistant", page_icon="πŸ€–", layout="wide")
st.title("πŸ§‘β€πŸ’Ό LinkedIn AI Career Assistant")

# --- Checkpointer and graph initialization ---
# Cache the checkpointer in session_state so Streamlit reruns reuse one
# connection instead of reopening the database every script run.
if "checkpointer" not in st.session_state:
    if SQLITE_AVAILABLE:
        conn = sqlite3.connect("checkpoints1.db", check_same_thread=False)
        st.session_state["checkpointer"] = SqliteSaver(conn)
    else:
        st.session_state["checkpointer"] = MemorySaver()
checkpointer = st.session_state["checkpointer"]

# Compile the graph once per session with persistent checkpointing.
if "app_graph" not in st.session_state:
    st.session_state["app_graph"] = graph.compile(checkpointer=checkpointer)
app_graph = st.session_state["app_graph"]
395
# Find or create thread
def find_thread_id_for_url(checkpointer, url, max_threads=100):
    """Scan thread ids 0..max_threads-1 for a checkpoint whose stored
    profile_url matches *url* (after normalization).

    Returns (thread_id_str, channel_values) on a match, else (None, None).
    """
    wanted = normalize_url(url)
    for candidate in range(max_threads):
        cfg = {"configurable": {"thread_id": str(candidate), "checkpoint_ns": ""}}
        snapshot = checkpointer.get(cfg)
        if not snapshot or "channel_values" not in snapshot:
            continue
        user_state = snapshot["channel_values"]
        if normalize_url(user_state.get("profile_url", "")) == wanted:
            return str(candidate), user_state
    return None, None
407
-
408
def delete_thread_checkpoint(checkpointer, thread_id):
    """Remove all persisted checkpoints for *thread_id*, when the backend supports it."""
    # SqliteSaver exposes delete_thread; other/custom savers silently no-op.
    delete = getattr(checkpointer, "delete_thread", None)
    if delete is not None:
        delete(thread_id)
415
-
416
-
417
def get_next_thread_id(checkpointer, max_threads=100):
    """Return the smallest thread id (as a string) that has no stored checkpoint.

    Probes ids 0..max_threads-1 against the checkpointer, then picks the first
    unused one; raises RuntimeError if every slot is occupied.
    """
    occupied = set()
    for candidate in range(max_threads):
        cfg = {"configurable": {"thread_id": str(candidate), "checkpoint_ns": ""}}
        if checkpointer.get(cfg):
            occupied.add(candidate)
    for candidate in range(max_threads):
        if candidate not in occupied:
            return str(candidate)
    raise RuntimeError("No available thread_id")
427
-
428
# --- Session selection and state initialization ---

# First run of a session: ask for the profile URL, then either resume the
# thread previously stored for that URL or scrape the profile and start fresh.
if "chat_mode" not in st.session_state:
    profile_url = st.text_input("Profile URL (e.g., https://www.linkedin.com/in/username/)")
    if not profile_url:
        st.info("Please enter a valid LinkedIn profile URL above to start.")
        st.stop()

    # Only canonical /in/<handle> profile URLs are accepted.
    valid_pattern = r"^https://www\.linkedin\.com/in/[^/]+/?$"
    if not re.match(valid_pattern, profile_url.strip()):
        st.error("❌ Invalid LinkedIn profile URL. Make sure it matches the format.")
        st.stop()
    url = profile_url.strip()

    existing_thread_id, previous_state = find_thread_id_for_url(checkpointer, url)
    # Defensive: ensure required fields
    required_fields = ["profile", "sections"]
    if previous_state and not all(f in previous_state and previous_state[f] for f in required_fields):
        st.warning("Previous session is missing required data. Please start a new chat.")
        previous_state = None

    if previous_state:
        st.info("A previous session found. Choose:")
        col1, col2 = st.columns(2)
        if col1.button("Continue previous chat"):
            st.session_state["chat_mode"] = "continue"
            st.session_state["thread_id"] = existing_thread_id
            st.session_state.state = previous_state
            st.rerun()
        elif col2.button("Start new chat"):
            # Discard the stale checkpoint, re-scrape, and reuse the thread id.
            delete_thread_checkpoint(checkpointer, existing_thread_id)
            with st.spinner("Fetching and processing profile... ⏳"):
                raw=scrape_linkedin_profile(url)
            thread_id = existing_thread_id
            st.session_state["chat_mode"] = "new"
            st.session_state["thread_id"] = thread_id
            st.session_state.state = initialize_state(raw)
            st.session_state.state["profile_url"] = normalize_url(url)
            st.session_state.state["messages"] = []
            st.rerun()
        st.stop()
    else:
        # No prior session for this URL: scrape and allocate a fresh thread id.
        with st.spinner("Fetching and processing profile... ⏳"):
            raw=scrape_linkedin_profile(url)
        thread_id = get_next_thread_id(checkpointer)
        st.session_state["thread_id"] = thread_id
        st.session_state["chat_mode"] = "new"
        st.session_state.state = initialize_state(raw)
        st.session_state.state["profile_url"] = normalize_url(url)
        st.session_state.state["messages"] = []
        st.rerun()
479
-
480
# --- Main chat UI (only after chat_mode is set) ---
state = st.session_state.state
thread_id = st.session_state.get("thread_id")

st.subheader("πŸ’¬ Chat with your AI Assistant")
messages = state.get("messages", [])
chat_container = st.container()

with chat_container:
    # Inject the chat-bubble stylesheet once per render.
    st.markdown(
        """
        <style>
        .chat-row { display: flex; width: 100%; margin-bottom: 12px; animation: fadeIn 0.5s; }
        .chat-row.user { justify-content: flex-end; }
        .chat-row.ai { justify-content: flex-start; }
        .chat-bubble { font-family: 'Segoe UI', 'Roboto', 'Arial', sans-serif; font-size: 1.08rem; line-height: 1.65; padding: 14px 22px; border-radius: 20px; min-width: 60px; max-width: 75vw; box-shadow: 0 2px 12px rgba(0,0,0,0.10); word-break: break-word; display: inline-block; position: relative; margin-bottom: 2px; }
        .bubble-user { background: linear-gradient(90deg, #43e97b 0%, #38f9d7 100%); color: #fff; border-bottom-right-radius: 6px; border-top-right-radius: 22px; text-align: right; box-shadow: 0 4px 16px rgba(67,233,123,0.13); }
        .bubble-ai { background: linear-gradient(90deg, #e3f0ff 0%, #c9eaff 100%); color: #1a237e; border-bottom-left-radius: 6px; border-top-left-radius: 22px; text-align: left; border: 1.5px solid #b3e0fc; box-shadow: 0 4px 16px rgba(44, 62, 80, 0.08); }
        .bubble-unknown { background: #fffbe6; color: #8a6d3b; border-radius: 14px; text-align: center; border: 1px solid #ffe082; display: inline-block; }
        .sender-label { font-size: 0.93em; font-weight: 600; opacity: 0.7; margin-bottom: 4px; display: block; }
        .avatar { width: 38px; height: 38px; border-radius: 50%; margin-right: 10px; margin-top: 2px; background: #e0e0e0; object-fit: cover; box-shadow: 0 2px 6px rgba(0,0,0,0.07); }
        @keyframes fadeIn { from { opacity: 0; transform: translateY(12px);} to { opacity: 1; transform: translateY(0);} }
        </style>
        """,
        unsafe_allow_html=True,
    )

    # NOTE(review): job_fit is read here but never used in this render pass —
    # confirm it can be removed.
    job_fit = state.get("job_fit")
    # Render the transcript: user bubbles right, AI bubbles left, tool output
    # pretty-printed per payload shape (profile analysis / job fit / section).
    for msg in messages:
        if isinstance(msg, HumanMessage):
            st.markdown(
                f"""
                <div class="chat-row user">
                    <div class="chat-bubble bubble-user">
                        <span class="sender-label">πŸ§‘β€πŸ’» You</span>
                        {msg.content}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif isinstance(msg, AIMessage):
            # Skip empty AI turns (pure tool-call messages carry no content).
            if not msg.content or not msg.content.strip():
                continue
            st.markdown(
                f"""
                <div class="chat-row ai">
                    <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="AI"/>
                    <div class="chat-bubble bubble-ai">
                        <span class="sender-label">πŸ€– AI</span>
                        {msg.content}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif isinstance(msg, ToolMessage):
            raw_content = msg.content or "(no content)"
            # Tool results are JSON; fall back to raw text when parsing fails.
            try:
                parsed = json.loads(raw_content)
            except Exception:
                parsed = None

            if parsed and isinstance(parsed, dict):
                # --- Profile analysis format ---
                if all(k in parsed for k in ("strengths", "weaknesses", "suggestions")):
                    strengths = parsed["strengths"]
                    weaknesses = parsed["weaknesses"]
                    suggestions = parsed["suggestions"]

                    formatted = (
                        "### πŸ’ͺ **Strengths**\n"
                        f"- **Technical:** {', '.join(strengths.get('technical', []) or ['None'])}\n"
                        f"- **Projects:** {', '.join(strengths.get('projects', []) or ['None'])}\n"
                        f"- **Education:** {', '.join(strengths.get('education', []) or ['None'])}\n"
                        f"- **Soft Skills:** {', '.join(strengths.get('soft_skills', []) or ['None'])}\n\n"
                        "### ⚠️ **Weaknesses**\n"
                        f"- **Technical Gaps:** {', '.join(weaknesses.get('technical_gaps', []) or ['None'])}\n"
                        f"- **Project/Experience Gaps:** {', '.join(weaknesses.get('project_or_experience_gaps', []) or ['None'])}\n"
                        f"- **Missing Context:** {', '.join(weaknesses.get('missing_context', []) or ['None'])}\n\n"
                        "### πŸ›  **Suggestions to improve**\n"
                        + "\n".join(f"- {s}" for s in suggestions)
                    )

                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">πŸ“Š Profile Analysis</span>
                            {formatted}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

                # --- Job fit format ---
                elif "match_score" in parsed:
                    percent = parsed["match_score"]
                    suggestions = parsed.get("suggestions", [])
                    missing = parsed.get("missing_skills", [])
                    target_role = parsed.get('target_role', 'unspecified')
                    # NOTE(review): mutating state during render — confirm this
                    # side effect is intentional.
                    state["target_role"]=target_role
                    suggestions_html = "<br>".join(f"β€’ {s}" for s in suggestions)
                    missing_html = "<br>".join(f"β€’ {s}" for s in missing)

                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">πŸ“Š Job Fit</span>
                            <b>🎯 Target Role:</b> {target_role}<br>
                            <div style="
                                width: 120px; height: 120px; border-radius: 50%;
                                background: conic-gradient(#25D366 {percent * 3.6}deg, #e0e0e0 0deg);
                                display: flex; align-items: center; justify-content: center;
                                font-size: 1.8rem; color: #333; margin: 10px auto;">
                                {percent}%
                            </div>
                            <b>Missing Skills:</b><br>{missing_html}<br><br>
                            <b>Suggestions:</b><br>{suggestions_html}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

                # --- Section text format ---
                elif "result" in parsed:
                    text = parsed["result"]
                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">πŸ“„ Section Content</span>
                            {text}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

        else:
            # Any other message type renders as a neutral "unknown" bubble.
            st.markdown(
                f"""
                <div class="chat-row">
                    <div class="chat-bubble bubble-unknown">
                        <span class="sender-label">⚠️ Unknown</span>
                        {getattr(msg, 'content', str(msg))}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
    # Clear floats after the transcript. NOTE(review): placement relative to the
    # loop reconstructed from a mangled dump — confirm against the original file.
    st.markdown('<div style="clear:both"></div>', unsafe_allow_html=True)

st.markdown("---")

user_input = st.chat_input(
    placeholder="Ask about your LinkedIn profile, e.g., 'Analyze my profile, how do I fit for AI role, how is my about section?'"
)

# On each user turn: append the message, validate, and run one graph step under
# this session's thread id so the checkpointer persists the conversation.
if user_input and user_input.strip():
    state.setdefault("messages", []).append(HumanMessage(content=user_input.strip()))
    validate_state(state)
    thread_id = st.session_state.get("thread_id")
    config = {"configurable": {"thread_id": thread_id}}
    with st.spinner("Processing your request..."):
        st.session_state.state = app_graph.invoke(state, config)
    st.rerun()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import re
4
+ import time
5
+ from typing import Dict, Any, List, Optional, Annotated
6
+ from chatbot_model import (
7
+ UserMemory,
8
+ ChatbotState,
9
+ ProfileAnalysisModel,
10
+ JobFitModel,
11
+ ContentGenerationModel,
12
+
13
+ )
14
+ from llm_utils import call_llm_and_parse
15
+ from profile_preprocessing import (
16
+ preprocess_profile,
17
+ initialize_state,
18
+ normalize_url
19
+ )
20
+ from openai import OpenAI
21
+ import streamlit as st
22
+ import hashlib
23
+ from dotenv import load_dotenv
24
+ from pydantic import BaseModel, Field,ValidationError
25
+ # import pdb; pdb.set_trace()
26
+ from scraping_profile import scrape_linkedin_profile
27
+ from langchain_openai import ChatOpenAI
28
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage,BaseMessage,ToolMessage
29
+ from langchain_core.tools import tool
30
+ from langgraph.graph import StateGraph, END,START
31
+ from langgraph.checkpoint.memory import MemorySaver
32
+ from langgraph.graph import add_messages # if your framework exposes this
33
+ from langgraph.prebuilt import ToolNode,tools_condition,InjectedState
34
+ import dirtyjson
35
+ import sqlite3
36
+ try:
37
+ from langgraph.checkpoint.sqlite import SqliteSaver
38
+ SQLITE_AVAILABLE = True
39
+ except ImportError:
40
+ SQLITE_AVAILABLE = False
41
+
42
+
43
+
44
+ # ========== 1. ENVIRONMENT & LLM SETUP ==========
45
+ load_dotenv()
46
+ groq_key = os.getenv("GROQ_API_KEY")
47
+ assert groq_key, "GROQ_API_KEY not found in environment!"
48
+ groq_client=OpenAI(
49
+ api_key=os.getenv("GROQ_API_KEY"),
50
+ base_url="https://api.groq.com/openai/v1"
51
+ )
52
+
53
+ def normalize_url(url):
54
+ return url.strip().rstrip('/')
55
+
56
+ def validate_state(state: dict) -> None:
57
+ """
58
+ Validate given state dict against ChatbotState schema.
59
+ Displays result in Streamlit instead of printing.
60
+ """
61
+ # st.write("=== Validating chatbot state ===")
62
+ try:
63
+ ChatbotState.model_validate(state)
64
+ # st.success("βœ… State is valid!")
65
+ except ValidationError as e:
66
+ st.error("❌ Validation failed!")
67
+ errors_list = []
68
+ for error in e.errors():
69
+ loc = " β†’ ".join(str(item) for item in error['loc'])
70
+ msg = error['msg']
71
+ errors_list.append(f"- At: {loc}\n Error: {msg}")
72
+ st.write("\n".join(errors_list))
73
+ # Optionally show raw validation error too:
74
+ st.expander("See raw validation error").write(str(e))
75
+ st.stop()
76
+
77
+
78
+ user_memory = UserMemory()
79
+
80
+ # ========== 7. AGENT FUNCTIONS ==========
81
+
82
+ def profile_analysis_prompt(profile: Dict[str, str]) -> str:
83
+ return f"""
84
+ You are a top-tier LinkedIn career coach and AI analyst.
85
+
86
+ Analyze the following candidate profile carefully.
87
+
88
+ Candidate profile data:
89
+ FullName: {profile.get("FullName", "")}
90
+ Headline: {profile.get("Headline", "")}
91
+ JobTitle: {profile.get("JobTitle", "")}
92
+ CompanyName: {profile.get("CompanyName", "")}
93
+ CompanyIndustry: {profile.get("CompanyIndustry", "")}
94
+ CurrentJobDuration: {profile.get("CurrentJobDuration", "")}
95
+ About: {profile.get("About", "")}
96
+ Experiences: {profile.get("Experiences", "")}
97
+ Skills: {profile.get("Skills", "")}
98
+ Educations: {profile.get("Educations", "")}
99
+ Certifications: {profile.get("Certifications", "")}
100
+ HonorsAndAwards: {profile.get("HonorsAndAwards", "")}
101
+ Verifications: {profile.get("Verifications", "")}
102
+ Highlights: {profile.get("Highlights", "")}
103
+ Projects: {profile.get("Projects", "")}
104
+ Publications: {profile.get("Publications", "")}
105
+ Patents: {profile.get("Patents", "")}
106
+ Courses: {profile.get("Courses", "")}
107
+ TestScores: {profile.get("TestScores", "")}
108
+
109
+
110
+ Identify and summarize:
111
+ 1. strengths:
112
+ - technical strengths (skills, tools, frameworks)
113
+ - project strengths (impactful projects, innovation)
114
+ - educational strengths (degrees, certifications, awards)
115
+ - soft skills and personality traits (teamwork, leadership)
116
+ 2. weaknesses:
117
+ - missing or weak technical skills
118
+ - gaps in projects, experience, or education
119
+ - unclear profile sections or missing context
120
+ 3. actionable suggestions:
121
+ - concrete ways to improve profile headline, about section, or add projects
122
+ - suggestions to learn or highlight new skills
123
+ - ideas to make the profile more attractive for recruiters
124
+
125
+ Important instructions:
126
+ - Respond ONLY with valid JSON.
127
+ - Do NOT include text before or after JSON.
128
+ - Be concise but detailed.
129
+
130
+
131
+
132
+ Example JSON format:
133
+ {{
134
+ "strengths": {{
135
+ "technical": ["...", "..."],
136
+ "projects": ["...", "..."],
137
+ "education": ["...", "..."],
138
+ "soft_skills": ["...", "..."]
139
+ }},
140
+ "weaknesses": {{
141
+ "technical_gaps": ["...", "..."],
142
+ "project_or_experience_gaps": ["...", "..."],
143
+ "missing_context": ["...", "..."]
144
+ }},
145
+ "suggestions": [
146
+ "...",
147
+ "...",
148
+ "..."
149
+ ]
150
+ }}
151
+ """.strip()
152
+
153
+
154
+
155
+
156
def job_fit_prompt(sections: Dict[str, str], target_role: str) -> str:
    """Build the LLM prompt that asks for a JSON job-fit assessment of the
    candidate profile against *target_role*.

    Args:
        sections: Preprocessed profile sections (headline, about, skills, ...).
        target_role: The job title to compare the profile against.

    Returns:
        A stripped prompt string instructing the model to reply with JSON
        containing match_score, missing_skills and suggestions.
    """
    # (display label, sections key) pairs, in the exact order they appear
    # in the rendered prompt.
    fields = [
        ("Headline", "headline"),
        ("About", "about"),
        ("Job Title", "job_title"),
        ("Company", "company_name"),
        ("Industry", "company_industry"),
        ("Current Job Duration", "current_job_duration"),
        ("Skills", "skills"),
        ("Projects", "projects"),
        ("Educations", "educations"),
        ("Certifications", "certifications"),
        ("Honors & Awards", "honors_and_awards"),
        ("Experiences", "experiences"),
    ]
    profile_block = "\n".join(
        f"- {label}: {sections.get(key, '')}" for label, key in fields
    )

    prompt = f"""
You are an expert career coach and recruiter.

Compare the following candidate profile against the typical requirements for the role of "{target_role}".

Candidate Profile:
{profile_block}

**Instructions:**
- Respond ONLY with valid JSON.
- Your JSON must exactly match the following schema:
{{
"match_score": 85,
"missing_skills": ["Skill1", "Skill2"],
"suggestions": ["...", "...", "..."]
}}
- "match_score": integer from 0–100 estimating how well the profile fits the target role.
- "missing_skills": key missing or weakly mentioned skills.
- "suggestions": 3 actionable recommendations to improve fit (e.g., learn tools, rewrite headline).

Do NOT include explanations, text outside JSON, or markdown.
Start with '{{' and end with '}}'.
The JSON must be directly parseable.
"""
    return prompt.strip()
192
+
193
+
194
# --- Tool: Profile Analyzer ---
@tool
def profile_analyzer(state: Annotated[ChatbotState, InjectedState]) -> dict:
    """
    Tool: Analyze the overall full user's profile to give strengths, weaknesses, suggestions.
    This is needed only if full analysis of profile is needed.
    Returns the full analysis in the form of a json.

    - It takes no arguments
    """
    # NOTE: this docstring doubles as the tool description the LLM sees, so
    # it is phrased as guidance on *when* to call the tool.

    # Summarized profile built during preprocessing; fall back to an empty
    # dict so the prompt builder never receives None.
    profile = getattr(state, "profile", {}) or {}

    # Build the profile-analysis prompt from the summarized sections.
    prompt = profile_analysis_prompt(profile)

    # Call the LLM and parse the reply into the ProfileAnalysisModel schema.
    analysis_model = call_llm_and_parse(groq_client,prompt, ProfileAnalysisModel)
    analysis_dict = analysis_model.model_dump()

    # Persist the result on the graph state and in long-term user memory.
    # NOTE(review): `user_memory` is a module-level object defined elsewhere
    # in this file — confirm it is initialized before this tool can run.
    state.profile_analysis = analysis_dict
    user_memory.save("profile_analysis", analysis_dict)

    print("💾 [DEBUG] Saved analysis to user memory.")
    print("📦 [DEBUG] Updated state.profile_analysis with analysis.")

    return analysis_dict
224
+
225
# --- Tool: Job Matcher ---


@tool
def job_matcher(
    state: Annotated[ChatbotState, InjectedState],
    target_role: Optional[str] = None
) -> dict:
    """
    Tool: Analyze how well the user's profile fits the target role.
    - If user is asking if he is a good fit for a certain role, or needs to see if his profile is compatible with a certain role, call this.
    - Takes target_role as an argument.
    - this tool is needed when match score, missing skills, suggestions are needed based on a job name given.
    """
    print(f"target role is {target_role}")
    # NOTE(review): nothing here writes state.target_role, even though an
    # earlier comment suggested it should — the role is only echoed back in
    # the returned dict; confirm whether persisting it on state is intended.

    # Preprocessed profile sections (headline, skills, ...) used by the prompt.
    sections = getattr(state, "sections", {})

    # Build the job-fit comparison prompt for the LLM.
    prompt = job_fit_prompt(sections, target_role)

    # Call the LLM; fall back to a zero-score stub on any failure so the
    # graph keeps running instead of crashing on a malformed reply.
    try:
        job_fit_model = call_llm_and_parse(groq_client,prompt, JobFitModel)
        job_fit_dict = job_fit_model.model_dump()
        job_fit_dict["target_role"] = target_role
    except Exception as e:
        job_fit_dict = {
            "target_role":target_role,
            "match_score": 0,
            "missing_skills": [],
            "suggestions": ["Parsing failed or incomplete response."]
        }

    # Persist the result on the graph state and in long-term user memory.
    state.job_fit = job_fit_dict
    user_memory.save("job_fit", job_fit_dict)

    return job_fit_dict
265
+
266
+
267
+
268
+
269
+
270
+
271
@tool
def extract_from_state_tool(
    state: Annotated[ChatbotState, InjectedState],
    key: str
) -> dict:
    """
    This tool is used if the user wants to ask about any particular part of their profile.
    Use this if a single section is targeted. It expects `key` as an argument, representing
    what the user wants to look at from their profile.
    Argument:
        key: only pass one from the below list; identify the one thing the user wants to look into and choose that:
        "sections.about", "sections.headline", "sections.skills", "sections.projects",
        "sections.educations", "sections.certifications", "sections.honors_and_awards",
        "sections.experiences", "sections.publications", "sections.patents",
        "sections.courses", "sections.test_scores", "sections.verifications",
        "sections.highlights", "sections.job_title", "sections.company_name",
        "sections.company_industry", "sections.current_job_duration", "sections.full_name",
        "enhanced_content", "profile_analysis", "job_fit", "target_role", "editing_section"
    """
    # Walk the dotted path (e.g. "sections.about") one component at a time.
    # Any lookup failure yields {"result": None} rather than raising, so the
    # LLM gets a well-formed tool reply even for bad keys.
    value = state
    try:
        for part in key.split('.'):
            # Support both dict and Pydantic model
            if isinstance(value, dict):
                value = value.get(part)
            elif hasattr(value, part):
                value = getattr(value, part)
            else:
                value = None
            if value is None:
                break
    except Exception:
        value = None
    return {"result": value}
304
+
305
+
306
# Tools exposed to the LLM; ToolNode executes whichever one the model calls.
tools = [
    profile_analyzer,
    job_matcher,
    extract_from_state_tool
]
# Groq-hosted Llama 3 accessed through Groq's OpenAI-compatible endpoint.
# temperature=0 keeps tool selection deterministic.
llm = ChatOpenAI(
    api_key=groq_key,
    base_url="https://api.groq.com/openai/v1",
    model="llama3-8b-8192",
    temperature=0
)
# Bind the tool schemas so the model can emit structured tool calls.
llm_with_tools = llm.bind_tools(tools)
318
+
319
+
320
+
321
+ # ========== 8. LANGGRAPH PIPELINE ==========
322
+
323
+
324
def chatbot_node(state: ChatbotState) -> ChatbotState:
    """LangGraph node: run the tool-aware LLM over the recent conversation.

    Builds a prompt window (system prompt + last six history messages),
    invokes the tool-bound LLM, and appends the model's response — which may
    contain a tool call for ToolNode to execute — to ``state["messages"]``.

    Args:
        state: The graph state; its "messages" list is read and mutated.

    Returns:
        The same state object with the LLM response appended.
    """
    validate_state(state)

    # Fix: keep the raw history under its own name instead of reusing one
    # `messages` variable for both the history and the LLM input below.
    history = state.get("messages", [])

    system_prompt = """
    You are a helpful AI assistant specialized in LinkedIn profile coaching.

    Guidelines:
    - Greet the user if they greet you, and explain you can help analyze, enhance, and improve their LinkedIn profile.
    - Prefer using tools instead of answering directly whenever this can give better, data-backed answers.
    - Call only one tool at a time. Never call multiple tools together.

    When to use tools:
    - If the user asks to show a section (like About, Projects, etc.): call extract_from_state_tool, unless you already have that section stored.
    - If the user asks to enhance a section: use extract_from_state_tool first if you don't already have that section, then enhance it.
    - If the user requests a full profile analysis: use profile_analyzer.
    - If the user wants to know how well they fit a target job role: use job_matcher with the given role.
    - Use tools to check strengths, weaknesses, missing skills, or improvement suggestions.
    - If the tool was just called recently and info is still fresh, you may answer directly.

    Important:
    - Never describe or print JSON of a tool call.
    - Never say "I'm about to call a tool" — just call the tool properly.
    - Keep answers clear, helpful, and actionable.

    Your goal: help the user see, improve, and analyze their LinkedIn profile.

    """

    # Re-shape the last few turns into plain role/content dicts. Empty AI
    # messages (pure tool-call stubs) are dropped; tool results are folded
    # in as assistant text so the model sees recent tool output.
    recent_messages = []
    for msg in history[-6:]:  # last few turns, e.g. 6
        if isinstance(msg, HumanMessage):
            recent_messages.append({
                "role": "user",
                "content": f"User asked: {msg.content}"
            })
        elif isinstance(msg, AIMessage):
            # keep only non-empty AI replies (actual answers)
            if msg.content.strip():
                recent_messages.append({
                    "role": "assistant",
                    "content": msg.content
                })
        elif isinstance(msg, ToolMessage):
            recent_messages.append({
                "role": "assistant",
                "content": f"[Tool: {msg.name}] {msg.content}"
            })

    # Build LLM input (system prompt first) and invoke the tool-bound model.
    llm_input = [SystemMessage(content=system_prompt)] + recent_messages
    response = llm_with_tools.invoke(llm_input)

    # Debug: log which tool (if any) the model chose. Fix: the previously
    # extracted-but-unused tool args are no longer computed.
    if getattr(response, "tool_calls", None):
        first_tool = response.tool_calls[0]
        tool_name = (
            first_tool.get("name")
            if isinstance(first_tool, dict)
            else getattr(first_tool, "name", None)
        )
        print(f"[DEBUG] using tool {tool_name}")

    print("[DEBUG] LLM response:", response)
    state.setdefault("messages", []).append(response)

    return state
389
+
390
+
391
+
392
+
393
+
394
# --- Graph definition ---
# Two-node loop: `chatbot` decides whether to call a tool; `tools` (ToolNode)
# executes it and hands the result back to `chatbot` until no tool call
# remains, at which point tools_condition routes to END.
graph = StateGraph(state_schema=ChatbotState)
graph.add_node("chatbot", chatbot_node)
graph.add_node("tools", ToolNode(tools))
graph.add_edge(START, "chatbot")
# Route to "tools" when the last AI message carries tool calls, else END.
graph.add_conditional_edges("chatbot", tools_condition)
graph.add_edge("tools","chatbot")
# NOTE(review): redundant with add_edge(START, "chatbot") above — both
# declare the same entry edge; one of the two could be dropped.
graph.set_entry_point("chatbot")
402
+
403
# --- Streamlit UI ---
st.set_page_config(page_title="💼 LinkedIn AI Career Assistant", page_icon="🤖", layout="wide")
st.title("🧑‍💼 LinkedIn AI Career Assistant")

# --- Checkpointer and graph initialization ---
# Cache the checkpointer in session state so Streamlit reruns reuse one
# connection instead of reopening the database on every interaction.
if "checkpointer" not in st.session_state:
    if SQLITE_AVAILABLE:
        import os
        print("Current working directory:", os.getcwd())
        print("Files in working directory:", os.listdir("."))

        # check_same_thread=False: Streamlit may touch this connection from
        # different threads across reruns.
        conn = sqlite3.connect("checkpoints1.db", check_same_thread=False)
        st.session_state["checkpointer"] = SqliteSaver(conn)
    else:
        # Fallback: in-memory persistence (lost when the process restarts).
        st.session_state["checkpointer"] = MemorySaver()
checkpointer = st.session_state["checkpointer"]

# Compile the graph once per session, wired to the persistent checkpointer.
if "app_graph" not in st.session_state:
    st.session_state["app_graph"] = graph.compile(checkpointer=checkpointer)
app_graph = st.session_state["app_graph"]
423
# Find or create thread
def find_thread_id_for_url(checkpointer, url, max_threads=100):
    """Scan thread ids 0..max_threads-1 for a stored checkpoint whose
    saved profile_url matches *url*.

    Returns:
        (thread_id_str, channel_values) on the first match,
        (None, None) if no stored thread matches.
    """
    wanted = normalize_url(url)
    for tid in range(max_threads):
        cfg = {"configurable": {"thread_id": str(tid), "checkpoint_ns": ""}}
        snapshot = checkpointer.get(cfg)
        # Skip empty slots and snapshots without saved channel values.
        if not snapshot or "channel_values" not in snapshot:
            continue
        candidate = snapshot["channel_values"]
        if normalize_url(candidate.get("profile_url", "")) == wanted:
            return str(tid), candidate
    return None, None
435
+
436
def delete_thread_checkpoint(checkpointer, thread_id):
    """Remove all persisted checkpoints for *thread_id*, when supported.

    SqliteSaver exposes ``delete_thread``; checkpointers without that
    method (e.g. plain in-memory savers) are silently left untouched.
    """
    deleter = getattr(checkpointer, "delete_thread", None)
    if deleter is not None:
        deleter(thread_id)
443
+
444
+
445
def get_next_thread_id(checkpointer, max_threads=100):
    """Return the lowest unused thread id in [0, max_threads) as a string.

    A thread id counts as used when the checkpointer has any stored
    checkpoint for it.

    Raises:
        RuntimeError: if every id already has a stored checkpoint.
    """
    occupied = {
        tid
        for tid in range(max_threads)
        if checkpointer.get(
            {"configurable": {"thread_id": str(tid), "checkpoint_ns": ""}}
        )
    }
    for candidate in range(max_threads):
        if candidate not in occupied:
            return str(candidate)
    raise RuntimeError("No available thread_id")
455
+
456
# --- Session selection and state initialization ---

# First visit in this browser session: ask for a profile URL, then either
# resume the stored thread for that URL or scrape the profile fresh.
if "chat_mode" not in st.session_state:
    profile_url = st.text_input("Profile URL (e.g., https://www.linkedin.com/in/username/)")
    if not profile_url:
        st.info("Please enter a valid LinkedIn profile URL above to start.")
        st.stop()

    # Only accept canonical https://www.linkedin.com/in/<username> URLs.
    valid_pattern = r"^https://www\.linkedin\.com/in/[^/]+/?$"
    if not re.match(valid_pattern, profile_url.strip()):
        st.error("❌ Invalid LinkedIn profile URL. Make sure it matches the format.")
        st.stop()
    url = profile_url.strip()

    existing_thread_id, previous_state = find_thread_id_for_url(checkpointer, url)
    # Defensive: a resumable session must at least carry profile + sections.
    required_fields = ["profile", "sections"]
    if previous_state and not all(f in previous_state and previous_state[f] for f in required_fields):
        st.warning("Previous session is missing required data. Please start a new chat.")
        previous_state = None

    if previous_state:
        st.info("A previous session found. Choose:")
        col1, col2 = st.columns(2)
        if col1.button("Continue previous chat"):
            # Resume: reuse the stored thread id and its saved state.
            st.session_state["chat_mode"] = "continue"
            st.session_state["thread_id"] = existing_thread_id
            st.session_state.state = previous_state
            st.rerun()
        elif col2.button("Start new chat"):
            # Discard the old checkpoints, re-scrape the profile, and start
            # a fresh state under the same thread id.
            delete_thread_checkpoint(checkpointer, existing_thread_id)
            with st.spinner("Fetching and processing profile... ⏳"):
                raw=scrape_linkedin_profile(url)
            thread_id = existing_thread_id
            st.session_state["chat_mode"] = "new"
            st.session_state["thread_id"] = thread_id
            st.session_state.state = initialize_state(raw)
            st.session_state.state["profile_url"] = normalize_url(url)
            st.session_state.state["messages"] = []
            st.rerun()
        st.stop()
    else:
        # No stored session for this URL: scrape and allocate a new thread.
        with st.spinner("Fetching and processing profile... ⏳"):
            raw=scrape_linkedin_profile(url)
        thread_id = get_next_thread_id(checkpointer)
        st.session_state["thread_id"] = thread_id
        st.session_state["chat_mode"] = "new"
        st.session_state.state = initialize_state(raw)
        st.session_state.state["profile_url"] = normalize_url(url)
        st.session_state.state["messages"] = []
        st.rerun()
507
+
508
# --- Main chat UI (only after chat_mode is set) ---
state = st.session_state.state
thread_id = st.session_state.get("thread_id")

st.subheader("💬 Chat with your AI Assistant")
messages = state.get("messages", [])
chat_container = st.container()

with chat_container:
    # Inject chat-bubble CSS once per render.
    st.markdown(
        """
        <style>
        .chat-row { display: flex; width: 100%; margin-bottom: 12px; animation: fadeIn 0.5s; }
        .chat-row.user { justify-content: flex-end; }
        .chat-row.ai { justify-content: flex-start; }
        .chat-bubble { font-family: 'Segoe UI', 'Roboto', 'Arial', sans-serif; font-size: 1.08rem; line-height: 1.65; padding: 14px 22px; border-radius: 20px; min-width: 60px; max-width: 75vw; box-shadow: 0 2px 12px rgba(0,0,0,0.10); word-break: break-word; display: inline-block; position: relative; margin-bottom: 2px; }
        .bubble-user { background: linear-gradient(90deg, #43e97b 0%, #38f9d7 100%); color: #fff; border-bottom-right-radius: 6px; border-top-right-radius: 22px; text-align: right; box-shadow: 0 4px 16px rgba(67,233,123,0.13); }
        .bubble-ai { background: linear-gradient(90deg, #e3f0ff 0%, #c9eaff 100%); color: #1a237e; border-bottom-left-radius: 6px; border-top-left-radius: 22px; text-align: left; border: 1.5px solid #b3e0fc; box-shadow: 0 4px 16px rgba(44, 62, 80, 0.08); }
        .bubble-unknown { background: #fffbe6; color: #8a6d3b; border-radius: 14px; text-align: center; border: 1px solid #ffe082; display: inline-block; }
        .sender-label { font-size: 0.93em; font-weight: 600; opacity: 0.7; margin-bottom: 4px; display: block; }
        .avatar { width: 38px; height: 38px; border-radius: 50%; margin-right: 10px; margin-top: 2px; background: #e0e0e0; object-fit: cover; box-shadow: 0 2px 6px rgba(0,0,0,0.07); }
        @keyframes fadeIn { from { opacity: 0; transform: translateY(12px);} to { opacity: 1; transform: translateY(0);} }
        </style>
        """,
        unsafe_allow_html=True,
    )

    # NOTE(review): `job_fit` is read here but never used below — dead local?
    job_fit = state.get("job_fit")
    # Render each stored message as a styled bubble. Message content is
    # interpolated into raw HTML with unsafe_allow_html=True — it is not
    # escaped, so markup in content will be rendered as-is.
    for msg in messages:
        if isinstance(msg, HumanMessage):
            # User turn: right-aligned green bubble.
            st.markdown(
                f"""
                <div class="chat-row user">
                    <div class="chat-bubble bubble-user">
                        <span class="sender-label">🧑‍💻 You</span>
                        {msg.content}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif isinstance(msg, AIMessage):
            # Skip empty AI messages (pure tool-call stubs have no text).
            if not msg.content or not msg.content.strip():
                continue
            st.markdown(
                f"""
                <div class="chat-row ai">
                    <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="AI"/>
                    <div class="chat-bubble bubble-ai">
                        <span class="sender-label">🤖 AI</span>
                        {msg.content}
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif isinstance(msg, ToolMessage):
            # Tool output: try to parse as JSON and pick a rich renderer
            # based on which keys are present.
            raw_content = msg.content or "(no content)"
            try:
                parsed = json.loads(raw_content)
            except Exception:
                parsed = None

            if parsed and isinstance(parsed, dict):
                # --- Profile analysis format ---
                if all(k in parsed for k in ("strengths", "weaknesses", "suggestions")):
                    strengths = parsed["strengths"]
                    weaknesses = parsed["weaknesses"]
                    suggestions = parsed["suggestions"]
                    formatted_html = f"""
                    <h3>💪 <b>Strengths</b></h3>
                    <ul>
                        <li><b>Technical:</b> {', '.join(strengths.get('technical', []) or ['None'])}</li>
                        <li><b>Projects:</b> {', '.join(strengths.get('projects', []) or ['None'])}</li>
                        <li><b>Education:</b> {', '.join(strengths.get('education', []) or ['None'])}</li>
                        <li><b>Soft Skills:</b> {', '.join(strengths.get('soft_skills', []) or ['None'])}</li>
                    </ul>

                    <h3>⚠️ <b>Weaknesses</b></h3>
                    <ul>
                        <li><b>Technical Gaps:</b> {', '.join(weaknesses.get('technical_gaps', []) or ['None'])}</li>
                        <li><b>Project/Experience Gaps:</b> {', '.join(weaknesses.get('project_or_experience_gaps', []) or ['None'])}</li>
                        <li><b>Missing Context:</b> {', '.join(weaknesses.get('missing_context', []) or ['None'])}</li>
                    </ul>

                    <h3>🛠 <b>Suggestions to improve</b></h3>
                    <ul>
                        {''.join(f'<li>{s}</li>' for s in suggestions)}
                    </ul>
                    """

                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">📊 Profile Analysis</span>
                            {formatted_html}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

                # --- Job fit format ---
                elif "match_score" in parsed:
                    percent = parsed["match_score"]
                    suggestions = parsed.get("suggestions", [])
                    missing = parsed.get("missing_skills", [])
                    target_role = parsed.get('target_role', 'unspecified')
                    # NOTE(review): mutating state during render — the
                    # target role from the tool reply is persisted here,
                    # not in the job_matcher tool itself.
                    state["target_role"]=target_role
                    suggestions_html = "<br>".join(f"• {s}" for s in suggestions)
                    missing_html = "<br>".join(f"• {s}" for s in missing)

                    # conic-gradient draws a circular score gauge:
                    # percent * 3.6 converts 0-100 into degrees.
                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">📊 Job Fit</span>
                            <b>🎯 Target Role:</b> {target_role}<br>
                            <div style="
                                width: 120px; height: 120px; border-radius: 50%;
                                background: conic-gradient(#25D366 {percent * 3.6}deg, #e0e0e0 0deg);
                                display: flex; align-items: center; justify-content: center;
                                font-size: 1.8rem; color: #333; margin: 10px auto;">
                                {percent}%
                            </div>
                            <b>Missing Skills:</b><br>{missing_html}<br><br>
                            <b>Suggestions:</b><br>{suggestions_html}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

                # --- Section text format ---
                elif "result" in parsed:
                    text = parsed["result"]
                    st.markdown(f"""
                    <div class="chat-row ai">
                        <img class="avatar" src="https://img.icons8.com/ios-filled/50/1a237e/robot-2.png" alt="Tool"/>
                        <div class="chat-bubble bubble-ai">
                            <span class="sender-label">📄 Section Content</span>
                            {text}
                        </div>
                    </div>
                    """, unsafe_allow_html=True)

            else:
                # Fallback: tool content that is not a JSON object.
                st.markdown(
                    f"""
                    <div class="chat-row">
                        <div class="chat-bubble bubble-unknown">
                            <span class="sender-label">⚠️ Unknown</span>
                            {getattr(msg, 'content', str(msg))}
                        </div>
                    </div>
                    """,
                    unsafe_allow_html=True,
                )
    # Clear the floats so the input area sits below the last bubble.
    st.markdown('<div style="clear:both"></div>', unsafe_allow_html=True)

st.markdown("---")
668
+
669
user_input = st.chat_input(
    placeholder="Ask about your LinkedIn profile, e.g., 'Analyze my profile, how do I fit for AI role, how is my about section?'"
)

# On submit: append the user turn, run the graph (which may call tools),
# store the updated state, and rerun so the new messages render above.
if user_input and user_input.strip():
    state.setdefault("messages", []).append(HumanMessage(content=user_input.strip()))
    validate_state(state)
    thread_id = st.session_state.get("thread_id")
    # thread_id keys the checkpointer so each profile URL keeps its own
    # persisted conversation.
    config = {"configurable": {"thread_id": thread_id}}
    with st.spinner("Processing your request..."):
        st.session_state.state = app_graph.invoke(state, config)
    st.rerun()