ME committed on
Commit
688190f
Β·
verified Β·
1 Parent(s): 6d133e5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -58
app.py CHANGED
@@ -1,10 +1,12 @@
 
1
  import streamlit as st
2
  import requests
3
  import json
4
  import webbrowser
5
  from io import StringIO
6
- from groq import Groq
7
  from bs4 import BeautifulSoup
 
 
8
 
9
  # Initialize session state
10
  if 'original_resume' not in st.session_state:
@@ -20,52 +22,74 @@ def scrape_website(url):
20
  soup = BeautifulSoup(response.text, 'html.parser')
21
  return soup.get_text()
22
 
23
- def extract_keywords(job_description, client):
24
- completion = client.chat.completions.create(
25
- model="llama-3.1-70b-versatile",
26
- messages=[
27
- {
28
- "role": "system",
29
- "content": (
30
- "You are an expert in extracting essential information from job postings for optimal ATS compatibility. "
31
- "Focus on identifying keywords and skills, prioritized by importance."
32
- )
33
- },
34
- {
35
- "role": "user",
36
- "content": (
37
- f"Extract keywords from this job posting and categorize them by importance. "
38
- f"Return as JSON with exactly these keys: 'high', 'medium', and 'low' containing arrays of strings.\n\n{job_description}"
39
- )
40
- }
41
- ],
42
- temperature=1,
43
- max_tokens=4096,
44
- response_format={"type": "json_object"}
45
- )
46
- return json.loads(completion.choices[0].message.content)
47
-
48
- def adapt_resume(resume_data, keywords, job_description, client):
49
- completion = client.chat.completions.create(
50
- model="llama-3.1-8b-instant",
51
- messages=[
52
- {
53
- "role": "system",
54
- "content": (
55
- "You are a CV coach skilled in resume customization and JSON formatting. "
56
- "Tailor the resume to emphasize relevant keywords while maintaining factual accuracy."
57
- )
58
- },
59
- {
60
- "role": "user",
61
- "content": f"Keywords: {json.dumps(keywords)}\nResume: {json.dumps(resume_data)}\nJob Description: {job_description}"
62
- }
63
- ],
64
- temperature=0.9,
65
- max_tokens=8000,
66
- response_format={"type": "json_object"}
67
  )
68
- return json.loads(completion.choices[0].message.content)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  def calculate_resume_match(resume_data, keywords):
71
  """Calculate match score between resume and keywords"""
@@ -105,16 +129,13 @@ st.set_page_config(page_title="Resume Tailor", page_icon="πŸ“„", layout="wide")
105
  # Header
106
  st.title("🎯 AI Resume Tailor")
107
  st.markdown("### Transform your resume for your dream job")
108
-
109
  # Sidebar with API key
110
  with st.sidebar:
111
- api_key = st.text_input(
112
- "Groq API Key",
113
- type="password",
114
- help="Get your API key at https://console.groq.com/keys"
115
- )
116
  if not api_key:
117
- st.markdown("[Get API Key](https://console.groq.com/keys)")
118
 
119
  # Main input section
120
  col1, col2 = st.columns(2)
@@ -131,13 +152,13 @@ if st.button("πŸš€ Tailor Resume", type="primary", use_container_width=True):
131
  if job_url and api_key and resume_file:
132
  try:
133
  with st.status("πŸ”„ Processing...") as status:
134
- # Initialize client
135
- client = Groq(api_key=api_key)
136
 
137
- # Scrape and process
138
  status.update(label="Analyzing job posting...")
139
  job_description = scrape_website(job_url)
140
- keywords = extract_keywords(job_description, client)
141
  st.session_state['keywords'] = keywords
142
 
143
  status.update(label="Tailoring resume...")
@@ -145,7 +166,7 @@ if st.button("πŸš€ Tailor Resume", type="primary", use_container_width=True):
145
  st.session_state['original_resume'],
146
  keywords,
147
  job_description,
148
- client
149
  )
150
  st.session_state['tailored_resume'] = tailored_resume
151
  status.update(label="βœ… Done!", state="complete")
 
1
+ # Update imports
2
  import streamlit as st
3
  import requests
4
  import json
5
  import webbrowser
6
  from io import StringIO
 
7
  from bs4 import BeautifulSoup
8
+ import google.generativeai as genai
9
+ import os
10
 
11
  # Initialize session state
12
  if 'original_resume' not in st.session_state:
 
22
  soup = BeautifulSoup(response.text, 'html.parser')
23
  return soup.get_text()
24
 
25
def trim_text(text, max_length=3000):
    """Trim text while preserving important content.

    Keeps the beginning and the end of the text joined by an ellipsis, so
    the model still sees both the opening (role/title) and the closing
    (requirements/contact) of a long job posting.

    Args:
        text: The string to trim.
        max_length: Hard upper bound on the length of the returned string.

    Returns:
        ``text`` unchanged when it already fits, otherwise a string of at
        most ``max_length`` characters. (The previous implementation
        returned ``max_length + 3`` characters because the ``"..."``
        separator was not counted against the budget.)
    """
    if len(text) <= max_length:
        return text

    sep = "..."
    if max_length <= len(sep):
        # No room for head + separator + tail; fall back to a hard cut.
        return text[:max_length]

    # Budget the separator, giving the head the extra character when odd.
    keep = max_length - len(sep)
    tail_len = keep // 2
    head_len = keep - tail_len
    # Guard tail_len == 0: text[-0:] would return the whole string.
    tail = text[-tail_len:] if tail_len else ""
    return text[:head_len] + sep + tail
32
+
33
+ # Configure Gemini
34
def init_gemini(api_key):
    """Configure the Gemini SDK and return a JSON-mode generative model.

    The model is created with ``response_mime_type`` set to
    ``application/json`` so that ``generate_content`` replies can be fed
    straight into ``json.loads`` by the callers.
    """
    genai.configure(api_key=api_key)
    return genai.GenerativeModel(
        model_name="gemini-1.5-flash-002",
        generation_config={
            "temperature": 0.7,
            "top_p": 0.95,
            "top_k": 40,
            "max_output_tokens": 8192,
            "response_mime_type": "application/json",
        },
    )
47
+
48
def extract_keywords(job_description, model):
    """Ask the model for ATS keywords bucketed by importance.

    Args:
        job_description: Raw job-posting text to analyze.
        model: Object exposing ``generate_content(prompt)`` whose response
            carries a JSON payload in ``.text``.

    Returns:
        The parsed JSON dict (expected keys: 'high', 'medium', 'low').
    """
    request = f"""Extract key ATS keywords from job posting. Return JSON with 'high', 'medium', 'low' arrays:
{job_description}"""

    reply = model.generate_content(request)
    return json.loads(reply.text)
54
+
55
def validate_resume_schema(resume_data, original_schema):
    """Validate and enforce resume schema consistency.

    Reduces both documents to a nested "shape" — dicts keep their keys,
    lists are represented by the shape of their first element (empty lists
    stay empty), and leaves collapse to their type name — and reports
    whether the two shapes are identical.
    """
    def shape(node):
        if isinstance(node, dict):
            return {key: shape(value) for key, value in node.items()}
        if isinstance(node, list):
            # Heuristic: a list's shape is judged by its first element only.
            return [shape(node[0])] if node else []
        return type(node).__name__

    return shape(original_schema) == shape(resume_data)
68
+
69
def adapt_resume(resume_data, keywords, job_description, model, max_retries=3):
    """Tailor the resume JSON toward a job posting via the Gemini model.

    Args:
        resume_data: Parsed resume JSON (dict); its structure is the schema
            the model output must reproduce.
        keywords: Importance-bucketed keyword dict (output of
            ``extract_keywords``), serialized into the prompt.
        job_description: Raw job-posting text embedded in the prompt.
        model: Object exposing ``generate_content(prompt)`` returning a
            response with a JSON payload in ``.text``.
        max_retries: Number of generation attempts before giving up.

    Returns:
        The tailored resume dict, once it passes schema validation.

    Raises:
        ValueError: If no attempt produced a schema-valid resume.
        Exception: The last model/parse error is re-raised on the final
            attempt instead of being swallowed.
    """
    # Shallow copy only — nested values stay shared, but the copy is used
    # purely as a structural snapshot by validate_resume_schema, so the
    # model's output cannot mutate the reference.
    original_schema = resume_data.copy()

    for attempt in range(max_retries):
        try:
            prompt = f"""You are a CV coach skilled in resume customization and JSON formatting. Consider what the ideal candidate for the target role would be like. You have creative freedom to tailor the provided base Original CV JSON to align with the company's needs using relevant keywords based on their importance level (high: 3x, medium: 2x, low: 1x). Use the language established in the Keywords. IMPORTANT: Consider the company's research and job details to enhance the CV's relevance.
Embed relevant keywords throughout specific sections: 'summary', 'experience', 'volunteer', 'interests', 'awards', 'projects' and 'skills' without altering any factual information.
Output the modified CV as JSON, strictly maintaining the original structure, keys, and logical flow of information. Use the language specified in the Keywords and avoid adding any fabricated details.
Retain specific keys such as 'id' and 'url' as in the original CV, as well as any key found within the 'metadata' section of the schema.
Schema: {json.dumps(original_schema)}
Keywords: {json.dumps(keywords)}
Job: {job_description}"""

            response = model.generate_content(prompt)
            tailored_resume = json.loads(response.text)

            # Accept only output whose nested structure matches the input;
            # otherwise silently retry with a fresh generation.
            if validate_resume_schema(tailored_resume, original_schema):
                return tailored_resume

        except Exception as e:
            # Early-attempt failures (bad JSON, API hiccups) are swallowed
            # so the loop can retry; only the final attempt's error escapes.
            if attempt == max_retries - 1:
                raise e

    raise ValueError("Schema validation failed")
93
 
94
  def calculate_resume_match(resume_data, keywords):
95
  """Calculate match score between resume and keywords"""
 
129
  # Header
130
  st.title("🎯 AI Resume Tailor")
131
  st.markdown("### Transform your resume for your dream job")
 
132
  # Sidebar with API key
133
  with st.sidebar:
134
+ st.markdown("### Google API Key")
135
+ st.markdown("This tool works with Google's Gemini model, which you can use for free. For more information, visit [Google AI Studio](https://ai.google.dev/aistudio).")
136
+ api_key = st.secrets["google_api_key"]
 
 
137
  if not api_key:
138
+ st.error("API key not found in secrets. Please add your API key to the secrets.")
139
 
140
  # Main input section
141
  col1, col2 = st.columns(2)
 
152
  if job_url and api_key and resume_file:
153
  try:
154
  with st.status("πŸ”„ Processing...") as status:
155
+ # Initialize Gemini
156
+ model = init_gemini(api_key)
157
 
158
+ # Rest of the processing remains the same, just using model instead of client
159
  status.update(label="Analyzing job posting...")
160
  job_description = scrape_website(job_url)
161
+ keywords = extract_keywords(job_description, model)
162
  st.session_state['keywords'] = keywords
163
 
164
  status.update(label="Tailoring resume...")
 
166
  st.session_state['original_resume'],
167
  keywords,
168
  job_description,
169
+ model
170
  )
171
  st.session_state['tailored_resume'] = tailored_resume
172
  status.update(label="βœ… Done!", state="complete")