File size: 10,943 Bytes
688190f
efc4cf7
29708eb
 
 
 
 
688190f
 
efc4cf7
29708eb
 
 
 
 
 
 
 
 
 
 
 
 
 
688190f
 
 
 
 
 
 
29708eb
688190f
 
 
 
 
 
 
 
 
 
 
3a69134
688190f
29708eb
688190f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01b2099
 
 
 
 
 
 
 
 
 
 
688190f
01b2099
688190f
 
 
 
 
 
 
 
 
 
 
 
29708eb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01b2099
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29708eb
 
 
 
01b2099
29708eb
 
 
247fde3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01b2099
 
 
 
 
 
247fde3
 
01b2099
 
688190f
29708eb
688190f
29708eb
 
 
 
 
 
 
 
 
 
 
 
01b2099
29708eb
 
 
688190f
 
29708eb
688190f
29708eb
 
688190f
29708eb
 
 
 
 
 
 
688190f
29708eb
 
 
 
 
 
01b2099
29708eb
 
 
 
 
 
 
 
 
 
 
01b2099
 
 
 
 
 
 
29708eb
 
 
 
 
 
 
 
 
 
 
01b2099
29708eb
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
# Update imports
import streamlit as st
import requests
import json
import webbrowser
from io import StringIO
from bs4 import BeautifulSoup
import google.generativeai as genai
import os

# Seed the session-state slots used across reruns so later code can read
# them without first checking for existence.
for _slot in ('original_resume', 'keywords', 'tailored_resume'):
    if _slot not in st.session_state:
        st.session_state[_slot] = None

def scrape_website(url, timeout=15):
    """Fetch a web page and return its visible text content.

    Args:
        url: Page URL to download.
        timeout: Seconds to wait for the HTTP response. requests.get
            without a timeout can block forever on a stalled server,
            freezing the whole Streamlit script.

    Returns:
        The page's text with all HTML markup stripped.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
        requests.RequestException: On connection failure or timeout.
    """
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    return soup.get_text()

def trim_text(text, max_length=3000):
    """Trim text to at most max_length characters, preserving both ends.

    Keeps the beginning and the end of the text (where the most relevant
    job-posting content usually lives) joined by an ellipsis.

    Args:
        text: The string to trim.
        max_length: Maximum length of the returned string (inclusive).

    Returns:
        text unchanged when it already fits, otherwise a string of at
        most max_length characters.
    """
    if len(text) <= max_length:
        return text
    ellipsis = "..."
    # Budget the ellipsis into the limit; the previous version returned
    # max_length + 3 characters.
    half_length = max((max_length - len(ellipsis)) // 2, 0)
    # text[-0:] would yield the whole string, so guard the zero case.
    tail = text[-half_length:] if half_length else ""
    return text[:half_length] + ellipsis + tail

def init_gemini(api_key):
    """Configure the Gemini SDK and build a JSON-mode generative model.

    Args:
        api_key: Google AI Studio API key used to authenticate requests.

    Returns:
        A GenerativeModel configured to emit application/json responses.
    """
    genai.configure(api_key=api_key)
    settings = {
        "temperature": 0.7,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 8192,
        "response_mime_type": "application/json",
    }
    model = genai.GenerativeModel(
        model_name="gemini-2.0-flash-exp",
        generation_config=settings,
    )
    return model

def extract_keywords(job_description, model):
    """Ask the model for ATS keywords grouped by priority.

    Args:
        job_description: Raw text of the job posting.
        model: A generative model whose generate_content() returns a
            response carrying a JSON string in .text.

    Returns:
        The parsed JSON object — expected to hold 'high', 'medium' and
        'low' keyword arrays.
    """
    prompt = (
        "Extract key ATS keywords from job posting. "
        "Return JSON with 'high', 'medium', 'low' arrays:\n    "
        + job_description
    )
    response = model.generate_content(prompt)
    return json.loads(response.text)

def validate_resume_schema(resume_data, original_schema):
    """Check that a generated resume keeps the original JSON structure.

    Both documents are reduced to a type skeleton — dicts keep their keys,
    lists are represented by their first element's skeleton (empty lists
    stay []), leaves by their type name — and the skeletons are compared.

    Args:
        resume_data: The candidate (model-produced) resume dict.
        original_schema: The reference resume dict.

    Returns:
        True when both documents share the same structure.
    """
    def skeleton(node):
        if isinstance(node, dict):
            return {key: skeleton(value) for key, value in node.items()}
        if isinstance(node, list):
            if not node:
                return []
            return [skeleton(node[0])]
        return type(node).__name__

    return skeleton(original_schema) == skeleton(resume_data)

def adapt_resume(resume_data, keywords, job_description, model, max_retries=3):
    """Rewrite the resume JSON around the extracted keywords.

    Retries up to max_retries times until the model returns JSON whose
    structure matches the original resume's schema.

    Args:
        resume_data: Parsed resume JSON (the rxresu.me export).
        keywords: Dict with 'high'/'medium'/'low' keyword arrays.
        job_description: Raw text of the job posting.
        model: Generative model returning a JSON string in .text.
        max_retries: Attempts before giving up.

    Returns:
        The tailored resume dict, structurally identical to the input.

    Raises:
        ValueError: When no attempt produced a schema-matching resume.
        Exception: Whatever the final failed attempt raised.
    """
    original_schema = resume_data.copy()
    # The prompt does not change between attempts, so build it once.
    prompt = f"""As a CV expert, optimize the provided resume JSON for the target role. 
            Enhance sections (summary, experience, volunteer, interests, awards, projects, skills) by incorporating provided keywords:
            - High priority (3x weight)
            - Medium priority (2x weight)
            - Low priority (1x weight)
            Rules:
            - Keep all original facts and information
            - Maintain exact JSON structure and all existing keys
            - Use natural language from the keywords list
            - Do not add fictional content
            Base Schema: {json.dumps(original_schema)}
            Keywords: {json.dumps(keywords)}
            Job Description: {job_description}"""

    attempt = 0
    while attempt < max_retries:
        try:
            reply = model.generate_content(prompt)
            candidate = json.loads(reply.text)
            if validate_resume_schema(candidate, original_schema):
                return candidate
        except Exception as exc:
            # Surface the underlying failure only once retries are spent.
            if attempt == max_retries - 1:
                raise exc
        attempt += 1

    raise ValueError("Schema validation failed")

def calculate_resume_match(resume_data, keywords):
    """Score how well a resume covers the weighted keyword lists.

    Every keyword found (case-insensitive substring match) in the resume's
    serialized JSON earns its priority weight; the total is normalized
    against the maximum achievable score.

    Args:
        resume_data: Resume dict to scan.
        keywords: Dict with 'high', 'medium' and 'low' keyword arrays.

    Returns:
        Tuple of (score in 0-100, dict of matched keywords per priority).

    Raises:
        ValueError: If any priority array is missing from keywords.
    """
    weights = {"high": 3, "medium": 2, "low": 1}
    priorities = ['high', 'medium', 'low']

    # Fail fast on a malformed keywords payload.
    if any(p not in keywords for p in priorities):
        raise ValueError("Keywords must contain 'high', 'medium', and 'low' arrays")

    haystack = json.dumps(resume_data).lower()

    matches = {}
    total_score = 0
    for priority in priorities:
        found = [kw.lower() for kw in keywords[priority] if kw.lower() in haystack]
        matches[priority] = found
        total_score += len(found) * weights[priority]

    # Normalize to a 0-100 scale; guard against an empty keyword set.
    best_possible = sum(len(keywords[p]) * weights[p] for p in priorities)
    score = (total_score / best_possible * 100) if best_possible > 0 else 0
    return score, matches

def create_match_visualization(original_score, tailored_score, keywords, original_matches, tailored_matches):
    """Render a side-by-side comparison of original vs. tailored match.

    Shows overall score metrics, then per-priority tabs listing the
    keywords the original resume already matched and the ones the
    tailored version added.
    """
    st.markdown("### 📊 Resume Match Analysis")

    # Overall score metrics, side by side.
    left, right = st.columns(2)
    with left:
        st.metric(
            "Original Resume Match Score",
            f"{original_score:.1f}%"
        )
    with right:
        st.metric(
            "Tailored Resume Match Score",
            f"{tailored_score:.1f}%",
            delta=f"+{tailored_score - original_score:.1f}%"
        )

    # One tab per keyword priority level.
    st.markdown("### 🎯 Keyword Matches")
    priority_tabs = st.tabs(["High Priority 🔴", "Medium Priority 🟡", "Low Priority 🟢"])

    for tab, priority in zip(priority_tabs, ['high', 'medium', 'low']):
        with tab:
            before_col, after_col = st.columns(2)

            before = set(original_matches[priority])
            gained = set(tailored_matches[priority]) - before

            # Keywords the original resume already contained.
            with before_col:
                st.markdown("#### Original Matching Keywords")
                if before:
                    for keyword in before:
                        st.markdown(f"✓ `{keyword}`")
                else:
                    st.info("No matches found")

            # Keywords introduced by the tailored version.
            with after_col:
                st.markdown("#### Added the following Keywords")
                if gained:
                    for keyword in gained:
                        st.markdown(f"➕ `{keyword}`")
                else:
                    st.info("No new matches")

# Page config
st.set_page_config(page_title="Resume Tailor", page_icon="📄", layout="wide")

# Header
st.title("📄 Curriculum Customization Tool")
st.markdown("### Transform your resume for your dream job")

# Sidebar: usage guide, about/contact, disclaimer, and API-key lookup
with st.sidebar:
    st.markdown("### 📝 How to Use")
    st.markdown("""
    1. **Prepare Your Resume**:
       - Create a basic resume at [rxresu.me](https://rxresu.me)
       - Export it as JSON (not PDF)
    
    2. **Get Job Details**:
       - Copy the job posting URL
    
    3. **Use the Tool**:
       - Upload your resume JSON
       - Paste the job URL
       - Click 'Tailor Resume'
    
    4. **Final Steps**:
       - Download the tailored JSON
       - Import back to rxresu.me
       - Export as PDF for application
    """)
    
    st.markdown("### ℹ️ About")
    st.markdown("""
    This tool uses Google's Gemini model to optimize your resume for ATS systems.
    
    🔓 Open Source: Feel free to modify and adapt this tool to your needs.
    The source code is available and customizable.
    
    📧 Contact: For questions or suggestions, reach out to:
    [email protected]
    """)
    
    # Disclaimer
    st.warning("""
    ⚠️ **Disclaimer**
    
    This tool is for educational purposes only. 
    AI-based tools can produce unexpected results. 
    Always verify the output before using.
    """)
    
    # st.secrets["..."] raises KeyError when the entry is absent, which
    # would crash the app before the friendly message below could render;
    # .get() returns None instead, so this check handles both the missing
    # and the empty-string cases.
    api_key = st.secrets.get("google_api_key")
    if not api_key:
        st.error("API key not found in secrets. Please add your API key to the secrets.")

# Main input section: job URL on the left, resume upload on the right
col1, col2 = st.columns(2)
with col1:
    job_url = st.text_input("Job Posting URL", placeholder="https://...")
with col2:
    resume_file = st.file_uploader("Upload Resume (JSON)", type="json")
    if resume_file:
        # Decode the uploaded bytes and parse the rxresu.me JSON export,
        # storing it in session state so it survives Streamlit reruns.
        resume_str = StringIO(resume_file.getvalue().decode("utf-8"))
        st.session_state['original_resume'] = json.load(resume_str)

# Process button
if st.button("🎯 Tailor Resume", type="primary", use_container_width=True):
    if job_url and api_key and resume_file:
        try:
            with st.status("πŸ”„ Processing...") as status:
                # Initialize Gemini
                model = init_gemini(api_key)
                
                # Rest of the processing remains the same, just using model instead of client
                status.update(label="Analyzing job posting...")
                job_description = scrape_website(job_url)
                keywords = extract_keywords(job_description, model)
                st.session_state['keywords'] = keywords
                
                status.update(label="Tailoring resume...")
                tailored_resume = adapt_resume(
                    st.session_state['original_resume'],
                    keywords,
                    job_description,
                    model
                )
                st.session_state['tailored_resume'] = tailored_resume
                status.update(label="βœ… Done!", state="complete")

            # Results section
            st.markdown("---")
            st.markdown("## πŸ“Š Results")
            
            # Calculate and display scores
            original_score, original_matches = calculate_resume_match(
                st.session_state['original_resume'],
                st.session_state['keywords']
            )
            tailored_score, tailored_matches = calculate_resume_match(
                st.session_state['tailored_resume'],
                st.session_state['keywords']
            )

            create_match_visualization(
                original_score,
                tailored_score,
                st.session_state['keywords'],
                original_matches,
                tailored_matches
            )

            # Download section
            st.markdown("### πŸ“₯ Download")
            if st.download_button(
                "⬇️ Download Tailored Resume",
                data=json.dumps(st.session_state['tailored_resume'], indent=4),
                file_name="tailored_resume.json",
                mime="application/json",
                use_container_width=True
            ):
                webbrowser.open_new_tab("https://rxresu.me/")
                st.info("πŸ“ Resume Builder opened in new tab")

        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
    else:
        st.error("Please provide all required inputs")