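"""Streamlit Space: generates a tailored job application email from an uploaded CV
(PDF or DOCX) and a pasted job description, using the google/gemma-2b-it model."""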
import streamlit as st
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
from PyPDF2 import PdfReader
import docx
import re
from typing import Dict

def parse_cv_sections(text: str) -> Dict[str, str]:
    """Parse CV text into structured sections."""
    sections = {
        'contact': '',
        'education': '',
        'experience': '',
        'skills': '',
        'projects': '',
    }

    # Common section headers in CVs
    section_patterns = {
        'contact': r'(?i)(contact|personal\s+information|profile)',
        'education': r'(?i)(education|academic|qualification)',
        'experience': r'(?i)(experience|work|employment|professional)',
        'skills': r'(?i)(skills|technical skills|competencies)',
        'projects': r'(?i)(projects|personal projects)',
    }

    # Split text into lines
    lines = text.split('\n')
    current_section = None

    for line in lines:
        line = line.strip()
        if not line:
            continue

        # Check if line is a section header
        for section, pattern in section_patterns.items():
            if re.search(pattern, line, re.IGNORECASE):
                current_section = section
                break

        if current_section and line:
            sections[current_section] += line + '\n'

    return sections
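# Illustration (hypothetical input): parse_cv_sections("Education\nBSc Computer Science\nSkills\nPython, SQL")
# returns a dict where sections['education'] holds "Education\nBSc Computer Science\n" and
# sections['skills'] holds "Skills\nPython, SQL\n" (header lines stay with their section text).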

def extract_cv_text(file):
    """Extract text from a PDF or DOCX CV and parse it into sections."""
    if file is None:
        return "No CV uploaded"

    file_ext = os.path.splitext(file.name)[1].lower()
    text = ""

    try:
        if file_ext == '.pdf':
            reader = PdfReader(file)
            for page in reader.pages:
                # extract_text() can return None for pages with no extractable text
                text += page.extract_text() or ""
        elif file_ext == '.docx':
            doc = docx.Document(file)
            for paragraph in doc.paragraphs:
                text += paragraph.text + '\n'
        else:
            return "Unsupported file format. Please upload PDF or DOCX files."

        # Parse the CV into sections (returns a dict on success, an error string otherwise)
        sections = parse_cv_sections(text)
        return sections
    except Exception as e:
        return f"Error processing file: {str(e)}"

# Read the Hugging Face access token from the environment (e.g. set API_KEY as a secret for this Space)
access_token = os.getenv('API_KEY')

# Initialize the tokenizer and model with the Hugging Face access token
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it", token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b-it",
    torch_dtype=torch.bfloat16,
    token=access_token
)
model.eval()  # Set the model to evaluation mode

# Initialize the inference client (if needed for other API-based tasks)
client = InferenceClient(token=access_token)
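# Note: Streamlit re-runs this script on every interaction, so the model above is reloaded each time.
# Wrapping the tokenizer/model loading in a function decorated with @st.cache_resource (available in
# recent Streamlit releases) would keep the weights in memory across reruns.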

def create_email_prompt(job_description: str, cv_sections: Dict[str, str]) -> str:
    """Create a detailed prompt for email generation."""
    return f"""Job Description:
{job_description}

Your CV Details:

Experience:
{cv_sections['experience']}

Skills:
{cv_sections['skills']}

Education:
{cv_sections['education']}

Instructions: Write a professional job application email following these guidelines:
1. Start with a proper greeting
2. First paragraph: Express interest in the position and mention how you found it
3. Second paragraph: Highlight 2-3 most relevant experiences from your CV that match the job requirements
4. Third paragraph: Mention specific skills that align with the role
5. Closing paragraph: Express enthusiasm for an interview and provide contact information
6. End with a professional closing

Keep the tone professional, confident, and enthusiastic. Be concise but impactful.

Email:"""

def conversation_predict(input_text: str, cv_sections: Dict[str, str]):
    """Generate a response using the model with improved prompting."""
    prompt = create_email_prompt(input_text, cv_sections)

    # Tokenize the input text
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids

    # Generate a response with the model
    outputs = model.generate(
        input_ids,
        max_new_tokens=2048,
        temperature=0.7,
        top_p=0.95,
        do_sample=True
    )

    # Decode only the newly generated tokens so the prompt is not echoed back in the result
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
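# Note: gemma-2b-it is an instruction-tuned chat model; an alternative to the plain prompt above is
# tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True,
# return_tensors="pt"), which formats the input with Gemma's expected turn markers.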

def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    cv_file,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Generate a response for a multi-turn chat conversation."""
    # Extract CV text and update system message
    cv_text = extract_cv_text(cv_file) if cv_file else "No CV provided"
    if isinstance(cv_text, dict):
        # Flatten the parsed sections into readable text for the system prompt
        cv_text = "\n".join(f"{name.title()}:\n{content}" for name, content in cv_text.items() if content)

    updated_system_message = f"""Task: Write a professional job application email.

CV Summary:
{cv_text}

{system_message}"""

    messages = [{"role": "system", "content": updated_system_message}]

    for user_input, assistant_reply in history:
        if user_input:
            messages.append({"role": "user", "content": user_input})
        if assistant_reply:
            messages.append({"role": "assistant", "content": assistant_reply})

    messages.append({"role": "user", "content": message})

    response = ""
    for message_chunk in client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = message_chunk["choices"][0]["delta"].get("content") or ""
        response += token
        yield response
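# Note: respond() streams completions through the hosted InferenceClient chat API; the Streamlit UI
# below calls conversation_predict() (local generation) instead, so this helper is only used if a
# chat-style front end is wired up to it.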

# Streamlit UI section
st.title("AI Job Application Email Generator")

# Add tabs for different sections
tab1, tab2 = st.tabs(["Generate Email", "View CV Details"])

with tab1:
    # CV file upload
    cv_file = st.file_uploader("Upload CV (PDF or DOCX)", type=["pdf", "docx"])

    if cv_file:
        cv_sections = extract_cv_text(cv_file)
        if isinstance(cv_sections, dict):
            st.success("CV uploaded and parsed successfully!")
        else:
            st.error(cv_sections)  # Show error message if parsing failed

    # Job description input
    st.markdown("### Job Description")
    message = st.text_area("Paste the job description here:", height=200)

    # Generate button
    if st.button("Generate Email"):
        if message and cv_file and isinstance(cv_sections, dict):
            response = conversation_predict(message, cv_sections)
            st.markdown("### Generated Email:")
            st.markdown(response)
        else:
            st.warning("Please upload a CV and enter a job description.")

with tab2:
    if cv_file and isinstance(cv_sections, dict):
        st.markdown("### Parsed CV Details")
        for section, content in cv_sections.items():
            with st.expander(f"{section.title()}"):
                st.text(content)
    else:
        st.info("Upload a CV to view parsed details")