import os
import json
import logging
import re

from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from huggingface_hub.utils._errors import BadRequestError

from utils.fileTotext import extract_text_based_on_format
from utils.spacy import Parser_from_model

load_dotenv()

HFT = os.getenv('HF_TOKEN')
if not HFT:
    raise ValueError("Hugging Face token is not set in environment variables.")

client = InferenceClient(model="mistralai/Mistral-Nemo-Instruct-2407", token=HFT)


def Data_Cleaner(text):
    """Reduce a raw model reply to its JSON payload: drop everything up to and including
    the first "format:" marker (if present), then strip surrounding markdown backticks
    and a leading "json" language tag."""
    pattern = r".*?format:"
    result = re.split(pattern, text, maxsplit=1)
    if len(result) > 1:
        text_after_format = result[1].strip().strip('`').strip('json')
    else:
        text_after_format = text.strip().strip('`').strip('json')

    return text_after_format

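# Illustrative sketch of the kind of reply Data_Cleaner is meant to handle (the reply text
# below is hypothetical, not captured model output):
#
#   raw = 'Here is the data in the requested format: ```json\n{"name": "Jane Doe"}\n```'
#   Data_Cleaner(raw)  # -> '\n{"name": "Jane Doe"}\n', which json.loads can parse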

def Model_ProfessionalDetails_Output(resume, client):
    system_role = {
        "role": "system",
        "content": "You are a skilled resume parser. Your task is to extract professional details from resumes in a structured JSON format defined by the User. Ensure accuracy and completeness while maintaining the format provided, and if a field is missing just return 'not found'."
    }
    user_prompt = {
        "role": "user",
        "content": f'''Act as a resume parser for the following text given in text: {resume}

Extract the text in the following output JSON string as:
{{
  "professional": {{
    "technical_skills": "List all technical skills, programming languages, frameworks, and technologies mentioned in the resume, ensuring they are not mixed with other skill types.",
    "non_technical_skills": "Identify and list non-technical skills such as leadership, teamwork, and communication skills, ensuring they are not mixed with technical skills.",
    "tools": "Enumerate all software tools, platforms, and applications (e.g., Figma, Unity, MS Office, etc.) referenced in the resume, distinctly separate from skills.",
    "projects": "Extract the names or titles of all projects mentioned in the resume.",
    "projects_experience": "Summarize overall project experiences, providing a brief description of each project as detailed in the resume.",
    "experience": "Calculate total professional work experience in years and months based on the resume.",
    "companies_worked_at": "List the names of all companies where employment is mentioned in the resume.",
    "certifications": "Extract and list all certifications obtained as stated in the resume.",
    "roles": "Include the names of all job titles or roles held as indicated in the resume.",
    "qualifications": "List educational qualifications (e.g., B.Tech) from the resume. If none are found, return 'No education listed'.",
    "courses": "Extract the names of completed courses based on the resume. If none are found, return 'No courses listed'.",
    "university": "Identify the name of the university, college, or institute attended, based on the resume. If not found, return 'No university listed'.",
    "year_of_graduation": "Extract the year of graduation from the resume. If not found, return 'No year of graduation listed'."
  }}
}}
Json Output:
'''
    }

    # Stream the completion and accumulate the generated text chunk by chunk.
    response = ""
    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=True, temperature=0.35):
        # A streamed chunk's delta may carry no content (e.g. the final chunk), so guard with "or".
        response += message.choices[0].delta.content or ""

    try:
        clean_response = Data_Cleaner(response)
        parsed_response = json.loads(clean_response)
    except json.JSONDecodeError as e:
        logging.error(f"JSON Decode Error: {e}")
        return {}

    return parsed_response

def Model_PersonalDetails_Output(resume, client):
    system_role = {
        "role": "system",
        "content": "You are a skilled resume parser. Your task is to extract personal details from resumes in a structured JSON format defined by the User. Ensure accuracy and completeness while maintaining the format provided, and if a field is missing just return 'not found'."
    }
    user_prompt = {
        "role": "user",
        "content": f'''Act as a resume parser for the following text given in text: {resume}

Extract the text in the following output JSON string as:
{{
  "personal": {{
    "name": "Extract the full name based on the resume. If not found, return 'No name listed'.",
    "contact_number": "Extract the contact number from the resume. If not found, return 'No contact number listed'.",
    "email": "Extract the email address from the resume. If not found, return 'No email listed'.",
    "Address": "Extract the address from the resume. If not found, return 'No Address listed'.",
    "link": "Extract any relevant links (e.g., portfolio, LinkedIn) from the resume. If not found, return 'No link listed'."
  }}
}}
output:
'''
    }

    # Stream the completion and accumulate the generated text chunk by chunk.
    response = ""
    for message in client.chat_completion(
        messages=[system_role, user_prompt],
        max_tokens=3000,
        stream=True,
        temperature=0.35,
    ):
        response += message.choices[0].delta.content or ""

    try:
        clean_response = Data_Cleaner(response)
        parsed_response = json.loads(clean_response)
    except json.JSONDecodeError as e:
        logging.error(f"JSON Decode Error: {e}")
        logging.error(f"Raw Response: {response}")
        return {}

    return parsed_response


# Regexes used to classify hyperlinks and validate extracted contact fields.
linkedin_pattern = r"https?://(?:www\.)?linkedin\.com/[\w\-_/]+"
github_pattern = r"https?://(?:www\.)?github\.com/[\w\-_/]+"
email_pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
contact_pattern = r"^\+?[\d\s\-()]{7,15}$"


def extract_links(hyperlinks):
    """Split a list of hyperlinks into LinkedIn links and GitHub links."""
    linkedin_links = []
    github_links = []

    for link in hyperlinks:
        if re.match(linkedin_pattern, link):
            linkedin_links.append(link)
        elif re.match(github_pattern, link):
            github_links.append(link)

    return linkedin_links, github_links


def is_valid_email(email):
    email_regex = r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
    return re.match(email_regex, email) is not None


def is_valid_contact(contact):
    """Return True if the string matches one of the supported phone number formats."""
    # Patterns are grouped by country dialling code; each pattern must match the whole string.
    patterns = [
        # India (+91) and bare 10-digit numbers
        r'^\+91[\s\.\-\/]?\(?0?\)?[\s\-\.\/]?\d{5}[\s\-\.\/]?\d{5}$',
        r'^\+91[\s\.\-\/]?\d{5}[\s\-\.\/]?\d{5}$',
        r'^\d{5}[\s\-\.\/]?\d{5}$',
        r'^\+91[\s\.\-\/]?\d{10}$',
        r'^\d{10}$',
        r'^\+91[\s\.\-\/]?\(?\d{5}\)?[\s\-\.\/]?\d{5}[\s\-\.\/]?\d{5}$',
        r'\+91\s\d{5}-\d{5}',
        r'\+91\s\d{4}-\d{6}',
        r'\+91\s\d{10}',
        r'\+91\d{10}',
        r'0\d{2}-\d{7}',
        r'\d{5}\s\d{5}',
        # USA / Canada (+1)
        r'\+1\s\(\d{3}\)\s\d{3}-\d{4}',
        r'\+1\s\d{3}\s\d{3}\s\d{4}',
        r'\(\d{3}\)\s\d{3}-\d{4}',
        r'\(\d{3}\)\s\d{3}\s\d{4}',
        r'\(\d{3}\)\s\d{3}\s\d{3}',
        r'\+1\d{10}',
        r'\d{10}',
        # UK (+44)
        r'\+44\s\d{4}\s\d{6}',
        r'\+44\s\d{3}\s\d{3}\s\d{4}',
        r'0\d{4}\s\d{6}',
        r'0\d{3}\s\d{3}\s\d{4}',
        r'\+44\d{10}',
        r'0\d{10}',
        # Australia (+61)
        r'\+61\s\d\s\d{4}\s\d{4}',
        r'0\d\s\d{4}\s\d{4}',
        r'\+61\d{9}',
        r'0\d{9}',
        # Germany (+49)
        r'\+49\s\d{4}\s\d{8}',
        r'\+49\s\d{3}\s\d{7}',
        r'0\d{3}\s\d{8}',
        r'\+49\d{12}',
        r'\+49\d{10}',
        r'0\d{11}',
        # China (+86)
        r'\+86\s\d{3}\s\d{4}\s\d{4}',
        r'0\d{3}\s\d{4}\s\d{4}',
        r'\+86\d{11}',
        # Japan (+81)
        r'\+81\s\d\s\d{4}\s\d{4}',
        r'\+81\s\d{2}\s\d{4}\s\d{4}',
        r'\+81\d{10}',
        r'\+81\d{9}',
        # Brazil (+55)
        r'\+55\s\d{2}\s\d{5}-\d{4}',
        r'\+55\s\d{2}\s\d{4}-\d{4}',
        r'0\d{2}\s\d{4}\s\d{4}',
        r'\+55\d{11}',
        r'\+55\d{10}',
        # France (+33)
        r'\+33\s\d\s\d{2}\s\d{2}\s\d{2}\s\d{2}',
        r'0\d\s\d{2}\s\d{2}\s\d{2}\s\d{2}',
        r'\+33\d{9}',
        # Russia (+7)
        r'\+7\s\d{3}\s\d{3}-\d{2}-\d{2}',
        r'8\s\d{3}\s\d{3}-\d{2}-\d{2}',
        r'\+7\d{10}',
        r'8\d{10}',
        # South Africa (+27)
        r'\+27\s\d{2}\s\d{3}\s\d{4}',
        r'0\d{2}\s\d{3}\s\d{4}',
        r'\+27\d{9}',
        # Mexico (+52)
        r'\+52\s\d{3}\s\d{3}\s\d{4}',
        r'\+52\s\d{2}\s\d{4}\s\d{4}',
        r'01\s\d{3}\s\d{4}',
        r'\+52\d{10}',
        r'01\d{7}',
        # Nigeria (+234)
        r'\+234\s\d{3}\s\d{3}\s\d{4}',
        r'\+234\d{10}',
        # UAE (+971)
        r'\+971\s\d\s\d{3}\s\d{4}',
        r'0\d\s\d{3}\s\d{4}',
        r'\+971\d{8}',
        r'0\d{8}',
        # Argentina (+54)
        r'\+54\s9\s\d{3}\s\d{3}\s\d{4}',
        r'\+54\s9\s\d{10}',
        r'\+54\s\d{1}\s\d{4}\s\d{4}',
        r'0\d{3}\s\d{4}',
        r'\+54\d{10}',
        r'\+54\d{9}',
        r'0\d{7}',
        # Saudi Arabia (+966)
        r'\+966\s\d\s\d{3}\s\d{4}',
        r'\+966\d{8}',
    ]

    return any(re.fullmatch(pattern, contact) for pattern in patterns)


def validate_contact_email(personal_data):
    contact = personal_data.get('contact', 'Not found')
    email = personal_data.get('email', 'Not found')

    valid_contact = is_valid_contact(contact) if contact != 'Not found' else False
    valid_email = is_valid_email(email) if email != 'Not found' else False

    invalid_contact = 'Valid contact' if valid_contact else 'Invalid contact'
    invalid_email = 'Valid email' if valid_email else 'Invalid email'

    return valid_contact, invalid_contact, valid_email, invalid_email

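# Illustrative call with hypothetical values, showing the shape of the returned tuple:
#
#   validate_contact_email({'contact': '+91 98765 43210', 'email': 'jane@example.com'})
#   # -> (True, 'Valid contact', True, 'Valid email')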

def process_resume_data(file_path):
    resume_text, hyperlinks = extract_text_based_on_format(file_path)
    print("Resume converted to text successfully.")

    if not resume_text:
        return {"error": "Text extraction failed"}

    linkedin_links, github_links = extract_links(hyperlinks)

    try:
        per_data = Model_PersonalDetails_Output(resume_text, client)
        pro_data = Model_ProfessionalDetails_Output(resume_text, client)

        if not per_data:
            logging.warning("Mistral personal data extraction failed.")
            per_data = {}

        if not pro_data:
            logging.warning("Mistral professional data extraction failed.")
            pro_data = {}

        result = {
            "personal": {
                "name": per_data.get('personal', {}).get('name', 'Not found'),
                "contact": per_data.get('personal', {}).get('contact_number', 'Not found'),
                "email": per_data.get('personal', {}).get('email', 'Not found'),
                "location": per_data.get('personal', {}).get('Address', 'Not found'),
                "linkedin": linkedin_links,
                "github": github_links,
                "other_links": hyperlinks
            },
            "professional": {
                "technical_skills": pro_data.get('professional', {}).get('technical_skills', 'Not found'),
                "non_technical_skills": pro_data.get('professional', {}).get('non_technical_skills', 'Not found'),
                "tools": pro_data.get('professional', {}).get('tools', 'Not found'),
                "experience": [
                    {
                        # Keys below match the field names requested in the professional prompt.
                        "company": pro_data.get('professional', {}).get('companies_worked_at', 'Not found'),
                        "projects": pro_data.get('professional', {}).get('projects', 'Not found'),
                        "role": pro_data.get('professional', {}).get('roles', 'Not found'),
                        "years": pro_data.get('professional', {}).get('experience', 'Not found'),
                        "project_experience": pro_data.get('professional', {}).get('projects_experience', 'Not found')
                    }
                ],
                "education": [
                    {
                        "qualification": pro_data.get('professional', {}).get('qualifications', 'Not found'),
                        "university": pro_data.get('professional', {}).get('university', 'Not found'),
                        "course": pro_data.get('professional', {}).get('courses', 'Not found'),
                        "certificate": pro_data.get('professional', {}).get('certifications', 'Not found')
                    }
                ]
            }
        }

        valid_contact, invalid_contact, valid_email, invalid_email = validate_contact_email(result['personal'])
        result['personal']['valid_contact'] = valid_contact
        result['personal']['invalid_contact'] = invalid_contact
        result['personal']['valid_email'] = valid_email
        result['personal']['invalid_email'] = invalid_email

        if per_data or pro_data:
            logging.info("Successfully extracted data using Mistral.")
            print("---------Mistral-------")
            return result
        else:
            raise ValueError("Mistral returned no output")

    except BadRequestError as e:
        logging.error(f"HuggingFace API error: {e}. Falling back to SpaCy.")
        print(f"HuggingFace API error: {e}. Falling back to SpaCy.")
    except Exception as e:
        logging.error(f"An error occurred while processing with Mistral: {e}. Falling back to SpaCy.")
        print(f"An error occurred while processing with Mistral: {e}. Falling back to SpaCy.")

    # Reached only when the Mistral path raised an exception above.
    logging.warning("Mistral failed, switching to SpaCy.")
    print("---------SpaCy-------")
    return Parser_from_model(file_path)
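
# Minimal usage sketch, assuming a resume exists at the placeholder path below; replace it
# with a real PDF/DOCX path that extract_text_based_on_format supports.
if __name__ == "__main__":
    sample_path = "sample_resume.pdf"  # hypothetical example path
    parsed = process_resume_data(sample_path)
    print(json.dumps(parsed, indent=2))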