|
|
|
import os |
|
import json |
|
import logging |
|
from huggingface_hub import InferenceClient |
|
from huggingface_hub.utils._errors import BadRequestError |
|
from dotenv import load_dotenv |
|
from utils.fileTotext import extract_text_based_on_format |
|
import re |
|
from utils.spacy import Parser_from_model |
|
|
|
|
|
# Load environment variables from a local .env file (expects HF_TOKEN).
load_dotenv()

# Hugging Face API token used to authenticate all inference requests.
HFT = os.getenv('HF_TOKEN')

# Fail fast at import time: every parsing helper below needs the token.
if not HFT:
    raise ValueError("Hugging Face token is not set in environment variables.")

# Shared inference client for the Mistral-Nemo instruct model; reused by all
# Model_*_Output helpers in this module.
client = InferenceClient(model="mistralai/Mistral-Nemo-Instruct-2407", token=HFT)
|
|
|
|
|
def Data_Cleaner(text):
    """Strip LLM preamble and markdown code fences from a model response.

    The chat model is prompted to answer with text ending in "format:"
    followed by a JSON payload, often wrapped in a ```json fenced block.
    Everything up to and including the first "format:" marker is dropped,
    then fence markers and a leading "json" tag are removed.

    Args:
        text: Raw text returned by the chat model.

    Returns:
        The cleaned string if it parses as JSON, otherwise the original
        ``text`` unchanged (the caller surfaces the decode error).
    """
    parts = re.split(r".*?format:", text, maxsplit=1)
    candidate = parts[1] if len(parts) > 1 else text

    # Remove markdown fences.  The previous implementation used
    # .strip('json'), which strips the *characters* j/s/o/n from both ends
    # and could eat legitimate leading/trailing content; here only an exact
    # leading "json" language tag is removed.
    candidate = candidate.strip().strip('`').strip()
    if candidate.startswith('json'):
        candidate = candidate[len('json'):].strip()

    try:
        json.loads(candidate)
        return candidate
    except json.JSONDecodeError:
        logging.error("Data cleaning led to invalid JSON")
        return text
|
|
|
|
|
|
|
def Model_ProfessionalDetails_Output(resume, client):
    """Extract professional details (skills, tools, companies, projects,
    experience, roles) from resume text via the chat model.

    Args:
        resume: Plain-text resume content.
        client: An InferenceClient-like object exposing ``chat_completion``.

    Returns:
        dict parsed from the model's JSON reply, or {} when the reply
        cannot be decoded as JSON even after cleaning.
    """
    system_role = {
        "role": "system",
        "content": "You are a skilled resume parser. Your task is to extract Professional details from resumes in a structured JSON format defined by the User. Ensure accuracy and completeness while maintaining the format provided and if field are missing just return []."
    }
    user_prompt = {
        "role": "user",
        "content": f'''<s>[INST] Act as a resume parser for the following text given in text: {resume}
Extract the text in the following output JSON string as:
{{
    "professional": {{
        "technical_skills": ["List all technical skills, programming languages, frameworks, and technologies mentioned in the resume, ensuring they are not mixed with other skill types. If not found, return []"],
        "non_technical_skills": ["Identify and list non-technical skills such as leadership, teamwork, and communication skills, ensuring they are not mixed with technical skills. If not found, return []"],
        "tools": ["Enumerate and extract all software tools, platforms, and applications referenced in the resume, distinctly separate from skills. If not found, return []"],
        "companies_worked_at": ["List the names of all companies where employment is mentioned in the resume. If not found, return []"],
        "projects": ["Extract all projects names or titles mentioned in the resume. If not found, return []"],
        "projects_experience": ["Summarize overall project experiences, providing a brief description of each project as detailed in the resume. If not found, return []"],
        "experience": ["Calculate total professional work experience in years and months based on the resume. If not found, return []"],
        "roles": ["List and Extract the names of all job titles or roles mentioned in the resume. If not found, return []"]
    }}
}}
output:
[/INST]</s>
'''
    }

    # Stream the completion and accumulate the text chunks.
    response = ""
    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True):
        chunk = message.choices[0].delta.content
        if chunk:  # NOTE(review): streamed deltas may carry None content — guard before concatenating
            response += chunk

    # NOTE: the previous version issued a second, non-streaming
    # chat_completion here purely to print its output, doubling the API
    # cost for every resume; that redundant call has been removed.

    try:
        clean_response = Data_Cleaner(response)
        parsed_response = json.loads(clean_response)
    except json.JSONDecodeError as e:
        logging.error(f"JSON Decode Error: {e}")
        return {}

    return parsed_response
|
|
|
|
|
def Model_EducationalDetails_Output(resume, client):
    """Extract educational details (certifications, qualifications,
    universities, courses) from resume text via the chat model.

    Args:
        resume: Plain-text resume content.
        client: An InferenceClient-like object exposing ``chat_completion``.

    Returns:
        dict parsed from the model's JSON reply, or {} when the reply
        cannot be decoded as JSON even after cleaning.
    """
    system_role = {
        "role": "system",
        "content": "You are a skilled resume parser. Your task is to Extract All Educational qualifications, including Degrees and Certifications from resumes in a structured JSON format defined by the User. Ensure accuracy and completeness while maintaining the format provided and if field are missing just return []."
    }
    user_prompt = {
        "role": "user",
        "content": f'''<s>[INST] Act as a resume parser for the following text given in text: {resume}
Extract the text in the following output JSON string as:
{{
    "educational": {{
        "certifications": ["List and extract all certifications mentioned in the resume. If none are found, return []."],
        "qualifications": ["List and extract all educational qualifications, including degrees (e.g., BBA, MBA), their full forms, and associated levels (e.g., undergraduate, postgraduate) from the resume. If [] are found, return []."],
        "university": ["List and extract the name of the university, college, or institute attended based on the resume. If not found, return []."],
        "courses": ["List and extract the names of completed courses or based on the resume. If none are found, return []."]
    }}
}}
output:
[/INST]</s>
'''
    }

    # Stream the completion and accumulate the text chunks.
    response = ""
    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True):
        chunk = message.choices[0].delta.content
        if chunk:  # NOTE(review): streamed deltas may carry None content — guard before concatenating
            response += chunk

    # NOTE: the previous version issued a second, non-streaming
    # chat_completion here purely to print its output, doubling the API
    # cost for every resume; that redundant call has been removed.

    try:
        clean_response = Data_Cleaner(response)
        parsed_response = json.loads(clean_response)
    except json.JSONDecodeError as e:
        logging.error(f"JSON Decode Error: {e}")
        return {}

    return parsed_response
|
|
|
def Model_PersonalDetails_Output(resume, client):
    """Extract personal details (name, contact, email, address, links)
    from resume text via the chat model.

    Args:
        resume: Plain-text resume content.
        client: An InferenceClient-like object exposing ``chat_completion``.

    Returns:
        dict parsed from the model's JSON reply, or {} when the reply
        cannot be decoded as JSON even after cleaning.
    """
    system_role = {
        "role": "system",
        "content": "You are a skilled resume parser. Your task is to extract personal details from resumes in a structured JSON format defined by the User. Ensure accuracy and completeness while maintaining the format provided and if field are missing just return []."
    }
    user_prompt = {
        "role": "user",
        "content": f'''<s>[INST] Act as a resume parser for the following text given in text: {resume}
Extract the text in the following output JSON string as:
{{
    "personal": {{
        "name": "Extract the full name based on the resume. If not found, return [].",
        "contact_number": "Extract the contact number from the resume. If not found, return [].",
        "email": "Extract the email address from the resume. If not found, return [].",
        "address": "Extract the address or address from the resume. If not found, return [].",
        "link": "Extract any relevant links (e.g., portfolio, LinkedIn) from the resume. If not found, return []."
    }}
}}
output:
[/INST]</s>
'''
    }

    # Stream the completion and accumulate the text chunks.
    response = ""
    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=True):
        chunk = message.choices[0].delta.content
        if chunk:  # NOTE(review): streamed deltas may carry None content — guard before concatenating
            response += chunk

    # NOTE: the previous version issued a second, non-streaming
    # chat_completion here purely to print its output, doubling the API
    # cost for every resume; that redundant call has been removed.

    try:
        clean_response = Data_Cleaner(response)
        parsed_response = json.loads(clean_response)
    except json.JSONDecodeError as e:
        # Use logging (as the sibling extractors do) instead of bare print.
        logging.error(f"JSON Decode Error: {e}")
        logging.error(f"Raw Response: {response}")
        return {}

    return parsed_response
|
|
|
|
|
|
|
|
|
|
|
# URL patterns used to classify hyperlinks extracted from the resume.
linkedin_pattern = r"https?://(?:www\.)?linkedin\.com/[\w\-_/]+"

github_pattern = r"https?://(?:www\.)?github\.com/[\w\-_/]+"

# NOTE(review): email_pattern and contact_pattern are not referenced by the
# code below (is_valid_email / is_valid_contact define their own patterns);
# kept in case they are imported from other modules — confirm before removal.
email_pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"

contact_pattern = r"^\+?[\d\s\-()]{7,15}$"
|
|
|
def extract_links(hyperlinks):
    """Split a list of URLs into LinkedIn and GitHub buckets.

    Args:
        hyperlinks: Iterable of URL strings.

    Returns:
        Tuple ``(linkedin_links, github_links)``; URLs matching neither
        pattern are ignored.
    """
    linkedin_re = re.compile(r"https?://(?:www\.)?linkedin\.com/[\w\-_/]+")
    github_re = re.compile(r"https?://(?:www\.)?github\.com/[\w\-_/]+")

    linkedin_found = [url for url in hyperlinks if linkedin_re.match(url)]
    # The elif in the original means a LinkedIn match wins over GitHub.
    github_found = [
        url for url in hyperlinks
        if not linkedin_re.match(url) and github_re.match(url)
    ]

    return linkedin_found, github_found
|
|
|
def is_valid_email(email):
    """Return True when *email* looks like a syntactically valid address."""
    pattern = re.compile(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$')
    return bool(pattern.match(email))
|
|
|
def is_valid_contact(contact):
    """Return True when *contact* matches a supported phone-number layout.

    Bug fixes relative to the previous version:
      * ``any(...) is not None`` always evaluated to True (``any`` returns a
        bool, never None), so *every* input validated; the boolean from
        ``any`` is now returned directly.
      * A missing comma in the pattern list implicitly concatenated two
        patterns into one unusable regex; the list below is repaired.
      * Many patterns carried a stray trailing space, requiring the number
        to end with a blank; patterns are cleaned and matched with
        ``re.fullmatch`` so the whole string must be a phone number.
      * Duplicate patterns (the list repeated a compact tail section) have
        been removed — de-duplication does not change the accepted set.

    Args:
        contact: Candidate phone-number string.

    Returns:
        bool: True if any pattern matches the entire string.
    """
    patterns = [
        # India
        r'^\+91[\s\.\-\/]?\(?0?\)?[\s\-\.\/]?\d{5}[\s\-\.\/]?\d{5}$',
        r'^\+91[\s\.\-\/]?\d{5}[\s\-\.\/]?\d{5}$',
        r'^\d{5}[\s\-\.\/]?\d{5}$',
        r'^\+91[\s\.\-\/]?\d{10}$',
        r'^\d{10}$',
        r'^\+91[\s\.\-\/]?\(?\d{5}\)?[\s\-\.\/]?\d{5}[\s\-\.\/]?\d{5}$',
        r'\+91\s\d{5}-\d{5}',
        r'\+91\s\d{4}-\d{6}',
        r'\+91\s\d{10}',
        r'\+91\d{10}',
        r'0\d{2}-\d{7}',
        r'\d{5}\s\d{5}',
        # US / Canada
        r'\+1\s\(\d{3}\)\s\d{3}-\d{4}',
        r'\(\d{3}\)\s\d{3}-\d{4}',
        r'\(\d{3}\)\s\d{3}\s\d{4}',
        r'\(\d{3}\)\s\d{3}\s\d{3}',
        r'\+1\d{10}',
        r'\+1\s\d{3}\s\d{3}\s\d{4}',
        # UK
        r'\+44\s\d{4}\s\d{6}',
        r'\+44\s\d{3}\s\d{3}\s\d{4}',
        r'0\d{4}\s\d{6}',
        r'0\d{3}\s\d{3}\s\d{4}',
        r'\+44\d{10}',
        r'0\d{10}',
        # Australia
        r'\+61\s\d\s\d{4}\s\d{4}',
        r'0\d\s\d{4}\s\d{4}',
        r'\+61\d{9}',
        r'0\d{9}',
        # Germany
        r'\+49\s\d{4}\s\d{8}',
        r'\+49\s\d{3}\s\d{7}',
        r'0\d{3}\s\d{8}',
        r'\+49\d{12}',
        r'\+49\d{10}',
        r'0\d{11}',
        # China
        r'\+86\s\d{3}\s\d{4}\s\d{4}',
        r'0\d{3}\s\d{4}\s\d{4}',
        r'\+86\d{11}',
        # Japan
        r'\+81\s\d\s\d{4}\s\d{4}',
        r'\+81\s\d{2}\s\d{4}\s\d{4}',
        r'0\d\s\d{4}\s\d{4}',
        r'\+81\d{10}',
        r'\+81\d{9}',
        # Brazil
        r'\+55\s\d{2}\s\d{5}-\d{4}',
        r'\+55\s\d{2}\s\d{4}-\d{4}',
        r'0\d{2}\s\d{4}\s\d{4}',
        r'\+55\d{11}',
        r'\+55\d{10}',
        # France
        r'\+33\s\d\s\d{2}\s\d{2}\s\d{2}\s\d{2}',
        r'0\d\s\d{2}\s\d{2}\s\d{2}\s\d{2}',
        r'\+33\d{9}',
        # Russia
        r'\+7\s\d{3}\s\d{3}-\d{2}-\d{2}',
        r'8\s\d{3}\s\d{3}-\d{2}-\d{2}',
        r'\+7\d{10}',
        r'8\d{10}',
        # South Africa
        r'\+27\s\d{2}\s\d{3}\s\d{4}',
        r'0\d{2}\s\d{3}\s\d{4}',
        r'\+27\d{9}',
        # Mexico
        r'\+52\s\d{3}\s\d{3}\s\d{4}',
        r'\+52\s\d{2}\s\d{4}\s\d{4}',
        r'01\s\d{3}\s\d{4}',
        r'\+52\d{10}',
        r'01\d{7}',
        # Nigeria
        r'\+234\s\d{3}\s\d{3}\s\d{4}',
        r'\+234\d{10}',
        # UAE
        r'\+971\s\d\s\d{3}\s\d{4}',
        r'0\d\s\d{3}\s\d{4}',
        r'\+971\d{8}',
        r'0\d{8}',
        # Argentina
        r'\+54\s9\s\d{3}\s\d{3}\s\d{4}',
        r'\+54\s9\s\d{10}',
        r'\+54\s\d{1}\s\d{4}\s\d{4}',
        r'0\d{3}\s\d{4}',
        r'\+54\d{10}',
        r'\+54\d{9}',
        r'0\d{7}',
        # Saudi Arabia
        r'\+966\s\d\s\d{3}\s\d{4}',
        r'0\d\s\d{3}\s\d{4}',
        r'\+966\d{8}',
    ]

    return any(re.fullmatch(pattern, contact) for pattern in patterns)
|
|
|
|
|
def validate_contact_email(personal_data):
    """Validate the contact number and email held in a parsed personal dict.

    Args:
        personal_data: Dict with optional 'contact' and 'email' entries;
            a missing or [] value is treated as absent and thus invalid.

    Returns:
        Tuple ``(valid_contact, contact_label, valid_email, email_label)``
        where the labels are 'Valid contact'/'Invalid contact' and
        'Valid email'/'Invalid email'.
    """
    contact_value = personal_data.get('contact', [])
    email_value = personal_data.get('email', [])

    # An empty-list placeholder means the model found nothing to validate.
    if contact_value != []:
        valid_contact = is_valid_contact(contact_value)
    else:
        valid_contact = False

    if email_value != []:
        valid_email = is_valid_email(email_value)
    else:
        valid_email = False

    contact_label = 'Valid contact' if valid_contact else 'Invalid contact'
    email_label = 'Valid email' if valid_email else 'Invalid email'

    return valid_contact, contact_label, valid_email, email_label
|
|
|
|
|
def extract_link_details(text):
    """Pull non-email web links out of raw resume text.

    Finds www.-prefixed URLs with a known TLD, drops matches shorter than
    11 characters, and discards any candidate that contains one of the
    email addresses found in the same text (so the domain part of an
    address is not misreported as a link).

    Args:
        text: Raw resume text.

    Returns:
        List of URL strings.
    """
    email_pat = re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b')
    url_pat = re.compile(r'\b(?:https?:\/\/)?(?:www\.)[a-zA-Z0-9-]+\.(?:com|co\.in|co|io|org|net|edu|gov|mil|int|uk|us|in|de|au|app|tech|xyz|info|biz|fr|dev)\b')

    found_emails = email_pat.findall(text)

    candidates = []
    for match in url_pat.findall(text):
        if len(match) < 11:
            continue
        if any(addr in match for addr in found_emails):
            continue
        candidates.append(match)

    return candidates
|
|
|
def process_resume_data(file_path):
    """End-to-end resume parsing pipeline.

    Converts the file at *file_path* to text, extracts personal,
    professional and educational details via the Mistral chat model, and
    falls back to the SpaCy-based parser when the model path fails.

    Args:
        file_path: Path to the resume file; formats are handled by
            extract_text_based_on_format.

    Returns:
        A nested dict with 'personal' and 'professional' sections on the
        Mistral path, ``{"error": ...}`` when text extraction fails, or
        whatever Parser_from_model returns on the SpaCy fallback path.
    """
    # Convert the document to plain text; also returns any embedded
    # hyperlinks found in the file.
    resume_text, hyperlinks = extract_text_based_on_format(file_path)
    print("Resume converted to text successfully.")

    if not resume_text:
        return {"error": "Text extraction failed"}

    # Classify embedded hyperlinks into LinkedIn / GitHub buckets.
    linkedin_links, github_links = extract_links(hyperlinks)

    try:
        # Three separate model calls: personal, professional, educational.
        per_data = Model_PersonalDetails_Output(resume_text, client)
        print(f"Personal Data -----> {per_data}")

        pro_data = Model_ProfessionalDetails_Output(resume_text, client)
        print(f"Professional Data -----> {pro_data}")

        Edu_data=Model_EducationalDetails_Output(resume_text, client)
        print(f"Educational Data -----> {Edu_data}")

        # Regex-based link extraction from the raw text (catches links that
        # appear as plain text rather than embedded hyperlinks).
        links = extract_link_details(resume_text)
        print(f"Links Data -----> {links}")

        # The Model_*_Output helpers return {} on JSON decode failure.
        # NOTE(review): Edu_data gets no equivalent guard here — an empty
        # educational result is silently tolerated via .get() below.
        if not per_data:
            logging.warning("Mistral personal data extraction failed.")
            per_data = {}

        if not pro_data:
            logging.warning("Mistral professional data extraction failed.")
            pro_data = {}

        # Normalise the three model outputs into the response schema.
        # Missing keys default to None (not []) in this mapping.
        result = {
            "personal": {
                "name": per_data.get('personal', {}).get('name', None),
                "contact": per_data.get('personal', {}).get('contact_number', None),
                "email": per_data.get('personal', {}).get('email', None),
                "location": per_data.get('personal', {}).get('address', None),
                "linkedin": linkedin_links,
                "github": github_links,
                "other_links": hyperlinks
            },
            "professional": {
                "technical_skills": pro_data.get('professional', {}).get('technical_skills', None),
                "non_technical_skills": pro_data.get('professional', {}).get('non_technical_skills', None),
                "tools": pro_data.get('professional', {}).get('tools', None),
                "experience": [
                    {
                        "company": pro_data.get('professional', {}).get('companies_worked_at', None),
                        "projects": pro_data.get('professional', {}).get('projects', None),
                        "role": pro_data.get('professional', {}).get('roles', None),
                        "years": pro_data.get('professional', {}).get('experience', None),
                        "project_experience": pro_data.get('professional', {}).get('projects_experience', None)
                    }
                ],
                "education": [
                    {
                        "qualification": Edu_data.get('educational', {}).get('qualifications', None),
                        "university": Edu_data.get('educational', {}).get('university', None),
                        "course": Edu_data.get('educational', {}).get('courses', None),
                        "certificate": Edu_data.get('educational', {}).get('certifications', None)
                    }
                ]
            }
        }

        # Merge regex-found links into the embedded-hyperlink list
        # (presumably hyperlinks is always a list here — TODO confirm
        # against extract_text_based_on_format).
        if result['personal']['other_links'] is not None:
            result['personal']['other_links'] += links

        # Attach contact/email validation verdicts to the personal section.
        valid_contact, invalid_contact, valid_email, invalid_email = validate_contact_email(result['personal'])
        result['personal']['valid_contact'] = valid_contact
        result['personal']['invalid_contact'] = invalid_contact
        result['personal']['valid_email'] = valid_email
        result['personal']['invalid_email'] = invalid_email

        if per_data or pro_data:
            logging.info("Successfully extracted data using Mistral.")
            print(result)
            print("---------Mistral-------")
            return result
        else:
            # Both model calls produced nothing usable; trigger the
            # fallback path via the generic except handler below.
            raise ValueError("Mistral returned no output")

    except BadRequestError as e:
        logging.error(f"HuggingFace API error: {e}. Falling back to SpaCy.")
        print(f"HuggingFace API error: {e}. Falling back to SpaCy.")
    except Exception as e:
        logging.error(f"An error occurred while processing with Mistral: {e}. Falling back to SpaCy.")
        print(f"An error occurred while processing with Mistral: {e}. Falling back to SpaCy.")

    # Any failure above lands here: use the SpaCy-based fallback parser.
    logging.warning("Mistral failed, switching to SpaCy.")
    print("---------SpaCy-------")
    return Parser_from_model(file_path)
|
|
|
|
|
|