from langchain.chains import LLMChain, SequentialChain
from langchain.prompts import ChatPromptTemplate
def get_vacancy_skills_chain(llm) -> LLMChain:
    """Build the chain that extracts the skills requested in a vacancy text."""
    template_vacancy_get_skills = """
Given the following vacancy delimited by three backticks, retrieve the skills requested in the vacancy.
Describe each skill in preferably 1 word and at most 3 words.
Return the skills as a JSON list on 1 line, do not add newlines or any other text.
```
{vacancy}
```
"""
    prompt_vacancy_skills = ChatPromptTemplate.from_template(
        template=template_vacancy_get_skills
    )
    vacancy_skills = LLMChain(
        llm=llm, prompt=prompt_vacancy_skills, output_key="vacancy_skills_predicted"
    )
    return vacancy_skills
def get_resume_skills_chain(llm) -> LLMChain:
    """Build the chain that extracts the skills listed in a resume text."""
    template_resume_skills = """
Given the following resume delimited by three backticks, retrieve the skills from the resume.
Describe each skill in preferably 1 word and at most 3 words.
Return the skills as a JSON list on 1 line, do not add newlines or any other text.
```
{resume}
```
"""
    prompt_resume_skills = ChatPromptTemplate.from_template(
        template=template_resume_skills
    )
    resume_skills = LLMChain(
        llm=llm, prompt=prompt_resume_skills, output_key="resume_skills_predicted"
    )
    return resume_skills
def get_skills_intersection_chain(llm) -> LLMChain:
    """Build the chain that intersects the predicted vacancy skills with the predicted resume skills.

    Deprecated prompt, kept for reference:
        Can you return the intersection of the skills above delimited by backticks with the list of skills below delimited by backticks.
        Consider skills that are not exact but are close to each other in terms of meaning or usage.
        For example, 'Python programming' and 'Python' should be considered a match. Similarly, 'Strong problem-solving skills' and 'problem solver' should be considered the same.
        Please consider all skills in lowercase for matching. We're trying to match the skills of a job candidate (second list) with the requirements of a job vacancy (first list).
        Please keep this context in mind while performing the matching.
        If no skills match, do not make up a response and return an empty list.
        Return the intersection as a JSON list on 1 line, do not add newlines or any other text.
    """
    template_get_skills_intersection = """
```
{vacancy_skills_predicted}
```
Can you return the intersection of the skills above delimited by backticks with the list of skills below delimited by backticks.
Consider skills that are not exact but are close to each other in terms of meaning or usage. For example, 'Python programming' and 'Python' should be considered a match. Similarly, 'TensorFlow machine learning' and 'Machine Learning with TensorFlow' should be considered the same. Please consider all skills in lowercase for matching. We're trying to match the skills of a job candidate (second list) with the requirements of a job vacancy (first list). Please keep this context in mind while performing the matching.
If no skills match, do not make up a response and return an empty list.
Return the intersection as a JSON list on 1 line, do not add newlines or any other text.
```
{resume_skills_predicted}
```
"""
    prompt_get_skills_intersection = ChatPromptTemplate.from_template(
        template=template_get_skills_intersection
    )
    skills_intersection = LLMChain(
        llm=llm,
        prompt=prompt_get_skills_intersection,
        output_key="skills_intersection_predicted",
    )
    return skills_intersection
def get_skills_chain(llm) -> SequentialChain:
    """Compose the pipeline: extract vacancy skills, extract resume skills, then intersect them."""
    vacancy_skills_chain = get_vacancy_skills_chain(llm=llm)
    resume_skills_chain = get_resume_skills_chain(llm=llm)
    intersection_skills_chain = get_skills_intersection_chain(llm=llm)
    return SequentialChain(
        chains=[vacancy_skills_chain, resume_skills_chain, intersection_skills_chain],
        input_variables=["vacancy", "resume"],
        output_variables=[
            vacancy_skills_chain.output_key,
            resume_skills_chain.output_key,
            intersection_skills_chain.output_key,
        ],
        verbose=False,
    )
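

# Illustrative usage of the composed chain (the variable names and texts below are
# examples, not part of this module): calling the SequentialChain with the two
# inputs returns a dict keyed by each sub-chain's output_key.
#   chain = get_skills_chain(llm)
#   result = chain({"vacancy": vacancy_text, "resume": resume_text})
#   result["vacancy_skills_predicted"]       # JSON list of skills asked for in the vacancy
#   result["resume_skills_predicted"]        # JSON list of skills found in the resume
#   result["skills_intersection_predicted"]  # JSON list of overlapping skills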
def get_skills_match(llm, vacancy, resume) -> dict:
    """Run a single-prompt match of a vacancy against a resume and return the chain's output dict."""
    template_get_skills_intersection = """
```
VACANCY:
{vacancy}
```
```
RESUME:
{resume}
```
Both the vacancy and resume are delimited by three backticks.
List any matches you find in both the vacancy and the resume.
Each match is constructed as the following JSON object:
"content" : < any match related to job specific content, experience and location >
"resume_index" : < the number of times the match occurs in the resume >
"vacancy_index" : < the number of times the match occurs in the vacancy >
Return all the JSON objects as a JSON list with no newlines or any other text.
If there is no match at all, do not make up a response and return an empty list.
"""
    prompt_get_skills_intersection = ChatPromptTemplate.from_template(
        template=template_get_skills_intersection
    )
    skills_match_chain = LLMChain(
        llm=llm,
        prompt=prompt_get_skills_intersection,
        output_key="skills_match_predicted",
    )
    chain = SequentialChain(
        chains=[skills_match_chain],
        input_variables=["vacancy", "resume"],
        output_variables=[
            skills_match_chain.output_key,
        ],
        verbose=False,
    )
    return chain({"vacancy": vacancy, "resume": resume})
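

# Minimal end-to-end sketch. This assumes an OpenAI chat model via langchain's
# ChatOpenAI wrapper and an OPENAI_API_KEY in the environment; the model name and
# the sample vacancy/resume texts are illustrative assumptions, not part of the
# original module.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI

    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

    vacancy_text = "We are hiring a data engineer with Python, SQL and Airflow experience."
    resume_text = "Data professional with five years of Python and SQL, plus Airflow scheduling."

    # Three-step pipeline: vacancy skills -> resume skills -> intersection.
    result = get_skills_chain(llm)({"vacancy": vacancy_text, "resume": resume_text})
    print(result["skills_intersection_predicted"])

    # Single-prompt matcher that returns JSON objects with per-document match counts.
    print(get_skills_match(llm, vacancy_text, resume_text)["skills_match_predicted"])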
