File size: 3,384 Bytes
fdc0d68
 
 
92ae9aa
8e6e3c5
752f8a9
 
fdc0d68
4b41cfa
fdc0d68
4b41cfa
 
 
 
 
6d9b2de
 
92ae9aa
0eae39f
92ae9aa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6d9b2de
 
 
 
92ae9aa
03c9778
6d9b2de
92ae9aa
6d9b2de
 
 
 
 
 
 
 
92ae9aa
6d9b2de
92ae9aa
 
 
 
 
 
85a522e
1cba1c6
85a522e
1cba1c6
 
 
 
 
e39e244
1cba1c6
 
8fc84f6
e836885
745b176
92ae9aa
fdc0d68
 
752f8a9
 
 
 
6d726e3
1cba1c6
92ae9aa
 
1cba1c6
92ae9aa
0eae39f
fdc0d68
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains import create_retrieval_chain
from langchain.chains.summarize.chain import load_summarize_chain
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.retrievers import ContextualCompressionRetriever

#from Api_Key import google_plam
from langchain_groq import ChatGroq
import os
from dotenv import load_dotenv
load_dotenv()


def prompt_template_to_analyze_resume():
    """Build the chat prompt used for general resume analysis.

    Returns:
        ChatPromptTemplate: a two-message prompt with a system instruction
        (expects a ``{context}`` variable holding the retrieved resume text)
        and a human message carrying the user's question via ``{input}``.
    """
    template = """
    You are provided with the Resume of the Candidate in the context below. As a Talent Acquisition bot, your task is to provide insights about the
    candidate point wise. Mention his skills and experience, highlighting his strengths and weaknesses.
    
    \n\n:{context}
    """
    prompt = ChatPromptTemplate.from_messages(
        [
            ('system', template),
            # Must be the '{input}' placeholder so the question supplied at
            # invoke time is injected; the bare literal 'input' silently
            # discarded the user's question.
            ('human', '{input}'),
        ]
        )
    return prompt

def prompt_template_for_relaibility():
    """Build the chat prompt used for reliability (job-switching) analysis.

    Note: the function name keeps the original (misspelled) identifier so
    existing callers continue to work.

    Returns:
        ChatPromptTemplate: a system message (expects ``{context}`` with the
        retrieved resume text) plus a human message carrying the user's
        question via ``{input}``.
    """
    template = """
    You are provided with the Resume of the Candidate in the context below.
    If asked about reliability, check how frequently the candidate has switched from one company to another.
    Grade him on the given basis:
        If less than 2 years - very less Reliable
        if more than 2 years but less than 5 years - Reliable
        if more than 5 years - Highly Reliable
    and generate a verdict.
    
    \n\n:{context}
    
    """
    prompt = ChatPromptTemplate.from_messages(
        [
            ('system', template),
            # '{input}' placeholder (not the literal string 'input') so the
            # user's question actually reaches the model at invoke time.
            ('human', '{input}'),
        ]
        )
    return prompt
    

def summarize(documents, llm):
    """Summarize a list of documents with a 'refine' summarization chain.

    Args:
        documents: iterable of LangChain Document objects to summarize.
        llm: the language model the chain should use.

    Returns:
        str: the final summary text produced by the chain.
    """
    chain = load_summarize_chain(llm=llm, chain_type='refine', verbose=True)
    output = chain.invoke({'input_documents': documents})
    return output['output_text']


def get_hugging_face_model(model_id='mistralai/Mistral-7B-Instruct-v0.2', temperature=0.01, max_tokens=4096, api_key=None):
    """Create a HuggingFaceHub LLM wrapper for the given repo.

    Args:
        model_id: Hugging Face Hub repo id of the model to use.
        temperature: sampling temperature passed through model_kwargs.
        max_tokens: maximum number of new tokens to generate.
        api_key: Hugging Face Hub API token.

    Returns:
        HuggingFaceHub: configured LLM client.
    """
    generation_kwargs = {"temperature": temperature, "max_new_tokens": max_tokens}
    return HuggingFaceHub(
        huggingfacehub_api_token=api_key,
        repo_id=model_id,
        model_kwargs=generation_kwargs,
    )

def Q_A(vectorstore, question, API_KEY):
    """Answer a question about a resume stored in a vector store.

    Picks an LLM based on the API key prefix (Groq keys start with 'gsk',
    Hugging Face tokens with 'hf'), retrieves and compresses relevant resume
    chunks, and routes reliability-related questions to a dedicated prompt.

    Args:
        vectorstore: a LangChain vector store holding the resume chunks.
        question: the user's question about the candidate.
        API_KEY: Groq ('gsk...') or Hugging Face ('hf...') API key.

    Returns:
        str: the model's answer.

    Raises:
        ValueError: if the API key prefix is not recognized.
    """
    if API_KEY.startswith('gsk'):
        os.environ["GROQ_API_KEY"] = API_KEY
        chat_llm = ChatGroq(model="gemma2-9b-it")  # alternative: "llama3-8b-8192"
    elif API_KEY.startswith('hf'):
        chat_llm = get_hugging_face_model(api_key=API_KEY)
    else:
        # Originally fell through with chat_llm unbound, crashing later with
        # a confusing NameError; fail fast with a clear message instead.
        raise ValueError(
            "Unrecognized API key: expected a Groq key ('gsk...') "
            "or a Hugging Face token ('hf...')."
        )

    # Retrieve the two most similar resume chunks for the question.
    retriever = vectorstore.as_retriever(search_type='similarity', search_kwargs={'k': 2})
    # Compress retrieved chunks so only question-relevant content reaches the prompt.
    compressor = LLMChainExtractor.from_llm(chat_llm)
    compression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=retriever)

    # Route to the reliability prompt. The 'reliab' stem matches 'reliable'
    # AND 'reliability' (the original missed the correctly spelled
    # 'reliability'); 'relaib' keeps the original misspelling working.
    q = question.lower()
    if 'reliab' in q or 'relaib' in q:
        question_answer_chain = create_stuff_documents_chain(chat_llm, prompt_template_for_relaibility())
    else:
        question_answer_chain = create_stuff_documents_chain(chat_llm, prompt_template_to_analyze_resume())

    chain = create_retrieval_chain(compression_retriever, question_answer_chain)
    result = chain.invoke({'input': question})
    return result['answer']