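"""Streamlit app: draft a job application email from a job description and an
uploaded CV.

Loads google/gemma-2b-it locally via transformers for one-shot generation, and
keeps a huggingface_hub InferenceClient around for a streaming, API-based
alternative (respond()). Expects a Hugging Face access token in the API_KEY
environment variable. Run with `streamlit run app.py` (assuming this file is
saved as app.py).
"""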
import streamlit as st
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
from PyPDF2 import PdfReader
import docx

def extract_cv_text(file):
    """Extract text from an uploaded PDF or DOCX CV.

    `file` is a Streamlit UploadedFile, which is file-like, so PdfReader and
    docx.Document can read it directly.
    """
    if file is None:
        return "No CV uploaded"

    file_ext = os.path.splitext(file.name)[1].lower()

    if file_ext == '.pdf':
        reader = PdfReader(file)
        # extract_text() may return None for pages with no extractable text.
        return "".join(page.extract_text() or "" for page in reader.pages)

    elif file_ext == '.docx':
        doc = docx.Document(file)
        return "\n".join(paragraph.text for paragraph in doc.paragraphs)

    else:
        return "Unsupported file format. Please upload PDF or DOCX files."

# The Hugging Face access token is read from the API_KEY environment variable.
access_token = os.getenv('API_KEY')

@st.cache_resource
def load_model():
    """Load the tokenizer and model once per process; Streamlit reruns the
    whole script on every interaction, so loading must be cached."""
    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it", token=access_token)
    model = AutoModelForCausalLM.from_pretrained(
        "google/gemma-2b-it",
        torch_dtype=torch.bfloat16,
        token=access_token,
    )
    model.eval()  # Inference only: disables dropout and other training-time behavior
    return tokenizer, model

tokenizer, model = load_model()

# Inference client for API-based streaming chat completion (see respond() below)
client = InferenceClient(token=access_token)

DEFAULT_SYSTEM_MESSAGE = (
    "Instructions: Write a concise and professional email expressing interest in the position. "
    "Highlight relevant experience and skills from the CV that match the job requirements. "
    "Keep the tone professional and enthusiastic."
)

def conversation_predict(input_text, cv_text="No CV provided",
                         system_message=DEFAULT_SYSTEM_MESSAGE,
                         max_new_tokens=512, temperature=0.7, top_p=0.95):
    """Generate an application email for a single job description with the local model."""
    prompt = f"""{system_message}

CV Summary:
{cv_text}

Job Description:
{input_text}

Email:"""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids

    with torch.no_grad():  # inference only; skip autograd bookkeeping
        outputs = model.generate(input_ids, max_new_tokens=max_new_tokens,
                                 do_sample=True, temperature=temperature, top_p=top_p)

    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)

def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    cv_file,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Generate a response for a multi-turn chat conversation."""
    # Extract CV text and update system message
    cv_text = extract_cv_text(cv_file) if cv_file else "No CV provided"
    
    updated_system_message = f"""Task: Write a professional job application email.

CV Summary:
{cv_text}

{system_message}"""

    messages = [{"role": "system", "content": updated_system_message}]

    for user_input, assistant_reply in history:
        if user_input:
            messages.append({"role": "user", "content": user_input})
        if assistant_reply:
            messages.append({"role": "assistant", "content": assistant_reply})

    messages.append({"role": "user", "content": message})

    response = ""

    for message_chunk in client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = message_chunk["choices"][0]["delta"].get("content", "")
        response += token
        yield response
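
# Hypothetical wiring for respond(): it yields the accumulated text after each
# streamed token, so pair it with a placeholder that is overwritten on every
# yield (st.write_stream would instead concatenate the cumulative yields):
#
#   placeholder = st.empty()
#   for partial in respond(message, [], system_message, cv_file,
#                          max_tokens, temperature, top_p):
#       placeholder.markdown(partial)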

# Streamlit UI
st.title("Job Application Email Generator")

# Instructions text area
system_message = st.text_area("System message", DEFAULT_SYSTEM_MESSAGE, height=150)

# CV file upload
cv_file = st.file_uploader("Upload CV (PDF or DOCX)", type=["pdf", "docx"])

# Sliders for max tokens, temperature, and top-p
max_tokens = st.slider("Max new tokens", min_value=1, max_value=2048, value=512, step=1)
temperature = st.slider("Temperature", min_value=0.1, max_value=4.0, value=0.7, step=0.1)
top_p = st.slider("Top-p (nucleus sampling)", min_value=0.1, max_value=1.0, value=0.95, step=0.05)

# Input message field
message = st.text_input("Job Description", "")

# Button to generate response
if st.button("Generate Email"):
    if message:
        response = conversation_predict(message)
        st.write("Generated Email:")
        st.write(response)
    else:
        st.warning("Please enter a job description.")