import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
from PyPDF2 import PdfReader
import docx
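
# Dependencies implied by the imports above: gradio, huggingface_hub,
# transformers, torch, PyPDF2, and python-docx (imported as `docx`).
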
def extract_cv_text(file):
    """Extract text from PDF or DOCX CV files."""
    if file is None:
        return "No CV uploaded"

    file_ext = os.path.splitext(file.name)[1].lower()
    if file_ext == '.pdf':
        reader = PdfReader(file)
        text = ""
        for page in reader.pages:
            # extract_text() may return None for pages with no text layer
            text += page.extract_text() or ""
        return text
    elif file_ext == '.docx':
        doc = docx.Document(file)
        text = ""
        for paragraph in doc.paragraphs:
            text += paragraph.text + "\n"
        return text
    else:
        return "Unsupported file format. Please upload PDF or DOCX files."

# Read the Hugging Face access token from the 'token' environment variable
# (e.g. a Space secret) rather than hard-coding it in the source
access_token = os.getenv('token')
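
# Added guard (not in the original app): fail fast with a clear message if the
# token is missing, since google/gemma models are gated on the Hub and cannot
# be downloaded without an authenticated, license-accepted account.
if access_token is None:
    raise RuntimeError("Set the 'token' environment variable to a Hugging Face access token.")
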
# Initialize the tokenizer and model with the Hugging Face access token
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it", token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b-it",
    torch_dtype=torch.bfloat16,
    token=access_token,
)
model.eval()  # Set the model to evaluation mode

# Initialize the inference client used by the streaming chat handler below.
# chat_completion() needs a target model, so point the client at the same
# model that is loaded locally.
client = InferenceClient(model="google/gemma-2b-it", token=access_token)

def conversation_predict(input_text):
    """Generate a response for single-turn input using the local model."""
    # Build the prompt (an f-string, so input_text is actually interpolated)
    # and tokenize it
    prompt = f"""Job Description:
{input_text}
Instructions: Write a concise and professional email expressing interest in the position.
Highlight relevant experience and skills from the CV that match the job requirements.
Keep the tone professional and enthusiastic.
Email:"""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids

    # Generate a response with the model
    outputs = model.generate(input_ids, max_new_tokens=2048)

    # Decode and return the generated response
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
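
# Note: conversation_predict is a standalone single-turn helper that runs the
# model locally; the Gradio chat UI below streams from the InferenceClient
# instead and never calls this function.
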
def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    cv_file,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Generate a streaming response for a multi-turn chat conversation."""
    # Extract CV text and fold it into the system message
    cv_text = extract_cv_text(cv_file) if cv_file else "No CV provided"
    updated_system_message = f"""Task: Write a professional job application email.
CV Summary:
{cv_text}
{system_message}"""

    # Rebuild the conversation in the chat-completion message format
    messages = [{"role": "system", "content": updated_system_message}]
    for user_input, assistant_reply in history:
        if user_input:
            messages.append({"role": "user", "content": user_input})
        if assistant_reply:
            messages.append({"role": "assistant", "content": assistant_reply})
    messages.append({"role": "user", "content": message})

    # Stream tokens from the Inference API, yielding the growing response
    # so Gradio can render it incrementally
    response = ""
    for message_chunk in client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = message_chunk.choices[0].delta.content or ""
        response += token
        yield response
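
# gr.ChatInterface calls the handler with (message, history) followed by each
# of the additional_inputs, in order, matching respond()'s signature above.
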
# Create a Gradio ChatInterface demo
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(
            value="Instructions: Write a concise and professional email expressing interest in the position.",
            label="System message",
        ),
        gr.File(label="Upload CV (PDF or DOCX)", file_types=[".pdf", ".docx"]),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()