Spaces: Sleeping
Refactor app.py to transition from Gradio to Streamlit for the job application email generator interface. Update UI components including text areas, file upload, and sliders for user input. Modify requirements.txt to remove Gradio and include necessary dependencies for Streamlit and Hugging Face. This change enhances user experience and streamlines the email generation process.
3baf333
import streamlit as st | |
from huggingface_hub import InferenceClient | |
from transformers import AutoTokenizer, AutoModelForCausalLM | |
import torch | |
import os | |
from PyPDF2 import PdfReader | |
import docx | |
def extract_cv_text(file):
    """Extract plain text from an uploaded CV file.

    Args:
        file: An uploaded file object exposing a ``name`` attribute (e.g. a
            Streamlit ``UploadedFile``), or ``None`` when nothing was uploaded.

    Returns:
        str: The extracted text, or a human-readable message when no file
        was provided or the extension is not ``.pdf``/``.docx``.
    """
    if file is None:
        return "No CV uploaded"

    file_ext = os.path.splitext(file.name)[1].lower()

    if file_ext == '.pdf':
        reader = PdfReader(file)
        # extract_text() returns None for pages without a text layer;
        # coerce to "" so the join never raises TypeError.
        return "".join(page.extract_text() or "" for page in reader.pages)
    elif file_ext == '.docx':
        doc = docx.Document(file)
        # One trailing newline per paragraph, matching simple text export.
        return "".join(paragraph.text + "\n" for paragraph in doc.paragraphs)
    else:
        return "Unsupported file format. Please upload PDF or DOCX files."
# Hugging Face access token is read from the environment; set API_KEY in the
# Space secrets — never hard-code credentials in source.
access_token = os.getenv('API_KEY')

# Load the tokenizer and model once at import time so Streamlit reruns reuse
# them. NOTE: `use_auth_token` is deprecated in recent transformers releases;
# the supported keyword is `token`.
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it", token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b-it",
    torch_dtype=torch.bfloat16,  # halves memory vs. float32 on supported hardware
    token=access_token,
)
model.eval()  # inference only: disables dropout and other training-mode layers

# Inference client for the streaming chat-completion API used by respond().
client = InferenceClient(token=access_token)
def conversation_predict(input_text, max_new_tokens=2048):
    """Generate a job-application email for a single job description.

    Args:
        input_text (str): The job description pasted by the user.
        max_new_tokens (int): Upper bound on generated tokens. Defaults to
            2048, preserving the original behavior for existing callers.

    Returns:
        str: The generated email text only (the prompt is not echoed back).
    """
    prompt = f"""Job Description:
{input_text}
Instructions: Write a concise and professional email expressing interest in the position.
Highlight relevant experience and skills from the CV that match the job requirements.
Keep the tone professional and enthusiastic.
Email:"""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids

    # Generation needs no gradients; skipping them saves memory and time.
    with torch.no_grad():
        outputs = model.generate(input_ids, max_new_tokens=max_new_tokens)

    # model.generate returns prompt + continuation; decode only the newly
    # generated tokens so the prompt text does not leak into the UI.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    cv_file,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Stream a job-application email for a multi-turn chat conversation.

    Args:
        message: The latest user message (typically the job description).
        history: Prior (user, assistant) message pairs; empty strings are skipped.
        system_message: Extra instructions appended to the built-in task prompt.
        cv_file: Uploaded CV file object, or None.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        str: The cumulative response text after each streamed chunk.
    """
    # Fold the CV contents into the system prompt so the model can tailor
    # the email to the candidate's actual experience.
    cv_text = extract_cv_text(cv_file) if cv_file else "No CV provided"
    updated_system_message = f"""Task: Write a professional job application email.
CV Summary:
{cv_text}
{system_message}"""

    messages = [{"role": "system", "content": updated_system_message}]
    for user_input, assistant_reply in history:
        if user_input:
            messages.append({"role": "user", "content": user_input})
        if assistant_reply:
            messages.append({"role": "assistant", "content": assistant_reply})
    messages.append({"role": "user", "content": message})

    response = ""
    for message_chunk in client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # Streamed deltas can carry an explicit None content (e.g. role-only
        # chunks); `.get(..., "")` does not guard that, so coerce with `or ""`.
        token = message_chunk["choices"][0]["delta"].get("content") or ""
        response += token
        yield response
# ---------------- Streamlit UI ----------------
st.title("Job Application Email Generator")

# Extra instructions intended to shape the generated email.
system_message = st.text_area(
    "System message",
    "Instructions: Write a concise and professional email expressing interest in the position.",
    height=150,
)

# Optional CV upload (PDF or DOCX).
cv_file = st.file_uploader("Upload CV (PDF or DOCX)", type=["pdf", "docx"])

# Generation hyper-parameters.
# NOTE(review): these controls — and system_message / cv_file above — are
# collected but never forwarded to conversation_predict(); wire them through
# (e.g. via respond()) or remove them to avoid misleading users.
max_tokens = st.slider("Max new tokens", min_value=1, max_value=2048, value=512, step=1)
temperature = st.slider("Temperature", min_value=0.1, max_value=4.0, value=0.7, step=0.1)
top_p = st.slider("Top-p (nucleus sampling)", min_value=0.1, max_value=1.0, value=0.95, step=0.05)

message = st.text_input("Job Description", "")

if st.button("Generate Email"):
    # Reject whitespace-only input, not just the empty string.
    if message.strip():
        # Generation is slow; show progress instead of a frozen page.
        with st.spinner("Generating email..."):
            response = conversation_predict(message)
        st.write("Generated Email:")
        st.write(response)
    else:
        st.warning("Please enter a job description.")