# resume / app.py — Hugging Face Space (author: rikki809, commit 55b1c45, 2.1 kB)
from fastapi import FastAPI
from fastapi.responses import FileResponse
from transformers import pipeline
from fpdf import FPDF
import os
import uvicorn
from fastapi.middleware.cors import CORSMiddleware
# Application instance; all routes below are registered on it.
app = FastAPI()
# Enable CORS so a browser-hosted frontend on another origin can call this API.
# NOTE(review): browsers reject credentialed requests when allow_origins is the
# wildcard "*" — if cookies/auth headers are ever needed, list explicit origins.
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], # Or put your frontend URL later for security
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Hugging Face token (set this in your Space settings → Variables and secrets)
# May be None, in which case the pipeline falls back to unauthenticated access.
HF_TOKEN = os.getenv("HF_TOKEN")
# Load Gemma model
# NOTE: this runs at import time, so the process blocks on model download/load
# before the server can accept its first request.
pipe = pipeline(
"text-generation",
model="google/gemma-3-270m-it",
token=HF_TOKEN,
max_new_tokens=400
)
def generate_resume_with_gemma(data):
    """Build a resume-writing prompt from *data* and return the model's text.

    Parameters
    ----------
    data : dict
        Expected keys: name, email, phone, linkedin, education, internships,
        projects, skills, extra. Missing or ``None`` values render as empty
        strings (the original rendered the literal string "None").

    Returns
    -------
    str
        Only the newly generated resume text. ``return_full_text=False`` is
        passed because text-generation pipelines otherwise return the prompt
        concatenated with the continuation, which leaked the whole prompt
        into the generated PDF.
    """
    def field(key):
        # Default missing/None fields to "" so the prompt stays clean.
        value = data.get(key)
        return "" if value is None else str(value)

    prompt = f"""
Create a professional resume based on the following details.
Write in clear sections (Education, Internships, Projects, Skills, Extra Curricular).
Personal Information:
- Name: {field('name')}
- Email: {field('email')}
- Phone: {field('phone')}
- LinkedIn: {field('linkedin')}
Education:
{field('education')}
Internships:
{field('internships')}
Projects:
{field('projects')}
Skills:
{field('skills')}
Extra Curricular:
{field('extra')}
Resume:
"""
    result = pipe(prompt, max_new_tokens=400, do_sample=False, return_full_text=False)
    return result[0]['generated_text']
def generate_resume_pdf(text, filename="resume.pdf"):
    """Render *text* into a simple single-font PDF and return its path.

    Parameters
    ----------
    text : str
        Resume text; each line is wrapped with ``multi_cell`` at 12 pt Arial.
    filename : str, optional
        Output path. Defaults to "resume.pdf" for backward compatibility.
        NOTE(review): a fixed shared filename means concurrent requests
        overwrite each other's output — consider a tempfile per request.

    Returns
    -------
    str
        The path the PDF was written to (same as *filename*).
    """
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=12)
    for line in text.split("\n"):
        # FPDF's built-in fonts are Latin-1 only; model output often contains
        # bullets/em-dashes, which previously raised UnicodeEncodeError.
        # Replace unsupported characters instead of crashing the request.
        safe = line.encode("latin-1", "replace").decode("latin-1")
        pdf.multi_cell(0, 8, safe)
    pdf.output(filename)
    return filename
@app.post("/generate_resume")
def generate_resume(data: dict):
    """Generate a resume PDF from the posted JSON payload and return it.

    Declared as a plain ``def`` (not ``async def``) so FastAPI runs it in a
    worker thread: model inference and PDF writing are blocking calls that
    would otherwise stall the event loop for every concurrent client.

    Parameters
    ----------
    data : dict
        JSON body with the resume fields (name, email, phone, linkedin,
        education, internships, projects, skills, extra).

    Returns
    -------
    FileResponse
        The generated PDF, served as "resume.pdf".
    """
    # Step 1: Ask Gemma to polish resume text
    text = generate_resume_with_gemma(data)
    # Step 2: Generate PDF
    pdf_file = generate_resume_pdf(text)
    # Step 3: Return PDF file
    return FileResponse(pdf_file, media_type="application/pdf", filename="resume.pdf")
# Local debugging entry point only — Hugging Face Spaces launches the app
# itself, so this branch never runs there.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)