Update app.py
Browse files
app.py
CHANGED
@@ -1,9 +1,13 @@
|
|
1 |
-
|
2 |
-
from
|
3 |
from transformers import pipeline
|
|
|
4 |
import os
|
|
|
|
|
|
|
5 |
|
6 |
-
#
|
7 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
8 |
|
9 |
# Load Gemma model
|
@@ -14,94 +18,56 @@ pipe = pipeline(
|
|
14 |
max_new_tokens=400
|
15 |
)
|
16 |
|
17 |
-
def generate_resume_with_gemma(
|
18 |
prompt = f"""
|
19 |
Create a professional resume based on the following details.
|
20 |
-
Write in clear sections
|
21 |
|
22 |
Personal Information:
|
23 |
-
- Name: {data
|
24 |
-
- Email: {data
|
25 |
-
- Phone: {data
|
26 |
-
- LinkedIn: {data
|
27 |
|
28 |
Education:
|
29 |
-
{data
|
30 |
|
31 |
Internships:
|
32 |
-
{data
|
33 |
|
34 |
Projects:
|
35 |
-
{data
|
36 |
|
37 |
Skills:
|
38 |
-
{data
|
39 |
|
40 |
Extra Curricular:
|
41 |
-
{data
|
42 |
|
43 |
Resume:
|
44 |
"""
|
45 |
-
|
46 |
result = pipe(prompt, max_new_tokens=400, do_sample=False)
|
47 |
return result[0]['generated_text']
|
48 |
|
49 |
-
|
50 |
-
def generate_resume_pdf(text, template="modern"):
|
51 |
pdf = FPDF()
|
52 |
pdf.add_page()
|
53 |
-
|
54 |
-
|
55 |
-
pdf.
|
56 |
-
for line in text.split("\n"):
|
57 |
-
pdf.multi_cell(0, 8, line)
|
58 |
-
else: # minimal template
|
59 |
-
pdf.set_font("Times", size=12)
|
60 |
-
for line in text.split("\n"):
|
61 |
-
pdf.multi_cell(0, 8, line)
|
62 |
-
|
63 |
filename = "resume.pdf"
|
64 |
pdf.output(filename)
|
65 |
return filename
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
-
|
69 |
-
data = {
|
70 |
-
"name": name,
|
71 |
-
"email": email,
|
72 |
-
"phone": phone,
|
73 |
-
"linkedin": linkedin,
|
74 |
-
"education": education,
|
75 |
-
"internships": internships,
|
76 |
-
"projects": projects,
|
77 |
-
"skills": skills,
|
78 |
-
"extra": extra
|
79 |
-
}
|
80 |
-
# Step 1: Generate polished text with Gemma
|
81 |
-
polished_resume = generate_resume_with_gemma(pipe, data)
|
82 |
-
# Step 2: Convert to PDF
|
83 |
-
pdf_file = generate_resume_pdf(polished_resume, template)
|
84 |
-
return pdf_file
|
85 |
-
|
86 |
-
|
87 |
-
demo = gr.Interface(
|
88 |
-
fn=build_resume,
|
89 |
-
inputs=[
|
90 |
-
gr.Textbox(label="Full Name"),
|
91 |
-
gr.Textbox(label="Email"),
|
92 |
-
gr.Textbox(label="Phone"),
|
93 |
-
gr.Textbox(label="LinkedIn"),
|
94 |
-
gr.Textbox(label="Education (Degrees, Years, Scores)", lines=4),
|
95 |
-
gr.Textbox(label="Internships", lines=4),
|
96 |
-
gr.Textbox(label="Projects", lines=4),
|
97 |
-
gr.Textbox(label="Skills (comma separated)"),
|
98 |
-
gr.Textbox(label="Extra Curricular Activities", lines=3),
|
99 |
-
gr.Dropdown(choices=["modern", "minimal"], value="modern", label="Choose Resume Template")
|
100 |
-
],
|
101 |
-
outputs=gr.File(label="Download Resume PDF"),
|
102 |
-
title="AI Resume Builder (Gemma + Templates)",
|
103 |
-
description="Fill your details → AI generates a polished resume → Download as PDF."
|
104 |
-
)
|
105 |
-
|
106 |
if __name__ == "__main__":
|
107 |
-
|
|
|
1 |
+
from fastapi import FastAPI
|
2 |
+
from fastapi.responses import FileResponse
|
3 |
from transformers import pipeline
|
4 |
+
from fpdf import FPDF
|
5 |
import os
|
6 |
+
import uvicorn
|
7 |
+
|
8 |
+
app = FastAPI()
|
9 |
|
10 |
+
# Hugging Face token (set this in your Space settings → Variables and secrets)
|
11 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
12 |
|
13 |
# Load Gemma model
|
|
|
18 |
max_new_tokens=400
|
19 |
)
|
20 |
|
21 |
+
def generate_resume_with_gemma(data):
    """Build a resume-writing prompt from user details and run it through
    the module-level Gemma text-generation pipeline (``pipe``).

    Parameters
    ----------
    data : dict
        Expected keys: ``name``, ``email``, ``phone``, ``linkedin``,
        ``education``, ``internships``, ``projects``, ``skills``,
        ``extra``. Any missing key renders as an empty string instead of
        the literal text "None".

    Returns
    -------
    str
        The pipeline's ``generated_text`` output. NOTE(review): text
        generation pipelines typically echo the prompt at the start of
        ``generated_text`` — confirm and strip if the raw resume body is
        wanted.
    """
    # .get(key, '') so absent fields don't inject the string "None"
    # into the prompt (dict.get with no default returns None).
    prompt = f"""
Create a professional resume based on the following details.
Write in clear sections (Education, Internships, Projects, Skills, Extra Curricular).

Personal Information:
- Name: {data.get('name', '')}
- Email: {data.get('email', '')}
- Phone: {data.get('phone', '')}
- LinkedIn: {data.get('linkedin', '')}

Education:
{data.get('education', '')}

Internships:
{data.get('internships', '')}

Projects:
{data.get('projects', '')}

Skills:
{data.get('skills', '')}

Extra Curricular:
{data.get('extra', '')}

Resume:
"""
    # max_new_tokens repeated per-call to be explicit even though the
    # pipeline was constructed with the same limit.
    result = pipe(prompt, max_new_tokens=400, do_sample=False)
    return result[0]['generated_text']
|
51 |
|
52 |
+
def generate_resume_pdf(text):
    """Render plain resume text into a PDF file and return its path.

    Parameters
    ----------
    text : str
        The resume body; written one source line per ``multi_cell`` so
        long lines wrap automatically.

    Returns
    -------
    str
        Path of the written PDF file ("resume.pdf" in the CWD).
    """
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=12)
    for line in text.split("\n"):
        # Classic FPDF only supports latin-1 text; model output can
        # contain characters outside that set, which would make
        # pdf.output() raise. Replace unencodable characters instead.
        safe_line = line.encode("latin-1", "replace").decode("latin-1")
        pdf.multi_cell(0, 8, safe_line)
    # NOTE(review): fixed filename in the working directory — concurrent
    # requests will overwrite each other's output; a per-request
    # tempfile would be safer. Kept as-is to preserve the interface.
    filename = "resume.pdf"
    pdf.output(filename)
    return filename
|
61 |
|
62 |
+
@app.post("/generate_resume")
|
63 |
+
async def generate_resume(data: dict):
|
64 |
+
# Step 1: Ask Gemma to polish resume text
|
65 |
+
text = generate_resume_with_gemma(data)
|
66 |
+
# Step 2: Generate PDF
|
67 |
+
pdf_file = generate_resume_pdf(text)
|
68 |
+
# Step 3: Return PDF file
|
69 |
+
return FileResponse(pdf_file, media_type="application/pdf", filename="resume.pdf")
|
70 |
|
71 |
+
# For local debugging (not needed in HF Spaces)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
72 |
if __name__ == "__main__":
|
73 |
+
uvicorn.run(app, host="0.0.0.0", port=7860)
|