Update app.py
app.py CHANGED
@@ -1,84 +1,13 @@
-import base64
-import os
-from datetime import datetime
-from openai import OpenAI
 import gradio as gr
 
-openai_api_key = os.getenv("OPENAI_API_KEY")
-
-if not openai_api_key:
-    raise ValueError("OPENAI_API_KEY environment variable is not set.")
-
-client = OpenAI(api_key=openai_api_key)
-
-# === Prompts ===
-system_prompt = (
-    "You are a detail-oriented assistant that specializes in transcribing and polishing "
-    "handwritten notes from images. Your goal is to turn rough, casual, or handwritten "
-    "content into clean, structured, and professional-looking text that sounds like it "
-    "was written by a human—not an AI. You do not include icons, emojis, or suggest next "
-    "steps unless explicitly instructed."
-)
-
-user_prompt_template = (
-    "You will receive an image of handwritten notes. Transcribe the content accurately, "
-    "correcting any spelling or grammar issues. Then, organize it clearly with headings, "
-    "bullet points, and proper formatting. Maintain the original intent and voice of the "
-    "author, but enhance readability and flow. Do not add embellishments or AI-style phrasing."
-)
-
-# === Image processing ===
-def encode_image_to_base64(image_file):
-    image_bytes = image_file.read()
-    return base64.b64encode(image_bytes).decode("utf-8")
-
-# === Transcription function ===
-def transcribe_images(images):
-    if not images:
-        return "No images uploaded."
-
-    results = []
-    for image in images:
-        encoded_image = encode_image_to_base64(image)
-        image_url = f"data:image/jpeg;base64,{encoded_image}"
-
-        response = client.chat.completions.create(
-            model="gpt-4-turbo",
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": [
-                    {"type": "text", "text": user_prompt_template},
-                    {"type": "image_url", "image_url": {"url": image_url}}
-                ]}
-            ],
-            max_tokens=1500
-        )
-
-        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        result_text = f"🗓️ Transcribed on: {timestamp}\n\n{response.choices[0].message.content}"
-        results.append(result_text)
-
-    return "\n\n---\n\n".join(results)
-
-# === Gradio Interface ===
-app = gr.Interface(
-    fn=transcribe_images,
-    inputs=gr.File(
-        type="file",
-        label="Upload handwritten note images",
-        file_types=[".jpg", ".jpeg", ".png"],
-        multiple=True
-    ),
-    outputs=gr.Textbox(label="Transcribed Output", lines=30),
-    title="Handwritten Note Transcriber",
-    description="Upload one or more images of handwritten notes to receive clean, professional transcriptions."
+def echo(files):
+    return f"Received {len(files)} file(s)."
+
+demo = gr.Interface(
+    fn=echo,
+    inputs=gr.File(label="Upload", file_types=[".jpg", ".jpeg", ".png"], type="file", multiple=True),
+    outputs="text"
 )
 
-if __name__ == "__main__":
-    app.launch()
+demo.launch()
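
Note on the replacement app: in recent Gradio releases, gr.File does not appear to accept multiple=True or type="file"; multiple uploads are requested with file_count="multiple", and type is expected to be "filepath" or "binary". The sketch below shows how the same minimal echo app might look against that API. It is an assumption about the intended behaviour, not part of this commit.

import gradio as gr

# Hypothetical rewrite of the echo app for current Gradio (assumption, not in the commit).
# With file_count="multiple" and type="filepath", gr.File passes a list of file paths.
def echo(files):
    if not files:
        return "No files uploaded."
    return f"Received {len(files)} file(s)."

demo = gr.Interface(
    fn=echo,
    inputs=gr.File(
        label="Upload",
        file_types=[".jpg", ".jpeg", ".png"],
        file_count="multiple",  # replaces multiple=True, which is not a gr.File parameter
        type="filepath",        # replaces type="file", which newer Gradio versions reject
    ),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()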
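
For reference, the removed version encoded each upload as a base64 data URL and sent it to the OpenAI Chat Completions API with an image_url content part. If the transcription logic is later restored on top of filepath inputs, it might look roughly like the sketch below; transcribe_path is a hypothetical helper, while the model name, message shape, and max_tokens are taken from the removed code.

import base64
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Hypothetical helper mirroring the removed transcription flow, adapted to
# filepath inputs (assumption): read the file from disk instead of calling
# .read() on the value Gradio passes in.
def transcribe_path(path: str, system_prompt: str, user_prompt: str) -> str:
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}},
            ]},
        ],
        max_tokens=1500,
    )
    return response.choices[0].message.content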