Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
|
2 |
+
import os
|
3 |
+
from datetime import datetime
|
4 |
+
from openai import OpenAI
|
5 |
+
import gradio as gr
|
6 |
+
import oci
|
7 |
+
import io
|
8 |
+
import re
|
9 |
+
import tempfile
|
10 |
+
from PIL import Image as PILImage
|
11 |
+
from collections import Counter
|
12 |
+
import matplotlib.pyplot as plt
|
13 |
+
from wordcloud import WordCloud
|
14 |
+
|
# === OpenAI API Setup ===
# Resolve the key eagerly so a misconfigured deployment fails at startup,
# not on the first request.
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")

client = OpenAI(api_key=openai_api_key)
# === OCI Object Storage Setup ===
# Credentials come from environment variables; the private key is supplied
# inline via key_content rather than a key-file path.
oci_config = {
    "user": os.environ.get("OCI_USER"),
    "tenancy": os.environ.get("OCI_TENANCY"),
    "fingerprint": os.environ.get("OCI_FINGERPRINT"),
    "region": os.environ.get("OCI_REGION"),
    "key_content": os.environ.get("OCI_PRIVATE_KEY")
}

namespace = os.environ.get("OCI_NAMESPACE")
bucket_name = os.environ.get("OCI_BUCKET_NAME")

try:
    object_storage = oci.object_storage.ObjectStorageClient(oci_config)
except Exception as e:
    # Bug fix: previously a failed init left `object_storage` undefined, so
    # every later use raised NameError. Define it as None so the app still
    # imports and later failures are clear attribute errors on None.
    object_storage = None
    print("Failed to initialize OCI Object Storage client:", e)
# === Prompts ===
# System prompt: fixes the assistant's persona for the vision transcription call.
system_prompt = (
    "You are a detail-oriented assistant that specializes in transcribing and polishing handwritten notes from images. "
    "Your goal is to turn rough, casual, or handwritten content into clean, structured, and professional-looking text "
    "that sounds like it was written by a human—not an AI. You do not include icons, emojis, or suggest next "
    "steps unless explicitly instructed."
)

# User prompt: per-image instructions sent alongside the uploaded picture.
user_prompt_template = (
    "You will receive an image of handwritten notes. Transcribe the content accurately, correcting any spelling "
    "or grammar issues. Then, organize it clearly with headings, bullet points, and proper formatting. "
    "Maintain the original intent and voice of the author, but enhance readability and flow. "
    "Do not add embellishments or AI-style phrasing."
)
# === Encode uploaded bytes ===
def encode_image_to_base64(file_bytes):
    """Return *file_bytes* encoded as a base64 text string."""
    encoded = base64.b64encode(file_bytes)
    return encoded.decode("utf-8")
# === Upload transcription result to OCI ===
def upload_to_object_storage(user_name, text):
    """Write *text* to the bucket as UTF-8 and return the object name.

    The object name is derived from the uploader (spaces replaced with
    underscores) plus a second-resolution timestamp, so repeat uploads
    from the same user do not collide.
    """
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    object_name = f"{user_name.replace(' ', '_')}_{stamp}.txt"
    object_storage.put_object(
        namespace_name=namespace,
        bucket_name=bucket_name,
        object_name=object_name,
        put_object_body=text.encode("utf-8")
    )
    return object_name
# === List object storage ===
def list_object_store():
    """Return the names of all .txt objects in the bucket.

    Failures are reported as a single-element list holding the error text,
    so the UI always has a displayable value.
    """
    try:
        listing = object_storage.list_objects(namespace, bucket_name)
        return [entry.name for entry in listing.data.objects if entry.name.endswith(".txt")]
    except Exception as e:
        return [f"Failed to list objects: {str(e)}"]
# === View file contents ===
def view_transcription(file_name):
    """Fetch one object from the bucket and return its text body.

    Returns an error-message string instead of raising, keeping the UI alive
    when an object is missing or storage is unreachable.
    """
    try:
        fetched = object_storage.get_object(namespace, bucket_name, file_name)
        return fetched.data.text
    except Exception as e:
        return f"Failed to load file: {str(e)}"
# === Analyze content with OpenAI ===
def summarize_selected_files(file_list):
    """Concatenate the selected transcriptions and ask the model for a detailed summary."""
    combined_text = "".join(view_transcription(name) + "\n" for name in file_list)
    if not combined_text.strip():
        return "No content found."
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": "You are a summarization expert."},
            {"role": "user", "content": "Please summarize the following transcriptions in detail:\n" + combined_text}
        ],
        max_tokens=1500
    )
    return response.choices[0].message.content
def recommend_from_selected_files(file_list):
    """Concatenate the selected transcriptions and ask the model for next-step recommendations."""
    combined_text = "".join(view_transcription(name) + "\n" for name in file_list)
    if not combined_text.strip():
        return "No content found."
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": "You are an operations consultant."},
            {"role": "user", "content": "Please recommend next steps based on these transcriptions:\n" + combined_text}
        ],
        max_tokens=1500
    )
    return response.choices[0].message.content
# === Generate word cloud from selected files ===
def generate_word_map_from_files(file_list):
    """Render a word cloud from the selected transcriptions.

    The PNG is also archived to object storage. Returns a PIL image, or
    None when nothing is selected or no text could be loaded.
    """
    # Bug fix: a gradio multiselect passes None when nothing is chosen, which
    # previously crashed the for-loop with TypeError. An empty selection
    # already yielded None, so this guard is backward compatible.
    if not file_list:
        return None

    combined_text = ""
    for name in file_list:
        combined_text += view_transcription(name) + "\n"
    if not combined_text.strip():
        return None

    wordcloud = WordCloud(width=800, height=400, background_color='white').generate(combined_text)
    fig = plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    buf = io.BytesIO()
    plt.savefig(buf, format="png")
    # Bug fix: the figure was never closed, leaking one matplotlib figure
    # (and its memory) per button click.
    plt.close(fig)
    buf.seek(0)

    # Archive the rendered image alongside the transcriptions.
    filename = f"wordcloud_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
    object_storage.put_object(
        namespace_name=namespace,
        bucket_name=bucket_name,
        object_name=filename,
        put_object_body=buf.getvalue()
    )

    buf.seek(0)
    return PILImage.open(buf)
# === Transcription logic ===
def transcribe_image(file_bytes, user_name):
    """Transcribe a handwritten-note image with the vision model and archive the result.

    file_bytes: raw bytes of the uploaded image (gradio File, type="binary").
    user_name:  uploader's name, used to build the archived object name.
    Returns the timestamped, polished transcription text, or a short message
    when no image was provided.
    """
    if not file_bytes:
        return "No image uploaded."
    encoded = encode_image_to_base64(file_bytes)
    # Bug fix: the uploader accepts .png as well as .jpg/.jpeg, but the data
    # URL always claimed image/jpeg. Sniff the PNG signature so the declared
    # MIME type matches the actual bytes.
    mime = "image/png" if file_bytes.startswith(b"\x89PNG\r\n\x1a\n") else "image/jpeg"
    image_url = f"data:{mime};base64,{encoded}"
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt_template},
                {"type": "image_url", "image_url": {"url": image_url}}
            ]}
        ],
        max_tokens=1500
    )
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    result = f"🗓️ Transcribed on: {timestamp}\n\n{response.choices[0].message.content}"
    upload_to_object_storage(user_name, result)
    return result
# === Gradio Interface ===
with gr.Blocks() as app:
    gr.Markdown("## Handwritten Note Transcriber & Analyzer")

    with gr.Row():
        uploader = gr.Dropdown(
            choices=["Jim Goodwin", "Zahabiya Ali rampurawala", "Keith Gauvin"],
            label="Who is uploading this?"
        )

    # Transcription runs as soon as a file is chosen; there is no submit button.
    image_upload = gr.File(label="Upload image", type="binary", file_types=[".jpg", ".jpeg", ".png"])
    transcription_box = gr.Textbox(label="Transcription Output", lines=30)
    image_upload.change(fn=transcribe_image, inputs=[image_upload, uploader], outputs=transcription_box)

    gr.Markdown("### List Object Store Contents")
    gr.Button("List Object Store").click(
        fn=lambda: "\n".join(list_object_store()),
        outputs=gr.Textbox(label="Object Store Contents")
    )

    gr.Markdown("### View Transcription")
    # NOTE(review): dropdown choices are snapshotted at app start — objects
    # uploaded after launch will not appear until the app is restarted.
    viewer_choice = gr.Dropdown(choices=list_object_store(), label="Select transcription file")
    viewer_text = gr.Textbox(label="File Content")
    viewer_choice.change(fn=view_transcription, inputs=viewer_choice, outputs=viewer_text)

    gr.Markdown("### Summarize or Recommend")
    analysis_files = gr.Dropdown(choices=list_object_store(), label="Select files to analyze", multiselect=True)
    summary_box = gr.Textbox(label="Summary of Selected Transcriptions")
    recommendation_box = gr.Textbox(label="Recommended Next Steps")
    gr.Button("Summarize Files").click(fn=summarize_selected_files, inputs=analysis_files, outputs=summary_box)
    gr.Button("Recommend from Files").click(fn=recommend_from_selected_files, inputs=analysis_files, outputs=recommendation_box)

    gr.Markdown("### Word Cloud from Files")
    cloud_display = gr.Image(label="Word Cloud")
    gr.Button("Generate Word Map from Files").click(
        fn=generate_word_map_from_files,
        inputs=analysis_files,
        outputs=cloud_display
    )

# === Launch App ===
if __name__ == "__main__":
    app.launch(share=True)