Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
|
2 |
+
import os
|
3 |
+
from datetime import datetime, timedelta
|
4 |
+
from openai import OpenAI
|
5 |
+
import gradio as gr
|
6 |
+
import oci
|
7 |
+
import io
|
8 |
+
import re
|
9 |
+
from collections import Counter
|
10 |
+
import matplotlib.pyplot as plt
|
11 |
+
from wordcloud import WordCloud
|
12 |
+
|
13 |
+
# === OpenAI API Setup ===
# Fail fast at import time: every feature below needs the OpenAI client,
# so a missing key should stop the app before the UI ever launches.
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")

# Shared client used by transcription, summarization, and recommendations.
client = OpenAI(api_key=openai_api_key)
|
19 |
+
|
20 |
+
# === OCI Object Storage Setup ===
# Credentials are supplied entirely via environment variables.
# OCI_PRIVATE_KEY holds the PEM key text itself (key_content), not a file path.
oci_config = {
    "user": os.environ.get("OCI_USER"),
    "tenancy": os.environ.get("OCI_TENANCY"),
    "fingerprint": os.environ.get("OCI_FINGERPRINT"),
    "region": os.environ.get("OCI_REGION"),
    "key_content": os.environ.get("OCI_PRIVATE_KEY")
}

namespace = os.environ.get("OCI_NAMESPACE")
bucket_name = os.environ.get("OCI_BUCKET_NAME")

# Pre-bind the client so a failed init still leaves a defined name: the
# original printed the error but left `object_storage` unbound, turning
# every later storage call into a confusing NameError instead of a clear
# "client not initialized" failure.
object_storage = None
try:
    object_storage = oci.object_storage.ObjectStorageClient(oci_config)
except Exception as e:
    print("Failed to initialize OCI Object Storage client:", e)
|
36 |
+
|
37 |
+
# === Prompts ===
# System prompt: the fixed persona sent verbatim with every transcription
# request made in transcribe_image().
system_prompt = (
    "You are a detail-oriented assistant that specializes in transcribing and polishing "
    "handwritten notes from images. Your goal is to turn rough, casual, or handwritten "
    "content into clean, structured, and professional-looking text that sounds like it "
    "was written by a human—not an AI. You do not include icons, emojis, or suggest next "
    "steps unless explicitly instructed."
)

# User prompt: per-image instructions paired with the uploaded picture in
# the multimodal message built by transcribe_image().
user_prompt_template = (
    "You will receive an image of handwritten notes. Transcribe the content accurately, "
    "correcting any spelling or grammar issues. Then, organize it clearly with headings, "
    "bullet points, and proper formatting. Maintain the original intent and voice of the "
    "author, but enhance readability and flow. Do not add embellishments or AI-style phrasing."
)
|
52 |
+
|
53 |
+
# === Encode uploaded bytes ===
def encode_image_to_base64(file_bytes):
    """Return *file_bytes* encoded as a base64 ASCII string."""
    encoded = base64.b64encode(file_bytes)
    return encoded.decode("utf-8")
|
56 |
+
|
57 |
+
# === Upload transcription result to OCI ===
def upload_to_object_storage(user_name, text):
    """Save *text* into the configured OCI bucket.

    The object name is "<user>_<YYYYmmdd_HHMMSS>.txt" with spaces in the
    user's name replaced by underscores.  Returns the object name.
    """
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    safe_user = user_name.replace(" ", "_")
    object_name = f"{safe_user}_{stamp}.txt"
    object_storage.put_object(
        namespace_name=namespace,
        bucket_name=bucket_name,
        object_name=object_name,
        put_object_body=text.encode("utf-8"),
    )
    return object_name
|
68 |
+
|
69 |
+
# === List files in object storage ===
def list_object_store():
    """Return the names of all ``.txt`` objects in the bucket, newline-joined.

    On any OCI error the error text is returned instead of raising, so the
    UI textbox always receives a string.
    """
    try:
        listing = object_storage.list_objects(namespace, bucket_name)
        txt_files = [entry.name for entry in listing.data.objects
                     if entry.name.endswith(".txt")]
        return "\n".join(txt_files)
    except Exception as e:
        return f"Failed to list objects: {str(e)}"
|
76 |
+
|
77 |
+
# === Download file ===
def download_transcription(file_name):
    """Fetch object *file_name* from the bucket and return its text content.

    Errors are reported as a string rather than raised so the UI stays up.
    """
    try:
        fetched = object_storage.get_object(namespace, bucket_name, file_name)
        return fetched.data.text
    except Exception as e:
        return f"Failed to download: {str(e)}"
|
84 |
+
|
85 |
+
# === Get files in date range ===
def get_files_by_date_range(start_date, end_date):
    """Return object names whose embedded timestamp lies in [start_date, end_date].

    Object names carry a "YYYYmmdd_HHMMSS" stamp (added by
    upload_to_object_storage); names without one are skipped.
    """
    stamp_pattern = re.compile(r'(\d{8}_\d{6})')
    entries = object_storage.list_objects(namespace, bucket_name).data.objects
    selected = []
    for entry in entries:
        found = stamp_pattern.search(entry.name)
        if not found:
            continue
        stamped_at = datetime.strptime(found.group(1), "%Y%m%d_%H%M%S")
        if start_date <= stamped_at <= end_date:
            selected.append(entry.name)
    return selected
|
96 |
+
|
97 |
+
# === Analyze content with OpenAI ===
def summarize_range(start_date, end_date):
    """Summarize every transcription stored between the two datetimes.

    Downloads each matching object, concatenates the text, and asks
    GPT-4 Turbo for a detailed summary.  Returns the summary text, or
    "No content found." when nothing falls inside the range.
    """
    files = get_files_by_date_range(start_date, end_date)
    # Join once instead of the original quadratic `+=` concatenation loop.
    combined_text = "\n".join(download_transcription(name) for name in files)
    if not combined_text.strip():
        return "No content found."
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": "You are a summarization expert."},
            {"role": "user", "content": "Please summarize the following transcriptions in detail:\n" + combined_text},
        ],
        max_tokens=1500,
    )
    return response.choices[0].message.content
|
114 |
+
|
115 |
+
def recommend_next_steps(start_date, end_date):
    """Suggest follow-up actions based on transcriptions in the date range.

    Same retrieval as summarize_range, but prompts the model as an
    operations consultant.  Returns the recommendations text, or
    "No content found." when the range is empty.
    """
    files = get_files_by_date_range(start_date, end_date)
    # Join once instead of the original quadratic `+=` concatenation loop.
    combined_text = "\n".join(download_transcription(name) for name in files)
    if not combined_text.strip():
        return "No content found."
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": "You are an operations consultant."},
            {"role": "user", "content": "Please recommend next steps based on these transcriptions:\n" + combined_text},
        ],
        max_tokens=1500,
    )
    return response.choices[0].message.content
|
131 |
+
|
132 |
+
# === Generate word cloud and sentiment mock ===
def generate_word_map(start_date, end_date):
    """Render a word cloud from all transcriptions in the date range.

    Returns a PIL image (accepted directly by a gr.Image output), or the
    string "No content found." when the range is empty.
    """
    files = get_files_by_date_range(start_date, end_date)
    combined_text = "\n".join(download_transcription(name) for name in files)
    if not combined_text.strip():
        return "No content found."
    cloud = WordCloud(width=800, height=400, background_color='white').generate(combined_text)
    # The original routed through matplotlib and returned gr.Image.update(...),
    # which was removed in Gradio 4.x, and never closed the pyplot figure
    # (a memory leak on every click).  WordCloud yields the PIL image directly.
    return cloud.to_image()
|
148 |
+
|
149 |
+
# === Transcription logic ===
def transcribe_image(file_bytes, user_name):
    """Transcribe an uploaded handwriting image and archive the result.

    Sends the image (as a base64 data URL) to GPT-4 Turbo with the fixed
    prompts, prefixes the transcription with a timestamp, uploads it to
    OCI Object Storage, and returns the annotated text for display.
    """
    if not file_bytes:
        return "No image uploaded."
    encoded = encode_image_to_base64(file_bytes)
    # NOTE(review): the MIME type is hard-coded to jpeg even for .png
    # uploads; the model appears to decode the base64 payload regardless.
    image_url = f"data:image/jpeg;base64,{encoded}"
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt_template},
                {"type": "image_url", "image_url": {"url": image_url}},
            ]},
        ],
        max_tokens=1500,
    )
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    result = f"🗓️ Transcribed on: {timestamp}\n\n{response.choices[0].message.content}"
    # The dropdown starts with no selection, so user_name can be None; the
    # original then crashed inside upload_to_object_storage (None.replace).
    upload_to_object_storage(user_name or "unknown", result)
    return result
|
170 |
+
|
171 |
+
# === Gradio Interface ===
def _parse_range(s, e):
    """Turn 'YYYY-MM-DD' textbox values into an inclusive datetime range.

    The end date is extended to 23:59:59 of that day; parsing it to plain
    midnight (as the original lambdas did) silently excluded every note
    written on the end date itself, since stored timestamps carry a
    time-of-day component.
    """
    start = datetime.strptime(s, "%Y-%m-%d")
    end = datetime.strptime(e, "%Y-%m-%d") + timedelta(days=1) - timedelta(seconds=1)
    return start, end


with gr.Blocks() as app:
    gr.Markdown("## Handwritten Note Transcriber & Analyzer")

    with gr.Row():
        user_dropdown = gr.Dropdown(
            choices=["Jim Goodwin", "Zahabiya Ali rampurawala", "Keith Gauvin"],
            label="Who is uploading this?"
        )

    # Transcription runs automatically as soon as a file is chosen.
    input_file = gr.File(label="Upload image", type="binary", file_types=[".jpg", ".jpeg", ".png"])
    output_text = gr.Textbox(label="Transcription Output", lines=30)
    input_file.change(fn=transcribe_image, inputs=[input_file, user_dropdown], outputs=output_text)

    gr.Button("List Object Store").click(fn=list_object_store, outputs=gr.Textbox(label="Object Store Contents"))

    gr.Markdown("### Download or Analyze Transcriptions")
    download_input = gr.Textbox(label="File name to download")
    download_output = gr.Textbox(label="Downloaded Transcription")
    gr.Button("Download File").click(fn=download_transcription, inputs=download_input, outputs=download_output)

    gr.Markdown("### Summarize or Recommend from Date Range")
    start = gr.Textbox(label="Start Date (YYYY-MM-DD)")
    end = gr.Textbox(label="End Date (YYYY-MM-DD)")
    summary_output = gr.Textbox(label="Summary")
    gr.Button("Summarize Range").click(
        fn=lambda s, e: summarize_range(*_parse_range(s, e)),
        inputs=[start, end],
        outputs=summary_output
    )

    rec_output = gr.Textbox(label="Recommendations")
    gr.Button("Recommend Next Steps").click(
        fn=lambda s, e: recommend_next_steps(*_parse_range(s, e)),
        inputs=[start, end],
        outputs=rec_output
    )

    gr.Markdown("### Word Cloud")
    gr.Button("Generate Word Map").click(
        fn=lambda s, e: generate_word_map(*_parse_range(s, e)),
        inputs=[start, end],
        outputs=gr.Image()
    )
|
215 |
+
|
216 |
+
# === Launch App ===
if __name__ == "__main__":
    # share=True publishes a temporary public Gradio link in addition to
    # the local server.
    app.launch(share=True)
|