Sebbe33 committed
Commit d2aded5 · verified · 1 Parent(s): 66952ef

Update app.py

Files changed (1):
  1. app.py (+57 -10)
app.py CHANGED
@@ -2,7 +2,7 @@ import os
 import re
 import io
 import streamlit as st
-from PIL import Image, ImageDraw
+from PIL import Image, ImageDraw, ImageFont
 from google import genai
 from google.genai import types
 from pdf2image import convert_from_bytes
@@ -15,6 +15,8 @@ Only return the list, nothing else. Example:
 [[0.05, 0.12, 0.25, 0.18], [0.30, 0.40, 0.50, 0.55]]
 """
 
+TEXT_EXTRACTION_PROMPT = "Extract the text in this image. Return only the exact text, nothing else."
+
 def parse_list_boxes(text):
     """Improved parsing with better error handling"""
     try:
@@ -24,25 +26,65 @@ def parse_list_boxes(text):
         return [[float(x) for x in m] for m in matches]
 
 def draw_bounding_boxes(image, boxes):
-    """Enhanced drawing with diagnostics"""
+    """Enhanced drawing with numbering"""
     if not boxes:
         return image
 
     draw = ImageDraw.Draw(image)
     width, height = image.size
 
-    for box in boxes:
+    for i, box in enumerate(boxes):
         try:
+            # Convert normalized coordinates to pixel values
             xmin = max(0.0, min(1.0, box[0])) * width
             ymin = max(0.0, min(1.0, box[1])) * height
             xmax = max(0.0, min(1.0, box[2])) * width
             ymax = max(0.0, min(1.0, box[3])) * height
 
+            # Draw bounding box
             draw.rectangle([xmin, ymin, xmax, ymax], outline="#00FF00", width=3)
+
+            # Draw number label
+            label = str(i+1)
+            draw.text((xmin + 5, ymin + 5), label, fill="red")
         except Exception as e:
             st.error(f"Error drawing box: {str(e)}")
     return image
 
+def extract_text_from_region(client, image, box):
+    """Extract text from a specific region using Gemini"""
+    try:
+        width, height = image.size
+        # Convert normalized coordinates to pixel values
+        xmin = int(max(0.0, min(1.0, box[0])) * width)
+        ymin = int(max(0.0, min(1.0, box[1])) * height)
+        xmax = int(max(0.0, min(1.0, box[2])) * width)
+        ymax = int(max(0.0, min(1.0, box[3])) * height)
+
+        if xmin >= xmax or ymin >= ymax:
+            return ""
+
+        # Crop and convert to bytes
+        cropped = image.crop((xmin, ymin, xmax, ymax))
+        img_byte_arr = io.BytesIO()
+        cropped.save(img_byte_arr, format='PNG')
+
+        # Call Gemini API
+        response = client.models.generate_content(
+            model="gemini-2.0-flash-exp",
+            contents=[
+                TEXT_EXTRACTION_PROMPT,
+                types.Part.from_bytes(
+                    data=img_byte_arr.getvalue(),
+                    mime_type="image/png"
+                )
+            ]
+        )
+        return response.text.strip()
+    except Exception as e:
+        st.error(f"Text extraction error: {str(e)}")
+        return ""
+
 # Streamlit UI
 st.title("PDF Text Detection")
 uploaded_file = st.file_uploader("Upload PDF", type=["pdf"])
@@ -53,7 +95,6 @@ if uploaded_file and st.button("Analyze"):
     images = convert_from_bytes(uploaded_file.read(), dpi=300)
     client = genai.Client(api_key=os.getenv("KEY"))
 
-    # Create tabs for pages
    tabs = st.tabs([f"Page {i+1}" for i in range(len(images))])
 
    for idx, (tab, image) in enumerate(zip(tabs, images)):
@@ -61,13 +102,12 @@ if uploaded_file and st.button("Analyze"):
             col1, col2 = st.columns(2)
 
             with col1:
-                st.image(image, caption="Original", use_container_width=True)
+                st.image(image, caption="Original", use_column_width=True)
 
             with col2:
+                # Get bounding boxes
                 img_byte_arr = io.BytesIO()
                 image.save(img_byte_arr, format='PNG')
-
-                # Get bounding boxes
                 response = client.models.generate_content(
                     model="gemini-2.0-flash-exp",
                     contents=[
@@ -79,14 +119,21 @@ if uploaded_file and st.button("Analyze"):
                     ]
                 )
 
-                # Parse and draw
                 boxes = parse_list_boxes(response.text)
-                annotated = draw_bounding_boxes(image.copy(), boxes)
+                texts = [extract_text_from_region(client, image, box) for box in boxes]
 
+                # Draw annotated image
+                annotated = draw_bounding_boxes(image.copy(), boxes)
                 st.image(annotated,
                          caption=f"Detected {len(boxes)} text regions",
-                         use_container_width=True)
+                         use_column_width=True)
 
+                # Display extracted texts
+                if any(texts):
+                    st.subheader("Extracted Texts:")
+                    for i, text in enumerate(texts, 1):
+                        st.write(f"{i}. {text if text else 'No text detected'}")
+
             # Debug section
             debug_expander = st.expander("Debug Details")
             with debug_expander:
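
For anyone trying the change outside Streamlit, here is a minimal sketch of the per-region flow this commit introduces: render one PDF page, ask Gemini for normalized boxes, then re-query Gemini on each crop. The genai.Client, generate_content, and types.Part.from_bytes calls, the "gemini-2.0-flash-exp" model name, the text-extraction prompt, and the KEY environment variable are taken from app.py above; the PDF path, the box-detection prompt wording, and the regex parser are illustrative stand-ins (parse_list_boxes is only partially visible in this diff).

# Sketch only: mirrors app.py's flow outside Streamlit. "sample.pdf" is a
# hypothetical input; the box prompt and regex below are stand-ins, not app.py's.
import io
import os
import re

from google import genai
from google.genai import types
from pdf2image import convert_from_bytes

client = genai.Client(api_key=os.getenv("KEY"))  # same env var as app.py
page = convert_from_bytes(open("sample.pdf", "rb").read(), dpi=300)[0]

# 1) Ask for normalized [xmin, ymin, xmax, ymax] boxes for the whole page.
buf = io.BytesIO()
page.save(buf, format="PNG")
box_response = client.models.generate_content(
    model="gemini-2.0-flash-exp",
    contents=[
        "Return bounding boxes of all text regions as a list "
        "[[xmin, ymin, xmax, ymax], ...] with values normalized to 0-1. "
        "Only return the list, nothing else.",
        types.Part.from_bytes(data=buf.getvalue(), mime_type="image/png"),
    ],
)

# 2) Stand-in parser: pull four floats per bracketed group.
boxes = [
    [float(v) for v in m]
    for m in re.findall(
        r"\[\s*([\d.]+)\s*,\s*([\d.]+)\s*,\s*([\d.]+)\s*,\s*([\d.]+)\s*\]",
        box_response.text,
    )
]

# 3) Crop each region and ask for its exact text, as extract_text_from_region does.
width, height = page.size
for i, box in enumerate(boxes, 1):
    xmin, ymin = int(box[0] * width), int(box[1] * height)
    xmax, ymax = int(box[2] * width), int(box[3] * height)
    if xmin >= xmax or ymin >= ymax:
        continue
    crop_buf = io.BytesIO()
    page.crop((xmin, ymin, xmax, ymax)).save(crop_buf, format="PNG")
    text_response = client.models.generate_content(
        model="gemini-2.0-flash-exp",
        contents=[
            "Extract the text in this image. Return only the exact text, nothing else.",
            types.Part.from_bytes(data=crop_buf.getvalue(), mime_type="image/png"),
        ],
    )
    print(f"{i}. {text_response.text.strip()}")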