Pavan147 committed on
Commit
6c102e5
·
verified ·
1 Parent(s): ea5c0d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -74
app.py CHANGED
@@ -64,85 +64,35 @@
64
  # demo.launch()
65
 
66
import html
import random
import re
from threading import Thread

import gradio as gr
import torch
from PIL import Image, ImageOps
from transformers import AutoProcessor, AutoModelForVision2Seq, TextIteratorStreamer
from transformers.image_utils import load_image
74
 
75
  # Load model & processor once at startup
76
  processor = AutoProcessor.from_pretrained("ds4sd/SmolDocling-256M-preview")
77
- model = AutoModelForVision2Seq.from_pretrained("ds4sd/SmolDocling-256M-preview")
78
-
79
- def add_random_padding(image, min_percent=0.1, max_percent=0.10):
80
- image = image.convert("RGB")
81
- width, height = image.size
82
- pad_w_percent = random.uniform(min_percent, max_percent)
83
- pad_h_percent = random.uniform(min_percent, max_percent)
84
- pad_w = int(width * pad_w_percent)
85
- pad_h = int(height * pad_h_percent)
86
- corner_pixel = image.getpixel((0, 0)) # Top-left corner
87
- padded_image = ImageOps.expand(image, border=(pad_w, pad_h, pad_w, pad_h), fill=corner_pixel)
88
- return padded_image
89
-
90
- def extract_table(image_file):
91
- # Load image
92
- image = load_image(image_file)
93
-
94
- # Optionally add padding if needed for model robustness (optional)
95
- image = add_random_padding(image)
96
-
97
- # Fixed prompt to extract table only (modify if needed)
98
- text = "Convert this table to OTSL."
99
-
100
- # Build the message structure for processor
101
- resulting_messages = [{
102
- "role": "user",
103
- "content": [{"type": "image"}] + [{"type": "text", "text": text}]
104
- }]
105
-
106
- prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
107
- inputs = processor(text=prompt, images=[image], return_tensors="pt").to('cuda')
108
-
109
- generation_args = {
110
- "input_ids": inputs.input_ids,
111
- "pixel_values": inputs.pixel_values,
112
- "attention_mask": inputs.attention_mask,
113
- "max_new_tokens": 8192,
114
- "num_return_sequences": 1,
115
- }
116
-
117
- streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
118
- generation_args["streamer"] = streamer
119
-
120
- thread = Thread(target=model.generate, kwargs=generation_args)
121
- thread.start()
122
-
123
- output_text = ""
124
- for new_text in streamer:
125
- output_text += new_text
126
-
127
- # Clean and return output
128
- cleaned_output = output_text.replace("<end_of_utterance>", "").strip()
129
-
130
- # Optionally convert <chart> tags to <otsl> if present
131
- if "<chart>" in cleaned_output:
132
- cleaned_output = cleaned_output.replace("<chart>", "<otsl>").replace("</chart>", "</otsl>")
133
- cleaned_output = re.sub(r'(<loc_500>)(?!.*<loc_500>)<[^>]+>', r'\1', cleaned_output)
134
-
135
- return cleaned_output or "No table found or unable to extract."
136
 
137
  # Gradio UI
138
  demo = gr.Interface(
139
- fn=extract_table,
140
- inputs=gr.Image(type="filepath", label="Upload Table Image"),
141
- outputs=gr.Textbox(label="Extracted Table (OTSL Format)"),
142
- title="Table Extraction from Image using SmolDocling-256M",
143
- description="Upload an image containing a table. The model will extract the table and output it in OTSL format."
 
 
 
144
  )
145
 
146
- demo.launch(debug=True)
147
-
148
-
 
64
  # demo.launch()
65
 
66
  import gradio as gr
67
+ from transformers import AutoProcessor, AutoModelForImageTextToText
68
+ from PIL import Image
 
 
 
 
 
69
 
70
  # Load model & processor once at startup
71
  processor = AutoProcessor.from_pretrained("ds4sd/SmolDocling-256M-preview")
72
+ model = AutoModelForImageTextToText.from_pretrained("ds4sd/SmolDocling-256M-preview")
73
+
74
+ def smoldocling_readimage(image, prompt_text):
75
+ messages = [
76
+ {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": prompt_text}]}
77
+ ]
78
+ prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
79
+ inputs = processor(text=prompt, images=[image], return_tensors="pt")
80
+ outputs = model.generate(**inputs, max_new_tokens=1024)
81
+ prompt_length = inputs.input_ids.shape[1]
82
+ generated = outputs[:, prompt_length:]
83
+ result = processor.batch_decode(generated, skip_special_tokens=False)[0]
84
+ return result.replace("<end_of_utterance>", "").strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
  # Gradio UI
87
  demo = gr.Interface(
88
+ fn=smoldocling_readimage,
89
+ inputs=[
90
+ gr.Image(type="pil", label="Upload Image"),
91
+ gr.Textbox(lines=1, placeholder="Enter prompt (e.g. Convert to docling)", label="Prompt"),
92
+ ],
93
+ outputs="ostl",
94
+ title="SmolDocling Web App",
95
+ description="Upload a document image and convert it to structured docling format."
96
  )
97
 
98
+ demo.launch()