isLinXu committed on
Commit
af964a5
·
1 Parent(s): bfe7ac7
Files changed (1) hide show
  1. app.py +9 -16
app.py CHANGED
@@ -34,7 +34,7 @@ textrec_model_list = ['ABINet', 'ASTER', 'CRNN', 'MASTER', 'NRTR', 'RobustScanne
34
  textkie_model_list = ['SDMGR','SDMGR']
35
 
36
 
37
- def ocr_inference(inputs, out_dir, det, det_weights, rec, rec_weights, kie, kie_weights, device, batch_size):
38
  init_args, call_args = parse_args()
39
  inputs = np.array(inputs)
40
  img_path = "demo_text_ocr.jpg"
@@ -49,13 +49,7 @@ def ocr_inference(inputs, out_dir, det, det_weights, rec, rec_weights, kie, kie_
49
  init_args['det_weights'] = det_weights
50
  init_args['rec'] = None
51
  init_args['rec_weights'] = rec_weights
52
- if kie is not None:
53
- init_args['kie'] = kie
54
- init_args['kie_weights'] = None
55
- if kie_weights is not None:
56
- init_args['kie'] = None
57
- init_args['kie_weights'] = kie_weights
58
-
59
  call_args['inputs'] = img_path
60
  call_args['out_dir'] = out_dir
61
  call_args['batch_size'] = int(batch_size)
@@ -189,17 +183,17 @@ if __name__ == '__main__':
189
  kie = gr.inputs.Textbox(default='SDMGR')
190
  # kie = gr.inputs.Dropdown(label="Key Information Extraction Model", choices=[m for m in textkie_model_list],
191
  # default='SDMGR')
192
- kie_weights = gr.inputs.Textbox(default=None)
193
  device = gr.inputs.Radio(choices=["cpu", "cuda"], label="Device used for inference", default="cpu")
194
  batch_size = gr.inputs.Number(default=1, label="Inference batch size")
195
  output_image = gr.outputs.Image(type="pil", label="Output Image")
196
  output_json = gr.outputs.Textbox()
197
  download_test_image()
198
- examples = [["demo_text_ocr.jpg", "results", "DBNet", None, "CRNN", None, "SDMGR", None, "cpu", 1],
199
- ["demo_text_det.jpg", "results", "FCENet", None, "ASTER", None, "SDMGR", None, "cpu", 1],
200
- ["demo_text_recog.jpg", "results", "PANet", None, "MASTER", None, "SDMGR", None, "cpu", 1],
201
- ["demo_densetext_det.jpg", "results", "PSENet", None, "CRNN", None, "SDMGR", None, "cpu", 1],
202
- ["demo_kie.jpg", "results", "TextSnake", None, "RobustScanner", None, "SDMGR", None, "cpu", 1]
203
  ]
204
 
205
  title = "MMOCR web demo"
@@ -213,8 +207,7 @@ if __name__ == '__main__':
213
  iface = gr.Interface(
214
  fn=ocr_inference,
215
  inputs=[
216
- input_image, out_dir, det, det_weights, rec, rec_weights,
217
- kie, kie_weights, device, batch_size
218
  ],
219
  outputs=[output_image, output_json], examples=examples,
220
  title=title, description=description, article=article,
 
34
  textkie_model_list = ['SDMGR','SDMGR']
35
 
36
 
37
+ def ocr_inference(inputs, out_dir, det, det_weights, rec, rec_weights, device, batch_size):
38
  init_args, call_args = parse_args()
39
  inputs = np.array(inputs)
40
  img_path = "demo_text_ocr.jpg"
 
49
  init_args['det_weights'] = det_weights
50
  init_args['rec'] = None
51
  init_args['rec_weights'] = rec_weights
52
+
 
 
 
 
 
 
53
  call_args['inputs'] = img_path
54
  call_args['out_dir'] = out_dir
55
  call_args['batch_size'] = int(batch_size)
 
183
  kie = gr.inputs.Textbox(default='SDMGR')
184
  # kie = gr.inputs.Dropdown(label="Key Information Extraction Model", choices=[m for m in textkie_model_list],
185
  # default='SDMGR')
186
+ # kie_weights = gr.inputs.Textbox(default=None)
187
  device = gr.inputs.Radio(choices=["cpu", "cuda"], label="Device used for inference", default="cpu")
188
  batch_size = gr.inputs.Number(default=1, label="Inference batch size")
189
  output_image = gr.outputs.Image(type="pil", label="Output Image")
190
  output_json = gr.outputs.Textbox()
191
  download_test_image()
192
+ examples = [["demo_text_ocr.jpg", "results", "DBNet", None, "CRNN", None, "cpu", 1],
193
+ ["demo_text_det.jpg", "results", "FCENet", None, "ASTER", None, "cpu", 1],
194
+ ["demo_text_recog.jpg", "results", "PANet", None, "MASTER", None, "cpu", 1],
195
+ ["demo_densetext_det.jpg", "results", "PSENet", None, "CRNN", None, "cpu", 1],
196
+ ["demo_kie.jpg", "results", "TextSnake", None, "RobustScanner", None, "cpu", 1]
197
  ]
198
 
199
  title = "MMOCR web demo"
 
207
  iface = gr.Interface(
208
  fn=ocr_inference,
209
  inputs=[
210
+ input_image, out_dir, det, det_weights, rec, rec_weights, device, batch_size
 
211
  ],
212
  outputs=[output_image, output_json], examples=examples,
213
  title=title, description=description, article=article,