stalyn314 committed on
Commit
10d1a79
·
verified ·
1 Parent(s): 10b9fa1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -24
app.py CHANGED
@@ -55,35 +55,29 @@ for model in models:
55
  model["cfg"].MODEL.DEVICE = "cpu"
56
 
57
 
58
- def inference(image_url, image, min_score, model_name):
59
- if image_url:
60
- r = requests.get(image_url)
61
- if r:
62
- im = np.frombuffer(r.content, dtype="uint8")
63
- im = cv2.imdecode(im, cv2.IMREAD_COLOR_BGR2RGB)
64
- else:
65
  # Model expect BGR!
66
- im = image[:,:,::-1]
67
 
68
- model_id = model_name_to_id[model_name]
69
 
70
- models[model_id]["cfg"].MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
71
- predictor = DefaultPredictor(models[model_id]["cfg"])
72
 
73
- outputs = predictor(im)
74
 
75
- v = Visualizer(im, models[model_id]["metadata"], scale=1.2)
76
- out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
77
-
78
- return out.get_image()
79
 
 
 
80
 
81
  title = "# DBMDZ Detectron2 Model Demo"
82
  description = """
83
  This demo introduces an interactive playground for our trained Detectron2 model.
84
-
85
  Currently, two models are supported that were trained on manually annotated segments from digitized books:
86
-
87
  * [Version 1 (2-class)](https://huggingface.co/dbmdz/detectron2-model): This model can detect *Illustration* or *Illumination* segments on a given page.
88
  * [Version 2 (4-class)](https://huggingface.co/dbmdz/detectron2-v2-model): This model is more powerful and can detect *Illustration*, *Stamp*, *Initial* or *Other* segments on a given page.
89
  """
@@ -93,21 +87,18 @@ with gr.Blocks() as demo:
93
  gr.Markdown(title)
94
  gr.Markdown(description)
95
 
96
- with gr.Tab("From URL"):
97
- url_input = gr.Textbox(label="Image URL", placeholder="https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg")
98
-
99
  with gr.Tab("From Image"):
100
- image_input = gr.Image(type="numpy", label="Input Image")
101
 
102
  min_score = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score")
103
 
104
  model_name = gr.Radio(choices=[model["name"] for model in models], value=models[0]["name"], label="Select Detectron2 model")
105
 
106
- output_image = gr.Image(type="pil", label="Output")
107
 
108
  inference_button = gr.Button("Submit")
109
 
110
- inference_button.click(fn=inference, inputs=[url_input, image_input, min_score, model_name], outputs=output_image)
111
 
112
  gr.Markdown(footer)
113
 
 
55
  model["cfg"].MODEL.DEVICE = "cpu"
56
 
57
 
58
def inference(images, min_score, model_name):
    """Run the selected Detectron2 model on a batch of gallery images.

    Args:
        images: Iterable of input images from the gr.Gallery component.
            Each item is expected to be an RGB numpy array of shape
            (H, W, 3); Gradio galleries may also deliver
            (image, caption) tuples, which are unpacked defensively.
        min_score: Minimum detection confidence threshold in [0.0, 1.0],
            applied via ROI_HEADS.SCORE_THRESH_TEST.
        model_name: Display name of the model to use; must be a key of
            the module-level ``model_name_to_id`` mapping.

    Returns:
        List of visualization images (one per input) with predicted
        instances drawn; empty list when no images were provided.
    """
    if not images:
        # Gallery can be empty or None before the user uploads anything.
        return []

    # All of this is loop-invariant: resolve the model, set the score
    # threshold, and build the (expensive) predictor exactly once
    # instead of once per image.
    model_id = model_name_to_id[model_name]
    models[model_id]["cfg"].MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(models[model_id]["cfg"])

    results = []
    for image in images:
        # NOTE(review): newer Gradio versions hand gallery items over as
        # (image, caption) tuples — unpack to the raw array if so.
        if isinstance(image, (tuple, list)):
            image = image[0]

        # Model expects BGR input; gallery delivers RGB, so reverse the
        # channel axis.
        im = image[:, :, ::-1]

        outputs = predictor(im)

        v = Visualizer(im, models[model_id]["metadata"], scale=1.2)
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

        results.append(out.get_image())
    return results
76
 
77
  title = "# DBMDZ Detectron2 Model Demo"
78
  description = """
79
  This demo introduces an interactive playground for our trained Detectron2 model.
 
80
  Currently, two models are supported that were trained on manually annotated segments from digitized books:
 
81
  * [Version 1 (2-class)](https://huggingface.co/dbmdz/detectron2-model): This model can detect *Illustration* or *Illumination* segments on a given page.
82
  * [Version 2 (4-class)](https://huggingface.co/dbmdz/detectron2-v2-model): This model is more powerful and can detect *Illustration*, *Stamp*, *Initial* or *Other* segments on a given page.
83
  """
 
87
  gr.Markdown(title)
88
  gr.Markdown(description)
89
 
 
 
 
90
  with gr.Tab("From Image"):
91
+ image_input = gr.Gallery(label="Input Images", type="numpy")
92
 
93
  min_score = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score")
94
 
95
  model_name = gr.Radio(choices=[model["name"] for model in models], value=models[0]["name"], label="Select Detectron2 model")
96
 
97
+ output_gallery = gr.Gallery(label="Output Images")
98
 
99
  inference_button = gr.Button("Submit")
100
 
101
+ inference_button.click(fn=inference, inputs=[image_input, min_score, model_name], outputs=output_gallery)
102
 
103
  gr.Markdown(footer)
104