skallewag committed on
Commit 3d2d774 · verified · 1 Parent(s): 0d3376c

Update app.py

Files changed (1):
  1. app.py +19 -13
app.py CHANGED
@@ -373,26 +373,26 @@ def inference(image, task, *args, **kwargs):
 
 class ImageMask(gr.components.Image):
     """
-    Sets: source="canvas", tool="sketch"
+    Custom Image component with sketch tool enabled
     """
 
     is_template = True
 
     def __init__(self, **kwargs):
-        super().__init__(source="upload", tool="sketch", interactive=True, **kwargs)
+        super().__init__(tool="sketch", interactive=True, **kwargs)
 
     def preprocess(self, x):
         return super().preprocess(x)
 
 class Video(gr.components.Video):
     """
-    Sets: source="canvas", tool="sketch"
+    Custom Video component
     """
 
     is_template = True
 
     def __init__(self, **kwargs):
-        super().__init__(source="upload", **kwargs)
+        super().__init__(interactive=True, **kwargs)
 
     def preprocess(self, x):
         return super().preprocess(x)
@@ -423,18 +423,24 @@ description = f"""
 """
 
 article = "SEEM Demo" + (" (Simplified Interface)" if not model_loaded else "")
-inputs = [ImageMask(label="[Stroke] Draw on Image",type="pil"), gr.inputs.CheckboxGroup(choices=["Stroke", "Example", "Text", "Audio", "Video", "Panoptic"], type="value", label="Interative Mode"), ImageMask(label="[Example] Draw on Referring Image",type="pil"), gr.Textbox(label="[Text] Referring Text"), gr.Audio(label="[Audio] Referring Audio", source="microphone", type="filepath"), gr.Video(label="[Video] Referring Video Segmentation",format="mp4",interactive=True)]
+inputs = [
+    ImageMask(label="[Stroke] Draw on Image", type="pil"),
+    gr.CheckboxGroup(choices=["Stroke", "Example", "Text", "Audio", "Video", "Panoptic"], label="Interactive Mode"),
+    ImageMask(label="[Example] Draw on Referring Image", type="pil"),
+    gr.Textbox(label="[Text] Referring Text"),
+    gr.Audio(label="[Audio] Referring Audio", type="filepath", sources=["microphone"]),
+    gr.Video(label="[Video] Referring Video Segmentation", format="mp4")
+]
+
+outputs = [
+    gr.Image(type="pil", label="Segmentation Results (COCO classes as label)"),
+    gr.Video(label="Video Segmentation Results (COCO classes as label)", format="mp4")
+]
+
 gr.Interface(
     fn=inference,
     inputs=inputs,
-    outputs=[
-        gr.outputs.Image(
-            type="pil",
-            label="Segmentation Results (COCO classes as label)"),
-        gr.Video(
-            label="Video Segmentation Results (COCO classes as label)", format="mp4"
-        ),
-    ],
+    outputs=outputs,
     examples=[
         ["examples/corgi1.webp", ["Text"], "examples/corgi2.jpg", "The corgi.", None, None],
         ["examples/river1.png", ["Text", "Audio"], "examples/river2.png", "The green trees.", "examples/river1.wav", None],