Rajagopal committed
Commit 7233a34 · 1 Parent(s): 07ae18d

Update app.py

Files changed (1): app.py (+6 -8)
app.py CHANGED
@@ -111,14 +111,14 @@ def inference(
     task,
     text_list=None,
     image=None,
-    audio=None,
     image2=None,
+    audio=None,
 ):
     if task == "image-text":
         result = image_text_zeroshot(image, text_list)
     elif task == "audio-text":
         result = audio_text_zeroshot(audio, text_list)
-    elif task == "video-text":
+    elif task == "embeddings of two images and textlist":
         result = doubleimage_text_zeroshot(image, image2, text_list)
     else:
         raise NotImplementedError
@@ -131,7 +131,7 @@ def main():
             choices=[
                 "image-text",
                 "audio-text",
-                "video-text",
+                "embeddings of two images and textlist",
             ],
             type="value",
             default="image-text",
@@ -139,18 +139,16 @@ def main():
         ),
         gr.inputs.Textbox(lines=1, label="Candidate texts"),
         gr.inputs.Image(type="filepath", label="Input image"),
-        gr.inputs.Audio(type="filepath", label="Input audio"),
         gr.inputs.Image(type="filepath", label="Input image2"),
+        gr.inputs.Audio(type="filepath", label="Input audio"),
+
     ]

     iface = gr.Interface(
         inference,
         inputs,
         "label",
-        description="""<p>This is a simple demo of ImageBind for zero-shot cross-modal understanding (now including image classification, audio classification, and video classification). Please refer to the original <a href='https://arxiv.org/abs/2305.05665' target='_blank'>paper</a> and <a href='https://github.com/facebookresearch/ImageBind' target='_blank'>repo</a> for more details.<br>
-To test your own cases, you can upload an image, an audio or a video, and provide the candidate texts separated by "|".<br>
-You can duplicate this space and run it privately: <a href='https://huggingface.co/spaces/OFA-Sys/chinese-clip-zero-shot-image-classification?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>""",
-        title="ImageBind: Zero-shot Cross-modal Understanding",
+        title="Multimodal AI assitive agents for Learning Disorders : Demo with embeddings of ImageBind: ",
     )

     iface.launch()
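For context on why `audio=None` moves to the end of the signature: `gr.Interface` passes component values to the wrapped function positionally, in the order of the `inputs` list, so reordering the parameter has to be paired with moving the audio component to the end of the list, exactly as this commit does in both hunks. Below is a minimal, self-contained sketch of that wiring against the Gradio 3.x-era `gr.inputs` API used in this file; the `*_zeroshot` stubs and the Radio's `label="Task"` are illustrative assumptions, not the real helpers from app.py.

```python
# Minimal sketch of the positional wiring this commit relies on.
# Assumes the Gradio 3.x-era `gr.inputs` API (as in app.py); the *_zeroshot
# helpers below are stubs standing in for the real ImageBind-based ones.
import gradio as gr


def image_text_zeroshot(image, text_list):
    return {"stub": 1.0}  # real helper returns a label -> score dict


def audio_text_zeroshot(audio, text_list):
    return {"stub": 1.0}


def doubleimage_text_zeroshot(image, image2, text_list):
    return {"stub": 1.0}


def inference(task, text_list=None, image=None, image2=None, audio=None):
    # Parameter order mirrors the `inputs` list below (task, texts, image,
    # image2, audio); gr.Interface fills these arguments positionally.
    if task == "image-text":
        return image_text_zeroshot(image, text_list)
    elif task == "audio-text":
        return audio_text_zeroshot(audio, text_list)
    elif task == "embeddings of two images and textlist":
        return doubleimage_text_zeroshot(image, image2, text_list)
    raise NotImplementedError(task)


inputs = [
    gr.inputs.Radio(
        choices=[
            "image-text",
            "audio-text",
            "embeddings of two images and textlist",
        ],
        type="value",
        default="image-text",
        label="Task",  # assumed label; the hunk starts below the Radio(...) line
    ),
    gr.inputs.Textbox(lines=1, label="Candidate texts"),
    gr.inputs.Image(type="filepath", label="Input image"),
    gr.inputs.Image(type="filepath", label="Input image2"),
    gr.inputs.Audio(type="filepath", label="Input audio"),  # last, matching audio=None
]

iface = gr.Interface(inference, inputs, "label")

if __name__ == "__main__":
    iface.launch()
```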