FengHou97 committed on
Commit
bbdbb0c
·
verified ·
1 Parent(s): c07e25f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -71,11 +71,13 @@ def shot(image, labels_text, model_name, hypothesis_template_prefix, hypothesis_
71
  hypothesis_template=hypothesis_template)
72
  return {dic["label"]: dic["score"] for dic in res}
73
 
 
 
74
  iface = gr.Interface(shot,
75
  inputs,
76
  "label",
77
  examples=[["festival.jpg", "lantern, firecracker, couplet", "ViT/B-16", "a photo of a {}", "in {} {} {} from {} with {}.", "clear, autumn, day, side, light occlusion"],
78
- ["car.png", "car, bike, truck", "ViT/B-16", "a photo of a {}", "in {} {} {} from {} with {}.", "clear, winter, day, front, moderate occlusion"]],
79
  description="""<p>Chinese CLIP is a contrastive-learning-based vision-language foundation model pretrained on large-scale Chinese data. For more information, please refer to the paper and official github. Also, Chinese CLIP has already been merged into Huggingface Transformers! <br><br>
80
  Paper: <a href='https://arxiv.org/pdf/2403.02714'>https://arxiv.org/pdf/2403.02714</a> <br>
81
  To begin with the demo, provide a picture (either upload manually, or select from the given examples) and add class labels one by one. Optionally, you can also add template as a prefix to the class labels. <br>""",
 
71
  hypothesis_template=hypothesis_template)
72
  return {dic["label"]: dic["score"] for dic in res}
73
 
74
+ #clear, winter, day, front, moderate occlusion
75
+
76
  iface = gr.Interface(shot,
77
  inputs,
78
  "label",
79
  examples=[["festival.jpg", "lantern, firecracker, couplet", "ViT/B-16", "a photo of a {}", "in {} {} {} from {} with {}.", "clear, autumn, day, side, light occlusion"],
80
+ ["car.png", "car, bike, truck", "ViT/B-16", "a photo of a {}", "in {} {} {} from {} with {}.", ""]],
81
  description="""<p>Chinese CLIP is a contrastive-learning-based vision-language foundation model pretrained on large-scale Chinese data. For more information, please refer to the paper and official github. Also, Chinese CLIP has already been merged into Huggingface Transformers! <br><br>
82
  Paper: <a href='https://arxiv.org/pdf/2403.02714'>https://arxiv.org/pdf/2403.02714</a> <br>
83
  To begin with the demo, provide a picture (either upload manually, or select from the given examples) and add class labels one by one. Optionally, you can also add template as a prefix to the class labels. <br>""",