prithivMLmods committed on
Commit
95fa090
·
verified ·
1 Parent(s): 5c8abbf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -229,8 +229,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
229
  gr.Examples(
230
  examples=video_examples,
231
  inputs=[video_query, video_upload]
232
- )
233
-
234
  with gr.Accordion("Advanced options", open=False):
235
  max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
236
  temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
@@ -240,11 +239,11 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
240
 
241
  with gr.Column():
242
  with gr.Column(elem_classes="canvas-output"):
243
- gr.Markdown("## Result.Md")
244
- output = gr.Textbox(label="Raw Output", interactive=False, lines=2, scale=2)
245
 
246
- with gr.Accordion("Formatted Result", open=False):
247
- markdown_output = gr.Markdown()
248
 
249
  model_choice = gr.Radio(
250
  choices=["Qwen2.5-VL-7B-Instruct", "Qwen2.5-VL-3B-Instruct"],
@@ -254,7 +253,8 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
254
  gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Qwen2.5-VL/discussions)")
255
  gr.Markdown("> [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct): The Qwen2.5-VL-7B-Instruct model is a multimodal AI model developed by Alibaba Cloud that excels at understanding both text and images. It's a Vision-Language Model (VLM) designed to handle various visual understanding tasks, including image understanding, video analysis, and even multilingual support.")
256
  gr.Markdown("> [Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct): Qwen2.5-VL-3B-Instruct is an instruction-tuned vision-language model from Alibaba Cloud, built upon the Qwen2-VL series. It excels at understanding and generating text related to both visual and textual inputs, making it capable of tasks like image captioning, visual question answering, and object localization. The model also supports long video understanding and structured data extraction")
257
-
 
258
  image_submit.click(
259
  fn=generate_image,
260
  inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
 
229
  gr.Examples(
230
  examples=video_examples,
231
  inputs=[video_query, video_upload]
232
+ )
 
233
  with gr.Accordion("Advanced options", open=False):
234
  max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
235
  temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
 
239
 
240
  with gr.Column():
241
  with gr.Column(elem_classes="canvas-output"):
242
+ gr.Markdown("## Result.Md")
243
+ output = gr.Textbox(label="Raw Output", interactive=False, lines=2, scale=2)
244
 
245
+ with gr.Accordion("Formatted Result", open=False):
246
+ markdown_output = gr.Markdown()
247
 
248
  model_choice = gr.Radio(
249
  choices=["Qwen2.5-VL-7B-Instruct", "Qwen2.5-VL-3B-Instruct"],
 
253
  gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Qwen2.5-VL/discussions)")
254
  gr.Markdown("> [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct): The Qwen2.5-VL-7B-Instruct model is a multimodal AI model developed by Alibaba Cloud that excels at understanding both text and images. It's a Vision-Language Model (VLM) designed to handle various visual understanding tasks, including image understanding, video analysis, and even multilingual support.")
255
  gr.Markdown("> [Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct): Qwen2.5-VL-3B-Instruct is an instruction-tuned vision-language model from Alibaba Cloud, built upon the Qwen2-VL series. It excels at understanding and generating text related to both visual and textual inputs, making it capable of tasks like image captioning, visual question answering, and object localization. The model also supports long video understanding and structured data extraction")
256
+ gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
257
+
258
  image_submit.click(
259
  fn=generate_image,
260
  inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],