"""Simple Gradio demo: stream webcam frames into a multimodal chat interface.

Two event handlers feed one chatbot:
  * ``general_chat`` answers typed messages (echoes the text back).
  * ``online_chat`` runs on each streamed webcam frame and appends a
    user-frame / assistant-reply pair to the shared chat history.
"""

import gradio as gr


def general_chat(text: str, gr_history: list):
    """Handle a typed chat message by echoing it back.

    Args:
        text: The user's message (ChatInterface passes the raw input).
        gr_history: The chat history in ``messages`` format (unused here,
            but required by the ChatInterface handler signature).

    Yields:
        The assistant reply — here simply the input text.
    """
    yield text


def online_chat(gr_webcam_image, gr_history: list):
    """Handle one streamed webcam frame.

    Args:
        gr_webcam_image: Filepath of the captured frame (``type='filepath'``
            on the Image component), or a falsy value when no frame arrived.
        gr_history: The chatbot's current history in ``messages`` format;
            extended in place with a (frame, reply) pair.

    Yields:
        The updated history so the chatbot component re-renders.
    """
    if gr_webcam_image:
        # A tuple content marks the message as a file/image in messages format.
        query = {"role": "user", "content": (gr_webcam_image, )}
        response = {"role": "assistant", "content": 'this is a frame.'}
        gr_history.extend([query, response])
    # Always yield: even with no new frame, return the (unchanged) history.
    yield gr_history


with gr.Blocks() as demo:
    gr.Markdown("## Simple Streaming Record and Response")
    with gr.Row():
        with gr.Column():
            gr_webcam_image = gr.Image(
                label='WebCam',
                sources="webcam",
                height=250,
                type='filepath',  # handlers receive a path, not an array
            )
            gr_chatinterface_ol = gr.ChatInterface(
                fn=general_chat,
                type="messages",
                multimodal=True,
            )
            # Route streamed frames into the same chatbot the ChatInterface owns,
            # so typed messages and frame events share one history.
            gr_webcam_image.stream(
                fn=online_chat,
                inputs=[gr_webcam_image, gr_chatinterface_ol.chatbot],
                outputs=[gr_chatinterface_ol.chatbot],
                stream_every=1,        # capture roughly one frame per second
                concurrency_limit=30,  # allow many concurrent stream sessions
            )

if __name__ == "__main__":
    # Guarded launch: importing this module no longer starts a server.
    demo.queue(default_concurrency_limit=100, max_size=100).launch(
        share=True,
        max_threads=100,
        ssr_mode=False,
    )