freddyaboulton HF Staff commited on
Commit
a77183b
·
verified ·
1 Parent(s): ba7da69

Upload folder using huggingface_hub

Browse files
messages_testcase.py CHANGED
@@ -3,6 +3,10 @@ import gradio as gr
3
 
4
  runs = 0
5
 
 
 
 
 
6
  def slow_echo(message, history):
7
  global runs # i didn't want to add state or anything to this demo
8
  runs = runs + 1
@@ -10,7 +14,16 @@ def slow_echo(message, history):
10
  time.sleep(0.05)
11
  yield f"Run {runs} - You typed: " + message[: i + 1]
12
 
13
- demo = gr.ChatInterface(slow_echo, type="messages")
 
 
 
 
 
 
 
 
 
14
 
15
  if __name__ == "__main__":
16
  demo.launch()
 
3
 
4
  runs = 0
5
 
6
+ def reset_runs():
7
+ global runs
8
+ runs = 0
9
+
10
  def slow_echo(message, history):
11
  global runs # i didn't want to add state or anything to this demo
12
  runs = runs + 1
 
14
  time.sleep(0.05)
15
  yield f"Run {runs} - You typed: " + message[: i + 1]
16
 
17
+ chat = gr.ChatInterface(slow_echo, type="messages")
18
+
19
+ with gr.Blocks() as demo:
20
+ chat.render()
21
+ # We reset the global variable to minimize flakes
22
+ # this works because CI runs only one test at a time
23
+ # need to use gr.State if we want to parallelize this test
24
+ # currently chatinterface does not support that
25
+ demo.unload(reset_runs)
26
+
27
 
28
  if __name__ == "__main__":
29
  demo.launch()
multimodal_messages_testcase.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ runs = 0
4
+
5
+ def reset_runs():
6
+ global runs
7
+ runs = 0
8
+
9
+ def slow_echo(message, history):
10
+ global runs # i didn't want to add state or anything to this demo
11
+ runs = runs + 1
12
+ for i in range(len(message['text'])):
13
+ yield f"Run {runs} - You typed: " + message['text'][: i + 1]
14
+
15
+ chat = gr.ChatInterface(slow_echo, multimodal=True, type="messages")
16
+
17
+ with gr.Blocks() as demo:
18
+ chat.render()
19
+ # We reset the global variable to minimize flakes
20
+ # this works because CI runs only one test at a time
21
+ # need to use gr.State if we want to parallelize this test
22
+ # currently chatinterface does not support that
23
+ demo.unload(reset_runs)
24
+
25
+ if __name__ == "__main__":
26
+ demo.launch()
multimodal_non_stream_testcase.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ runs = 0
4
+
5
+ def reset_runs():
6
+ global runs
7
+ runs = 0
8
+
9
+ def slow_echo(message, history):
10
+ global runs # i didn't want to add state or anything to this demo
11
+ runs = runs + 1
12
+ return f"Run {runs} - You typed: " + message['text']
13
+
14
+ chat = gr.ChatInterface(slow_echo, multimodal=True, type="tuples")
15
+
16
+ with gr.Blocks() as demo:
17
+ chat.render()
18
+ # We reset the global variable to minimize flakes
19
+ # this works because CI runs only one test at a time
20
+ # need to use gr.State if we want to parallelize this test
21
+ # currently chatinterface does not support that
22
+ demo.unload(reset_runs)
23
+
24
+ if __name__ == "__main__":
25
+ demo.launch()
multimodal_tuples_testcase.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ runs = 0
4
+
5
+ def reset_runs():
6
+ global runs
7
+ runs = 0
8
+
9
+ def slow_echo(message, history):
10
+ global runs # i didn't want to add state or anything to this demo
11
+ runs = runs + 1
12
+ for i in range(len(message['text'])):
13
+ yield f"Run {runs} - You typed: " + message['text'][: i + 1]
14
+
15
+ chat = gr.ChatInterface(slow_echo, multimodal=True, type="tuples")
16
+
17
+ with gr.Blocks() as demo:
18
+ chat.render()
19
+ # We reset the global variable to minimize flakes
20
+ # this works because CI runs only one test at a time
21
+ # need to use gr.State if we want to parallelize this test
22
+ # currently chatinterface does not support that
23
+ demo.unload(reset_runs)
24
+
25
+ if __name__ == "__main__":
26
+ demo.launch()
requirements.txt CHANGED
@@ -1,2 +1,2 @@
1
- gradio-client @ git+https://github.com/gradio-app/gradio@e1c404da1143fb52b659d03e028bdba1badf443d#subdirectory=client/python
2
- https://gradio-pypi-previews.s3.amazonaws.com/e1c404da1143fb52b659d03e028bdba1badf443d/gradio-4.41.0-py3-none-any.whl
 
1
+ gradio-client @ git+https://github.com/gradio-app/gradio@30b5d6f2b75e1ff0ea03c7a6567e43a022871c9f#subdirectory=client/python
2
+ https://gradio-pypi-previews.s3.amazonaws.com/30b5d6f2b75e1ff0ea03c7a6567e43a022871c9f/gradio-4.41.0-py3-none-any.whl
run.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: test_chatinterface_streaming_echo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/messages_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "runs = 0\n", "\n", "def reset_runs():\n", " global runs\n", " runs = 0\n", "\n", "def slow_echo(message, history):\n", " global runs # i didn't want to add state or anything to this demo\n", " runs = runs + 1\n", " for i in range(len(message)):\n", " yield f\"Run {runs} - You typed: \" + message[: i + 1]\n", "\n", "chat = gr.ChatInterface(slow_echo, fill_height=True)\n", "\n", "with gr.Blocks() as demo:\n", " chat.render()\n", " # We reset the global variable to minimize flakes\n", " # this works because CI runs only one test at at time\n", " # need to use gr.State if we want to parallelize this test\n", " # currently chatinterface does not support that\n", " demo.unload(reset_runs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: test_chatinterface_streaming_echo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/messages_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_messages_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_non_stream_testcase.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/test_chatinterface_streaming_echo/multimodal_tuples_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "runs = 0\n", "\n", "def reset_runs():\n", " global runs\n", " runs = 0\n", "\n", "def slow_echo(message, history):\n", " global runs # i didn't want to add state or anything to this demo\n", " runs = runs + 1\n", " for i in range(len(message)):\n", " yield f\"Run {runs} - You typed: \" + message[: i + 1]\n", "\n", "chat = gr.ChatInterface(slow_echo, fill_height=True)\n", "\n", "with gr.Blocks() as demo:\n", " chat.render()\n", " # We reset the global variable to minimize flakes\n", " # this works because CI runs only one test at at time\n", " # need to use gr.State if we want to parallelize this test\n", " # currently chatinterface does not support that\n", " demo.unload(reset_runs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, 
"nbformat": 4, "nbformat_minor": 5}