freddyaboulton HF Staff commited on
Commit
77658aa
·
verified ·
1 Parent(s): 2eaa148

Commit 2: Add 50 file(s)

Browse files
Files changed (50) hide show
  1. demos/image_segmentation/requirements.txt +1 -0
  2. demos/image_segmentation/run.ipynb +1 -0
  3. demos/image_segmentation/run.py +61 -0
  4. demos/interface_random_slider/run.ipynb +1 -0
  5. demos/interface_random_slider/run.py +20 -0
  6. demos/kitchen_sink/requirements.txt +1 -0
  7. demos/kitchen_sink/run.ipynb +1 -0
  8. demos/kitchen_sink/run.py +165 -0
  9. demos/kitchen_sink_random/__init__.py +0 -0
  10. demos/kitchen_sink_random/constants.py +59 -0
  11. demos/kitchen_sink_random/requirements.txt +2 -0
  12. demos/kitchen_sink_random/run.py +92 -0
  13. demos/matrix_transpose/run.ipynb +1 -0
  14. demos/matrix_transpose/run.py +23 -0
  15. demos/matrix_transpose/screenshot.png +0 -0
  16. demos/mini_leaderboard/assets/__init__.py +0 -0
  17. demos/mini_leaderboard/assets/custom_css.css +87 -0
  18. demos/mini_leaderboard/assets/leaderboard_data.json +0 -0
  19. demos/mini_leaderboard/requirements.txt +1 -0
  20. demos/mini_leaderboard/run.ipynb +1 -0
  21. demos/mini_leaderboard/run.py +237 -0
  22. demos/model3D/run.ipynb +1 -0
  23. demos/model3D/run.py +33 -0
  24. demos/native_plots/bar_plot_demo.py +80 -0
  25. demos/native_plots/data.py +20 -0
  26. demos/native_plots/line_plot_demo.py +71 -0
  27. demos/native_plots/requirements.txt +2 -0
  28. demos/native_plots/run.ipynb +1 -0
  29. demos/native_plots/run.py +17 -0
  30. demos/native_plots/scatter_plot_demo.py +72 -0
  31. demos/reverse_audio/requirements.txt +1 -0
  32. demos/reverse_audio/run.ipynb +1 -0
  33. demos/reverse_audio/run.py +26 -0
  34. demos/reverse_audio/screenshot.png +0 -0
  35. demos/stream_audio/requirements.txt +1 -0
  36. demos/stream_audio/run.ipynb +1 -0
  37. demos/stream_audio/run.py +25 -0
  38. demos/stream_frames/requirements.txt +1 -0
  39. demos/stream_frames/run.ipynb +1 -0
  40. demos/stream_frames/run.py +14 -0
  41. demos/stt_or_tts/run.ipynb +1 -0
  42. demos/stt_or_tts/run.py +27 -0
  43. demos/video_component/run.ipynb +1 -0
  44. demos/video_component/run.py +18 -0
  45. demos/zip_files/run.ipynb +1 -0
  46. demos/zip_files/run.py +22 -0
  47. demos/zip_files/screenshot.png +0 -0
  48. image.png +0 -0
  49. requirements.txt +7 -0
  50. run.py +46 -0
demos/image_segmentation/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ numpy
demos/image_segmentation/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_segmentation\n", "### Simple image segmentation using gradio's AnnotatedImage component.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import random\n", "\n", "with gr.Blocks() as demo:\n", " section_labels = [\n", " \"apple\",\n", " \"banana\",\n", " \"carrot\",\n", " \"donut\",\n", " \"eggplant\",\n", " \"fish\",\n", " \"grapes\",\n", " \"hamburger\",\n", " \"ice cream\",\n", " \"juice\",\n", " ]\n", "\n", " with gr.Row():\n", " num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n", " num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n", "\n", " with gr.Row():\n", " img_input = gr.Image()\n", " img_output = gr.AnnotatedImage(\n", " color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n", " )\n", "\n", " section_btn = gr.Button(\"Identify Sections\")\n", " selected_section = gr.Textbox(label=\"Selected Section\")\n", "\n", " def section(img, num_boxes, num_segments):\n", " sections = []\n", " for a in range(num_boxes):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " w = random.randint(0, img.shape[1] - x)\n", " h = random.randint(0, img.shape[0] - y)\n", " sections.append(((x, y, x + w, y + h), section_labels[a]))\n", " for b in range(num_segments):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n", " mask = np.zeros(img.shape[:2])\n", " for i in range(img.shape[0]):\n", " for j in range(img.shape[1]):\n", " 
dist_square = (i - y) ** 2 + (j - x) ** 2\n", " if dist_square < r**2:\n", " mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n", " sections.append((mask, section_labels[b + num_boxes]))\n", " return (img, sections)\n", "\n", " section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n", "\n", " def select_section(evt: gr.SelectData):\n", " return section_labels[evt.index]\n", "\n", " img_output.select(select_section, None, selected_section)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/image_segmentation/run.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio demo: simple image segmentation using the AnnotatedImage component.

Random boxes and circular segment masks are generated over an uploaded image
and rendered as annotations; clicking an annotation echoes its label.
"""
import gradio as gr
import numpy as np
import random

with gr.Blocks() as demo:
    # Pool of labels; boxes consume labels from the front, segments continue after.
    section_labels = [
        "apple",
        "banana",
        "carrot",
        "donut",
        "eggplant",
        "fish",
        "grapes",
        "hamburger",
        "ice cream",
        "juice",
    ]

    with gr.Row():
        num_boxes = gr.Slider(0, 5, 2, step=1, label="Number of boxes")
        num_segments = gr.Slider(0, 5, 1, step=1, label="Number of segments")

    with gr.Row():
        img_input = gr.Image()
        img_output = gr.AnnotatedImage(
            color_map={"banana": "#a89a00", "carrot": "#ffae00"}
        )

    section_btn = gr.Button("Identify Sections")
    selected_section = gr.Textbox(label="Selected Section")

    def section(img, num_boxes, num_segments):
        """Return (img, sections) where sections mixes random boxes and masks.

        Boxes are (x1, y1, x2, y2) tuples; segments are float masks in [0, 1]
        shaped like the image, with intensity quantized to quarter steps.
        """
        sections = []
        for a in range(num_boxes):
            # Random top-left corner, then a size that stays inside the image.
            x = random.randint(0, img.shape[1])
            y = random.randint(0, img.shape[0])
            w = random.randint(0, img.shape[1] - x)
            h = random.randint(0, img.shape[0] - y)
            sections.append(((x, y, x + w, y + h), section_labels[a]))
        for b in range(num_segments):
            # Random circle center; radius capped so the circle fits the frame.
            x = random.randint(0, img.shape[1])
            y = random.randint(0, img.shape[0])
            r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))
            mask = np.zeros(img.shape[:2])
            for i in range(img.shape[0]):
                for j in range(img.shape[1]):
                    dist_square = (i - y) ** 2 + (j - x) ** 2
                    if dist_square < r**2:
                        # Fade out toward the rim, quantized to 0/.25/.5/.75/1.
                        mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4
            sections.append((mask, section_labels[b + num_boxes]))
        return (img, sections)

    section_btn.click(section, [img_input, num_boxes, num_segments], img_output)

    def select_section(evt: gr.SelectData):
        # evt.index is the position of the clicked annotation.
        return section_labels[evt.index]

    img_output.select(select_section, None, selected_section)

if __name__ == "__main__":
    demo.launch()
demos/interface_random_slider/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_random_slider"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def func(slider_1, slider_2, *args):\n", " return slider_1 + slider_2 * 5\n", "\n", "demo = gr.Interface(\n", " func,\n", " [\n", " gr.Slider(minimum=1.5, maximum=250000.89, randomize=True, label=\"Random Big Range\"),\n", " gr.Slider(minimum=-1, maximum=1, randomize=True, step=0.05, label=\"Random only multiple of 0.05 allowed\"),\n", " gr.Slider(minimum=0, maximum=1, randomize=True, step=0.25, label=\"Random only multiples of 0.25 allowed\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, step=3, label=\"Random between -100 and 100 step 3\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, label=\"Random between -100 and 100\"),\n", " gr.Slider(value=0.25, minimum=5, maximum=30, step=-1),\n", " ],\n", " \"number\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/interface_random_slider/run.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio demo: sliders with randomized initial values.

Only the first two sliders feed the function; the remaining inputs are
captured by *args purely to showcase `randomize=True` / value-callable
behavior (including a deliberately inconsistent final slider).
"""
import gradio as gr

def func(slider_1, slider_2, *args):
    # Extra sliders are accepted but ignored on purpose.
    return slider_1 + slider_2 * 5

demo = gr.Interface(
    func,
    [
        gr.Slider(minimum=1.5, maximum=250000.89, randomize=True, label="Random Big Range"),
        gr.Slider(minimum=-1, maximum=1, randomize=True, step=0.05, label="Random only multiple of 0.05 allowed"),
        gr.Slider(minimum=0, maximum=1, randomize=True, step=0.25, label="Random only multiples of 0.25 allowed"),
        gr.Slider(minimum=-100, maximum=100, randomize=True, step=3, label="Random between -100 and 100 step 3"),
        gr.Slider(minimum=-100, maximum=100, randomize=True, label="Random between -100 and 100"),
        # NOTE(review): value below minimum and a negative step — kept as-is;
        # this demo intentionally exercises edge-case slider configuration.
        gr.Slider(value=0.25, minimum=5, maximum=30, step=-1),
    ],
    "number",
)

if __name__ == "__main__":
    demo.launch()
demos/kitchen_sink/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ numpy
demos/kitchen_sink/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: kitchen_sink"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import os\n", "import json\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "from gradio.media import get_image, get_video, get_audio, get_file\n", "\n", "CHOICES = [\"foo\", \"bar\", \"baz\"]\n", "JSONOBJ = \"\"\"{\"items\":{\"item\":[{\"id\": \"0001\",\"type\": null,\"is_good\": false,\"ppu\": 0.55,\"batters\":{\"batter\":[{ \"id\": \"1001\", \"type\": \"Regular\" },{ \"id\": \"1002\", \"type\": \"Chocolate\" },{ \"id\": \"1003\", \"type\": \"Blueberry\" },{ \"id\": \"1004\", \"type\": \"Devil's Food\" }]},\"topping\":[{ \"id\": \"5001\", \"type\": \"None\" },{ \"id\": \"5002\", \"type\": \"Glazed\" },{ \"id\": \"5005\", \"type\": \"Sugar\" },{ \"id\": \"5007\", \"type\": \"Powdered Sugar\" },{ \"id\": \"5006\", \"type\": \"Chocolate with Sprinkles\" },{ \"id\": \"5003\", \"type\": \"Chocolate\" },{ \"id\": \"5004\", \"type\": \"Maple\" }]}]}}\"\"\"\n", "\n", "def fn(\n", " text1,\n", " text2,\n", " num,\n", " slider1,\n", " slider2,\n", " single_checkbox,\n", " checkboxes,\n", " radio,\n", " dropdown,\n", " multi_dropdown,\n", " im1,\n", " # im2,\n", " # im3,\n", " im4,\n", " video,\n", " audio1,\n", " audio2,\n", " file,\n", " df1,\n", " time,\n", "):\n", " return (\n", " (text1 if single_checkbox else text2)\n", " + \", selected:\"\n", " + \", \".join(checkboxes), # Text\n", " {\n", " \"positive\": num / (num + slider1 + slider2),\n", " \"negative\": slider1 / (num + slider1 + slider2),\n", " \"neutral\": slider2 / (num + slider1 + slider2),\n", " }, # Label\n", " (audio1[0], 
np.flipud(audio1[1]))\n", " if audio1 is not None\n", " else get_audio(\"cantina.wav\"), # Audio\n", " np.flipud(im1)\n", " if im1 is not None\n", " else get_image(\"cheetah1.jpg\"), # Image\n", " video\n", " if video is not None\n", " else get_video(\"world.mp4\"), # Video\n", " [\n", " (\"The\", \"art\"),\n", " (\"quick brown\", \"adj\"),\n", " (\"fox\", \"nn\"),\n", " (\"jumped\", \"vrb\"),\n", " (\"testing testing testing\", None),\n", " (\"over\", \"prp\"),\n", " (\"the\", \"art\"),\n", " (\"testing\", None),\n", " (\"lazy\", \"adj\"),\n", " (\"dogs\", \"nn\"),\n", " (\".\", \"punc\"),\n", " ]\n", " + [(f\"test {x}\", f\"test {x}\") for x in range(10)], # HighlightedText\n", " # [(\"The testing testing testing\", None), (\"quick brown\", 0.2), (\"fox\", 1), (\"jumped\", -1), (\"testing testing testing\", 0), (\"over\", 0), (\"the\", 0), (\"testing\", 0), (\"lazy\", 1), (\"dogs\", 0), (\".\", 1)] + [(f\"test {x}\", x/10) for x in range(-10, 10)], # HighlightedText\n", " [\n", " (\"The testing testing testing\", None),\n", " (\"over\", 0.6),\n", " (\"the\", 0.2),\n", " (\"testing\", None),\n", " (\"lazy\", -0.1),\n", " (\"dogs\", 0.4),\n", " (\".\", 0),\n", " ]\n", " + [(\"test\", x / 10) for x in range(-10, 10)], # HighlightedText\n", " json.loads(JSONOBJ), # JSON\n", " \"<button style='background-color: red'>Click Me: \"\n", " + radio\n", " + \"</button>\", # HTML\n", " get_file(\"titanic.csv\"), # File\n", " df1, # Dataframe\n", " np.random.randint(0, 10, (4, 4)), # Dataframe\n", " time, # DateTime\n", " )\n", "\n", "demo = gr.Interface(\n", " fn,\n", " inputs=[\n", " gr.Textbox(value=\"Lorem ipsum\", label=\"Textbox\"),\n", " gr.Textbox(lines=3, placeholder=\"Type here..\", label=\"Textbox 2\"),\n", " gr.Number(label=\"Number\", value=42),\n", " gr.Slider(10, 20, value=15, label=\"Slider: 10 - 20\"),\n", " gr.Slider(maximum=20, step=0.04, label=\"Slider: step @ 0.04\"),\n", " gr.Checkbox(label=\"Checkbox\"),\n", " gr.CheckboxGroup(label=\"CheckboxGroup\", 
choices=CHOICES, value=CHOICES[0:2]),\n", " gr.Radio(label=\"Radio\", choices=CHOICES, value=CHOICES[2]),\n", " gr.Dropdown(label=\"Dropdown\", choices=CHOICES),\n", " gr.Dropdown(\n", " label=\"Multiselect Dropdown (Max choice: 2)\",\n", " choices=CHOICES,\n", " multiselect=True,\n", " max_choices=2,\n", " ),\n", " gr.Image(label=\"Image\"),\n", " # gr.Image(label=\"Image w/ Cropper\", tool=\"select\"),\n", " # gr.Image(label=\"Sketchpad\", source=\"canvas\"),\n", " gr.Image(label=\"Webcam\", sources=[\"webcam\"]),\n", " gr.Video(label=\"Video\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Audio(label=\"Microphone\", sources=[\"microphone\"]),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\", headers=[\"Name\", \"Age\", \"Gender\"]),\n", " gr.DateTime(label=\"DateTime\"),\n", " ],\n", " outputs=[\n", " gr.Textbox(label=\"Textbox\"),\n", " gr.Label(label=\"Label\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Image(label=\"Image\", elem_id=\"output-img\"),\n", " gr.Video(label=\"Video\"),\n", " gr.HighlightedText(\n", " label=\"HighlightedText\", color_map={\"punc\": \"pink\", \"test 0\": \"blue\"}\n", " ),\n", " gr.HighlightedText(label=\"HighlightedText\", show_legend=True),\n", " gr.JSON(label=\"JSON\", show_indices=True),\n", " gr.HTML(label=\"HTML\"),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\"),\n", " gr.Dataframe(label=\"Numpy\"),\n", " gr.DateTime(label=\"DateTime\"),\n", " ],\n", " examples=[\n", " [\n", " \"the quick brown fox\",\n", " \"jumps over the lazy dog\",\n", " 10,\n", " 12,\n", " 4,\n", " True,\n", " [\"foo\", \"baz\"],\n", " \"baz\",\n", " \"bar\",\n", " [\"foo\", \"bar\"],\n", " get_image(\"cheetah1.jpg\"),\n", " # get_image(\"cheetah1.jpg\"),\n", " # get_image(\"cheetah1.jpg\"),\n", " get_image(\"cheetah1.jpg\"),\n", " get_video(\"world.mp4\"),\n", " get_audio(\"cantina.wav\"),\n", " get_audio(\"cantina.wav\"),\n", " get_file(\"titanic.csv\"),\n", " [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 
5, 6]],\n", " \"2025-06-10 12:00:00\",\n", " ]\n", " ]\n", " * 3,\n", " title=\"Kitchen Sink\",\n", " description=\"Try out all the components!\",\n", " article=\"Learn more about [Gradio](http://gradio.app)\",\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/kitchen_sink/run.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio demo: "kitchen sink" — one Interface exercising most component types.

`fn` echoes/transforms each input into a matching output component; sample
media files ship with Gradio via `gradio.media` helpers.
"""
import os  # NOTE(review): unused here; kept to preserve the published demo verbatim
import json

import numpy as np

import gradio as gr
from gradio.media import get_image, get_video, get_audio, get_file

CHOICES = ["foo", "bar", "baz"]
# Raw JSON string rendered by the JSON output component after parsing.
JSONOBJ = """{"items":{"item":[{"id": "0001","type": null,"is_good": false,"ppu": 0.55,"batters":{"batter":[{ "id": "1001", "type": "Regular" },{ "id": "1002", "type": "Chocolate" },{ "id": "1003", "type": "Blueberry" },{ "id": "1004", "type": "Devil's Food" }]},"topping":[{ "id": "5001", "type": "None" },{ "id": "5002", "type": "Glazed" },{ "id": "5005", "type": "Sugar" },{ "id": "5007", "type": "Powdered Sugar" },{ "id": "5006", "type": "Chocolate with Sprinkles" },{ "id": "5003", "type": "Chocolate" },{ "id": "5004", "type": "Maple" }]}]}}"""

def fn(
    text1,
    text2,
    num,
    slider1,
    slider2,
    single_checkbox,
    checkboxes,
    radio,
    dropdown,
    multi_dropdown,
    im1,
    # im2,
    # im3,
    im4,
    video,
    audio1,
    audio2,
    file,
    df1,
    time,
):
    """Map every input component to a demo value for its output counterpart.

    Returns a tuple ordered exactly like `outputs=` below. Media inputs fall
    back to bundled sample files when the user supplies nothing.
    """
    return (
        (text1 if single_checkbox else text2)
        + ", selected:"
        + ", ".join(checkboxes),  # Text
        {
            "positive": num / (num + slider1 + slider2),
            "negative": slider1 / (num + slider1 + slider2),
            "neutral": slider2 / (num + slider1 + slider2),
        },  # Label
        (audio1[0], np.flipud(audio1[1]))
        if audio1 is not None
        else get_audio("cantina.wav"),  # Audio
        np.flipud(im1)
        if im1 is not None
        else get_image("cheetah1.jpg"),  # Image
        video
        if video is not None
        else get_video("world.mp4"),  # Video
        [
            ("The", "art"),
            ("quick brown", "adj"),
            ("fox", "nn"),
            ("jumped", "vrb"),
            ("testing testing testing", None),
            ("over", "prp"),
            ("the", "art"),
            ("testing", None),
            ("lazy", "adj"),
            ("dogs", "nn"),
            (".", "punc"),
        ]
        + [(f"test {x}", f"test {x}") for x in range(10)],  # HighlightedText
        # [("The testing testing testing", None), ("quick brown", 0.2), ("fox", 1), ("jumped", -1), ("testing testing testing", 0), ("over", 0), ("the", 0), ("testing", 0), ("lazy", 1), ("dogs", 0), (".", 1)] + [(f"test {x}", x/10) for x in range(-10, 10)], # HighlightedText
        [
            ("The testing testing testing", None),
            ("over", 0.6),
            ("the", 0.2),
            ("testing", None),
            ("lazy", -0.1),
            ("dogs", 0.4),
            (".", 0),
        ]
        + [("test", x / 10) for x in range(-10, 10)],  # HighlightedText
        json.loads(JSONOBJ),  # JSON
        "<button style='background-color: red'>Click Me: "
        + radio
        + "</button>",  # HTML
        get_file("titanic.csv"),  # File
        df1,  # Dataframe
        np.random.randint(0, 10, (4, 4)),  # Dataframe
        time,  # DateTime
    )

demo = gr.Interface(
    fn,
    inputs=[
        gr.Textbox(value="Lorem ipsum", label="Textbox"),
        gr.Textbox(lines=3, placeholder="Type here..", label="Textbox 2"),
        gr.Number(label="Number", value=42),
        gr.Slider(10, 20, value=15, label="Slider: 10 - 20"),
        gr.Slider(maximum=20, step=0.04, label="Slider: step @ 0.04"),
        gr.Checkbox(label="Checkbox"),
        gr.CheckboxGroup(label="CheckboxGroup", choices=CHOICES, value=CHOICES[0:2]),
        gr.Radio(label="Radio", choices=CHOICES, value=CHOICES[2]),
        gr.Dropdown(label="Dropdown", choices=CHOICES),
        gr.Dropdown(
            label="Multiselect Dropdown (Max choice: 2)",
            choices=CHOICES,
            multiselect=True,
            max_choices=2,
        ),
        gr.Image(label="Image"),
        # gr.Image(label="Image w/ Cropper", tool="select"),
        # gr.Image(label="Sketchpad", source="canvas"),
        gr.Image(label="Webcam", sources=["webcam"]),
        gr.Video(label="Video"),
        gr.Audio(label="Audio"),
        gr.Audio(label="Microphone", sources=["microphone"]),
        gr.File(label="File"),
        gr.Dataframe(label="Dataframe", headers=["Name", "Age", "Gender"]),
        gr.DateTime(label="DateTime"),
    ],
    outputs=[
        gr.Textbox(label="Textbox"),
        gr.Label(label="Label"),
        gr.Audio(label="Audio"),
        gr.Image(label="Image", elem_id="output-img"),
        gr.Video(label="Video"),
        gr.HighlightedText(
            label="HighlightedText", color_map={"punc": "pink", "test 0": "blue"}
        ),
        gr.HighlightedText(label="HighlightedText", show_legend=True),
        gr.JSON(label="JSON", show_indices=True),
        gr.HTML(label="HTML"),
        gr.File(label="File"),
        gr.Dataframe(label="Dataframe"),
        gr.Dataframe(label="Numpy"),
        gr.DateTime(label="DateTime"),
    ],
    examples=[
        [
            "the quick brown fox",
            "jumps over the lazy dog",
            10,
            12,
            4,
            True,
            ["foo", "baz"],
            "baz",
            "bar",
            ["foo", "bar"],
            get_image("cheetah1.jpg"),
            # get_image("cheetah1.jpg"),
            # get_image("cheetah1.jpg"),
            get_image("cheetah1.jpg"),
            get_video("world.mp4"),
            get_audio("cantina.wav"),
            get_audio("cantina.wav"),
            get_file("titanic.csv"),
            [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]],
            "2025-06-10 12:00:00",
        ]
    ]
    * 3,  # repeat the single example row three times
    title="Kitchen Sink",
    description="Try out all the components!",
    article="Learn more about [Gradio](http://gradio.app)",
    cache_examples=True,
)

if __name__ == "__main__":
    demo.launch()
demos/kitchen_sink_random/__init__.py ADDED
File without changes
demos/kitchen_sink_random/constants.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Shared fixtures for the kitchen_sink_random demo.

Provides a random matplotlib figure factory, sample NER-style highlighted-text
payloads, and a random 3D-model path helper.
"""
import numpy as np
import matplotlib.pyplot as plt
import random
from gradio.media import get_model3d

def random_plot():
    """Return a matplotlib Figure of a noisy quadratic over a random span of years."""
    start_year = 2020
    # 0-10 consecutive years starting at 2020 (may be empty).
    x = np.arange(start_year, start_year + random.randint(0, 10))
    year_count = x.shape[0]
    plt_format = "-"
    fig = plt.figure()
    ax = fig.add_subplot(111)
    series = np.arange(0, year_count, dtype=float)
    series = series**2
    series += np.random.rand(year_count)  # jitter so reruns differ visibly
    ax.plot(x, series, plt_format)
    return fig

# Two alternative entity annotations for the same sentence, in the
# token-classification dict format HighlightedText accepts.
highlighted_text_output_1 = [
    {
        "entity": "I-LOC",
        "score": 0.9988978,
        "index": 2,
        "word": "Chicago",
        "start": 5,
        "end": 12,
    },
    {
        "entity": "I-MISC",
        "score": 0.9958592,
        "index": 5,
        "word": "Pakistani",
        "start": 22,
        "end": 31,
    },
]
highlighted_text_output_2 = [
    {
        "entity": "I-LOC",
        "score": 0.9988978,
        "index": 2,
        "word": "Chicago",
        "start": 5,
        "end": 12,
    },
    {
        "entity": "I-LOC",
        "score": 0.9958592,
        "index": 5,
        "word": "Pakistan",
        "start": 22,
        "end": 30,
    },
]

highlighted_text = "Does Chicago have any Pakistani restaurants"

def random_model3d():
    """Return the path of a sample 3D model bundled with Gradio."""
    return get_model3d()
demos/kitchen_sink_random/requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ matplotlib
2
+ pandas
demos/kitchen_sink_random/run.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio demo: every input component initialized with a random/callable value.

The Interface's function just echoes its first argument; the point is the
`value=callable` pattern, which re-evaluates on each page load.
"""
import gradio as gr
from datetime import datetime
import random
import string
import pandas as pd

# get_audio(), get_video(), get_image(), get_model3d(), get_file() return file paths to sample media included with Gradio
from gradio.media import get_audio, get_video, get_image, get_model3d, get_file

from constants import (  # type: ignore
    highlighted_text,
    highlighted_text_output_2,
    highlighted_text_output_1,
    random_plot,
)

demo = gr.Interface(
    lambda *args: args[0],
    inputs=[
        gr.Textbox(value=lambda: datetime.now(), label="Current Time"),
        # Fixed typo: "Ranom" -> "Random".
        gr.Number(value=lambda: random.random(), label="Random Percentage"),
        gr.Slider(minimum=-1, maximum=1, randomize=True, label="Slider with randomize"),
        gr.Slider(
            minimum=0,
            maximum=1,
            value=lambda: random.random(),
            label="Slider with value func",
        ),
        gr.Checkbox(value=lambda: random.random() > 0.5, label="Random Checkbox"),
        gr.CheckboxGroup(
            choices=["a", "b", "c", "d"],
            value=lambda: random.choice(["a", "b", "c", "d"]),
            label="Random CheckboxGroup",
        ),
        gr.Radio(
            choices=list(string.ascii_lowercase),
            value=lambda: random.choice(string.ascii_lowercase),
        ),
        gr.Dropdown(
            choices=["a", "b", "c", "d", "e"],
            value=lambda: random.choice(["a", "b", "c"]),
        ),
        gr.Image(
            value=lambda: get_image()
        ),
        gr.Video(value=lambda: get_video("world.mp4")),
        gr.Audio(value=lambda: get_audio("cantina.wav")),
        gr.File(
            value=lambda: get_file("titanic.csv")
        ),
        gr.Dataframe(
            value=lambda: pd.DataFrame(
                {"random_number_rows": range(random.randint(0, 10))}
            )
        ),
        gr.State(value=lambda: random.choice(string.ascii_lowercase)),
        gr.ColorPicker(value=lambda: random.choice(["#000000", "#ff0000", "#0000FF"])),
        gr.Label(value=lambda: random.choice(["Pedestrian", "Car", "Cyclist"])),
        gr.HighlightedText(
            value=lambda: random.choice(
                [
                    {"text": highlighted_text, "entities": highlighted_text_output_1},
                    {"text": highlighted_text, "entities": highlighted_text_output_2},
                ]
            ),
        ),
        gr.JSON(value=lambda: random.choice([{"a": 1}, {"b": 2}])),
        gr.HTML(
            value=lambda: random.choice(
                [
                    '<p style="color:red;">I am red</p>',
                    '<p style="color:blue;">I am blue</p>',
                ]
            )
        ),
        gr.Gallery(
            value=lambda: [get_image() for _ in range(3)]
        ),
        gr.Chatbot(
            value=lambda: random.choice([[("hello", "hi!")], [("bye", "goodbye!")]])
        ),
        gr.Model3D(value=lambda: get_model3d()),
        gr.Plot(value=random_plot),
        gr.Markdown(value=lambda: f"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}"),
    ],
    outputs=[
        gr.State(value=lambda: random.choice(string.ascii_lowercase))
    ],
)

if __name__ == "__main__":
    demo.launch()
demos/matrix_transpose/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: matrix_transpose"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "\n", "import gradio as gr\n", "\n", "def transpose(matrix):\n", " return matrix.T\n", "\n", "demo = gr.Interface(\n", " transpose,\n", " gr.Dataframe(type=\"numpy\", datatype=\"number\", row_count=5, col_count=3, show_fullscreen_button=True),\n", " \"numpy\",\n", " examples=[\n", " [np.zeros((30, 30)).tolist()],\n", " [np.ones((2, 2)).tolist()],\n", " [np.random.randint(0, 10, (3, 10)).tolist()],\n", " [np.random.randint(0, 10, (10, 3)).tolist()],\n", " [np.random.randint(0, 10, (10, 10)).tolist()],\n", " ],\n", " cache_examples=False\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/matrix_transpose/run.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio demo: transpose a numeric matrix entered in a Dataframe component."""
import numpy as np

import gradio as gr

def transpose(matrix):
    # `type="numpy"` on the input Dataframe delivers a numpy array here.
    return matrix.T

demo = gr.Interface(
    transpose,
    gr.Dataframe(type="numpy", datatype="number", row_count=5, col_count=3, show_fullscreen_button=True),
    "numpy",
    examples=[
        [np.zeros((30, 30)).tolist()],
        [np.ones((2, 2)).tolist()],
        [np.random.randint(0, 10, (3, 10)).tolist()],
        [np.random.randint(0, 10, (10, 3)).tolist()],
        [np.random.randint(0, 10, (10, 10)).tolist()],
    ],
    cache_examples=False
)

if __name__ == "__main__":
    demo.launch()
demos/matrix_transpose/screenshot.png ADDED
demos/mini_leaderboard/assets/__init__.py ADDED
File without changes
demos/mini_leaderboard/assets/custom_css.css ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Hides the final AutoEvalColumn */
#llm-benchmark-tab-table table td:last-child,
#llm-benchmark-tab-table table th:last-child {
    display: none;
}

/* Limit the width of the first AutoEvalColumn so that names don't expand too much */
table td:first-child,
table th:first-child {
    max-width: 400px;
    overflow: auto;
    white-space: nowrap;
}

/* Full width space */
.gradio-container {
    max-width: 95%!important;
}

/* Text style and margins */
.markdown-text {
    font-size: 16px !important;
}

#models-to-add-text {
    font-size: 18px !important;
}

#citation-button span {
    font-size: 16px !important;
}

#citation-button textarea {
    font-size: 16px !important;
}

#citation-button > label > button {
    margin: 6px;
    transform: scale(1.3);
}

#search-bar-table-box > div:first-child {
    background: none;
    border: none;
}

#search-bar {
    padding: 0px;
}

.tab-buttons button {
    font-size: 20px;
}

/* Filters style */
#filter_type{
    border: 0;
    padding-left: 0;
    padding-top: 0;
}
#filter_type label {
    display: flex;
}
#filter_type label > span{
    margin-top: var(--spacing-lg);
    margin-right: 0.5em;
}
#filter_type label > .wrap{
    width: 103px;
}
#filter_type label > .wrap .wrap-inner{
    padding: 2px;
}
#filter_type label > .wrap .wrap-inner input{
    width: 1px
}
#filter-columns-type{
    border:0;
    padding:0.5;
}
#filter-columns-size{
    border:0;
    padding:0.5;
}
#box-filter > .form{
    border: 0
}
demos/mini_leaderboard/assets/leaderboard_data.json ADDED
The diff for this file is too large to render. See raw diff
 
demos/mini_leaderboard/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ pandas
demos/mini_leaderboard/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: mini_leaderboard"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('assets')\n", "!wget -q -O assets/__init__.py https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/__init__.py\n", "!wget -q -O assets/custom_css.css https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/custom_css.css\n", "!wget -q -O assets/leaderboard_data.json https://github.com/gradio-app/gradio/raw/main/demo/mini_leaderboard/assets/leaderboard_data.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["# type: ignore\n", "import gradio as gr\n", "import pandas as pd\n", "from pathlib import Path\n", "\n", "abs_path = Path(__file__).parent.absolute()\n", "\n", "df = pd.read_json(str(abs_path / \"assets/leaderboard_data.json\"))\n", "invisible_df = df.copy()\n", "\n", "COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"Type\",\n", " \"Architecture\",\n", " \"Precision\",\n", " \"Merged\",\n", " \"Hub License\",\n", " \"#Params (B)\",\n", " \"Hub \u2764\ufe0f\",\n", " \"Model sha\",\n", " \"model_name_for_query\",\n", "]\n", "ON_LOAD_COLS = [\n", " \"T\",\n", " \"Model\",\n", " \"Average \u2b06\ufe0f\",\n", " \"ARC\",\n", " \"HellaSwag\",\n", " \"MMLU\",\n", " \"TruthfulQA\",\n", " \"Winogrande\",\n", " \"GSM8K\",\n", " \"model_name_for_query\",\n", "]\n", "TYPES = 
[\n", " \"str\",\n", " \"markdown\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"number\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"str\",\n", " \"bool\",\n", " \"str\",\n", " \"number\",\n", " \"number\",\n", " \"bool\",\n", " \"str\",\n", " \"bool\",\n", " \"bool\",\n", " \"str\",\n", "]\n", "NUMERIC_INTERVALS = {\n", " \"?\": pd.Interval(-1, 0, closed=\"right\"),\n", " \"~1.5\": pd.Interval(0, 2, closed=\"right\"),\n", " \"~3\": pd.Interval(2, 4, closed=\"right\"),\n", " \"~7\": pd.Interval(4, 9, closed=\"right\"),\n", " \"~13\": pd.Interval(9, 20, closed=\"right\"),\n", " \"~35\": pd.Interval(20, 45, closed=\"right\"),\n", " \"~60\": pd.Interval(45, 70, closed=\"right\"),\n", " \"70+\": pd.Interval(70, 10000, closed=\"right\"),\n", "}\n", "MODEL_TYPE = [str(s) for s in df[\"T\"].unique()]\n", "Precision = [str(s) for s in df[\"Precision\"].unique()]\n", "\n", "# Searching and filtering\n", "def update_table(\n", " hidden_df: pd.DataFrame,\n", " columns: list,\n", " type_query: list,\n", " precision_query: str,\n", " size_query: list,\n", " query: str,\n", "):\n", " filtered_df = filter_models(hidden_df, type_query, size_query, precision_query) # type: ignore\n", " filtered_df = filter_queries(query, filtered_df)\n", " df = select_columns(filtered_df, columns)\n", " return df\n", "\n", "def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:\n", " return df[(df[\"model_name_for_query\"].str.contains(query, case=False))] # type: ignore\n", "\n", "def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:\n", " # We use COLS to maintain sorting\n", " filtered_df = df[[c for c in COLS if c in df.columns and c in columns]]\n", " return filtered_df # type: ignore\n", "\n", "def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:\n", " final_df = []\n", " if query != \"\":\n", " queries = [q.strip() for q in query.split(\";\")]\n", " for _q in 
queries:\n", " _q = _q.strip()\n", " if _q != \"\":\n", " temp_filtered_df = search_table(filtered_df, _q)\n", " if len(temp_filtered_df) > 0:\n", " final_df.append(temp_filtered_df)\n", " if len(final_df) > 0:\n", " filtered_df = pd.concat(final_df)\n", " filtered_df = filtered_df.drop_duplicates( # type: ignore\n", " subset=[\"Model\", \"Precision\", \"Model sha\"]\n", " )\n", "\n", " return filtered_df\n", "\n", "def filter_models(\n", " df: pd.DataFrame,\n", " type_query: list,\n", " size_query: list,\n", " precision_query: list,\n", ") -> pd.DataFrame:\n", " # Show all models\n", " filtered_df = df\n", "\n", " type_emoji = [t[0] for t in type_query]\n", " filtered_df = filtered_df.loc[df[\"T\"].isin(type_emoji)]\n", " filtered_df = filtered_df.loc[df[\"Precision\"].isin(precision_query + [\"None\"])]\n", "\n", " numeric_interval = pd.IntervalIndex(\n", " sorted([NUMERIC_INTERVALS[s] for s in size_query]) # type: ignore\n", " )\n", " params_column = pd.to_numeric(df[\"#Params (B)\"], errors=\"coerce\")\n", " mask = params_column.apply(lambda x: any(numeric_interval.contains(x))) # type: ignore\n", " filtered_df = filtered_df.loc[mask]\n", "\n", " return filtered_df\n", "\n", "demo = gr.Blocks(css=str(abs_path / \"assets/leaderboard_data.json\"))\n", "with demo:\n", " gr.Markdown(\"\"\"Test Space of the LLM Leaderboard\"\"\", elem_classes=\"markdown-text\")\n", "\n", " with gr.Tabs(elem_classes=\"tab-buttons\") as tabs:\n", " with gr.TabItem(\"\ud83c\udfc5 LLM Benchmark\", elem_id=\"llm-benchmark-tab-table\", id=0):\n", " with gr.Row():\n", " with gr.Column():\n", " with gr.Row():\n", " search_bar = gr.Textbox(\n", " placeholder=\" \ud83d\udd0d Search for your model (separate multiple queries with `;`) and press ENTER...\",\n", " show_label=False,\n", " elem_id=\"search-bar\",\n", " )\n", " with gr.Row():\n", " shown_columns = gr.CheckboxGroup(\n", " choices=COLS,\n", " value=ON_LOAD_COLS,\n", " label=\"Select columns to show\",\n", " 
elem_id=\"column-select\",\n", " interactive=True,\n", " )\n", " with gr.Column(min_width=320):\n", " filter_columns_type = gr.CheckboxGroup(\n", " label=\"Model types\",\n", " choices=MODEL_TYPE,\n", " value=MODEL_TYPE,\n", " interactive=True,\n", " elem_id=\"filter-columns-type\",\n", " )\n", " filter_columns_precision = gr.CheckboxGroup(\n", " label=\"Precision\",\n", " choices=Precision,\n", " value=Precision,\n", " interactive=True,\n", " elem_id=\"filter-columns-precision\",\n", " )\n", " filter_columns_size = gr.CheckboxGroup(\n", " label=\"Model sizes (in billions of parameters)\",\n", " choices=list(NUMERIC_INTERVALS.keys()),\n", " value=list(NUMERIC_INTERVALS.keys()),\n", " interactive=True,\n", " elem_id=\"filter-columns-size\",\n", " )\n", "\n", " leaderboard_table = gr.components.Dataframe(\n", " value=df[ON_LOAD_COLS], # type: ignore\n", " headers=ON_LOAD_COLS,\n", " datatype=TYPES,\n", " elem_id=\"leaderboard-table\",\n", " interactive=False,\n", " visible=True,\n", " column_widths=[\"2%\", \"33%\"],\n", " )\n", "\n", " # Dummy leaderboard for handling the case when the user uses backspace key\n", " hidden_leaderboard_table_for_search = gr.components.Dataframe(\n", " value=invisible_df[COLS], # type: ignore\n", " headers=COLS,\n", " datatype=TYPES,\n", " visible=False,\n", " )\n", " search_bar.submit(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " )\n", " for selector in [\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " ]:\n", " selector.change(\n", " update_table,\n", " [\n", " hidden_leaderboard_table_for_search,\n", " shown_columns,\n", " filter_columns_type,\n", " filter_columns_precision,\n", " filter_columns_size,\n", " search_bar,\n", " ],\n", " leaderboard_table,\n", " queue=True,\n", " )\n", 
"\n", "if __name__ == \"__main__\":\n", " demo.queue(default_concurrency_limit=40).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/mini_leaderboard/run.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # type: ignore
2
+ import gradio as gr
3
+ import pandas as pd
4
+ from pathlib import Path
5
+
6
+ abs_path = Path(__file__).parent.absolute()
7
+
8
+ df = pd.read_json(str(abs_path / "assets/leaderboard_data.json"))
9
+ invisible_df = df.copy()
10
+
11
+ COLS = [
12
+ "T",
13
+ "Model",
14
+ "Average ⬆️",
15
+ "ARC",
16
+ "HellaSwag",
17
+ "MMLU",
18
+ "TruthfulQA",
19
+ "Winogrande",
20
+ "GSM8K",
21
+ "Type",
22
+ "Architecture",
23
+ "Precision",
24
+ "Merged",
25
+ "Hub License",
26
+ "#Params (B)",
27
+ "Hub ❤️",
28
+ "Model sha",
29
+ "model_name_for_query",
30
+ ]
31
+ ON_LOAD_COLS = [
32
+ "T",
33
+ "Model",
34
+ "Average ⬆️",
35
+ "ARC",
36
+ "HellaSwag",
37
+ "MMLU",
38
+ "TruthfulQA",
39
+ "Winogrande",
40
+ "GSM8K",
41
+ "model_name_for_query",
42
+ ]
43
+ TYPES = [
44
+ "str",
45
+ "markdown",
46
+ "number",
47
+ "number",
48
+ "number",
49
+ "number",
50
+ "number",
51
+ "number",
52
+ "number",
53
+ "str",
54
+ "str",
55
+ "str",
56
+ "str",
57
+ "bool",
58
+ "str",
59
+ "number",
60
+ "number",
61
+ "bool",
62
+ "str",
63
+ "bool",
64
+ "bool",
65
+ "str",
66
+ ]
67
+ NUMERIC_INTERVALS = {
68
+ "?": pd.Interval(-1, 0, closed="right"),
69
+ "~1.5": pd.Interval(0, 2, closed="right"),
70
+ "~3": pd.Interval(2, 4, closed="right"),
71
+ "~7": pd.Interval(4, 9, closed="right"),
72
+ "~13": pd.Interval(9, 20, closed="right"),
73
+ "~35": pd.Interval(20, 45, closed="right"),
74
+ "~60": pd.Interval(45, 70, closed="right"),
75
+ "70+": pd.Interval(70, 10000, closed="right"),
76
+ }
77
+ MODEL_TYPE = [str(s) for s in df["T"].unique()]
78
+ Precision = [str(s) for s in df["Precision"].unique()]
79
+
80
+ # Searching and filtering
81
def update_table(
    hidden_df: pd.DataFrame,
    columns: list,
    type_query: list,
    precision_query: list,
    size_query: list,
    query: str,
):
    """Recompute the visible leaderboard table.

    Filters the full hidden table by model type / size / precision, then by
    the free-text search query, and finally projects onto the user-selected
    columns (kept in canonical COLS order).
    """
    # FIX: precision_query was annotated `str`, but it comes from a
    # CheckboxGroup and is a list (filter_models also annotates it as list).
    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query)  # type: ignore
    filtered_df = filter_queries(query, filtered_df)
    return select_columns(filtered_df, columns)
93
+
94
def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
    """Keep only rows whose model name contains `query` (case-insensitive)."""
    matches = df["model_name_for_query"].str.contains(query, case=False)
    return df[matches]  # type: ignore
96
+
97
def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    """Project onto the requested columns.

    Iterates COLS (not `columns`) so the output always preserves the
    canonical column ordering regardless of checkbox click order.
    """
    wanted = [col for col in COLS if col in df.columns and col in columns]
    return df[wanted]  # type: ignore
101
+
102
def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
    """Apply a `;`-separated search query.

    Each non-empty term is matched independently against the model names and
    the per-term hits are unioned, then deduplicated on the identifying
    (Model, Precision, Model sha) triple. An empty query is a no-op.
    """
    if query != "":
        partial_results = []
        for term in query.split(";"):
            term = term.strip()
            if term != "":
                hits = search_table(filtered_df, term)
                if len(hits) > 0:
                    partial_results.append(hits)
        if len(partial_results) > 0:
            combined = pd.concat(partial_results)
            filtered_df = combined.drop_duplicates(  # type: ignore
                subset=["Model", "Precision", "Model sha"]
            )

    return filtered_df
119
+
120
def filter_models(
    df: pd.DataFrame,
    type_query: list,
    size_query: list,
    precision_query: list,
) -> pd.DataFrame:
    """Filter the leaderboard by model type, parameter-count bucket and precision.

    Returns the subset of `df` matching ALL three criteria. Checkbox labels in
    `type_query` carry the type emoji as their first character, which is what
    the "T" column stores.
    """
    # FIX(robustness): the original chained `.loc` calls applied boolean masks
    # built from the *full* df to an already-filtered frame, relying on
    # pandas aligning a superset boolean indexer. Build all masks against the
    # same frame and apply them once instead.
    type_emoji = [t[0] for t in type_query]
    type_mask = df["T"].isin(type_emoji)
    # "None" precision rows are always kept alongside the selected precisions.
    precision_mask = df["Precision"].isin(precision_query + ["None"])

    numeric_interval = pd.IntervalIndex(
        sorted([NUMERIC_INTERVALS[s] for s in size_query])  # type: ignore
    )
    # Non-numeric parameter counts become NaN and fall in no interval.
    params_column = pd.to_numeric(df["#Params (B)"], errors="coerce")
    size_mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))  # type: ignore

    return df.loc[type_mask & precision_mask & size_mask]
141
+
142
# FIX: css= previously pointed at assets/leaderboard_data.json (the JSON data
# file). Load the actual stylesheet shipped with the demo, whose selectors
# (#llm-benchmark-tab-table, #search-bar, #filter-columns-*, ...) match the
# elem_ids used below.
demo = gr.Blocks(css=str(abs_path / "assets/custom_css.css"))
with demo:
    gr.Markdown("""Test Space of the LLM Leaderboard""", elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        search_bar = gr.Textbox(
                            placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
                            show_label=False,
                            elem_id="search-bar",
                        )
                    with gr.Row():
                        shown_columns = gr.CheckboxGroup(
                            choices=COLS,
                            value=ON_LOAD_COLS,
                            label="Select columns to show",
                            elem_id="column-select",
                            interactive=True,
                        )
                with gr.Column(min_width=320):
                    filter_columns_type = gr.CheckboxGroup(
                        label="Model types",
                        choices=MODEL_TYPE,
                        value=MODEL_TYPE,
                        interactive=True,
                        elem_id="filter-columns-type",
                    )
                    filter_columns_precision = gr.CheckboxGroup(
                        label="Precision",
                        choices=Precision,
                        value=Precision,
                        interactive=True,
                        elem_id="filter-columns-precision",
                    )
                    filter_columns_size = gr.CheckboxGroup(
                        label="Model sizes (in billions of parameters)",
                        choices=list(NUMERIC_INTERVALS.keys()),
                        value=list(NUMERIC_INTERVALS.keys()),
                        interactive=True,
                        elem_id="filter-columns-size",
                    )

            # The table the user actually sees (subset of columns).
            leaderboard_table = gr.components.Dataframe(
                value=df[ON_LOAD_COLS],  # type: ignore
                headers=ON_LOAD_COLS,
                datatype=TYPES,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
                column_widths=["2%", "33%"],
            )

            # Dummy leaderboard for handling the case when the user uses backspace key
            hidden_leaderboard_table_for_search = gr.components.Dataframe(
                value=invisible_df[COLS],  # type: ignore
                headers=COLS,
                datatype=TYPES,
                visible=False,
            )
            # ENTER in the search box re-filters from the full hidden table.
            search_bar.submit(
                update_table,
                [
                    hidden_leaderboard_table_for_search,
                    shown_columns,
                    filter_columns_type,
                    filter_columns_precision,
                    filter_columns_size,
                    search_bar,
                ],
                leaderboard_table,
            )
            # Every filter / column-selection change re-runs the same pipeline.
            for selector in [
                shown_columns,
                filter_columns_type,
                filter_columns_precision,
                filter_columns_size,
            ]:
                selector.change(
                    update_table,
                    [
                        hidden_leaderboard_table_for_search,
                        shown_columns,
                        filter_columns_type,
                        filter_columns_precision,
                        filter_columns_size,
                        search_bar,
                    ],
                    leaderboard_table,
                    queue=True,
                )

if __name__ == "__main__":
    demo.queue(default_concurrency_limit=40).launch()
demos/model3D/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "# get_model3d() returns the file path to sample 3D models included with Gradio\n", "from gradio.media import get_model3d, MEDIA_ROOT\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(label=\"Other name\", display_mode=\"wireframe\"),\n", " outputs=gr.Model3D(\n", " clear_color=(0.0, 0.0, 0.0, 0.0), label=\"3D Model\", display_mode=\"wireframe\"\n", " ),\n", " examples=[\n", " [get_model3d(\"Bunny.obj\")],\n", " [get_model3d(\"Duck.glb\")],\n", " [get_model3d(\"Fox.gltf\")],\n", " [get_model3d(\"face.obj\")],\n", " [get_model3d(\"sofia.stl\")],\n", " [\n", " \"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/bonsai/bonsai-7k-mini.splat\"\n", " ],\n", " [\n", " \"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/luigi/luigi.ply\"\n", " ],\n", " ],\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch(allowed_paths=[str(MEDIA_ROOT)])\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/model3D/run.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ # get_model3d() returns the file path to sample 3D models included with Gradio
3
+ from gradio.media import get_model3d, MEDIA_ROOT
4
+
5
+
6
def load_mesh(mesh_file_name):
    """Identity handler: echo the uploaded mesh path back to the output viewer."""
    return mesh_file_name


demo = gr.Interface(
    fn=load_mesh,
    inputs=gr.Model3D(label="Other name", display_mode="wireframe"),
    outputs=gr.Model3D(
        clear_color=(0.0, 0.0, 0.0, 0.0), label="3D Model", display_mode="wireframe"
    ),
    examples=[
        # Bundled sample meshes in several formats (obj / glb / gltf / stl)...
        [get_model3d("Bunny.obj")],
        [get_model3d("Duck.glb")],
        [get_model3d("Fox.gltf")],
        [get_model3d("face.obj")],
        [get_model3d("sofia.stl")],
        # ...plus remote Gaussian-splat / point-cloud assets loaded by URL.
        [
            "https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/bonsai/bonsai-7k-mini.splat"
        ],
        [
            "https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/luigi/luigi.ply"
        ],
    ],
    cache_examples=True,
)

if __name__ == "__main__":
    # MEDIA_ROOT must be allow-listed so the bundled example files can be served.
    demo.launch(allowed_paths=[str(MEDIA_ROOT)])
demos/native_plots/bar_plot_demo.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from data import temp_sensor_data, food_rating_data # type: ignore
3
+
4
# Native bar-plot demo: time-series plots with interactive date-range,
# grouping and aggregation controls (data comes from the sibling data.py).
with gr.Blocks() as bar_plots:
    with gr.Row():
        start = gr.DateTime("2021-01-01 00:00:00", label="Start")
        end = gr.DateTime("2021-01-05 00:00:00", label="End")
        apply_btn = gr.Button("Apply", scale=0)
    with gr.Row():
        group_by = gr.Radio(["None", "30m", "1h", "4h", "1d"], value="None", label="Group by")
        aggregate = gr.Radio(["sum", "mean", "median", "min", "max"], value="sum", label="Aggregation")

    # These two plots live in a draggable container so the user can reorder them.
    with gr.Draggable():
        temp_by_time = gr.BarPlot(
            temp_sensor_data,
            x="time",
            y="temperature",
            show_export_button=True,
        )
        temp_by_time_location = gr.BarPlot(
            temp_sensor_data,
            x="time",
            y="temperature",
            color="location",
            show_export_button=True,
        )

    time_graphs = [temp_by_time, temp_by_time_location]
    # Re-bin both time plots when the grouping interval changes ("None" disables binning).
    group_by.change(
        lambda group: [gr.BarPlot(x_bin=None if group == "None" else group)] * len(time_graphs),
        group_by,
        time_graphs
    )
    # Change how binned y values are combined.
    aggregate.change(
        lambda aggregate: [gr.BarPlot(y_aggregate=aggregate)] * len(time_graphs),
        aggregate,
        time_graphs
    )

    def rescale(select: gr.SelectData):
        """A drag-select on either plot yields the chosen (start, end) interval."""
        return select.index
    rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])

    # Zoom both plots when "Apply" is clicked or after a select-rescale completes.
    for trigger in [apply_btn.click, rescale_evt.then]:
        trigger(
            lambda start, end: [gr.BarPlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs
        )

    with gr.Row():
        price_by_cuisine = gr.BarPlot(
            food_rating_data,
            x="cuisine",
            y="price",
            show_export_button=True,
        )
        with gr.Column(scale=0):
            # Sorting can be by value (ascending / descending) or an explicit category order.
            gr.Button("Sort $ > $$$").click(lambda: gr.BarPlot(sort="y"), None, price_by_cuisine)
            gr.Button("Sort $$$ > $").click(lambda: gr.BarPlot(sort="-y"), None, price_by_cuisine)
            gr.Button("Sort A > Z").click(lambda: gr.BarPlot(sort=["Chinese", "Italian", "Mexican"]), None, price_by_cuisine)

    with gr.Row():
        price_by_rating = gr.BarPlot(
            food_rating_data,
            x="rating",
            y="price",
            x_bin=1,
            show_export_button=True,
        )
        price_by_rating_color = gr.BarPlot(
            food_rating_data,
            x="rating",
            y="price",
            color="cuisine",
            x_bin=1,
            color_map={"Italian": "red", "Mexican": "green", "Chinese": "blue"},
            show_export_button=True,
        )

if __name__ == "__main__":
    bar_plots.launch()
demos/native_plots/data.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from random import randint, random
3
+
4
# Synthetic demo datasets shared by the native plot demos.
_N_READINGS = 200
_N_RESTAURANTS = 100
_CUISINES = ["Italian", "Mexican", "Chinese"]


def _reading(row: int) -> int:
    """Random sensor reading; odd (outdoor) rows run 10-15 units hotter."""
    if row % 2:
        return randint(60, 80)
    return randint(50, 65)


# Five days of alternating indoor/outdoor temperature + humidity samples.
temp_sensor_data = pd.DataFrame(
    {
        "time": pd.date_range("2021-01-01", end="2021-01-05", periods=_N_READINGS),
        "temperature": [_reading(row) for row in range(_N_READINGS)],
        "humidity": [_reading(row) for row in range(_N_READINGS)],
        "location": ["indoor", "outdoor"] * (_N_READINGS // 2),
    }
)

# Restaurant ratings/prices cycling through three cuisines, with a small
# per-cuisine offset baked into rating and price.
food_rating_data = pd.DataFrame(
    {
        "cuisine": [_CUISINES[row % 3] for row in range(_N_RESTAURANTS)],
        "rating": [random() * 4 + 0.5 * (row % 3) for row in range(_N_RESTAURANTS)],
        "price": [randint(10, 50) + 4 * (row % 3) for row in range(_N_RESTAURANTS)],
        "wait": [random() for _ in range(_N_RESTAURANTS)],
    }
)
demos/native_plots/line_plot_demo.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from data import temp_sensor_data, food_rating_data # type: ignore
3
+
4
# Native line-plot demo: same controls and wiring as the bar-plot demo,
# rendered with gr.LinePlot (data comes from the sibling data.py).
with gr.Blocks() as line_plots:
    with gr.Row():
        start = gr.DateTime("2021-01-01 00:00:00", label="Start")
        end = gr.DateTime("2021-01-05 00:00:00", label="End")
        apply_btn = gr.Button("Apply", scale=0)
    with gr.Row():
        group_by = gr.Radio(["None", "30m", "1h", "4h", "1d"], value="None", label="Group by")
        aggregate = gr.Radio(["sum", "mean", "median", "min", "max"], value="sum", label="Aggregation")

    temp_by_time = gr.LinePlot(
        temp_sensor_data,
        x="time",
        y="temperature",
        show_export_button=True,
    )
    temp_by_time_location = gr.LinePlot(
        temp_sensor_data,
        x="time",
        y="temperature",
        color="location",
        show_export_button=True,
    )

    time_graphs = [temp_by_time, temp_by_time_location]
    # Re-bin both time plots when the grouping interval changes ("None" disables binning).
    group_by.change(
        lambda group: [gr.LinePlot(x_bin=None if group == "None" else group)] * len(time_graphs),
        group_by,
        time_graphs
    )
    # Change how binned y values are combined.
    aggregate.change(
        lambda aggregate: [gr.LinePlot(y_aggregate=aggregate)] * len(time_graphs),
        aggregate,
        time_graphs
    )

    def rescale(select: gr.SelectData):
        """A drag-select on either plot yields the chosen (start, end) interval."""
        return select.index
    rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])

    # Zoom both plots when "Apply" is clicked or after a select-rescale completes.
    for trigger in [apply_btn.click, rescale_evt.then]:
        trigger(
            lambda start, end: [gr.LinePlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs
        )

    price_by_cuisine = gr.LinePlot(
        food_rating_data,
        x="cuisine",
        y="price",
        show_export_button=True,
    )
    with gr.Row():
        price_by_rating = gr.LinePlot(
            food_rating_data,
            x="rating",
            y="price",
            show_export_button=True,
        )
        price_by_rating_color = gr.LinePlot(
            food_rating_data,
            x="rating",
            y="price",
            color="cuisine",
            color_map={"Italian": "red", "Mexican": "green", "Chinese": "blue"},
            show_export_button=True,
        )

if __name__ == "__main__":
    line_plots.launch()
demos/native_plots/requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ vega_datasets
2
+ pandas
demos/native_plots/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: native_plots"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets pandas "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/bar_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/data.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/line_plot_demo.py\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/native_plots/scatter_plot_demo.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from scatter_plot_demo import scatter_plots # type: ignore\n", "from line_plot_demo import line_plots # type: ignore\n", "from bar_plot_demo import bar_plots # type: ignore\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tabs():\n", " with gr.TabItem(\"Line Plot\"):\n", " line_plots.render()\n", " with gr.TabItem(\"Scatter Plot\"):\n", " scatter_plots.render()\n", " with gr.TabItem(\"Bar Plot\"):\n", " bar_plots.render()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/native_plots/run.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ from scatter_plot_demo import scatter_plots # type: ignore
4
+ from line_plot_demo import line_plots # type: ignore
5
+ from bar_plot_demo import bar_plots # type: ignore
6
+
7
# Combine the three standalone plot demos into one tabbed interface;
# each sub-demo is a gr.Blocks instance re-rendered inside its tab.
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem("Line Plot"):
            line_plots.render()
        with gr.TabItem("Scatter Plot"):
            scatter_plots.render()
        with gr.TabItem("Bar Plot"):
            bar_plots.render()

if __name__ == "__main__":
    demo.launch()
demos/native_plots/scatter_plot_demo.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from data import temp_sensor_data, food_rating_data # type: ignore
3
+
4
# Native scatter-plot demo: same controls as the bar/line demos, rendered
# with gr.ScatterPlot (data comes from the sibling data.py).
with gr.Blocks() as scatter_plots:
    with gr.Row():
        start = gr.DateTime("2021-01-01 00:00:00", label="Start")
        end = gr.DateTime("2021-01-05 00:00:00", label="End")
        apply_btn = gr.Button("Apply", scale=0)
    with gr.Row():
        group_by = gr.Radio(["None", "30m", "1h", "4h", "1d"], value="None", label="Group by")
        aggregate = gr.Radio(["sum", "mean", "median", "min", "max"], value="sum", label="Aggregation")

    temp_by_time = gr.ScatterPlot(
        temp_sensor_data,
        x="time",
        y="temperature",
        show_export_button=True,
    )
    temp_by_time_location = gr.ScatterPlot(
        temp_sensor_data,
        x="time",
        y="temperature",
        color="location",
        show_export_button=True,
    )

    time_graphs = [temp_by_time, temp_by_time_location]
    # Re-bin both time plots when the grouping interval changes ("None" disables binning).
    group_by.change(
        lambda group: [gr.ScatterPlot(x_bin=None if group == "None" else group)] * len(time_graphs),
        group_by,
        time_graphs
    )
    # Change how binned y values are combined.
    aggregate.change(
        lambda aggregate: [gr.ScatterPlot(y_aggregate=aggregate)] * len(time_graphs),
        aggregate,
        time_graphs
    )

    # NOTE(review): unlike the bar/line demos, the select-to-rescale wiring
    # below is commented out, so start/end/apply_btn currently have no
    # effect in this tab — confirm this is intentional.
    # def rescale(select: gr.SelectData):
    #     return select.index
    # rescale_evt = gr.on([plot.select for plot in time_graphs], rescale, None, [start, end])

    # for trigger in [apply_btn.click, rescale_evt.then]:
    #     trigger(
    #         lambda start, end: [gr.ScatterPlot(x_lim=[start, end])] * len(time_graphs), [start, end], time_graphs
    #     )

    price_by_cuisine = gr.ScatterPlot(
        food_rating_data,
        x="cuisine",
        y="price",
        show_export_button=True,
    )
    with gr.Row():
        price_by_rating = gr.ScatterPlot(
            food_rating_data,
            x="rating",
            y="price",
            color="wait",
            show_actions_button=True,
            show_export_button=True,
        )
        price_by_rating_color = gr.ScatterPlot(
            food_rating_data,
            x="rating",
            y="price",
            color="cuisine",
            show_export_button=True,
        )

if __name__ == "__main__":
    scatter_plots.launch()
demos/reverse_audio/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ numpy
demos/reverse_audio/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: reverse_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "def reverse_audio(audio):\n", " sr, data = audio\n", " return (sr, np.flipud(data))\n", "\n", "input_audio = gr.Audio(\n", " sources=[\"microphone\"],\n", " waveform_options=gr.WaveformOptions(\n", " waveform_color=\"#01C6FF\",\n", " waveform_progress_color=\"#0066B4\",\n", " skip_length=2,\n", " show_controls=False,\n", " ),\n", ")\n", "demo = gr.Interface(\n", " fn=reverse_audio,\n", " inputs=input_audio,\n", " outputs=\"audio\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/reverse_audio/run.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+
4
+ import gradio as gr
5
+
6
def reverse_audio(audio):
    """Reverse a clip; `audio` is a (sample_rate, samples-ndarray) tuple."""
    sample_rate, samples = audio
    reversed_samples = np.flipud(samples)
    return (sample_rate, reversed_samples)
9
+
10
# Microphone input with custom waveform styling.
input_audio = gr.Audio(
    sources=["microphone"],
    waveform_options=gr.WaveformOptions(
        waveform_color="#01C6FF",
        waveform_progress_color="#0066B4",
        skip_length=2,
        show_controls=False,
    ),
)
demo = gr.Interface(
    fn=reverse_audio,
    inputs=input_audio,
    outputs="audio"
)

if __name__ == "__main__":
    demo.launch()
demos/reverse_audio/screenshot.png ADDED
demos/stream_audio/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ numpy
demos/stream_audio/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import time\n", "\n", "def add_to_stream(audio, instream):\n", " time.sleep(1)\n", " if audio is None:\n", " return gr.Audio(), instream\n", " if instream is None:\n", " ret = audio\n", " else:\n", " ret = (audio[0], np.concatenate((instream[1], audio[1])))\n", " return ret, ret\n", "\n", "with gr.Blocks() as demo:\n", " inp = gr.Audio(sources=[\"microphone\"])\n", " out = gr.Audio()\n", " stream = gr.State()\n", " clear = gr.Button(\"Clear\")\n", "\n", " inp.stream(add_to_stream, [inp, stream], [out, stream])\n", " clear.click(lambda: [None, None, None], None, [inp, out, stream])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/stream_audio/run.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import time
4
+
5
def add_to_stream(audio, instream):
    """Append the newest microphone chunk onto the accumulated stream.

    Returns the combined (sample_rate, samples) tuple twice: once for
    the output player and once for the State that carries it forward.
    """
    # Throttle how often the output re-renders.
    time.sleep(1)

    if audio is None:
        # No new chunk: leave the player untouched and keep the state.
        return gr.Audio(), instream

    combined = (
        audio
        if instream is None
        else (audio[0], np.concatenate((instream[1], audio[1])))
    )
    return combined, combined
14
+
15
with gr.Blocks() as demo:
    inp = gr.Audio(sources=["microphone"])
    out = gr.Audio()
    stream = gr.State()  # accumulated (sample_rate, samples) so far
    clear = gr.Button("Clear")

    # Each streamed chunk is folded into the running state and echoed back.
    inp.stream(add_to_stream, [inp, stream], [out, stream])
    # Reset input, output, and accumulated state in one click.
    clear.click(lambda: [None, None, None], None, [inp, out, stream])

if __name__ == "__main__":
    demo.launch()
demos/stream_frames/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ numpy
demos/stream_frames/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_frames"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "def flip(im):\n", " return np.flipud(im)\n", "\n", "demo = gr.Interface(\n", " flip,\n", " gr.Image(sources=[\"webcam\"], streaming=True),\n", " \"image\",\n", " live=True\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/stream_frames/run.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+
4
def flip(im):
    """Mirror the incoming frame vertically (top row becomes bottom)."""
    # Equivalent to np.flipud: reverse along the first (row) axis.
    return im[::-1]
6
+
7
# Live webcam demo: every streamed frame is flipped and shown immediately.
demo = gr.Interface(
    flip,
    gr.Image(sources=["webcam"], streaming=True),
    "image",
    live=True
)
if __name__ == "__main__":
    demo.launch()
demos/stt_or_tts/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stt_or_tts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "tts_examples = [\n", " \"I love learning machine learning\",\n", " \"How do you do?\",\n", "]\n", "\n", "tts_demo = gr.load(\n", " \"huggingface/facebook/fastspeech2-en-ljspeech\",\n", " title=None,\n", " examples=tts_examples,\n", " description=\"Give me something to say!\",\n", " cache_examples=False\n", ")\n", "\n", "stt_demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=None,\n", " inputs=gr.Microphone(type=\"filepath\"),\n", " description=\"Let me try to guess what you're saying!\",\n", " cache_examples=False\n", ")\n", "\n", "demo = gr.TabbedInterface([tts_demo, stt_demo], [\"Text-to-speech\", \"Speech-to-text\"])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/stt_or_tts/run.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
tts_examples = [
    "I love learning machine learning",
    "How do you do?",
]

# Text-to-speech interface loaded directly from a model on the HF Hub.
tts_demo = gr.load(
    "huggingface/facebook/fastspeech2-en-ljspeech",
    title=None,
    examples=tts_examples,
    description="Give me something to say!",
    cache_examples=False
)

# Speech-to-text interface; input is recorded from the microphone.
stt_demo = gr.load(
    "huggingface/facebook/wav2vec2-base-960h",
    title=None,
    inputs=gr.Microphone(type="filepath"),
    description="Let me try to guess what you're saying!",
    cache_examples=False
)

# Present both interfaces as tabs in a single app.
demo = gr.TabbedInterface([tts_demo, stt_demo], ["Text-to-speech", "Speech-to-text"])

if __name__ == "__main__":
    demo.launch()
demos/video_component/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: video_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "# get_video() returns the file path to sample videos included with Gradio\n", "from gradio.media import get_video\n", "\n", "demo = gr.Interface(\n", " fn=lambda x: x,\n", " inputs=gr.Video(),\n", " outputs=gr.Video(),\n", " examples=[\n", " [get_video(\"world.mp4\")],\n", " [get_video(\"a.mp4\")],\n", " [get_video(\"b.mp4\")],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/video_component/run.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
# get_video() returns the file path to sample videos included with Gradio
from gradio.media import get_video

# Identity pass-through: echoes the uploaded/selected video back out.
demo = gr.Interface(
    fn=lambda x: x,
    inputs=gr.Video(),
    outputs=gr.Video(),
    examples=[
        [get_video("world.mp4")],
        [get_video("a.mp4")],
        [get_video("b.mp4")],
    ],
    cache_examples=True  # example outputs are precomputed at startup
)

if __name__ == "__main__":
    demo.launch()
demos/zip_files/run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: zip_files"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from zipfile import ZipFile\n", "\n", "import gradio as gr\n", "\n", "def zip_files(files):\n", " with ZipFile(\"tmp.zip\", \"w\") as zip_obj:\n", " for file in files:\n", " zip_obj.write(file.name, file.name.split(\"/\")[-1])\n", " return \"tmp.zip\"\n", "\n", "demo = gr.Interface(\n", " zip_files,\n", " gr.File(file_count=\"multiple\", file_types=[\"text\", \".json\", \".csv\"]),\n", " \"file\",\n", " examples=[[[gr.get_file(\"titanic.csv\"),\n", " gr.get_file(\"titanic.csv\"),\n", " gr.get_file(\"titanic.csv\")]]],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/zip_files/run.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from zipfile import ZipFile
2
+
3
+ import gradio as gr
4
+
5
def zip_files(files):
    """Bundle the uploaded files into a single zip archive.

    Args:
        files: list of uploaded file objects; each exposes its on-disk
            path via the ``name`` attribute.

    Returns:
        Path of the archive ("tmp.zip" in the working directory).
    """
    import os  # local import keeps the demo script self-contained

    with ZipFile("tmp.zip", "w") as zip_obj:
        for file in files:
            # os.path.basename is platform-aware, unlike splitting on "/",
            # so Windows-style paths also archive under their bare name.
            zip_obj.write(file.name, os.path.basename(file.name))
    return "tmp.zip"
10
+
11
# Multi-file upload in, single zip archive out.
demo = gr.Interface(
    zip_files,
    gr.File(file_count="multiple", file_types=["text", ".json", ".csv"]),
    "file",
    # NOTE(review): other demos import media helpers from gradio.media;
    # confirm gr.get_file is exported at the gradio top level in the
    # pinned version.
    examples=[[[gr.get_file("titanic.csv"),
                gr.get_file("titanic.csv"),
                gr.get_file("titanic.csv")]]],
    cache_examples=True
)

if __name__ == "__main__":
    demo.launch()
demos/zip_files/screenshot.png ADDED
image.png ADDED
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio-client @ git+https://github.com/gradio-app/gradio@4d07126427e395fcc058b1a3a30113aac3ca036e#subdirectory=client/python
2
+ https://gradio-pypi-previews.s3.amazonaws.com/4d07126427e395fcc058b1a3a30113aac3ca036e/gradio-5.49.0-py3-none-any.whl
3
+ pypistats==1.1.0
4
+ plotly
5
+ matplotlib
6
+ altair
7
+ vega_datasets
run.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import importlib
import gradio as gr
import os
import sys
import copy
import pathlib
from gradio.media import MEDIA_ROOT

os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"

demo_dir = pathlib.Path(__file__).parent / "demos"

# One subdirectory per demo, each containing its own run.py.
names = sorted(os.listdir("./demos"))

all_demos = []
demo_module = None
for p in names:
    # Shallow copy is enough: sys.path entries are plain strings.
    old_path = copy.copy(sys.path)
    # Put this demo's directory first so its run.py (and any local
    # helpers such as constants.py) shadow previously loaded demos.
    sys.path = [os.path.join(demo_dir, p)] + sys.path
    try:  # Some demos may not be runnable because of 429 timeouts, etc.
        if demo_module is None:
            demo_module = importlib.import_module("run")
        else:
            # Every demo module is named "run"; reload swaps in the one
            # found on the freshly prepended path.
            demo_module = importlib.reload(demo_module)
        all_demos.append((p, demo_module.demo, False))  # type: ignore
    except Exception as e:
        # Surface the failure as a placeholder page instead of crashing.
        with gr.Blocks() as demo:
            gr.Markdown(f"Error loading demo: {e}")
        all_demos.append((p, demo, True))
    finally:
        # Restore the search path: previously old_path was computed but
        # never used, so sys.path grew by one stale entry per demo.
        sys.path = old_path

app = gr.Blocks()

with app:
    gr.Markdown("""
    # Deployed Demos
    ## Click through demos to test them out!
    """)

# Mount each demo as its own page of the multipage Blocks app.
for demo_name, demo, _ in all_demos:
    with app.route(demo_name):
        demo.render()

    # app = gr.mount_gradio_app(app, demo, f"/demo/{demo_name}")

if __name__ == "__main__":
    app.launch(allowed_paths=[str(MEDIA_ROOT)])