"""
Demonstrates integrating Rerun visualization with Gradio.
Provides example implementations of data streaming, keypoint annotation, and dynamic
visualization across multiple Gradio tabs using Rerun's recording and visualization capabilities.
"""
import math
import os
import tempfile
import time
import uuid
# `spaces` provides the ZeroGPU `@spaces.GPU` decorator used by `run_inference` below.
import spaces
import cv2
import gradio as gr
import rerun as rr
import rerun.blueprint as rrb
from gradio_rerun import Rerun
from gradio_rerun.events import (
SelectionChange,
TimelineChange,
TimeUpdate,
)


# Whenever we need a recording, we construct a new recording stream.
# As long as the app and recording IDs remain the same, the data
# will be merged by the Viewer.
def get_recording(recording_id: str) -> rr.RecordingStream:
    return rr.RecordingStream(application_id="rerun_example_gradio", recording_id=recording_id)


# A task can directly log to a binary stream, which is routed to the embedded viewer.
# Incremental chunks are yielded to the viewer using `yield stream.read()`.
#
# This is the preferred way to work with Rerun in Gradio since your data can be immediately and
# incrementally seen by the viewer. Also, there are no ephemeral RRDs to cleanup or manage.
def streaming_repeated_blur(recording_id: str, img):
# Here we get a recording using the provided recording id.
rec = get_recording(recording_id)
stream = rec.binary_stream()
if img is None:
raise gr.Error("Must provide an image to blur.")
blueprint = rrb.Blueprint(
rrb.Horizontal(
rrb.Spatial2DView(origin="image/original"),
rrb.Spatial2DView(origin="image/blurred"),
),
collapse_panels=True,
)
rec.send_blueprint(blueprint)
rec.set_time("iteration", sequence=0)
rec.log("image/original", rr.Image(img))
yield stream.read()
blur = img
for i in range(100):
rec.set_time("iteration", sequence=i)
# Pretend blurring takes a while so we can see streaming in action.
time.sleep(0.1)
blur = cv2.GaussianBlur(blur, (5, 5), 0)
rec.log("image/blurred", rr.Image(blur))
# Each time we yield bytes from the stream back to Gradio, they
# are incrementally sent to the viewer. Make sure to yield any time
# you want the user to be able to see progress.
yield stream.read()
# Ensure we consume everything from the recording.
stream.flush()
yield stream.read()
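

# Streaming is the preferred approach, but the viewer can also be fed a complete RRD file written to
# disk. A minimal sketch of that alternative follows; the helper name and the single blur pass are
# illustrative and not part of the demo above. It uses the `os` and `tempfile` imports from the top of
# the file, and the resulting file has to be cleaned up separately, which is exactly the bookkeeping
# the streaming approach avoids. The returned path can be handed to the viewer as an output value
# (see the wiring sketch inside the Blocks section below).
def blur_to_rrd_file(recording_id: str, img) -> str:
    if img is None:
        raise gr.Error("Must provide an image to blur.")
    rec = get_recording(recording_id)
    fd, path = tempfile.mkstemp(prefix="blur_", suffix=".rrd")
    os.close(fd)
    # Route everything logged on this stream into the file.
    rec.save(path)
    rec.set_time("iteration", sequence=0)
    rec.log("image/original", rr.Image(img))
    rec.log("image/blurred", rr.Image(cv2.GaussianBlur(img, (5, 5), 0)))
    rec.flush()
    return path
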
# In this example the user is able to add keypoints to an image visualized in Rerun.
# These keypoints are stored in global state; we use the session hash to keep track of which keypoints
# belong to a specific session (https://www.gradio.app/guides/state-in-blocks).
#
# The current session can be obtained by adding a parameter of type `gradio.Request` to your event listener functions.
Keypoint = tuple[float, float]
keypoints_per_session_per_sequence_index: dict[str, dict[int, list[Keypoint]]] = {}
def get_keypoints_for_user_at_sequence_index(request: gr.Request, sequence: int) -> list[Keypoint]:
per_sequence = keypoints_per_session_per_sequence_index[request.session_hash]
if sequence not in per_sequence:
per_sequence[sequence] = []
    return per_sequence[sequence]


def initialize_instance(request: gr.Request) -> None:
    keypoints_per_session_per_sequence_index[request.session_hash] = {}


def cleanup_instance(request: gr.Request) -> None:
    if request.session_hash in keypoints_per_session_per_sequence_index:
        del keypoints_per_session_per_sequence_index[request.session_hash]


# In this function, the `request` and `evt` parameters will be automatically injected by Gradio when this
# event listener is fired.
# event listener is fired.
#
# `SelectionChange` is a subclass of `EventData`: https://www.gradio.app/docs/gradio/eventdata
# `gr.Request`: https://www.gradio.app/main/docs/gradio/request
def register_keypoint(
active_recording_id: str,
current_timeline: str,
current_time: float,
request: gr.Request,
evt: SelectionChange,
):
if active_recording_id == "":
return
if current_timeline != "iteration":
return
# We can only log a keypoint if the user selected only a single item.
if len(evt.items) != 1:
return
item = evt.items[0]
# If the selected item isn't an entity, or we don't have its position, then bail out.
if item.kind != "entity" or item.position is None:
return
# Now we can produce a valid keypoint.
rec = get_recording(active_recording_id)
stream = rec.binary_stream()
    # We round `current_time` down, because that gives us the sequence index
    # that the user is currently looking at, due to the Viewer's latest-at semantics.
    index = math.floor(current_time)
# We keep track of the keypoints per sequence index for each user manually.
keypoints = get_keypoints_for_user_at_sequence_index(request, index)
keypoints.append(item.position[0:2])
rec.set_time("iteration", sequence=index)
rec.log(f"{item.entity_path}/keypoint", rr.Points2D(keypoints, radii=2))
# Ensure we consume everything from the recording.
stream.flush()
    yield stream.read()


def track_current_time(evt: TimeUpdate):
    return evt.time


def track_current_timeline_and_time(evt: TimelineChange):
    return evt.timeline, evt.time


# Placeholder for a ZeroGPU-accelerated step: the `@spaces.GPU` decorator requests a GPU for the
# duration of the call. Inference itself is not implemented in this demo.
@spaces.GPU
def run_inference(img):
    print("running inference")


with gr.Blocks() as demo:
with gr.Row():
img = gr.Image(interactive=True, label="Image")
with gr.Column():
stream_blur = gr.Button("Stream Repeated Blur")
with gr.Row():
viewer = Rerun(
streaming=True,
panel_states={
"time": "collapsed",
"blueprint": "hidden",
"selection": "hidden",
},
)
    # We make a new recording id and store it in Gradio's session state.
recording_id = gr.State(uuid.uuid4())
# Also store the current timeline and time of the viewer in the session state.
current_timeline = gr.State("")
current_time = gr.State(0.0)
# When registering the event listeners, we pass the `recording_id` in as input in order to create
# a recording stream using that id.
stream_blur.click(
# Using the `viewer` as an output allows us to stream data to it by yielding bytes from the callback.
streaming_repeated_blur,
inputs=[recording_id, img],
outputs=[viewer],
)
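    # A sketch of wiring for the file-based variant sketched above (`blur_to_rrd_file`); the button
    # label is illustrative. The `Rerun` component accepts a path to an .rrd file as its value, so the
    # callback can simply return the path instead of yielding stream bytes.
    blur_to_file = gr.Button("Blur to RRD File")
    blur_to_file.click(
        blur_to_rrd_file,
        inputs=[recording_id, img],
        outputs=[viewer],
    )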
viewer.selection_change(
register_keypoint,
inputs=[recording_id, current_timeline, current_time],
outputs=[viewer],
)
viewer.time_update(track_current_time, outputs=[current_time])
viewer.timeline_change(track_current_timeline_and_time, outputs=[current_timeline, current_time])
    # Register the per-session keypoint state: `initialize_instance` runs when a session starts and
    # `cleanup_instance` when the user closes the tab, so `register_keypoint` always finds its dict.
    demo.load(initialize_instance)
    demo.unload(cleanup_instance)
if __name__ == "__main__":
    demo.launch(ssr_mode=False, share=True)