elismasilva committed on
Commit df2fd36 · verified · 1 parent: 3907f66

Upload folder using huggingface_hub

README.md CHANGED
@@ -10,23 +10,33 @@ app_file: space.py
10
  ---
11
 
12
  # `gradio_livelog`
13
- <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20blue"> <a href="https://huggingface.co/spaces/elismasilva/gradio_livelog"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Demo-blue"></a><p><span>💻 <a href='https://github.com/DEVAIEXP/gradio_component_livelog'>Component GitHub Code</a></span></p>
14
 
15
  A Live Log Component for Gradio Interface
16
 
17
  ## Key Features
18
 
19
- `LiveLog` provides a rich, terminal-like experience directly in your Gradio UI.
20
 
21
- - **Real-time Log Streaming:** Display log messages as they are generated in your Python backend, perfect for monitoring long-running tasks.
22
- - **Integrated Progress Bar:** Track the progress of loops and processes with a familiar, `tqdm`-style progress bar.
23
- - Includes iterations per second (it/s) calculation.
24
- - The bar automatically changes color to green on success or red on error for clear visual feedback.
25
- - **Automatic `logging` Capture:** Effortlessly capture all logs generated by Python's standard `logging` module (`logging.info`, `logging.error`, etc.) using the `capture_logs` context manager.
26
- - **Flexible Display Modes:** Choose to display the full component (logs + progress bar), logs only, or the progress bar only to fit different UI layouts.
27
- - **Highly Customizable:** Tailor the component's appearance with properties for background color, line numbers, and autoscrolling.
28
- - **Utility Controls:** Comes with built-in buttons to clear the log, copy all content to the clipboard, and download logs as a text file.
29
- - **Console Silencing:** Optionally suppress log output in your Python console to keep it clean while still displaying everything in the UI.
30
 
31
  ## Installation
32
 
@@ -39,9 +49,9 @@ pip install gradio_livelog
39
  ```python
40
  # demo/app.py
41
 
 
42
  import gradio as gr
43
  import torch
44
- import time
45
  import logging
46
  import random
47
  import numpy as np
@@ -53,24 +63,28 @@ import spaces
53
 
54
  # Import the component and ALL its utilities
55
  from gradio_livelog import LiveLog
56
- from gradio_livelog.utils import ProgressTracker, capture_logs
57
 
58
  # --- 1. SETUP ---
59
  MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
60
  MAX_SEED = np.iinfo(np.int32).max
61
 
62
  # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
63
- logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
64
-
65
- async def run_process(disable_console: bool, run_error_case: bool):
66
- with capture_logs(disable_console=disable_console) as get_logs:
67
  total_steps = 100
68
- tracker = ProgressTracker(total=total_steps, description="Simulating a process...")
69
  all_logs = []
70
  last_log_content = None
71
 
72
  initial_log = f"Starting simulated process with {total_steps} steps..."
73
- logging.info(initial_log)
74
  logs = [
75
  {
76
  "type": "log",
@@ -88,14 +102,14 @@ async def run_process(disable_console: bool, run_error_case: bool):
88
  current_step = i + 1
89
 
90
  if current_step == 10:
91
- logging.warning(f"Low disk space warning at step {current_step}.")
92
  elif current_step == 30:
93
- logging.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
94
  elif current_step == 75:
95
- logging.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
96
 
97
  if run_error_case and current_step == 50:
98
- logging.error("A fatal simulation error occurred! Aborting.")
99
  logs = [
100
  {
101
  "type": "log",
@@ -123,7 +137,7 @@ async def run_process(disable_console: bool, run_error_case: bool):
123
  yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
124
 
125
  final_log = "Process completed successfully!"
126
- logging.log(logging.INFO + 5, final_log)
127
  logs = [
128
  {
129
  "type": "log",
@@ -142,47 +156,60 @@ def update_livelog_properties(mode, color, lines, scroll):
142
  def clear_output():
143
  return None
144
 
145
- async def run_success_case(disable_console: bool):
146
  yield None
147
- async for update in run_process(disable_console=disable_console, run_error_case=False):
148
  yield update
149
 
150
- async def run_error_case(disable_console: bool):
151
  yield None
152
- async for update in run_process(disable_console=disable_console, run_error_case=True):
153
  yield update
154
 
155
 
156
  # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
157
  diffusion_pipeline = None
 
158
  def load_pipeline(on_load=True):
159
  """A function to load the model, ensuring it's only done once."""
160
  global diffusion_pipeline
161
- if diffusion_pipeline is None:
162
- print("Loading Stable Diffusion model for the first time...")
163
- pipe = StableDiffusionXLPipeline.from_pretrained(
164
- MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
165
- )
166
- pipe.enable_vae_tiling()
167
- pipe.enable_model_cpu_offload()
168
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
169
- pipe.set_progress_bar_config(disable=True)
170
- diffusion_pipeline = pipe
171
- print("Model loaded successfully.")
172
-
173
  if not on_load:
174
  return diffusion_pipeline
175
 
176
  @spaces.GPU(duration=60, enable_queue=True)
177
- def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
178
  """
179
  This function now uses capture_logs to listen to internal diffusers logs
180
  while retaining the superior data structure you designed.
181
  """
182
  tracker = None
183
- with capture_logs() as get_logs:
184
- try:
185
  pipe = load_pipeline(on_load=False)
186
  seed = random.randint(0, MAX_SEED)
187
  generator = torch.Generator(device="cuda").manual_seed(seed)
188
  prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
@@ -190,67 +217,110 @@ def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
190
  num_inference_steps = 10
191
 
192
  all_logs = []
193
- last_log_content = None
194
 
195
  # Helper function to process and store new logs
196
- def process_and_store_logs():
197
- nonlocal all_logs, last_log_content
198
  new_records = get_logs()
199
  if new_records:
200
- new_logs = [{"type": "log", "level": r.levelname, "content": r.getMessage()} for r in new_records]
201
  all_logs.extend(new_logs)
202
- last_log_content = all_logs[-1]["content"]
203
-
204
- logging.info(f"Using seed: {seed}")
205
- process_and_store_logs()
206
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
207
-
208
- logging.info("Starting diffusion process...")
209
- process_and_store_logs()
210
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
211
-
212
- tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps")
213
 
214
  def progress_callback(pipe_instance, step, timestep, callback_kwargs):
215
- process_and_store_logs() # Check for new logs from diffusers at each step
216
- update_dict = tracker.update(logs=all_logs)
217
- update_queue.put((None, update_dict))
218
  return callback_kwargs
219
-
220
  images = pipe(
221
  prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
222
  guidance_scale=3.0, num_inference_steps=num_inference_steps,
223
  generator=generator, callback_on_step_end=progress_callback
224
  ).images
225
 
226
- logging.log(logging.INFO + 5, "Image generated successfully!")
227
- process_and_store_logs()
228
-
229
- final_update = tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
230
- update_queue.put((images, final_update))
231
 
232
  except Exception as e:
233
- logging.error(f"Error in diffusion thread: {e}", exc_info=True)
234
- process_and_store_logs() # Capture the final error log
235
- if tracker:
236
- error_update = tracker.update(advance=0, status="error", logs=all_logs, log_content=f"An error occurred: {e}")
237
- update_queue.put((None, error_update))
238
  finally:
239
  update_queue.put(None)
240
 
 
241
  @spaces.GPU(duration=60, enable_queue=True)
242
  def generate(prompt):
243
  """This function starts the worker thread and yields updates from the queue."""
244
- yield None, None
245
- yield None, {"type": "log", "level": "INFO", "content": "Preparing generation..."}
246
  update_queue = queue.Queue()
247
- diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, update_queue))
248
  diffusion_thread.start()
 
249
  while True:
250
  update = update_queue.get()
251
- if update is None: break
252
- yield update
253
-
254
 
255
  # --- 4. THE COMBINED GRADIO UI with TABS ---
256
  with gr.Blocks(theme=gr.themes.Ocean()) as demo:
@@ -269,17 +339,18 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
269
  with gr.Group():
270
  gr.Markdown("### Component Properties")
271
  display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
 
272
  bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
273
  line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
274
  autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
 
275
  with gr.Group():
276
  gr.Markdown("### Simulation Controls")
277
- disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
278
  start_btn = gr.Button("Run Success Case", variant="primary")
279
  error_btn = gr.Button("Run Error Case")
280
 
281
- start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox], outputs=feature_logger)
282
- error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox], outputs=feature_logger)
283
  feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
284
  controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
285
  for control in controls:
@@ -306,7 +377,7 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
306
  label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
307
  )
308
 
309
- run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
310
  prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
311
  livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
312
 
 
10
  ---
11
 
12
  # `gradio_livelog`
13
+ <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.3%20-%20blue"> <a href="https://huggingface.co/spaces/elismasilva/gradio_livelog"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Demo-blue"></a><p><span>💻 <a href='https://github.com/DEVAIEXP/gradio_component_livelog'>Component GitHub Code</a></span></p>
14
 
15
  A Live Log Component for Gradio Interface
16
 
17
  ## Key Features
18
 
19
+ `LiveLog` elevates Gradio applications by providing a powerful, terminal-like monitoring experience directly in your UI. It's designed for both simple progress tracking and complex pipeline introspection; a minimal usage sketch follows the feature list below.
20
 
21
+ - **Dual-Mode Progress Tracking:** A sophisticated progress bar that operates in two modes for maximum accuracy:
22
+ - **Internal Rate Calculation:** For simple loops, it features a built-in, `tqdm`-style progress calculator with **Exponential Moving Average (EMA)** smoothing for a stable and realistic `it/s` or `s/it` display.
23
+ - **External `tqdm` Capture:** For deep integration, it can **directly capture and display the *exact* rate** from an existing `tqdm` instance running inside a backend library (like `diffusers`). This eliminates measurement overhead and provides a perfectly synchronized view of your pipeline's true performance.
24
+
25
+ - **Rich, Real-time Log Streaming:** Display log messages as they are generated.
26
+ - Supports standard Python log levels (`INFO`, `WARNING`, `ERROR`, etc.) with corresponding colors.
27
+ - Includes support for **custom log levels** (like "SUCCESS") for enhanced visual feedback.
28
+
29
+ - **Advanced Multi-Logger Capture:** The `capture_logs` utility is designed for complex applications.
30
+ - Effortlessly capture logs from **multiple, independent Python loggers** simultaneously (e.g., your app's logger and a library's internal logger).
31
+ - Correctly handles logger hierarchies and propagation, making it robust for any logging setup.
32
+
33
+ - **Flexible Display & Layout Control:** Adapt the component to any UI layout.
34
+ - **Three Display Modes:** Show the full component (`logs + progress`), `logs only`, or `progress bar only`.
35
+ - Highly customizable appearance with properties for `height`, `background_color`, `line_numbers`, and `autoscrolling`.
36
+
37
+ - **Comprehensive Utility Controls:**
38
+ - Built-in header buttons to **Clear**, **Copy**, and **Download** log content.
39
+ - Optionally suppress log output in your Python console to keep it clean while still displaying everything in the UI.
40
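+ A minimal sketch of the core pattern, condensed from the full demo under Usage below (the `capture_logs`, `ProgressTracker`, and `LiveLog` arguments mirror the ones used there):
+
+ ```python
+ import asyncio
+ import logging
+
+ import gradio as gr
+ from gradio_livelog import LiveLog
+ from gradio_livelog.utils import ProgressTracker, capture_logs
+
+ logger = logging.getLogger("logging_app")
+ logger.setLevel(logging.INFO)
+
+ async def task():
+     # Stream records from our named logger into the component while tracking progress.
+     with capture_logs(log_level=logging.INFO, log_name=["logging_app"]) as get_logs:
+         tracker = ProgressTracker(total=20, description="Working...", rate_unit="it/s")
+         all_logs = []
+         for i in range(20):
+             await asyncio.sleep(0.1)
+             logger.info(f"Finished step {i + 1}")
+             all_logs.extend(
+                 {"type": "log", "level": r.levelname, "content": r.getMessage()}
+                 for r in get_logs()
+             )
+             yield tracker.update(advance=1, status="running", logs=all_logs)
+         yield tracker.update(advance=0, status="success", logs=all_logs)
+
+ with gr.Blocks() as demo:
+     log_view = LiveLog(display_mode="full", height=300)
+     gr.Button("Run").click(fn=task, outputs=log_view)
+
+ demo.launch()
+ ```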
 
41
  ## Installation
42
 
 
49
  ```python
50
  # demo/app.py
51
 
52
+ import sys
53
  import gradio as gr
54
  import torch
 
55
  import logging
56
  import random
57
  import numpy as np
 
63
 
64
  # Import the component and ALL its utilities
65
  from gradio_livelog import LiveLog
66
+ from gradio_livelog.utils import ProgressTracker, Tee, TqdmToQueueWriter, capture_logs
67
 
68
  # --- 1. SETUP ---
69
  MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
70
  MAX_SEED = np.iinfo(np.int32).max
71
 
72
  # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
73
+ app_logger = logging.getLogger("logging_app")
74
+ app_logger.setLevel(logging.INFO)
75
+ console_handler = logging.StreamHandler()
76
+ console_handler.flush = sys.stderr.flush
77
+ app_logger.addHandler(console_handler)
78
+
79
+ async def run_process(disable_console: bool, rate_unit: str, run_error_case: bool):
80
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # log_name accepts a list of logger names to watch; if omitted, the default logger is captured.
81
  total_steps = 100
82
+ tracker = ProgressTracker(total=total_steps, description="Simulating a process...", rate_unit=rate_unit)
83
  all_logs = []
84
  last_log_content = None
85
 
86
  initial_log = f"Starting simulated process with {total_steps} steps..."
87
+ app_logger.info(initial_log)
88
  logs = [
89
  {
90
  "type": "log",
 
102
  current_step = i + 1
103
 
104
  if current_step == 10:
105
+ app_logger.warning(f"Low disk space warning at step {current_step}.")
106
  elif current_step == 30:
107
+ app_logger.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
108
  elif current_step == 75:
109
+ app_logger.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
110
 
111
  if run_error_case and current_step == 50:
112
+ app_logger.error("A fatal simulation error occurred! Aborting.")
113
  logs = [
114
  {
115
  "type": "log",
 
137
  yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
138
 
139
  final_log = "Process completed successfully!"
140
+ app_logger.log(logging.INFO + 5, final_log)
141
  logs = [
142
  {
143
  "type": "log",
 
156
  def clear_output():
157
  return None
158
 
159
+ async def run_success_case(disable_console: bool, rate_unit: str):
160
  yield None
161
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=False):
162
  yield update
163
 
164
+ async def run_error_case(disable_console: bool, rate_unit: str):
165
  yield None
166
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=True):
167
  yield update
168
 
169
 
170
  # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
171
  diffusion_pipeline = None
172
+ pipeline_lock = threading.Lock()
173
  def load_pipeline(on_load=True):
174
  """A function to load the model, ensuring it's only done once."""
175
  global diffusion_pipeline
176
+ with pipeline_lock:
177
+ if diffusion_pipeline is None:
178
+ print("Loading Stable Diffusion model for the first time...")
179
+ pipe = StableDiffusionXLPipeline.from_pretrained(
180
+ MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
181
+ )
182
+ pipe.enable_vae_tiling()
183
+ pipe.enable_model_cpu_offload()
184
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
185
+ diffusion_pipeline = pipe
186
+ print("Model loaded successfully!")
187
+
188
  if not on_load:
189
  return diffusion_pipeline
190
 
191
  @spaces.GPU(duration=60, enable_queue=True)
192
+ def run_diffusion_in_thread(prompt: str, disable_console: bool, update_queue: queue.Queue):
193
  """
194
  This function uses capture_logs to collect log records emitted during the run
195
  while keeping the structured update format that LiveLog expects.
196
  """
197
  tracker = None
198
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # log_name accepts a list of logger names to watch; if omitted, the default logger is captured.
199
+ try:
200
  pipe = load_pipeline(on_load=False)
201
+
202
+ # We capture the rate (s/it) reported by the pipeline's own tqdm bar instead of measuring it ourselves
203
+ rate_queue = queue.Queue()
204
+ tqdm_writer = TqdmToQueueWriter(rate_queue)
205
+
206
+ progress_bar_handler = Tee(sys.stderr, tqdm_writer)
207
+ pipe.set_progress_bar_config(file=progress_bar_handler,  # if you don't need tqdm output in the console, set file=tqdm_writer instead
208
+ disable=False,
209
+ ncols=100,
210
+ dynamic_ncols=True,
211
+ ascii=" █")
212
+
213
  seed = random.randint(0, MAX_SEED)
214
  generator = torch.Generator(device="cuda").manual_seed(seed)
215
  prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
 
217
  num_inference_steps = 10
218
 
219
  all_logs = []
220
+ last_known_rate_data = None
221
 
222
  # Helper function to process and store new logs
223
+ def process_and_send_updates(status="running", advance=0, final_image_payload=None):
224
+ """
225
+ This is the core callback function. It captures new logs, formats them,
226
+ and sends a complete update object (logs + progress) to the UI queue.
227
+ Call it after each new log record so the UI stays in sync.
228
+ """
229
+ nonlocal all_logs, last_known_rate_data
230
+ new_rate_data = None
231
+ while not rate_queue.empty():
232
+ try:
233
+ new_rate_data = rate_queue.get_nowait()
234
+ except queue.Empty:
235
+ break
236
+
237
+ if new_rate_data is not None:
238
+ last_known_rate_data = new_rate_data
239
+
240
  new_records = get_logs()
241
  if new_records:
242
+ new_logs = [{
243
+ "type": "log",
244
+ "level": "SUCCESS" if r.levelno == logging.INFO + 5 else r.levelname,
245
+ "content": r.getMessage()
246
+ } for r in new_records]
247
  all_logs.extend(new_logs)
248
+
249
+ # Use the tracker to generate the progress update dictionary if it exists.
250
+ # If not, create a preliminary update dictionary.
251
+ update_dict = {}
252
+
253
+ if tracker:
254
+ update_dict = tracker.update(
255
+ advance=advance,
256
+ status=status,
257
+ logs=all_logs,
258
+ rate_data=last_known_rate_data
259
+ )
260
+ else:
261
+ # Initial state before the tracker is created.
262
+ update_dict = {
263
+ "type": "progress",
264
+ "logs": all_logs,
265
+ "current": 0,
266
+ "total": num_inference_steps,
267
+ "desc": "Diffusion Steps" # Description is sent once
268
+ }
269
+
270
+ # Put the update on the queue. The image payload is usually None
271
+ # until the very end.
272
+ update_queue.put((final_image_payload, update_dict))
273
+
274
+ app_logger.info(f"Using seed: {seed}")
275
+ process_and_send_updates()
276
+
277
+ app_logger.info("Starting diffusion process...")
278
+ process_and_send_updates()
279
+
280
+ tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps", rate_unit='it/s')
281
 
282
  def progress_callback(pipe_instance, step, timestep, callback_kwargs):
283
+ process_and_send_updates(advance=1)
284
  return callback_kwargs
285
+
286
  images = pipe(
287
  prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
288
  guidance_scale=3.0, num_inference_steps=num_inference_steps,
289
  generator=generator, callback_on_step_end=progress_callback
290
  ).images
291
 
292
+ app_logger.log(logging.INFO + 5, "Image generated successfully!")
293
+ process_and_send_updates(status="success", final_image_payload=images)
294
+
 
 
295
 
296
  except Exception as e:
297
+ app_logger.error(f"Error in diffusion thread: {e}, process aborted!", exc_info=True)
298
+ process_and_send_updates(status="error")
299
  finally:
300
  update_queue.put(None)
301
 
302
+
303
  @spaces.GPU(duration=60, enable_queue=True)
304
  def generate(prompt):
305
  """This function starts the worker thread and yields updates from the queue."""
306
+ yield None, None, gr.update(interactive=False)
 
307
  update_queue = queue.Queue()
308
+ diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, False, update_queue))
309
  diffusion_thread.start()
310
+ final_images = None
+ log_update = None  # ensure it's defined even if the worker exits before sending an update
311
  while True:
312
  update = update_queue.get()
313
+ if update is None:
314
+ break
315
+
316
+ images, log_update = update
317
+
318
+ if images:
319
+ final_images = images
320
+
321
+ yield final_images, log_update, gr.skip()
322
+
323
+ yield final_images, log_update, gr.update(interactive=True)
324
 
325
  # --- 4. THE COMBINED GRADIO UI with TABS ---
326
  with gr.Blocks(theme=gr.themes.Ocean()) as demo:
 
339
  with gr.Group():
340
  gr.Markdown("### Component Properties")
341
  display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
342
+ rate_unit = gr.Radio(["it/s","s/it"], label="Progress rate unit", value="it/s")
343
  bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
344
  line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
345
  autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
346
+ disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
347
  with gr.Group():
348
  gr.Markdown("### Simulation Controls")
 
349
  start_btn = gr.Button("Run Success Case", variant="primary")
350
  error_btn = gr.Button("Run Error Case")
351
 
352
+ start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
353
+ error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
354
  feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
355
  controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
356
  for control in controls:
 
377
  label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
378
  )
379
 
380
+ run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
381
  prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
382
  livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
383
 
app.py CHANGED
@@ -1,278 +1,339 @@
1
- # demo/app.py
2
-
3
- import gradio as gr
4
- import torch
5
- import time
6
- import logging
7
- import random
8
- import numpy as np
9
- from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
10
- import threading
11
- import queue
12
- import asyncio
13
- import spaces
14
-
15
- # Import the component and ALL its utilities
16
- from gradio_livelog import LiveLog
17
- from gradio_livelog.utils import ProgressTracker, capture_logs
18
-
19
- # --- 1. SETUP ---
20
- MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
21
- MAX_SEED = np.iinfo(np.int32).max
22
-
23
- # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
24
- logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
25
-
26
- async def run_process(disable_console: bool, run_error_case: bool):
27
- with capture_logs(disable_console=disable_console) as get_logs:
28
- total_steps = 100
29
- tracker = ProgressTracker(total=total_steps, description="Simulating a process...")
30
- all_logs = []
31
- last_log_content = None
32
-
33
- initial_log = f"Starting simulated process with {total_steps} steps..."
34
- logging.info(initial_log)
35
- logs = [
36
- {
37
- "type": "log",
38
- "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
39
- "content": record.getMessage()
40
- }
41
- for record in get_logs()
42
- ]
43
- all_logs.extend(logs)
44
- last_log_content = logs[-1]["content"] if logs else None
45
- yield tracker.update(advance=0, status="running", logs=all_logs, log_content=None)
46
-
47
- for i in range(total_steps):
48
- await asyncio.sleep(0.03)
49
- current_step = i + 1
50
-
51
- if current_step == 10:
52
- logging.warning(f"Low disk space warning at step {current_step}.")
53
- elif current_step == 30:
54
- logging.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
55
- elif current_step == 75:
56
- logging.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
57
-
58
- if run_error_case and current_step == 50:
59
- logging.error("A fatal simulation error occurred! Aborting.")
60
- logs = [
61
- {
62
- "type": "log",
63
- "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
64
- "content": record.getMessage()
65
- }
66
- for record in get_logs()
67
- ]
68
- all_logs.extend(logs)
69
- last_log_content = logs[-1]["content"] if logs else last_log_content
70
- yield tracker.update(advance=0, status="error", logs=all_logs, log_content=last_log_content)
71
- return
72
-
73
- logs = [
74
- {
75
- "type": "log",
76
- "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
77
- "content": record.getMessage()
78
- }
79
- for record in get_logs()
80
- ]
81
- all_logs.extend(logs)
82
- if logs:
83
- last_log_content = logs[-1]["content"]
84
- yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
85
-
86
- final_log = "Process completed successfully!"
87
- logging.log(logging.INFO + 5, final_log)
88
- logs = [
89
- {
90
- "type": "log",
91
- "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
92
- "content": record.getMessage()
93
- }
94
- for record in get_logs()
95
- ]
96
- all_logs.extend(logs)
97
- last_log_content = logs[-1]["content"] if logs else last_log_content
98
- yield tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
99
-
100
- def update_livelog_properties(mode, color, lines, scroll):
101
- return gr.update(display_mode=mode, background_color=color, line_numbers=lines, autoscroll=scroll)
102
-
103
- def clear_output():
104
- return None
105
-
106
- async def run_success_case(disable_console: bool):
107
- yield None
108
- async for update in run_process(disable_console=disable_console, run_error_case=False):
109
- yield update
110
-
111
- async def run_error_case(disable_console: bool):
112
- yield None
113
- async for update in run_process(disable_console=disable_console, run_error_case=True):
114
- yield update
115
-
116
-
117
- # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
118
- diffusion_pipeline = None
119
- def load_pipeline(on_load=True):
120
- """A function to load the model, ensuring it's only done once."""
121
- global diffusion_pipeline
122
- if diffusion_pipeline is None:
123
- print("Loading Stable Diffusion model for the first time...")
124
- pipe = StableDiffusionXLPipeline.from_pretrained(
125
- MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
126
- )
127
- pipe.enable_vae_tiling()
128
- pipe.enable_model_cpu_offload()
129
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
130
- pipe.set_progress_bar_config(disable=True)
131
- diffusion_pipeline = pipe
132
- print("Model loaded successfully.")
133
-
134
- if not on_load:
135
- return diffusion_pipeline
136
-
137
- @spaces.GPU(duration=60, enable_queue=True)
138
- def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
139
- """
140
- This function now uses capture_logs to listen to internal diffusers logs
141
- while retaining the superior data structure you designed.
142
- """
143
- tracker = None
144
- with capture_logs() as get_logs:
145
- try:
146
- pipe = load_pipeline(on_load=False)
147
- seed = random.randint(0, MAX_SEED)
148
- generator = torch.Generator(device="cuda").manual_seed(seed)
149
- prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
150
- negative_prompt_style = "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly"
151
- num_inference_steps = 10
152
-
153
- all_logs = []
154
- last_log_content = None
155
-
156
- # Helper function to process and store new logs
157
- def process_and_store_logs():
158
- nonlocal all_logs, last_log_content
159
- new_records = get_logs()
160
- if new_records:
161
- new_logs = [{"type": "log", "level": r.levelname, "content": r.getMessage()} for r in new_records]
162
- all_logs.extend(new_logs)
163
- last_log_content = all_logs[-1]["content"]
164
-
165
- logging.info(f"Using seed: {seed}")
166
- process_and_store_logs()
167
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
168
-
169
- logging.info("Starting diffusion process...")
170
- process_and_store_logs()
171
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
172
-
173
- tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps")
174
-
175
- def progress_callback(pipe_instance, step, timestep, callback_kwargs):
176
- process_and_store_logs() # Check for new logs from diffusers at each step
177
- update_dict = tracker.update(logs=all_logs)
178
- update_queue.put((None, update_dict))
179
- return callback_kwargs
180
-
181
- images = pipe(
182
- prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
183
- guidance_scale=3.0, num_inference_steps=num_inference_steps,
184
- generator=generator, callback_on_step_end=progress_callback
185
- ).images
186
-
187
- logging.log(logging.INFO + 5, "Image generated successfully!")
188
- process_and_store_logs()
189
-
190
- final_update = tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
191
- update_queue.put((images, final_update))
192
-
193
- except Exception as e:
194
- logging.error(f"Error in diffusion thread: {e}", exc_info=True)
195
- process_and_store_logs() # Capture the final error log
196
- if tracker:
197
- error_update = tracker.update(advance=0, status="error", logs=all_logs, log_content=f"An error occurred: {e}")
198
- update_queue.put((None, error_update))
199
- finally:
200
- update_queue.put(None)
201
-
202
- @spaces.GPU(duration=60, enable_queue=True)
203
- def generate(prompt):
204
- """This function starts the worker thread and yields updates from the queue."""
205
- yield None, None
206
- yield None, {"type": "log", "level": "INFO", "content": "Preparing generation..."}
207
- update_queue = queue.Queue()
208
- diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, update_queue))
209
- diffusion_thread.start()
210
- while True:
211
- update = update_queue.get()
212
- if update is None: break
213
- yield update
214
-
215
-
216
- # --- 4. THE COMBINED GRADIO UI with TABS ---
217
- with gr.Blocks(theme=gr.themes.Ocean()) as demo:
218
- gr.HTML("<h1><center>LiveLog Component Showcase</center></h1>")
219
-
220
- with gr.Tabs():
221
- with gr.TabItem("LiveLog Feature Demo"):
222
- gr.Markdown("### Test all features of the LiveLog component interactively.")
223
- with gr.Row():
224
- with gr.Column(scale=3):
225
- feature_logger = LiveLog(
226
- label="Process Output", line_numbers=True, height=550,
227
- background_color="#000000", display_mode="full"
228
- )
229
- with gr.Column(scale=1):
230
- with gr.Group():
231
- gr.Markdown("### Component Properties")
232
- display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
233
- bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
234
- line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
235
- autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
236
- with gr.Group():
237
- gr.Markdown("### Simulation Controls")
238
- disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
239
- start_btn = gr.Button("Run Success Case", variant="primary")
240
- error_btn = gr.Button("Run Error Case")
241
-
242
- start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox], outputs=feature_logger)
243
- error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox], outputs=feature_logger)
244
- feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
245
- controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
246
- for control in controls:
247
- control.change(fn=update_livelog_properties, inputs=controls, outputs=feature_logger)
248
-
249
- with gr.TabItem("Diffusion Pipeline Integration"):
250
- gr.Markdown("### Use `LiveLog` to monitor a real image generation process.")
251
- with gr.Row():
252
- with gr.Column(scale=3):
253
- with gr.Group():
254
- prompt = gr.Textbox(
255
- label="Enter your prompt", show_label=False,
256
- placeholder="A cinematic photo of a robot in a floral garden...",
257
- scale=8, container=False
258
- )
259
- run_button = gr.Button("Generate", scale=1, variant="primary")
260
-
261
- livelog_viewer = LiveLog(
262
- label="Process Monitor", height=250, display_mode="full", line_numbers=False
263
- )
264
-
265
- with gr.Column(scale=2):
266
- result_gallery = gr.Gallery(
267
- label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
268
- )
269
-
270
- run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
271
- prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
272
- livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
273
-
274
- # This ensures the model is downloaded/loaded once when the app starts.
275
- demo.load(load_pipeline, None, None)
276
-
277
- if __name__ == "__main__":
278
  demo.queue(max_size=50).launch(debug=True)
 
1
+ # demo/app.py
2
+
3
+ import sys
4
+ import gradio as gr
5
+ import torch
6
+ import logging
7
+ import random
8
+ import numpy as np
9
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
10
+ import threading
11
+ import queue
12
+ import asyncio
13
+ import spaces
14
+
15
+ # Import the component and ALL its utilities
16
+ from gradio_livelog import LiveLog
17
+ from gradio_livelog.utils import ProgressTracker, Tee, TqdmToQueueWriter, capture_logs
18
+
19
+ # --- 1. SETUP ---
20
+ MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
21
+ MAX_SEED = np.iinfo(np.int32).max
22
+
23
+ # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
24
+ app_logger = logging.getLogger("logging_app")
25
+ app_logger.setLevel(logging.INFO)
26
+ console_handler = logging.StreamHandler()
27
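+ # Borrow sys.stderr's flush so console log lines appear immediately, in step with tqdm output (which also writes to stderr).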
+ console_handler.flush = sys.stderr.flush
28
+ app_logger.addHandler(console_handler)
29
+
30
+ async def run_process(disable_console: bool, rate_unit: str, run_error_case: bool):
31
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # log_name accepts a list of logger names to watch; if omitted, the default logger is captured.
32
+ total_steps = 100
33
+ tracker = ProgressTracker(total=total_steps, description="Simulating a process...", rate_unit=rate_unit)
34
+ all_logs = []
35
+ last_log_content = None
36
+
37
+ initial_log = f"Starting simulated process with {total_steps} steps..."
38
+ app_logger.info(initial_log)
39
+ logs = [
40
+ {
41
+ "type": "log",
42
+ "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
43
+ "content": record.getMessage()
44
+ }
45
+ for record in get_logs()
46
+ ]
47
+ all_logs.extend(logs)
48
+ last_log_content = logs[-1]["content"] if logs else None
49
+ yield tracker.update(advance=0, status="running", logs=all_logs, log_content=None)
50
+
51
+ for i in range(total_steps):
52
+ await asyncio.sleep(0.03)
53
+ current_step = i + 1
54
+
55
+ if current_step == 10:
56
+ app_logger.warning(f"Low disk space warning at step {current_step}.")
57
+ elif current_step == 30:
58
+ app_logger.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
59
+ elif current_step == 75:
60
+ app_logger.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
61
+
62
+ if run_error_case and current_step == 50:
63
+ app_logger.error("A fatal simulation error occurred! Aborting.")
64
+ logs = [
65
+ {
66
+ "type": "log",
67
+ "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
68
+ "content": record.getMessage()
69
+ }
70
+ for record in get_logs()
71
+ ]
72
+ all_logs.extend(logs)
73
+ last_log_content = logs[-1]["content"] if logs else last_log_content
74
+ yield tracker.update(advance=0, status="error", logs=all_logs, log_content=last_log_content)
75
+ return
76
+
77
+ logs = [
78
+ {
79
+ "type": "log",
80
+ "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
81
+ "content": record.getMessage()
82
+ }
83
+ for record in get_logs()
84
+ ]
85
+ all_logs.extend(logs)
86
+ if logs:
87
+ last_log_content = logs[-1]["content"]
88
+ yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
89
+
90
+ final_log = "Process completed successfully!"
91
+ app_logger.log(logging.INFO + 5, final_log)
92
+ logs = [
93
+ {
94
+ "type": "log",
95
+ "level": "SUCCESS" if record.levelno == logging.INFO + 5 else record.levelname,
96
+ "content": record.getMessage()
97
+ }
98
+ for record in get_logs()
99
+ ]
100
+ all_logs.extend(logs)
101
+ last_log_content = logs[-1]["content"] if logs else last_log_content
102
+ yield tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
103
+
104
+ def update_livelog_properties(mode, color, lines, scroll):
105
+ return gr.update(display_mode=mode, background_color=color, line_numbers=lines, autoscroll=scroll)
106
+
107
+ def clear_output():
108
+ return None
109
+
110
+ async def run_success_case(disable_console: bool, rate_unit: str):
111
+ yield None
112
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=False):
113
+ yield update
114
+
115
+ async def run_error_case(disable_console: bool, rate_unit: str):
116
+ yield None
117
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=True):
118
+ yield update
119
+
120
+
121
+ # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
122
+ diffusion_pipeline = None
123
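+ # A lock so the lazy, one-time model load in load_pipeline is safe under concurrent requests.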
+ pipeline_lock = threading.Lock()
124
+ def load_pipeline(on_load=True):
125
+ """A function to load the model, ensuring it's only done once."""
126
+ global diffusion_pipeline
127
+ with pipeline_lock:
128
+ if diffusion_pipeline is None:
129
+ print("Loading Stable Diffusion model for the first time...")
130
+ pipe = StableDiffusionXLPipeline.from_pretrained(
131
+ MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
132
+ )
133
+ pipe.enable_vae_tiling()
134
+ pipe.enable_model_cpu_offload()
135
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
136
+ diffusion_pipeline = pipe
137
+ print("Model loaded successfully!")
138
+
139
+ if not on_load:
140
+ return diffusion_pipeline
141
+
142
+ @spaces.GPU(duration=60, enable_queue=True)
143
+ def run_diffusion_in_thread(prompt: str, disable_console: bool, update_queue: queue.Queue):
144
+ """
145
+ This function uses capture_logs to collect log records emitted during the run
146
+ while keeping the structured update format that LiveLog expects.
147
+ """
148
+ tracker = None
149
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # log_name accepts a list of logger names to watch; if omitted, the default logger is captured.
150
+ try:
151
+ pipe = load_pipeline(on_load=False)
152
+
153
+ # We capture the rate (s/it) reported by the pipeline's own tqdm bar instead of measuring it ourselves
154
+ rate_queue = queue.Queue()
155
+ tqdm_writer = TqdmToQueueWriter(rate_queue)
156
+
157
+ progress_bar_handler = Tee(sys.stderr, tqdm_writer)
158
+ pipe.set_progress_bar_config(file=progress_bar_handler,  # if you don't need tqdm output in the console, set file=tqdm_writer instead
159
+ disable=False,
160
+ ncols=100,
161
+ dynamic_ncols=True,
162
+ ascii=" █")
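+ # Tee mirrors every write from the pipeline's tqdm bar to both sinks: sys.stderr
+ # keeps the console bar visible, while TqdmToQueueWriter extracts the live rate
+ # (it/s or s/it) and pushes it onto rate_queue for the ProgressTracker to display.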
163
+
164
+ seed = random.randint(0, MAX_SEED)
165
+ generator = torch.Generator(device="cuda").manual_seed(seed)
166
+ prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
167
+ negative_prompt_style = "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly"
168
+ num_inference_steps = 10
169
+
170
+ all_logs = []
171
+ last_known_rate_data = None
172
+
173
+ # Helper function to process and store new logs
174
+ def process_and_send_updates(status="running", advance=0, final_image_payload=None):
175
+ """
176
+ This is the core callback function. It captures new logs, formats them,
177
+ and sends a complete update object (logs + progress) to the UI queue.
178
+ Call it after each new log record so the UI stays in sync.
179
+ """
180
+ nonlocal all_logs, last_known_rate_data
181
+ new_rate_data = None
182
+ while not rate_queue.empty():
183
+ try:
184
+ new_rate_data = rate_queue.get_nowait()
185
+ except queue.Empty:
186
+ break
187
+
188
+ if new_rate_data is not None:
189
+ last_known_rate_data = new_rate_data
190
+
191
+ new_records = get_logs()
192
+ if new_records:
193
+ new_logs = [{
194
+ "type": "log",
195
+ "level": "SUCCESS" if r.levelno == logging.INFO + 5 else r.levelname,
196
+ "content": r.getMessage()
197
+ } for r in new_records]
198
+ all_logs.extend(new_logs)
199
+
200
+ # Use the tracker to generate the progress update dictionary if it exists.
201
+ # If not, create a preliminary update dictionary.
202
+ update_dict = {}
203
+
204
+ if tracker:
205
+ update_dict = tracker.update(
206
+ advance=advance,
207
+ status=status,
208
+ logs=all_logs,
209
+ rate_data=last_known_rate_data
210
+ )
211
+ else:
212
+ # Initial state before the tracker is created.
213
+ update_dict = {
214
+ "type": "progress",
215
+ "logs": all_logs,
216
+ "current": 0,
217
+ "total": num_inference_steps,
218
+ "desc": "Diffusion Steps" # Description is sent once
219
+ }
220
+
221
+ # Put the update on the queue. The image payload is usually None
222
+ # until the very end.
223
+ update_queue.put((final_image_payload, update_dict))
224
+
225
+ app_logger.info(f"Using seed: {seed}")
226
+ process_and_send_updates()
227
+
228
+ app_logger.info("Starting diffusion process...")
229
+ process_and_send_updates()
230
+
231
+ tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps", rate_unit='it/s')
232
+
233
+ def progress_callback(pipe_instance, step, timestep, callback_kwargs):
234
+ process_and_send_updates(advance=1)
235
+ return callback_kwargs
236
+
237
+ images = pipe(
238
+ prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
239
+ guidance_scale=3.0, num_inference_steps=num_inference_steps,
240
+ generator=generator, callback_on_step_end=progress_callback
241
+ ).images
242
+
243
+ app_logger.log(logging.INFO + 5, "Image generated successfully!")
244
+ process_and_send_updates(status="success", final_image_payload=images)
245
+
246
+
247
+ except Exception as e:
248
+ app_logger.error(f"Error in diffusion thread: {e}, process aborted!", exc_info=True)
249
+ process_and_send_updates(status="error")
250
+ finally:
251
+ update_queue.put(None)
252
+
253
+
254
+ @spaces.GPU(duration=60, enable_queue=True)
255
+ def generate(prompt):
256
+ """This function starts the worker thread and yields updates from the queue."""
257
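+ # First yield: clear the gallery and log viewer, and disable the Generate button while the worker runs.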
+ yield None, None, gr.update(interactive=False)
258
+ update_queue = queue.Queue()
259
+ diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, False, update_queue))
260
+ diffusion_thread.start()
261
+ final_images = None
+ log_update = None  # ensure it's defined even if the worker exits before sending an update
262
+ while True:
263
+ update = update_queue.get()
264
+ if update is None:
265
+ break
266
+
267
+ images, log_update = update
268
+
269
+ if images:
270
+ final_images = images
271
+
272
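+ # gr.skip() leaves the button state unchanged while streaming; the final yield below re-enables it.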
+ yield final_images, log_update, gr.skip()
273
+
274
+ yield final_images, log_update, gr.update(interactive=True)
275
+
276
+ # --- 4. THE COMBINED GRADIO UI with TABS ---
277
+ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
278
+ gr.HTML("<h1><center>LiveLog Component Showcase</center></h1>")
279
+
280
+ with gr.Tabs():
281
+ with gr.TabItem("LiveLog Feature Demo"):
282
+ gr.Markdown("### Test all features of the LiveLog component interactively.")
283
+ with gr.Row():
284
+ with gr.Column(scale=3):
285
+ feature_logger = LiveLog(
286
+ label="Process Output", line_numbers=True, height=550,
287
+ background_color="#000000", display_mode="full"
288
+ )
289
+ with gr.Column(scale=1):
290
+ with gr.Group():
291
+ gr.Markdown("### Component Properties")
292
+ display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
293
+ rate_unit = gr.Radio(["it/s","s/it"], label="Progress rate unit", value="it/s")
294
+ bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
295
+ line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
296
+ autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
297
+ disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
298
+ with gr.Group():
299
+ gr.Markdown("### Simulation Controls")
300
+ start_btn = gr.Button("Run Success Case", variant="primary")
301
+ error_btn = gr.Button("Run Error Case")
302
+
303
+ start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
304
+ error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
305
+ feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
306
+ controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
307
+ for control in controls:
308
+ control.change(fn=update_livelog_properties, inputs=controls, outputs=feature_logger)
309
+
310
+ with gr.TabItem("Diffusion Pipeline Integration"):
311
+ gr.Markdown("### Use `LiveLog` to monitor a real image generation process.")
312
+ with gr.Row():
313
+ with gr.Column(scale=3):
314
+ with gr.Group():
315
+ prompt = gr.Textbox(
316
+ label="Enter your prompt", show_label=False,
317
+ placeholder="A cinematic photo of a robot in a floral garden...",
318
+ scale=8, container=False
319
+ )
320
+ run_button = gr.Button("Generate", scale=1, variant="primary")
321
+
322
+ livelog_viewer = LiveLog(
323
+ label="Process Monitor", height=250, display_mode="full", line_numbers=False
324
+ )
325
+
326
+ with gr.Column(scale=2):
327
+ result_gallery = gr.Gallery(
328
+ label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
329
+ )
330
+
331
+ run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
332
+ prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
333
+ livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
334
+
335
+ # This ensures the model is downloaded/loaded once when the app starts.
336
+ demo.load(load_pipeline, None, None)
337
+
338
+ if __name__ == "__main__":
339
  demo.queue(max_size=50).launch(debug=True)
requirements.txt CHANGED
@@ -1,8 +1,9 @@
1
- diffusers
2
- safetensors
3
- transformers
4
- accelerate
5
- gradio_livelog>=0.0.2
6
- torch
7
- torchvision
8
- torchaudio
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cu124
2
+ diffusers
3
+ safetensors
4
+ transformers
5
+ accelerate
6
+ gradio_livelog
7
+ torch==2.6.0+cu124
8
+ torchvision==0.21.0+cu124
9
+ torchaudio==2.6.0+cu124
space.py CHANGED
@@ -9,7 +9,7 @@ abs_path = os.path.join(os.path.dirname(__file__), "css.css")
9
 
10
  with gr.Blocks(
11
  css=abs_path,
12
- theme=gr.themes.Ocean(
13
  font_mono=[
14
  gr.themes.GoogleFont("Inconsolata"),
15
  "monospace",
@@ -40,9 +40,9 @@ pip install gradio_livelog
40
  ```python
41
  # demo/app.py
42
 
 
43
  import gradio as gr
44
  import torch
45
- import time
46
  import logging
47
  import random
48
  import numpy as np
@@ -54,24 +54,28 @@ import spaces
54
 
55
  # Import the component and ALL its utilities
56
  from gradio_livelog import LiveLog
57
- from gradio_livelog.utils import ProgressTracker, capture_logs
58
 
59
  # --- 1. SETUP ---
60
  MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
61
  MAX_SEED = np.iinfo(np.int32).max
62
 
63
  # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
64
- logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
65
-
66
- async def run_process(disable_console: bool, run_error_case: bool):
67
- with capture_logs(disable_console=disable_console) as get_logs:
68
  total_steps = 100
69
- tracker = ProgressTracker(total=total_steps, description="Simulating a process...")
70
  all_logs = []
71
  last_log_content = None
72
 
73
  initial_log = f"Starting simulated process with {total_steps} steps..."
74
- logging.info(initial_log)
75
  logs = [
76
  {
77
  "type": "log",
@@ -89,14 +93,14 @@ async def run_process(disable_console: bool, run_error_case: bool):
89
  current_step = i + 1
90
 
91
  if current_step == 10:
92
- logging.warning(f"Low disk space warning at step {current_step}.")
93
  elif current_step == 30:
94
- logging.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
95
  elif current_step == 75:
96
- logging.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
97
 
98
  if run_error_case and current_step == 50:
99
- logging.error("A fatal simulation error occurred! Aborting.")
100
  logs = [
101
  {
102
  "type": "log",
@@ -124,7 +128,7 @@ async def run_process(disable_console: bool, run_error_case: bool):
124
  yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
125
 
126
  final_log = "Process completed successfully!"
127
- logging.log(logging.INFO + 5, final_log)
128
  logs = [
129
  {
130
  "type": "log",
@@ -143,47 +147,60 @@ def update_livelog_properties(mode, color, lines, scroll):
143
  def clear_output():
144
  return None
145
 
146
- async def run_success_case(disable_console: bool):
147
  yield None
148
- async for update in run_process(disable_console=disable_console, run_error_case=False):
149
  yield update
150
 
151
- async def run_error_case(disable_console: bool):
152
  yield None
153
- async for update in run_process(disable_console=disable_console, run_error_case=True):
154
  yield update
155
 
156
 
157
  # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
158
  diffusion_pipeline = None
 
159
  def load_pipeline(on_load=True):
160
  \"\"\"A function to load the model, ensuring it's only done once.\"\"\"
161
  global diffusion_pipeline
162
- if diffusion_pipeline is None:
163
- print("Loading Stable Diffusion model for the first time...")
164
- pipe = StableDiffusionXLPipeline.from_pretrained(
165
- MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
166
- )
167
- pipe.enable_vae_tiling()
168
- pipe.enable_model_cpu_offload()
169
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
170
- pipe.set_progress_bar_config(disable=True)
171
- diffusion_pipeline = pipe
172
- print("Model loaded successfully.")
173
-
174
  if not on_load:
175
  return diffusion_pipeline
176
 
177
  @spaces.GPU(duration=60, enable_queue=True)
178
- def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
179
  \"\"\"
180
  This function uses capture_logs to collect log records emitted during the run
181
  while keeping the structured update format that LiveLog expects.
182
  \"\"\"
183
  tracker = None
184
- with capture_logs() as get_logs:
185
- try:
186
  pipe = load_pipeline(on_load=False)
187
  seed = random.randint(0, MAX_SEED)
188
  generator = torch.Generator(device="cuda").manual_seed(seed)
189
  prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
@@ -191,67 +208,110 @@ def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
191
  num_inference_steps = 10
192
 
193
  all_logs = []
194
- last_log_content = None
195
 
196
  # Helper function to process and store new logs
197
- def process_and_store_logs():
198
- nonlocal all_logs, last_log_content
199
  new_records = get_logs()
200
  if new_records:
201
- new_logs = [{"type": "log", "level": r.levelname, "content": r.getMessage()} for r in new_records]
202
  all_logs.extend(new_logs)
203
- last_log_content = all_logs[-1]["content"]
204
-
205
- logging.info(f"Using seed: {seed}")
206
- process_and_store_logs()
207
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
208
-
209
- logging.info("Starting diffusion process...")
210
- process_and_store_logs()
211
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
212
-
213
- tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps")
214
 
215
  def progress_callback(pipe_instance, step, timestep, callback_kwargs):
216
- process_and_store_logs() # Check for new logs from diffusers at each step
217
- update_dict = tracker.update(logs=all_logs)
218
- update_queue.put((None, update_dict))
219
  return callback_kwargs
220
-
221
  images = pipe(
222
  prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
223
  guidance_scale=3.0, num_inference_steps=num_inference_steps,
224
  generator=generator, callback_on_step_end=progress_callback
225
  ).images
226
 
227
- logging.log(logging.INFO + 5, "Image generated successfully!")
228
- process_and_store_logs()
229
-
230
- final_update = tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
231
- update_queue.put((images, final_update))
232
 
233
  except Exception as e:
234
- logging.error(f"Error in diffusion thread: {e}", exc_info=True)
235
- process_and_store_logs() # Capture the final error log
236
- if tracker:
237
- error_update = tracker.update(advance=0, status="error", logs=all_logs, log_content=f"An error occurred: {e}")
238
- update_queue.put((None, error_update))
239
  finally:
240
  update_queue.put(None)
241
 
 
242
  @spaces.GPU(duration=60, enable_queue=True)
243
  def generate(prompt):
244
  \"\"\"This function starts the worker thread and yields updates from the queue.\"\"\"
245
- yield None, None
246
- yield None, {"type": "log", "level": "INFO", "content": "Preparing generation..."}
247
  update_queue = queue.Queue()
248
- diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, update_queue))
249
  diffusion_thread.start()
 
250
  while True:
251
  update = update_queue.get()
252
- if update is None: break
253
- yield update
254
-
255
 
256
  # --- 4. THE COMBINED GRADIO UI with TABS ---
257
  with gr.Blocks(theme=gr.themes.Ocean()) as demo:
@@ -270,17 +330,18 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
270
  with gr.Group():
271
  gr.Markdown("### Component Properties")
272
  display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
 
273
  bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
274
  line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
275
  autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
 
276
  with gr.Group():
277
  gr.Markdown("### Simulation Controls")
278
- disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
279
  start_btn = gr.Button("Run Success Case", variant="primary")
280
  error_btn = gr.Button("Run Error Case")
281
 
282
- start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox], outputs=feature_logger)
283
- error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox], outputs=feature_logger)
284
  feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
285
  controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
286
  for control in controls:
@@ -307,7 +368,7 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
307
  label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
308
  )
309
 
310
- run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
311
  prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
312
  livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
313
 
 
with gr.Blocks(
css=abs_path,
+ theme=gr.themes.Default(
font_mono=[
gr.themes.GoogleFont("Inconsolata"),
  "monospace",
 
```python
# demo/app.py

+ import sys
import gradio as gr
import torch

import logging
import random
import numpy as np

# Import the component and ALL its utilities
from gradio_livelog import LiveLog
+ from gradio_livelog.utils import ProgressTracker, Tee, TqdmToQueueWriter, capture_logs

# --- 1. SETUP ---
MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
MAX_SEED = np.iinfo(np.int32).max

# --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
+ app_logger = logging.getLogger("logging_app")
+ app_logger.setLevel(logging.INFO)
+ console_handler = logging.StreamHandler()
+ console_handler.flush = sys.stderr.flush
+ app_logger.addHandler(console_handler)
+
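# Note on the custom level used below: "success" messages are logged at
# logging.INFO + 5, and LiveLog styles records at that levelno as SUCCESS.
# An optional extra (an assumption, not part of this demo) is to register a
# readable name for plain console output as well:
#   logging.addLevelName(logging.INFO + 5, "SUCCESS")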
+ async def run_process(disable_console: bool, rate_unit: str, run_error_case: bool):
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # You can watch more than one logger by listing it in log_name; if log_name is omitted, the default logger is used.
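# get_logs() returns the records captured since the previous call as standard
# logging.LogRecord objects, so the usual record API applies, e.g.:
#   records = get_logs()
#   lines = [{"type": "log", "level": r.levelname, "content": r.getMessage()}
#            for r in records]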
total_steps = 100
+ tracker = ProgressTracker(total=total_steps, description="Simulating a process...", rate_unit=rate_unit)
all_logs = []
last_log_content = None

initial_log = f"Starting simulated process with {total_steps} steps..."
+ app_logger.info(initial_log)
logs = [
{
"type": "log",

current_step = i + 1

if current_step == 10:
+ app_logger.warning(f"Low disk space warning at step {current_step}.")
elif current_step == 30:
+ app_logger.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
elif current_step == 75:
+ app_logger.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")

if run_error_case and current_step == 50:
+ app_logger.error("A fatal simulation error occurred! Aborting.")
logs = [
{
"type": "log",

yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
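# tracker.update(...) returns a plain dict that LiveLog consumes directly;
# judging from the preliminary update built in the diffusion demo below, it
# carries the progress state ("current", "total", "desc") plus the logs.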
final_log = "Process completed successfully!"
+ app_logger.log(logging.INFO + 5, final_log)
logs = [
{
"type": "log",

def clear_output():
return None

+ async def run_success_case(disable_console: bool, rate_unit: str):
yield None
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=False):
yield update

+ async def run_error_case(disable_console: bool, rate_unit: str):
yield None
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=True):
yield update


# --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
diffusion_pipeline = None
+ pipeline_lock = threading.Lock()
def load_pipeline(on_load=True):
"""A function to load the model, ensuring it's only done once."""
global diffusion_pipeline
+ with pipeline_lock:
+ if diffusion_pipeline is None:
+ print("Loading Stable Diffusion model for the first time...")
+ pipe = StableDiffusionXLPipeline.from_pretrained(
+ MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
+ )
+ pipe.enable_vae_tiling()
+ pipe.enable_model_cpu_offload()
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+ diffusion_pipeline = pipe
+ print("Model loaded successfully!")
+
if not on_load:
return diffusion_pipeline
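# Design note: the lock makes this lazy initialization safe if several Gradio
# events call load_pipeline() concurrently; the first caller loads the model,
# later callers reuse the cached diffusion_pipeline.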
@spaces.GPU(duration=60, enable_queue=True)
+ def run_diffusion_in_thread(prompt: str, disable_console: bool, update_queue: queue.Queue):
"""
This function now uses capture_logs to listen to internal diffusers logs
while retaining the structured update format the UI expects.
"""
tracker = None
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # You can watch more than one logger by listing it in log_name; if log_name is omitted, the default logger is used.
+ try:
pipe = load_pipeline(on_load=False)
+
+ # We will capture the pipeline's own tqdm rate (it/s or s/it) instead
+ rate_queue = queue.Queue()
+ tqdm_writer = TqdmToQueueWriter(rate_queue)
+
+ progress_bar_handler = Tee(sys.stderr, tqdm_writer)
+ pipe.set_progress_bar_config(file=progress_bar_handler,  # if you don't need to see the tqdm progress in the console, set file=tqdm_writer instead
+ disable=False,
+ ncols=100,
+ dynamic_ncols=True,
+ ascii=" █")
+
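# How the rate capture works: tqdm writes its progress line to
# progress_bar_handler; Tee forwards it both to sys.stderr (keeping the
# console bar visible) and to TqdmToQueueWriter, which extracts the current
# rate and pushes it onto rate_queue. The tracker later forwards that value
# as rate_data, so the UI shows the pipeline's own measured rate instead of
# re-measuring it.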
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device="cuda").manual_seed(seed)
prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"

num_inference_steps = 10

all_logs = []
+ last_known_rate_data = None

# Helper function to process new logs and send UI updates
+ def process_and_send_updates(status="running", advance=0, final_image_payload=None):
+ """
+ This is the core callback function. It captures new logs, formats them,
+ and sends a complete update object (logs + progress) to the UI queue.
+ Call it again after each new log record so the UI stays in sync.
+ """
+ nonlocal all_logs, last_known_rate_data
+ new_rate_data = None
+ while not rate_queue.empty():
+ try:
+ new_rate_data = rate_queue.get_nowait()
+ except queue.Empty:
+ break
+
+ if new_rate_data is not None:
+ last_known_rate_data = new_rate_data
+
new_records = get_logs()
if new_records:
+ new_logs = [{
+ "type": "log",
+ "level": "SUCCESS" if r.levelno == logging.INFO + 5 else r.levelname,
+ "content": r.getMessage()
+ } for r in new_records]
all_logs.extend(new_logs)
+
+ # Use the tracker to generate the progress update dictionary if it exists.
+ # If not, create a preliminary update dictionary.
+ update_dict = {}
+
+ if tracker:
+ update_dict = tracker.update(
+ advance=advance,
+ status=status,
+ logs=all_logs,
+ rate_data=last_known_rate_data
+ )
+ else:
+ # Initial state before the tracker is created.
+ update_dict = {
+ "type": "progress",
+ "logs": all_logs,
+ "current": 0,
+ "total": num_inference_steps,
+ "desc": "Diffusion Steps"  # Description is sent once
+ }
+
+ # Put the update on the queue. The image payload is usually None
+ # until the very end.
+ update_queue.put((final_image_payload, update_dict))
+
+ app_logger.info(f"Using seed: {seed}")
+ process_and_send_updates()
+
+ app_logger.info("Starting diffusion process...")
+ process_and_send_updates()
+
+ tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps", rate_unit='it/s')

def progress_callback(pipe_instance, step, timestep, callback_kwargs):
+ process_and_send_updates(advance=1)
return callback_kwargs
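# diffusers calls callback_on_step_end after each scheduler step with
# (pipeline, step, timestep, callback_kwargs) and expects the kwargs dict
# back, so returning it unchanged leaves the pipeline's behavior untouched.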
+
images = pipe(
prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
guidance_scale=3.0, num_inference_steps=num_inference_steps,
generator=generator, callback_on_step_end=progress_callback
).images

+ app_logger.log(logging.INFO + 5, "Image generated successfully!")
+ process_and_send_updates(status="success", final_image_payload=images)
+

except Exception as e:
+ app_logger.error(f"Error in diffusion thread: {e}, process aborted!", exc_info=True)
+ process_and_send_updates(status="error")
finally:
update_queue.put(None)

+
@spaces.GPU(duration=60, enable_queue=True)
def generate(prompt):
"""This function starts the worker thread and yields updates from the queue."""
+ yield None, None, gr.update(interactive=False)
update_queue = queue.Queue()
+ diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, False, update_queue))
diffusion_thread.start()
+ final_images = None
while True:
update = update_queue.get()
+ if update is None:
+ break
+
+ images, log_update = update
+
+ if images:
+ final_images = images
+
+ yield final_images, log_update, gr.skip()
+
+ yield final_images, log_update, gr.update(interactive=True)
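# Intermediate yields pass gr.skip() for the button output so it stays
# disabled while generation runs; only the final yield re-enables it with
# gr.update(interactive=True).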
# --- 4. THE COMBINED GRADIO UI with TABS ---
with gr.Blocks(theme=gr.themes.Ocean()) as demo:

with gr.Group():
gr.Markdown("### Component Properties")
display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
+ rate_unit = gr.Radio(["it/s","s/it"], label="Progress rate unit", value="it/s")
bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
+ disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
with gr.Group():
gr.Markdown("### Simulation Controls")

start_btn = gr.Button("Run Success Case", variant="primary")
error_btn = gr.Button("Run Error Case")

+ start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
+ error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
for control in controls:

label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
)

+ run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])  # generate yields three values, so run_button must be an output here too
livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
src/README.md CHANGED
@@ -10,23 +10,33 @@ app_file: space.py
---

# `gradio_livelog`
- <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20blue"> <a href="https://huggingface.co/spaces/elismasilva/gradio_livelog"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Demo-blue"></a><p><span>💻 <a href='https://github.com/DEVAIEXP/gradio_component_livelog'>Component GitHub Code</a></span></p>

A Live Log Component for Gradio Interface

## Key Features

- `LiveLog` provides a rich, terminal-like experience directly in your Gradio UI.

- - **Real-time Log Streaming:** Display log messages as they are generated in your Python backend, perfect for monitoring long-running tasks.
- - **Integrated Progress Bar:** Track the progress of loops and processes with a familiar, `tqdm`-style progress bar.
- - Includes iterations per second (it/s) calculation.
- - The bar automatically changes color to green on success or red on error for clear visual feedback.
- - **Automatic `logging` Capture:** Effortlessly capture all logs generated by Python's standard `logging` module (`logging.info`, `logging.error`, etc.) using the `capture_logs` context manager.
- - **Flexible Display Modes:** Choose to display the full component (logs + progress bar), logs only, or the progress bar only to fit different UI layouts.
- - **Highly Customizable:** Tailor the component's appearance with properties for background color, line numbers, and autoscrolling.
- - **Utility Controls:** Comes with built-in buttons to clear the log, copy all content to the clipboard, and download logs as a text file.
- - **Console Silencing:** Optionally suppress log output in your Python console to keep it clean while still displaying everything in the UI.

## Installation
@@ -39,9 +49,9 @@ pip install gradio_livelog
```python
# demo/app.py

import gradio as gr
import torch
- import time
import logging
import random
import numpy as np
@@ -53,24 +63,28 @@ import spaces
# Import the component and ALL its utilities
from gradio_livelog import LiveLog
- from gradio_livelog.utils import ProgressTracker, capture_logs

# --- 1. SETUP ---
MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
MAX_SEED = np.iinfo(np.int32).max

# --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
- logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
-
- async def run_process(disable_console: bool, run_error_case: bool):
- with capture_logs(disable_console=disable_console) as get_logs:
total_steps = 100
- tracker = ProgressTracker(total=total_steps, description="Simulating a process...")
all_logs = []
last_log_content = None

initial_log = f"Starting simulated process with {total_steps} steps..."
- logging.info(initial_log)
logs = [
{
"type": "log",
@@ -88,14 +102,14 @@ async def run_process(disable_console: bool, run_error_case: bool):
current_step = i + 1

if current_step == 10:
- logging.warning(f"Low disk space warning at step {current_step}.")
elif current_step == 30:
- logging.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
elif current_step == 75:
- logging.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")

if run_error_case and current_step == 50:
- logging.error("A fatal simulation error occurred! Aborting.")
logs = [
{
"type": "log",
@@ -123,7 +137,7 @@ async def run_process(disable_console: bool, run_error_case: bool):
yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)

final_log = "Process completed successfully!"
- logging.log(logging.INFO + 5, final_log)
logs = [
{
"type": "log",
@@ -142,47 +156,60 @@ def update_livelog_properties(mode, color, lines, scroll):
def clear_output():
return None

- async def run_success_case(disable_console: bool):
yield None
- async for update in run_process(disable_console=disable_console, run_error_case=False):
yield update

- async def run_error_case(disable_console: bool):
yield None
- async for update in run_process(disable_console=disable_console, run_error_case=True):
yield update


# --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
diffusion_pipeline = None
def load_pipeline(on_load=True):
"""A function to load the model, ensuring it's only done once."""
global diffusion_pipeline
- if diffusion_pipeline is None:
- print("Loading Stable Diffusion model for the first time...")
- pipe = StableDiffusionXLPipeline.from_pretrained(
- MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
- )
- pipe.enable_vae_tiling()
- pipe.enable_model_cpu_offload()
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
- pipe.set_progress_bar_config(disable=True)
- diffusion_pipeline = pipe
- print("Model loaded successfully.")
-
if not on_load:
return diffusion_pipeline

@spaces.GPU(duration=60, enable_queue=True)
- def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
"""
This function now uses capture_logs to listen to internal diffusers logs
while retaining the structured update format the UI expects.
"""
tracker = None
- with capture_logs() as get_logs:
- try:
pipe = load_pipeline(on_load=False)
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device="cuda").manual_seed(seed)
prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
@@ -190,67 +217,110 @@ def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
num_inference_steps = 10

all_logs = []
- last_log_content = None

# Helper function to process and store new logs
- def process_and_store_logs():
- nonlocal all_logs, last_log_content
new_records = get_logs()
if new_records:
- new_logs = [{"type": "log", "level": r.levelname, "content": r.getMessage()} for r in new_records]
all_logs.extend(new_logs)
- last_log_content = all_logs[-1]["content"]
-
- logging.info(f"Using seed: {seed}")
- process_and_store_logs()
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
-
- logging.info("Starting diffusion process...")
- process_and_store_logs()
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
-
- tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps")

def progress_callback(pipe_instance, step, timestep, callback_kwargs):
- process_and_store_logs() # Check for new logs from diffusers at each step
- update_dict = tracker.update(logs=all_logs)
- update_queue.put((None, update_dict))
return callback_kwargs
-
images = pipe(
prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
guidance_scale=3.0, num_inference_steps=num_inference_steps,
generator=generator, callback_on_step_end=progress_callback
).images

- logging.log(logging.INFO + 5, "Image generated successfully!")
- process_and_store_logs()
-
- final_update = tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
- update_queue.put((images, final_update))

except Exception as e:
- logging.error(f"Error in diffusion thread: {e}", exc_info=True)
- process_and_store_logs() # Capture the final error log
- if tracker:
- error_update = tracker.update(advance=0, status="error", logs=all_logs, log_content=f"An error occurred: {e}")
- update_queue.put((None, error_update))
finally:
update_queue.put(None)

@spaces.GPU(duration=60, enable_queue=True)
def generate(prompt):
"""This function starts the worker thread and yields updates from the queue."""
- yield None, None
- yield None, {"type": "log", "level": "INFO", "content": "Preparing generation..."}
update_queue = queue.Queue()
- diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, update_queue))
diffusion_thread.start()
while True:
update = update_queue.get()
- if update is None: break
- yield update
-

# --- 4. THE COMBINED GRADIO UI with TABS ---
with gr.Blocks(theme=gr.themes.Ocean()) as demo:
@@ -269,17 +339,18 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
with gr.Group():
gr.Markdown("### Component Properties")
display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
with gr.Group():
gr.Markdown("### Simulation Controls")
- disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
start_btn = gr.Button("Run Success Case", variant="primary")
error_btn = gr.Button("Run Error Case")

- start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox], outputs=feature_logger)
- error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox], outputs=feature_logger)
feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
for control in controls:
@@ -306,7 +377,7 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
)

- run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])

---

# `gradio_livelog`
+ <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.3%20-%20blue"> <a href="https://huggingface.co/spaces/elismasilva/gradio_livelog"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Demo-blue"></a><p><span>💻 <a href='https://github.com/DEVAIEXP/gradio_component_livelog'>Component GitHub Code</a></span></p>

A Live Log Component for Gradio Interface

## Key Features

+ `LiveLog` elevates Gradio applications by providing a powerful, terminal-like monitoring experience directly in your UI. It's designed for both simple progress tracking and complex pipeline introspection.

+ - **Dual-Mode Progress Tracking:** A sophisticated progress bar that operates in two modes for maximum accuracy:
+ - **Internal Rate Calculation:** For simple loops, it features a built-in, `tqdm`-style progress calculator with **Exponential Moving Average (EMA)** smoothing for a stable and realistic `it/s` or `s/it` display.
+ - **External `tqdm` Capture:** For deep integration, it can **directly capture and display the *exact* rate** from an existing `tqdm` instance running inside a backend library (like `diffusers`). This eliminates measurement overhead and provides a perfectly synchronized view of your pipeline's true performance.
+
+ - **Rich, Real-time Log Streaming:** Display log messages as they are generated.
+ - Supports standard Python log levels (`INFO`, `WARNING`, `ERROR`, etc.) with corresponding colors.
+ - Includes support for **custom log levels** (like "SUCCESS") for enhanced visual feedback.
+
+ - **Advanced Multi-Logger Capture:** The `capture_logs` utility is designed for complex applications.
+ - Effortlessly capture logs from **multiple, independent Python loggers** simultaneously (e.g., your app's logger and a library's internal logger).
+ - Correctly handles logger hierarchies and propagation, making it robust for any logging setup.
+
+ - **Flexible Display & Layout Control:** Adapt the component to any UI layout.
+ - **Three Display Modes:** Show the full component (`logs + progress`), `logs only`, or `progress bar only`.
+ - Highly customizable appearance with properties for `height`, `background_color`, `line_numbers`, and `autoscrolling`.
+
+ - **Comprehensive Utility Controls:**
+ - Built-in header buttons to **Clear**, **Copy**, and **Download** log content.
+ - Optionally suppress log output in your Python console to keep it clean while still displaying everything in the UI. A minimal sketch of these pieces working together follows below.
 
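Below is a minimal end-to-end sketch of the features above. It uses only the APIs that appear in the full demo later in this README; the `LiveLog(label=...)` constructor argument is the only assumption beyond that.

```python
import asyncio
import logging

import gradio as gr
from gradio_livelog import LiveLog
from gradio_livelog.utils import ProgressTracker, capture_logs

logger = logging.getLogger("logging_app")
logger.setLevel(logging.INFO)

async def run():
    # Capture this named logger and stream ten progress ticks to the UI.
    with capture_logs(log_level=logging.INFO, log_name=["logging_app"]) as get_logs:
        tracker = ProgressTracker(total=10, description="Demo", rate_unit="it/s")
        all_logs = []
        for i in range(10):
            logger.info(f"step {i + 1}")
            all_logs.extend(
                {"type": "log", "level": r.levelname, "content": r.getMessage()}
                for r in get_logs()
            )
            await asyncio.sleep(0.1)  # simulate work
            # Each update dict drives both the log panel and the progress bar.
            yield tracker.update(advance=1, status="running", logs=all_logs)

with gr.Blocks() as demo:
    viewer = LiveLog(label="Process Monitor")
    gr.Button("Run").click(fn=run, outputs=viewer)

demo.launch()
```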
## Installation
 
```python
# demo/app.py

+ import sys
import gradio as gr
import torch

import logging
import random
import numpy as np

# Import the component and ALL its utilities
from gradio_livelog import LiveLog
+ from gradio_livelog.utils import ProgressTracker, Tee, TqdmToQueueWriter, capture_logs

# --- 1. SETUP ---
MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
MAX_SEED = np.iinfo(np.int32).max

# --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
+ app_logger = logging.getLogger("logging_app")
+ app_logger.setLevel(logging.INFO)
+ console_handler = logging.StreamHandler()
+ console_handler.flush = sys.stderr.flush
+ app_logger.addHandler(console_handler)
+
+ async def run_process(disable_console: bool, rate_unit: str, run_error_case: bool):
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # You can watch more than one logger by listing it in log_name; if log_name is omitted, the default logger is used.
total_steps = 100
+ tracker = ProgressTracker(total=total_steps, description="Simulating a process...", rate_unit=rate_unit)
all_logs = []
last_log_content = None

initial_log = f"Starting simulated process with {total_steps} steps..."
+ app_logger.info(initial_log)
logs = [
{
"type": "log",

current_step = i + 1

if current_step == 10:
+ app_logger.warning(f"Low disk space warning at step {current_step}.")
elif current_step == 30:
+ app_logger.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
elif current_step == 75:
+ app_logger.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")

if run_error_case and current_step == 50:
+ app_logger.error("A fatal simulation error occurred! Aborting.")
logs = [
{
"type": "log",

yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)

final_log = "Process completed successfully!"
+ app_logger.log(logging.INFO + 5, final_log)
logs = [
{
"type": "log",

def clear_output():
return None

+ async def run_success_case(disable_console: bool, rate_unit: str):
yield None
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=False):
yield update

+ async def run_error_case(disable_console: bool, rate_unit: str):
yield None
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=True):
yield update


# --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
diffusion_pipeline = None
+ pipeline_lock = threading.Lock()
def load_pipeline(on_load=True):
"""A function to load the model, ensuring it's only done once."""
global diffusion_pipeline
+ with pipeline_lock:
+ if diffusion_pipeline is None:
+ print("Loading Stable Diffusion model for the first time...")
+ pipe = StableDiffusionXLPipeline.from_pretrained(
+ MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
+ )
+ pipe.enable_vae_tiling()
+ pipe.enable_model_cpu_offload()
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+ diffusion_pipeline = pipe
+ print("Model loaded successfully!")
+
if not on_load:
return diffusion_pipeline

@spaces.GPU(duration=60, enable_queue=True)
+ def run_diffusion_in_thread(prompt: str, disable_console: bool, update_queue: queue.Queue):
"""
This function now uses capture_logs to listen to internal diffusers logs
while retaining the structured update format the UI expects.
"""
tracker = None
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # You can watch more than one logger by listing it in log_name; if log_name is omitted, the default logger is used.
+ try:
pipe = load_pipeline(on_load=False)
+
+ # We will capture the pipeline's own tqdm rate (it/s or s/it) instead
+ rate_queue = queue.Queue()
+ tqdm_writer = TqdmToQueueWriter(rate_queue)
+
+ progress_bar_handler = Tee(sys.stderr, tqdm_writer)
+ pipe.set_progress_bar_config(file=progress_bar_handler,  # if you don't need to see the tqdm progress in the console, set file=tqdm_writer instead
+ disable=False,
+ ncols=100,
+ dynamic_ncols=True,
+ ascii=" █")
+
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device="cuda").manual_seed(seed)
prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"

num_inference_steps = 10

all_logs = []
+ last_known_rate_data = None

# Helper function to process new logs and send UI updates
+ def process_and_send_updates(status="running", advance=0, final_image_payload=None):
+ """
+ This is the core callback function. It captures new logs, formats them,
+ and sends a complete update object (logs + progress) to the UI queue.
+ Call it again after each new log record so the UI stays in sync.
+ """
+ nonlocal all_logs, last_known_rate_data
+ new_rate_data = None
+ while not rate_queue.empty():
+ try:
+ new_rate_data = rate_queue.get_nowait()
+ except queue.Empty:
+ break
+
+ if new_rate_data is not None:
+ last_known_rate_data = new_rate_data
+
new_records = get_logs()
if new_records:
+ new_logs = [{
+ "type": "log",
+ "level": "SUCCESS" if r.levelno == logging.INFO + 5 else r.levelname,
+ "content": r.getMessage()
+ } for r in new_records]
all_logs.extend(new_logs)
+
+ # Use the tracker to generate the progress update dictionary if it exists.
+ # If not, create a preliminary update dictionary.
+ update_dict = {}
+
+ if tracker:
+ update_dict = tracker.update(
+ advance=advance,
+ status=status,
+ logs=all_logs,
+ rate_data=last_known_rate_data
+ )
+ else:
+ # Initial state before the tracker is created.
+ update_dict = {
+ "type": "progress",
+ "logs": all_logs,
+ "current": 0,
+ "total": num_inference_steps,
+ "desc": "Diffusion Steps"  # Description is sent once
+ }
+
+ # Put the update on the queue. The image payload is usually None
+ # until the very end.
+ update_queue.put((final_image_payload, update_dict))
+
+ app_logger.info(f"Using seed: {seed}")
+ process_and_send_updates()
+
+ app_logger.info("Starting diffusion process...")
+ process_and_send_updates()
+
+ tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps", rate_unit='it/s')

def progress_callback(pipe_instance, step, timestep, callback_kwargs):
+ process_and_send_updates(advance=1)
return callback_kwargs
+
images = pipe(
prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
guidance_scale=3.0, num_inference_steps=num_inference_steps,
generator=generator, callback_on_step_end=progress_callback
).images

+ app_logger.log(logging.INFO + 5, "Image generated successfully!")
+ process_and_send_updates(status="success", final_image_payload=images)
+

except Exception as e:
+ app_logger.error(f"Error in diffusion thread: {e}, process aborted!", exc_info=True)
+ process_and_send_updates(status="error")
finally:
update_queue.put(None)

+
@spaces.GPU(duration=60, enable_queue=True)
def generate(prompt):
"""This function starts the worker thread and yields updates from the queue."""
+ yield None, None, gr.update(interactive=False)
update_queue = queue.Queue()
+ diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, False, update_queue))
diffusion_thread.start()
+ final_images = None
while True:
update = update_queue.get()
+ if update is None:
+ break
+
+ images, log_update = update
+
+ if images:
+ final_images = images
+
+ yield final_images, log_update, gr.skip()
+
+ yield final_images, log_update, gr.update(interactive=True)

# --- 4. THE COMBINED GRADIO UI with TABS ---
with gr.Blocks(theme=gr.themes.Ocean()) as demo:

with gr.Group():
gr.Markdown("### Component Properties")
display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
+ rate_unit = gr.Radio(["it/s","s/it"], label="Progress rate unit", value="it/s")
bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
+ disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
with gr.Group():
gr.Markdown("### Simulation Controls")

start_btn = gr.Button("Run Success Case", variant="primary")
error_btn = gr.Button("Run Error Case")

+ start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
+ error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
for control in controls:

label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
)

+ run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])  # generate yields three values, so run_button must be an output here too
livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
src/backend/gradio_livelog/templates/component/index.js CHANGED
The diff for this file is too large to render. See raw diff
 
src/backend/gradio_livelog/templates/component/style.css CHANGED
@@ -1 +1 @@
- .block.svelte-239wnu{position:relative;margin:0;box-shadow:var(--block-shadow);border-width:var(--block-border-width);border-color:var(--block-border-color);border-radius:var(--block-radius);background:var(--block-background-fill);width:100%;line-height:var(--line-sm)}.block.fullscreen.svelte-239wnu{border-radius:0}.auto-margin.svelte-239wnu{margin-left:auto;margin-right:auto}.block.border_focus.svelte-239wnu{border-color:var(--color-accent)}.block.border_contrast.svelte-239wnu{border-color:var(--body-text-color)}.padded.svelte-239wnu{padding:var(--block-padding)}.hidden.svelte-239wnu{display:none}.flex.svelte-239wnu{display:flex;flex-direction:column}.hide-container.svelte-239wnu:not(.fullscreen){margin:0;box-shadow:none;--block-border-width:0;background:transparent;padding:0;overflow:visible}.resize-handle.svelte-239wnu{position:absolute;bottom:0;right:0;width:10px;height:10px;fill:var(--block-border-color);cursor:nwse-resize}.fullscreen.svelte-239wnu{position:fixed;top:0;left:0;width:100vw;height:100vh;z-index:1000;overflow:auto}.animating.svelte-239wnu{animation:svelte-239wnu-pop-out .1s ease-out forwards}@keyframes svelte-239wnu-pop-out{0%{position:fixed;top:var(--start-top);left:var(--start-left);width:var(--start-width);height:var(--start-height);z-index:100}to{position:fixed;top:0vh;left:0vw;width:100vw;height:100vh;z-index:1000}}.placeholder.svelte-239wnu{border-radius:var(--block-radius);border-width:var(--block-border-width);border-color:var(--block-border-color);border-style:dashed}Tables */ table,tr,td,th{margin-top:var(--spacing-sm);margin-bottom:var(--spacing-sm);padding:var(--spacing-xl)}.md code,.md pre{background:none;font-family:var(--font-mono);font-size:var(--text-sm);text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;line-height:1.5;-moz-tab-size:2;tab-size:2;-webkit-hyphens:none;hyphens:none}.md pre[class*=language-]::selection,.md pre[class*=language-] ::selection,.md code[class*=language-]::selection,.md code[class*=language-] ::selection{text-shadow:none;background:#b3d4fc}.md pre{padding:1em;margin:.5em 0;overflow:auto;position:relative;margin-top:var(--spacing-sm);margin-bottom:var(--spacing-sm);box-shadow:none;border:none;border-radius:var(--radius-md);background:var(--code-background-fill);padding:var(--spacing-xxl);font-family:var(--font-mono);text-shadow:none;border-radius:var(--radius-sm);white-space:nowrap;display:block;white-space:pre}.md :not(pre)>code{padding:.1em;border-radius:var(--radius-xs);white-space:normal;background:var(--code-background-fill);border:1px solid var(--panel-border-color);padding:var(--spacing-xxs) var(--spacing-xs)}.md .token.comment,.md .token.prolog,.md .token.doctype,.md .token.cdata{color:#708090}.md .token.punctuation{color:#999}.md .token.namespace{opacity:.7}.md .token.property,.md .token.tag,.md .token.boolean,.md .token.number,.md .token.constant,.md .token.symbol,.md .token.deleted{color:#905}.md .token.selector,.md .token.attr-name,.md .token.string,.md .token.char,.md .token.builtin,.md .token.inserted{color:#690}.md .token.atrule,.md .token.attr-value,.md .token.keyword{color:#07a}.md .token.function,.md .token.class-name{color:#dd4a68}.md .token.regex,.md .token.important,.md .token.variable{color:#e90}.md .token.important,.md .token.bold{font-weight:700}.md .token.italic{font-style:italic}.md .token.entity{cursor:help}.dark .md .token.comment,.dark .md .token.prolog,.dark .md .token.cdata{color:#5c6370}.dark .md .token.doctype,.dark .md .token.punctuation,.dark .md 
.token.entity{color:#abb2bf}.dark .md .token.attr-name,.dark .md .token.class-name,.dark .md .token.boolean,.dark .md .token.constant,.dark .md .token.number,.dark .md .token.atrule{color:#d19a66}.dark .md .token.keyword{color:#c678dd}.dark .md .token.property,.dark .md .token.tag,.dark .md .token.symbol,.dark .md .token.deleted,.dark .md .token.important{color:#e06c75}.dark .md .token.selector,.dark .md .token.string,.dark .md .token.char,.dark .md .token.builtin,.dark .md .token.inserted,.dark .md .token.regex,.dark .md .token.attr-value,.dark .md .token.attr-value>.token.punctuation{color:#98c379}.dark .md .token.variable,.dark .md .token.operator,.dark .md .token.function{color:#61afef}.dark .md .token.url{color:#56b6c2}span.svelte-1m32c2s div[class*=code_wrap]{position:relative}span.svelte-1m32c2s span.katex{font-size:var(--text-lg);direction:ltr}span.svelte-1m32c2s div[class*=code_wrap]>button{z-index:1;cursor:pointer;border-bottom-left-radius:var(--radius-sm);padding:var(--spacing-md);width:25px;height:25px;position:absolute;right:0}span.svelte-1m32c2s .check{opacity:0;z-index:var(--layer-top);transition:opacity .2s;background:var(--code-background-fill);color:var(--body-text-color);position:absolute;top:var(--size-1-5);left:var(--size-1-5)}span.svelte-1m32c2s p:not(:first-child){margin-top:var(--spacing-xxl)}span.svelte-1m32c2s .md-header-anchor{margin-left:-25px;padding-right:8px;line-height:1;color:var(--body-text-color-subdued);opacity:0}span.svelte-1m32c2s h1:hover .md-header-anchor,span.svelte-1m32c2s h2:hover .md-header-anchor,span.svelte-1m32c2s h3:hover .md-header-anchor,span.svelte-1m32c2s h4:hover .md-header-anchor,span.svelte-1m32c2s h5:hover .md-header-anchor,span.svelte-1m32c2s h6:hover .md-header-anchor{opacity:1}span.md.svelte-1m32c2s .md-header-anchor>svg{color:var(--body-text-color-subdued)}span.svelte-1m32c2s table{word-break:break-word}div.svelte-17qq50w>.md.prose{font-weight:var(--block-info-text-weight);font-size:var(--block-info-text-size);line-height:var(--line-sm)}div.svelte-17qq50w>.md.prose *{color:var(--block-info-text-color)}div.svelte-17qq50w{margin-bottom:var(--spacing-md)}span.has-info.svelte-zgrq3{margin-bottom:var(--spacing-xs)}span.svelte-zgrq3:not(.has-info){margin-bottom:var(--spacing-lg)}span.svelte-zgrq3{display:inline-block;position:relative;z-index:var(--layer-4);border:solid var(--block-title-border-width) var(--block-title-border-color);border-radius:var(--block-title-radius);background:var(--block-title-background-fill);padding:var(--block-title-padding);color:var(--block-title-text-color);font-weight:var(--block-title-text-weight);font-size:var(--block-title-text-size);line-height:var(--line-sm)}span[dir=rtl].svelte-zgrq3{display:block}.hide.svelte-zgrq3{margin:0;height:0}label.svelte-13ao5pu.svelte-13ao5pu{display:inline-flex;align-items:center;z-index:var(--layer-2);box-shadow:var(--block-label-shadow);border:var(--block-label-border-width) solid var(--block-label-border-color);border-top:none;border-left:none;border-radius:var(--block-label-radius);background:var(--block-label-background-fill);padding:var(--block-label-padding);pointer-events:none;color:var(--block-label-text-color);font-weight:var(--block-label-text-weight);font-size:var(--block-label-text-size);line-height:var(--line-sm)}.gr-group 
label.svelte-13ao5pu.svelte-13ao5pu{border-top-left-radius:0}label.float.svelte-13ao5pu.svelte-13ao5pu{position:absolute;top:var(--block-label-margin);left:var(--block-label-margin)}label.svelte-13ao5pu.svelte-13ao5pu:not(.float){position:static;margin-top:var(--block-label-margin);margin-left:var(--block-label-margin)}.hide.svelte-13ao5pu.svelte-13ao5pu{height:0}span.svelte-13ao5pu.svelte-13ao5pu{opacity:.8;margin-right:var(--size-2);width:calc(var(--block-label-text-size) - 1px);height:calc(var(--block-label-text-size) - 1px)}.hide-label.svelte-13ao5pu.svelte-13ao5pu{box-shadow:none;border-width:0;background:transparent;overflow:visible}label[dir=rtl].svelte-13ao5pu.svelte-13ao5pu{border:var(--block-label-border-width) solid var(--block-label-border-color);border-top:none;border-right:none;border-bottom-left-radius:var(--block-radius);border-bottom-right-radius:var(--block-label-radius);border-top-left-radius:var(--block-label-radius)}label[dir=rtl].svelte-13ao5pu span.svelte-13ao5pu{margin-left:var(--size-2);margin-right:0}button.svelte-qgco6m{display:flex;justify-content:center;align-items:center;gap:1px;z-index:var(--layer-2);border-radius:var(--radius-xs);color:var(--block-label-text-color);border:1px solid transparent;padding:var(--spacing-xxs)}button.svelte-qgco6m:hover{background-color:var(--background-fill-secondary)}button[disabled].svelte-qgco6m{opacity:.5;box-shadow:none}button[disabled].svelte-qgco6m:hover{cursor:not-allowed}.padded.svelte-qgco6m{background:var(--bg-color)}button.svelte-qgco6m:hover,button.highlight.svelte-qgco6m{cursor:pointer;color:var(--color-accent)}.padded.svelte-qgco6m:hover{color:var(--block-label-text-color)}span.svelte-qgco6m{padding:0 1px;font-size:10px}div.svelte-qgco6m{display:flex;align-items:center;justify-content:center;transition:filter .2s ease-in-out}.x-small.svelte-qgco6m{width:10px;height:10px}.small.svelte-qgco6m{width:14px;height:14px}.medium.svelte-qgco6m{width:20px;height:20px}.large.svelte-qgco6m{width:22px;height:22px}.pending.svelte-qgco6m{animation:svelte-qgco6m-flash .5s infinite}@keyframes svelte-qgco6m-flash{0%{opacity:.5}50%{opacity:1}to{opacity:.5}}.transparent.svelte-qgco6m{background:transparent;border:none;box-shadow:none}.empty.svelte-3w3rth{display:flex;justify-content:center;align-items:center;margin-top:calc(0px - var(--size-6));height:var(--size-full)}.icon.svelte-3w3rth{opacity:.5;height:var(--size-5);color:var(--body-text-color)}.small.svelte-3w3rth{min-height:calc(var(--size-32) - 20px)}.large.svelte-3w3rth{min-height:calc(var(--size-64) - 20px)}.unpadded_box.svelte-3w3rth{margin-top:0}.small_parent.svelte-3w3rth{min-height:100%!important}.dropdown-arrow.svelte-145leq6,.dropdown-arrow.svelte-ihhdbf{fill:currentColor}.circle.svelte-ihhdbf{fill:currentColor;opacity:.1}svg.svelte-pb9pol{animation:svelte-pb9pol-spin 1.5s linear infinite}@keyframes svelte-pb9pol-spin{0%{transform:rotate(0)}to{transform:rotate(360deg)}}h2.svelte-1xg7h5n{font-size:var(--text-xl)!important}p.svelte-1xg7h5n,h2.svelte-1xg7h5n{white-space:pre-line}.wrap.svelte-1xg7h5n{display:flex;flex-direction:column;justify-content:center;align-items:center;min-height:var(--size-60);color:var(--block-label-text-color);line-height:var(--line-md);height:100%;padding-top:var(--size-3);text-align:center;margin:auto var(--spacing-lg)}.or.svelte-1xg7h5n{color:var(--body-text-color-subdued);display:flex}.icon-wrap.svelte-1xg7h5n{width:30px;margin-bottom:var(--spacing-lg)}@media 
(--screen-md){.wrap.svelte-1xg7h5n{font-size:var(--text-lg)}}.hovered.svelte-1xg7h5n{color:var(--color-accent)}div.svelte-q32hvf{border-top:1px solid transparent;display:flex;max-height:100%;justify-content:center;align-items:center;gap:var(--spacing-sm);height:auto;align-items:flex-end;color:var(--block-label-text-color);flex-shrink:0}.show_border.svelte-q32hvf{border-top:1px solid var(--block-border-color);margin-top:var(--spacing-xxl);box-shadow:var(--shadow-drop)}.source-selection.svelte-15ls1gu{display:flex;align-items:center;justify-content:center;border-top:1px solid var(--border-color-primary);width:100%;margin-left:auto;margin-right:auto;height:var(--size-10)}.icon.svelte-15ls1gu{width:22px;height:22px;margin:var(--spacing-lg) var(--spacing-xs);padding:var(--spacing-xs);color:var(--neutral-400);border-radius:var(--radius-md)}.selected.svelte-15ls1gu{color:var(--color-accent)}.icon.svelte-15ls1gu:hover,.icon.svelte-15ls1gu:focus{color:var(--color-accent)}.icon-button-wrapper.svelte-109se4{display:flex;flex-direction:row;align-items:center;justify-content:center;z-index:var(--layer-3);gap:var(--spacing-sm);box-shadow:var(--shadow-drop);border:1px solid var(--border-color-primary);background:var(--block-background-fill);padding:var(--spacing-xxs)}.icon-button-wrapper.hide-top-corner.svelte-109se4{border-top:none;border-right:none;border-radius:var(--block-label-right-radius)}.icon-button-wrapper.display-top-corner.svelte-109se4{border-radius:var(--radius-sm) 0 0 var(--radius-sm);top:var(--spacing-sm);right:-1px}.icon-button-wrapper.svelte-109se4:not(.top-panel){border:1px solid var(--border-color-primary);border-radius:var(--radius-sm)}.top-panel.svelte-109se4{position:absolute;top:var(--block-label-margin);right:var(--block-label-margin);margin:0}.icon-button-wrapper.svelte-109se4 button{margin:var(--spacing-xxs);border-radius:var(--radius-xs);position:relative}.icon-button-wrapper.svelte-109se4 a.download-link:not(:last-child),.icon-button-wrapper.svelte-109se4 button:not(:last-child){margin-right:var(--spacing-xxs)}.icon-button-wrapper.svelte-109se4 a.download-link:not(:last-child):not(.no-border *):after,.icon-button-wrapper.svelte-109se4 button:not(:last-child):not(.no-border *):after{content:"";position:absolute;right:-4.5px;top:15%;height:70%;width:1px;background-color:var(--border-color-primary)}.icon-button-wrapper.svelte-109se4>*{height:100%}svg.svelte-43sxxs.svelte-43sxxs{width:var(--size-20);height:var(--size-20)}svg.svelte-43sxxs path.svelte-43sxxs{fill:var(--loader-color)}div.svelte-43sxxs.svelte-43sxxs{z-index:var(--layer-2)}.margin.svelte-43sxxs.svelte-43sxxs{margin:var(--size-4)}.wrap.svelte-17v219f.svelte-17v219f{display:flex;flex-direction:column;justify-content:center;align-items:center;z-index:var(--layer-2);transition:opacity .1s ease-in-out;border-radius:var(--block-radius);background:var(--block-background-fill);padding:0 var(--size-6);max-height:var(--size-screen-h);overflow:hidden}.wrap.center.svelte-17v219f.svelte-17v219f{top:0;right:0;left:0}.wrap.default.svelte-17v219f.svelte-17v219f{top:0;right:0;bottom:0;left:0}.hide.svelte-17v219f.svelte-17v219f{opacity:0;pointer-events:none}.generating.svelte-17v219f.svelte-17v219f{animation:svelte-17v219f-pulseStart 1s cubic-bezier(.4,0,.6,1),svelte-17v219f-pulse 2s cubic-bezier(.4,0,.6,1) 1s infinite;border:2px solid var(--color-accent);background:transparent;z-index:var(--layer-1);pointer-events:none}.translucent.svelte-17v219f.svelte-17v219f{background:none}@keyframes 
svelte-17v219f-pulseStart{0%{opacity:0}to{opacity:1}}@keyframes svelte-17v219f-pulse{0%,to{opacity:1}50%{opacity:.5}}.loading.svelte-17v219f.svelte-17v219f{z-index:var(--layer-2);color:var(--body-text-color)}.eta-bar.svelte-17v219f.svelte-17v219f{position:absolute;top:0;right:0;bottom:0;left:0;transform-origin:left;opacity:.8;z-index:var(--layer-1);transition:10ms;background:var(--background-fill-secondary)}.progress-bar-wrap.svelte-17v219f.svelte-17v219f{border:1px solid var(--border-color-primary);background:var(--background-fill-primary);width:55.5%;height:var(--size-4)}.progress-bar.svelte-17v219f.svelte-17v219f{transform-origin:left;background-color:var(--loader-color);width:var(--size-full);height:var(--size-full)}.progress-level.svelte-17v219f.svelte-17v219f{display:flex;flex-direction:column;align-items:center;gap:1;z-index:var(--layer-2);width:var(--size-full)}.progress-level-inner.svelte-17v219f.svelte-17v219f{margin:var(--size-2) auto;color:var(--body-text-color);font-size:var(--text-sm);font-family:var(--font-mono)}.meta-text.svelte-17v219f.svelte-17v219f{position:absolute;bottom:0;right:0;z-index:var(--layer-2);padding:var(--size-1) var(--size-2);font-size:var(--text-sm);font-family:var(--font-mono)}.meta-text-center.svelte-17v219f.svelte-17v219f{display:flex;position:absolute;top:0;right:0;justify-content:center;align-items:center;transform:translateY(var(--size-6));z-index:var(--layer-2);padding:var(--size-1) var(--size-2);font-size:var(--text-sm);font-family:var(--font-mono);text-align:center}.error.svelte-17v219f.svelte-17v219f{box-shadow:var(--shadow-drop);border:solid 1px var(--error-border-color);border-radius:var(--radius-full);background:var(--error-background-fill);padding-right:var(--size-4);padding-left:var(--size-4);color:var(--error-text-color);font-weight:var(--weight-semibold);font-size:var(--text-lg);line-height:var(--line-lg);font-family:var(--font)}.minimal.svelte-17v219f.svelte-17v219f{pointer-events:none}.minimal.svelte-17v219f .progress-text.svelte-17v219f{background:var(--block-background-fill)}.border.svelte-17v219f.svelte-17v219f{border:1px solid var(--border-color-primary)}.clear-status.svelte-17v219f.svelte-17v219f{position:absolute;display:flex;top:var(--size-2);right:var(--size-2);justify-content:flex-end;gap:var(--spacing-sm);z-index:var(--layer-1)}.toast-body.svelte-syezpc{display:flex;position:relative;right:0;left:0;align-items:center;margin:var(--size-6) var(--size-4);margin:auto;border-radius:var(--container-radius);overflow:hidden;pointer-events:auto}.toast-body.error.svelte-syezpc{border:1px solid var(--color-red-700);background:var(--color-red-50)}.dark .toast-body.error.svelte-syezpc{border:1px solid var(--color-red-500);background-color:var(--color-grey-950)}.toast-body.warning.svelte-syezpc{border:1px solid var(--color-yellow-700);background:var(--color-yellow-50)}.dark .toast-body.warning.svelte-syezpc{border:1px solid var(--color-yellow-500);background-color:var(--color-grey-950)}.toast-body.info.svelte-syezpc{border:1px solid var(--color-grey-700);background:var(--color-grey-50)}.dark .toast-body.info.svelte-syezpc{border:1px solid var(--color-grey-500);background-color:var(--color-grey-950)}.toast-body.success.svelte-syezpc{border:1px solid var(--color-green-700);background:var(--color-green-50)}.dark .toast-body.success.svelte-syezpc{border:1px solid 
var(--color-green-500);background-color:var(--color-grey-950)}.toast-title.svelte-syezpc{display:flex;align-items:center;font-weight:var(--weight-bold);font-size:var(--text-lg);line-height:var(--line-sm)}.toast-title.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-title.error.svelte-syezpc{color:var(--color-red-50)}.toast-title.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-title.warning.svelte-syezpc{color:var(--color-yellow-50)}.toast-title.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-title.info.svelte-syezpc{color:var(--color-grey-50)}.toast-title.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-title.success.svelte-syezpc{color:var(--color-green-50)}.toast-close.svelte-syezpc{margin:0 var(--size-3);border-radius:var(--size-3);padding:0px var(--size-1-5);font-size:var(--size-5);line-height:var(--size-5)}.toast-close.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-close.error.svelte-syezpc{color:var(--color-red-500)}.toast-close.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-close.warning.svelte-syezpc{color:var(--color-yellow-500)}.toast-close.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-close.info.svelte-syezpc{color:var(--color-grey-500)}.toast-close.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-close.success.svelte-syezpc{color:var(--color-green-500)}.toast-text.svelte-syezpc{font-size:var(--text-lg);word-wrap:break-word;overflow-wrap:break-word;word-break:break-word}.toast-text.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-text.error.svelte-syezpc{color:var(--color-red-50)}.toast-text.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-text.warning.svelte-syezpc{color:var(--color-yellow-50)}.toast-text.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-text.info.svelte-syezpc{color:var(--color-grey-50)}.toast-text.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-text.success.svelte-syezpc{color:var(--color-green-50)}.toast-details.svelte-syezpc{margin:var(--size-3) var(--size-3) var(--size-3) 0;width:100%}.toast-icon.svelte-syezpc{display:flex;position:absolute;position:relative;flex-shrink:0;justify-content:center;align-items:center;margin:var(--size-2);border-radius:var(--radius-full);padding:var(--size-1);padding-left:calc(var(--size-1) - 1px);width:35px;height:35px}.toast-icon.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-icon.error.svelte-syezpc{color:var(--color-red-500)}.toast-icon.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-icon.warning.svelte-syezpc{color:var(--color-yellow-500)}.toast-icon.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-icon.info.svelte-syezpc{color:var(--color-grey-500)}.toast-icon.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-icon.success.svelte-syezpc{color:var(--color-green-500)}@keyframes svelte-syezpc-countdown{0%{transform:scaleX(1)}to{transform:scaleX(0)}}.timer.svelte-syezpc{position:absolute;bottom:0;left:0;transform-origin:0 0;animation:svelte-syezpc-countdown 10s linear forwards;width:100%;height:var(--size-1)}.timer.error.svelte-syezpc{background:var(--color-red-700)}.dark .timer.error.svelte-syezpc{background:var(--color-red-500)}.timer.warning.svelte-syezpc{background:var(--color-yellow-700)}.dark .timer.warning.svelte-syezpc{background:var(--color-yellow-500)}.timer.info.svelte-syezpc{background:var(--color-grey-700)}.dark 
.timer.info.svelte-syezpc{background:var(--color-grey-500)}.timer.success.svelte-syezpc{background:var(--color-green-700)}.dark .timer.success.svelte-syezpc{background:var(--color-green-500)}.hidden.svelte-syezpc{display:none}.toast-text.svelte-syezpc a{text-decoration:underline}.toast-wrap.svelte-gatr8h{display:flex;position:fixed;top:var(--size-4);right:var(--size-4);flex-direction:column;align-items:end;gap:var(--size-2);z-index:var(--layer-top);width:calc(100% - var(--size-8))}@media (--screen-sm){.toast-wrap.svelte-gatr8h{width:calc(var(--size-96) + var(--size-10))}}.streaming-bar.svelte-ga0jj6{position:absolute;bottom:0;left:0;right:0;height:4px;background-color:var(--primary-600);animation:svelte-ga0jj6-countdown linear forwards;z-index:1}@keyframes svelte-ga0jj6-countdown{0%{transform:translate(0)}to{transform:translate(-100%)}}.unstyled-link.svelte-151nsdd{all:unset;cursor:pointer}.panel-container.svelte-fg67bo{display:flex;flex-direction:column;border:1px solid var(--border-color-primary);border-radius:0!important;background-color:var(--background-fill-primary);overflow:hidden}.log-view-container.svelte-fg67bo{display:flex;flex-direction:column;flex-grow:1;min-height:0}.header.svelte-fg67bo{border-bottom:1px solid var(--border-color-primary);background-color:var(--background-fill-secondary);display:flex;justify-content:flex-end;flex-shrink:0}.log-panel.svelte-fg67bo{flex-grow:1;font-family:var(--font-mono, monospace);font-size:var(--text-sm);overflow-y:auto;color:#f8f8f8}.log-line.svelte-fg67bo{display:flex}.line-number.svelte-fg67bo{opacity:.6;padding-right:var(--spacing-lg);-webkit-user-select:none;user-select:none;text-align:right;min-width:3ch}.log-content.svelte-fg67bo{margin:0;padding-left:5px;white-space:pre-wrap;word-break:break-word}.log-level-info.svelte-fg67bo{color:inherit}.log-level-debug.svelte-fg67bo{color:#888}.log-level-warning.svelte-fg67bo{color:#facc15}.log-level-error.svelte-fg67bo{color:#ef4444}.log-level-critical.svelte-fg67bo{background-color:#ef4444;color:#fff;font-weight:700;padding:0 .25rem}.log-level-success.svelte-fg67bo{color:#22c55e}.progress-container.svelte-fg67bo{padding:var(--spacing-sm) var(--spacing-md);border-top:1px solid var(--border-color-primary);background:var(--background-fill-secondary)}.progress-label-top.svelte-fg67bo,.progress-label-bottom.svelte-fg67bo{display:flex;justify-content:space-between;font-size:var(--text-sm);color:var(--body-text-color-subdued)}.progress-label-top.svelte-fg67bo{margin-bottom:var(--spacing-xs)}.progress-label-bottom.svelte-fg67bo{margin-top:var(--spacing-xs)}.progress-bar-background.svelte-fg67bo{width:100%;height:8px;background-color:var(--background-fill-primary);border-radius:var(--radius-full);overflow:hidden}.progress-bar-fill.svelte-fg67bo{height:100%;background-color:var(--color-accent);border-radius:var(--radius-full);transition:width .1s linear,background-color .3s ease}.progress-bar-fill.success.svelte-fg67bo{background-color:var(--color-success, #22c55e)}.progress-bar-fill.error.svelte-fg67bo{background-color:var(--color-error, #ef4444)}.block-label-wrapper.svelte-10ojysx{padding-bottom:24px}
 
+ .block.svelte-239wnu{position:relative;margin:0;box-shadow:var(--block-shadow);border-width:var(--block-border-width);border-color:var(--block-border-color);border-radius:var(--block-radius);background:var(--block-background-fill);width:100%;line-height:var(--line-sm)}.block.fullscreen.svelte-239wnu{border-radius:0}.auto-margin.svelte-239wnu{margin-left:auto;margin-right:auto}.block.border_focus.svelte-239wnu{border-color:var(--color-accent)}.block.border_contrast.svelte-239wnu{border-color:var(--body-text-color)}.padded.svelte-239wnu{padding:var(--block-padding)}.hidden.svelte-239wnu{display:none}.flex.svelte-239wnu{display:flex;flex-direction:column}.hide-container.svelte-239wnu:not(.fullscreen){margin:0;box-shadow:none;--block-border-width:0;background:transparent;padding:0;overflow:visible}.resize-handle.svelte-239wnu{position:absolute;bottom:0;right:0;width:10px;height:10px;fill:var(--block-border-color);cursor:nwse-resize}.fullscreen.svelte-239wnu{position:fixed;top:0;left:0;width:100vw;height:100vh;z-index:1000;overflow:auto}.animating.svelte-239wnu{animation:svelte-239wnu-pop-out .1s ease-out forwards}@keyframes svelte-239wnu-pop-out{0%{position:fixed;top:var(--start-top);left:var(--start-left);width:var(--start-width);height:var(--start-height);z-index:100}to{position:fixed;top:0vh;left:0vw;width:100vw;height:100vh;z-index:1000}}.placeholder.svelte-239wnu{border-radius:var(--block-radius);border-width:var(--block-border-width);border-color:var(--block-border-color);border-style:dashed}Tables */ table,tr,td,th{margin-top:var(--spacing-sm);margin-bottom:var(--spacing-sm);padding:var(--spacing-xl)}.md code,.md pre{background:none;font-family:var(--font-mono);font-size:var(--text-sm);text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;line-height:1.5;-moz-tab-size:2;tab-size:2;-webkit-hyphens:none;hyphens:none}.md pre[class*=language-]::selection,.md pre[class*=language-] ::selection,.md code[class*=language-]::selection,.md code[class*=language-] ::selection{text-shadow:none;background:#b3d4fc}.md pre{padding:1em;margin:.5em 0;overflow:auto;position:relative;margin-top:var(--spacing-sm);margin-bottom:var(--spacing-sm);box-shadow:none;border:none;border-radius:var(--radius-md);background:var(--code-background-fill);padding:var(--spacing-xxl);font-family:var(--font-mono);text-shadow:none;border-radius:var(--radius-sm);white-space:nowrap;display:block;white-space:pre}.md :not(pre)>code{padding:.1em;border-radius:var(--radius-xs);white-space:normal;background:var(--code-background-fill);border:1px solid var(--panel-border-color);padding:var(--spacing-xxs) var(--spacing-xs)}.md .token.comment,.md .token.prolog,.md .token.doctype,.md .token.cdata{color:#708090}.md .token.punctuation{color:#999}.md .token.namespace{opacity:.7}.md .token.property,.md .token.tag,.md .token.boolean,.md .token.number,.md .token.constant,.md .token.symbol,.md .token.deleted{color:#905}.md .token.selector,.md .token.attr-name,.md .token.string,.md .token.char,.md .token.builtin,.md .token.inserted{color:#690}.md .token.atrule,.md .token.attr-value,.md .token.keyword{color:#07a}.md .token.function,.md .token.class-name{color:#dd4a68}.md .token.regex,.md .token.important,.md .token.variable{color:#e90}.md .token.important,.md .token.bold{font-weight:700}.md .token.italic{font-style:italic}.md .token.entity{cursor:help}.dark .md .token.comment,.dark .md .token.prolog,.dark .md .token.cdata{color:#5c6370}.dark .md .token.doctype,.dark .md .token.punctuation,.dark .md 
.token.entity{color:#abb2bf}.dark .md .token.attr-name,.dark .md .token.class-name,.dark .md .token.boolean,.dark .md .token.constant,.dark .md .token.number,.dark .md .token.atrule{color:#d19a66}.dark .md .token.keyword{color:#c678dd}.dark .md .token.property,.dark .md .token.tag,.dark .md .token.symbol,.dark .md .token.deleted,.dark .md .token.important{color:#e06c75}.dark .md .token.selector,.dark .md .token.string,.dark .md .token.char,.dark .md .token.builtin,.dark .md .token.inserted,.dark .md .token.regex,.dark .md .token.attr-value,.dark .md .token.attr-value>.token.punctuation{color:#98c379}.dark .md .token.variable,.dark .md .token.operator,.dark .md .token.function{color:#61afef}.dark .md .token.url{color:#56b6c2}span.svelte-1m32c2s div[class*=code_wrap]{position:relative}span.svelte-1m32c2s span.katex{font-size:var(--text-lg);direction:ltr}span.svelte-1m32c2s div[class*=code_wrap]>button{z-index:1;cursor:pointer;border-bottom-left-radius:var(--radius-sm);padding:var(--spacing-md);width:25px;height:25px;position:absolute;right:0}span.svelte-1m32c2s .check{opacity:0;z-index:var(--layer-top);transition:opacity .2s;background:var(--code-background-fill);color:var(--body-text-color);position:absolute;top:var(--size-1-5);left:var(--size-1-5)}span.svelte-1m32c2s p:not(:first-child){margin-top:var(--spacing-xxl)}span.svelte-1m32c2s .md-header-anchor{margin-left:-25px;padding-right:8px;line-height:1;color:var(--body-text-color-subdued);opacity:0}span.svelte-1m32c2s h1:hover .md-header-anchor,span.svelte-1m32c2s h2:hover .md-header-anchor,span.svelte-1m32c2s h3:hover .md-header-anchor,span.svelte-1m32c2s h4:hover .md-header-anchor,span.svelte-1m32c2s h5:hover .md-header-anchor,span.svelte-1m32c2s h6:hover .md-header-anchor{opacity:1}span.md.svelte-1m32c2s .md-header-anchor>svg{color:var(--body-text-color-subdued)}span.svelte-1m32c2s table{word-break:break-word}div.svelte-17qq50w>.md.prose{font-weight:var(--block-info-text-weight);font-size:var(--block-info-text-size);line-height:var(--line-sm)}div.svelte-17qq50w>.md.prose *{color:var(--block-info-text-color)}div.svelte-17qq50w{margin-bottom:var(--spacing-md)}span.has-info.svelte-zgrq3{margin-bottom:var(--spacing-xs)}span.svelte-zgrq3:not(.has-info){margin-bottom:var(--spacing-lg)}span.svelte-zgrq3{display:inline-block;position:relative;z-index:var(--layer-4);border:solid var(--block-title-border-width) var(--block-title-border-color);border-radius:var(--block-title-radius);background:var(--block-title-background-fill);padding:var(--block-title-padding);color:var(--block-title-text-color);font-weight:var(--block-title-text-weight);font-size:var(--block-title-text-size);line-height:var(--line-sm)}span[dir=rtl].svelte-zgrq3{display:block}.hide.svelte-zgrq3{margin:0;height:0}label.svelte-13ao5pu.svelte-13ao5pu{display:inline-flex;align-items:center;z-index:var(--layer-2);box-shadow:var(--block-label-shadow);border:var(--block-label-border-width) solid var(--block-label-border-color);border-top:none;border-left:none;border-radius:var(--block-label-radius);background:var(--block-label-background-fill);padding:var(--block-label-padding);pointer-events:none;color:var(--block-label-text-color);font-weight:var(--block-label-text-weight);font-size:var(--block-label-text-size);line-height:var(--line-sm)}.gr-group 
label.svelte-13ao5pu.svelte-13ao5pu{border-top-left-radius:0}label.float.svelte-13ao5pu.svelte-13ao5pu{position:absolute;top:var(--block-label-margin);left:var(--block-label-margin)}label.svelte-13ao5pu.svelte-13ao5pu:not(.float){position:static;margin-top:var(--block-label-margin);margin-left:var(--block-label-margin)}.hide.svelte-13ao5pu.svelte-13ao5pu{height:0}span.svelte-13ao5pu.svelte-13ao5pu{opacity:.8;margin-right:var(--size-2);width:calc(var(--block-label-text-size) - 1px);height:calc(var(--block-label-text-size) - 1px)}.hide-label.svelte-13ao5pu.svelte-13ao5pu{box-shadow:none;border-width:0;background:transparent;overflow:visible}label[dir=rtl].svelte-13ao5pu.svelte-13ao5pu{border:var(--block-label-border-width) solid var(--block-label-border-color);border-top:none;border-right:none;border-bottom-left-radius:var(--block-radius);border-bottom-right-radius:var(--block-label-radius);border-top-left-radius:var(--block-label-radius)}label[dir=rtl].svelte-13ao5pu span.svelte-13ao5pu{margin-left:var(--size-2);margin-right:0}button.svelte-qgco6m{display:flex;justify-content:center;align-items:center;gap:1px;z-index:var(--layer-2);border-radius:var(--radius-xs);color:var(--block-label-text-color);border:1px solid transparent;padding:var(--spacing-xxs)}button.svelte-qgco6m:hover{background-color:var(--background-fill-secondary)}button[disabled].svelte-qgco6m{opacity:.5;box-shadow:none}button[disabled].svelte-qgco6m:hover{cursor:not-allowed}.padded.svelte-qgco6m{background:var(--bg-color)}button.svelte-qgco6m:hover,button.highlight.svelte-qgco6m{cursor:pointer;color:var(--color-accent)}.padded.svelte-qgco6m:hover{color:var(--block-label-text-color)}span.svelte-qgco6m{padding:0 1px;font-size:10px}div.svelte-qgco6m{display:flex;align-items:center;justify-content:center;transition:filter .2s ease-in-out}.x-small.svelte-qgco6m{width:10px;height:10px}.small.svelte-qgco6m{width:14px;height:14px}.medium.svelte-qgco6m{width:20px;height:20px}.large.svelte-qgco6m{width:22px;height:22px}.pending.svelte-qgco6m{animation:svelte-qgco6m-flash .5s infinite}@keyframes svelte-qgco6m-flash{0%{opacity:.5}50%{opacity:1}to{opacity:.5}}.transparent.svelte-qgco6m{background:transparent;border:none;box-shadow:none}.empty.svelte-3w3rth{display:flex;justify-content:center;align-items:center;margin-top:calc(0px - var(--size-6));height:var(--size-full)}.icon.svelte-3w3rth{opacity:.5;height:var(--size-5);color:var(--body-text-color)}.small.svelte-3w3rth{min-height:calc(var(--size-32) - 20px)}.large.svelte-3w3rth{min-height:calc(var(--size-64) - 20px)}.unpadded_box.svelte-3w3rth{margin-top:0}.small_parent.svelte-3w3rth{min-height:100%!important}.dropdown-arrow.svelte-145leq6,.dropdown-arrow.svelte-ihhdbf{fill:currentColor}.circle.svelte-ihhdbf{fill:currentColor;opacity:.1}svg.svelte-pb9pol{animation:svelte-pb9pol-spin 1.5s linear infinite}@keyframes svelte-pb9pol-spin{0%{transform:rotate(0)}to{transform:rotate(360deg)}}h2.svelte-1xg7h5n{font-size:var(--text-xl)!important}p.svelte-1xg7h5n,h2.svelte-1xg7h5n{white-space:pre-line}.wrap.svelte-1xg7h5n{display:flex;flex-direction:column;justify-content:center;align-items:center;min-height:var(--size-60);color:var(--block-label-text-color);line-height:var(--line-md);height:100%;padding-top:var(--size-3);text-align:center;margin:auto var(--spacing-lg)}.or.svelte-1xg7h5n{color:var(--body-text-color-subdued);display:flex}.icon-wrap.svelte-1xg7h5n{width:30px;margin-bottom:var(--spacing-lg)}@media 
(--screen-md){.wrap.svelte-1xg7h5n{font-size:var(--text-lg)}}.hovered.svelte-1xg7h5n{color:var(--color-accent)}div.svelte-q32hvf{border-top:1px solid transparent;display:flex;max-height:100%;justify-content:center;align-items:center;gap:var(--spacing-sm);height:auto;align-items:flex-end;color:var(--block-label-text-color);flex-shrink:0}.show_border.svelte-q32hvf{border-top:1px solid var(--block-border-color);margin-top:var(--spacing-xxl);box-shadow:var(--shadow-drop)}.source-selection.svelte-15ls1gu{display:flex;align-items:center;justify-content:center;border-top:1px solid var(--border-color-primary);width:100%;margin-left:auto;margin-right:auto;height:var(--size-10)}.icon.svelte-15ls1gu{width:22px;height:22px;margin:var(--spacing-lg) var(--spacing-xs);padding:var(--spacing-xs);color:var(--neutral-400);border-radius:var(--radius-md)}.selected.svelte-15ls1gu{color:var(--color-accent)}.icon.svelte-15ls1gu:hover,.icon.svelte-15ls1gu:focus{color:var(--color-accent)}.icon-button-wrapper.svelte-109se4{display:flex;flex-direction:row;align-items:center;justify-content:center;z-index:var(--layer-3);gap:var(--spacing-sm);box-shadow:var(--shadow-drop);border:1px solid var(--border-color-primary);background:var(--block-background-fill);padding:var(--spacing-xxs)}.icon-button-wrapper.hide-top-corner.svelte-109se4{border-top:none;border-right:none;border-radius:var(--block-label-right-radius)}.icon-button-wrapper.display-top-corner.svelte-109se4{border-radius:var(--radius-sm) 0 0 var(--radius-sm);top:var(--spacing-sm);right:-1px}.icon-button-wrapper.svelte-109se4:not(.top-panel){border:1px solid var(--border-color-primary);border-radius:var(--radius-sm)}.top-panel.svelte-109se4{position:absolute;top:var(--block-label-margin);right:var(--block-label-margin);margin:0}.icon-button-wrapper.svelte-109se4 button{margin:var(--spacing-xxs);border-radius:var(--radius-xs);position:relative}.icon-button-wrapper.svelte-109se4 a.download-link:not(:last-child),.icon-button-wrapper.svelte-109se4 button:not(:last-child){margin-right:var(--spacing-xxs)}.icon-button-wrapper.svelte-109se4 a.download-link:not(:last-child):not(.no-border *):after,.icon-button-wrapper.svelte-109se4 button:not(:last-child):not(.no-border *):after{content:"";position:absolute;right:-4.5px;top:15%;height:70%;width:1px;background-color:var(--border-color-primary)}.icon-button-wrapper.svelte-109se4>*{height:100%}svg.svelte-43sxxs.svelte-43sxxs{width:var(--size-20);height:var(--size-20)}svg.svelte-43sxxs path.svelte-43sxxs{fill:var(--loader-color)}div.svelte-43sxxs.svelte-43sxxs{z-index:var(--layer-2)}.margin.svelte-43sxxs.svelte-43sxxs{margin:var(--size-4)}.wrap.svelte-17v219f.svelte-17v219f{display:flex;flex-direction:column;justify-content:center;align-items:center;z-index:var(--layer-2);transition:opacity .1s ease-in-out;border-radius:var(--block-radius);background:var(--block-background-fill);padding:0 var(--size-6);max-height:var(--size-screen-h);overflow:hidden}.wrap.center.svelte-17v219f.svelte-17v219f{top:0;right:0;left:0}.wrap.default.svelte-17v219f.svelte-17v219f{top:0;right:0;bottom:0;left:0}.hide.svelte-17v219f.svelte-17v219f{opacity:0;pointer-events:none}.generating.svelte-17v219f.svelte-17v219f{animation:svelte-17v219f-pulseStart 1s cubic-bezier(.4,0,.6,1),svelte-17v219f-pulse 2s cubic-bezier(.4,0,.6,1) 1s infinite;border:2px solid var(--color-accent);background:transparent;z-index:var(--layer-1);pointer-events:none}.translucent.svelte-17v219f.svelte-17v219f{background:none}@keyframes 
svelte-17v219f-pulseStart{0%{opacity:0}to{opacity:1}}@keyframes svelte-17v219f-pulse{0%,to{opacity:1}50%{opacity:.5}}.loading.svelte-17v219f.svelte-17v219f{z-index:var(--layer-2);color:var(--body-text-color)}.eta-bar.svelte-17v219f.svelte-17v219f{position:absolute;top:0;right:0;bottom:0;left:0;transform-origin:left;opacity:.8;z-index:var(--layer-1);transition:10ms;background:var(--background-fill-secondary)}.progress-bar-wrap.svelte-17v219f.svelte-17v219f{border:1px solid var(--border-color-primary);background:var(--background-fill-primary);width:55.5%;height:var(--size-4)}.progress-bar.svelte-17v219f.svelte-17v219f{transform-origin:left;background-color:var(--loader-color);width:var(--size-full);height:var(--size-full)}.progress-level.svelte-17v219f.svelte-17v219f{display:flex;flex-direction:column;align-items:center;gap:1;z-index:var(--layer-2);width:var(--size-full)}.progress-level-inner.svelte-17v219f.svelte-17v219f{margin:var(--size-2) auto;color:var(--body-text-color);font-size:var(--text-sm);font-family:var(--font-mono)}.meta-text.svelte-17v219f.svelte-17v219f{position:absolute;bottom:0;right:0;z-index:var(--layer-2);padding:var(--size-1) var(--size-2);font-size:var(--text-sm);font-family:var(--font-mono)}.meta-text-center.svelte-17v219f.svelte-17v219f{display:flex;position:absolute;top:0;right:0;justify-content:center;align-items:center;transform:translateY(var(--size-6));z-index:var(--layer-2);padding:var(--size-1) var(--size-2);font-size:var(--text-sm);font-family:var(--font-mono);text-align:center}.error.svelte-17v219f.svelte-17v219f{box-shadow:var(--shadow-drop);border:solid 1px var(--error-border-color);border-radius:var(--radius-full);background:var(--error-background-fill);padding-right:var(--size-4);padding-left:var(--size-4);color:var(--error-text-color);font-weight:var(--weight-semibold);font-size:var(--text-lg);line-height:var(--line-lg);font-family:var(--font)}.minimal.svelte-17v219f.svelte-17v219f{pointer-events:none}.minimal.svelte-17v219f .progress-text.svelte-17v219f{background:var(--block-background-fill)}.border.svelte-17v219f.svelte-17v219f{border:1px solid var(--border-color-primary)}.clear-status.svelte-17v219f.svelte-17v219f{position:absolute;display:flex;top:var(--size-2);right:var(--size-2);justify-content:flex-end;gap:var(--spacing-sm);z-index:var(--layer-1)}.toast-body.svelte-syezpc{display:flex;position:relative;right:0;left:0;align-items:center;margin:var(--size-6) var(--size-4);margin:auto;border-radius:var(--container-radius);overflow:hidden;pointer-events:auto}.toast-body.error.svelte-syezpc{border:1px solid var(--color-red-700);background:var(--color-red-50)}.dark .toast-body.error.svelte-syezpc{border:1px solid var(--color-red-500);background-color:var(--color-grey-950)}.toast-body.warning.svelte-syezpc{border:1px solid var(--color-yellow-700);background:var(--color-yellow-50)}.dark .toast-body.warning.svelte-syezpc{border:1px solid var(--color-yellow-500);background-color:var(--color-grey-950)}.toast-body.info.svelte-syezpc{border:1px solid var(--color-grey-700);background:var(--color-grey-50)}.dark .toast-body.info.svelte-syezpc{border:1px solid var(--color-grey-500);background-color:var(--color-grey-950)}.toast-body.success.svelte-syezpc{border:1px solid var(--color-green-700);background:var(--color-green-50)}.dark .toast-body.success.svelte-syezpc{border:1px solid 
var(--color-green-500);background-color:var(--color-grey-950)}.toast-title.svelte-syezpc{display:flex;align-items:center;font-weight:var(--weight-bold);font-size:var(--text-lg);line-height:var(--line-sm)}.toast-title.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-title.error.svelte-syezpc{color:var(--color-red-50)}.toast-title.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-title.warning.svelte-syezpc{color:var(--color-yellow-50)}.toast-title.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-title.info.svelte-syezpc{color:var(--color-grey-50)}.toast-title.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-title.success.svelte-syezpc{color:var(--color-green-50)}.toast-close.svelte-syezpc{margin:0 var(--size-3);border-radius:var(--size-3);padding:0px var(--size-1-5);font-size:var(--size-5);line-height:var(--size-5)}.toast-close.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-close.error.svelte-syezpc{color:var(--color-red-500)}.toast-close.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-close.warning.svelte-syezpc{color:var(--color-yellow-500)}.toast-close.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-close.info.svelte-syezpc{color:var(--color-grey-500)}.toast-close.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-close.success.svelte-syezpc{color:var(--color-green-500)}.toast-text.svelte-syezpc{font-size:var(--text-lg);word-wrap:break-word;overflow-wrap:break-word;word-break:break-word}.toast-text.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-text.error.svelte-syezpc{color:var(--color-red-50)}.toast-text.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-text.warning.svelte-syezpc{color:var(--color-yellow-50)}.toast-text.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-text.info.svelte-syezpc{color:var(--color-grey-50)}.toast-text.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-text.success.svelte-syezpc{color:var(--color-green-50)}.toast-details.svelte-syezpc{margin:var(--size-3) var(--size-3) var(--size-3) 0;width:100%}.toast-icon.svelte-syezpc{display:flex;position:absolute;position:relative;flex-shrink:0;justify-content:center;align-items:center;margin:var(--size-2);border-radius:var(--radius-full);padding:var(--size-1);padding-left:calc(var(--size-1) - 1px);width:35px;height:35px}.toast-icon.error.svelte-syezpc{color:var(--color-red-700)}.dark .toast-icon.error.svelte-syezpc{color:var(--color-red-500)}.toast-icon.warning.svelte-syezpc{color:var(--color-yellow-700)}.dark .toast-icon.warning.svelte-syezpc{color:var(--color-yellow-500)}.toast-icon.info.svelte-syezpc{color:var(--color-grey-700)}.dark .toast-icon.info.svelte-syezpc{color:var(--color-grey-500)}.toast-icon.success.svelte-syezpc{color:var(--color-green-700)}.dark .toast-icon.success.svelte-syezpc{color:var(--color-green-500)}@keyframes svelte-syezpc-countdown{0%{transform:scaleX(1)}to{transform:scaleX(0)}}.timer.svelte-syezpc{position:absolute;bottom:0;left:0;transform-origin:0 0;animation:svelte-syezpc-countdown 10s linear forwards;width:100%;height:var(--size-1)}.timer.error.svelte-syezpc{background:var(--color-red-700)}.dark .timer.error.svelte-syezpc{background:var(--color-red-500)}.timer.warning.svelte-syezpc{background:var(--color-yellow-700)}.dark .timer.warning.svelte-syezpc{background:var(--color-yellow-500)}.timer.info.svelte-syezpc{background:var(--color-grey-700)}.dark 
.timer.info.svelte-syezpc{background:var(--color-grey-500)}.timer.success.svelte-syezpc{background:var(--color-green-700)}.dark .timer.success.svelte-syezpc{background:var(--color-green-500)}.hidden.svelte-syezpc{display:none}.toast-text.svelte-syezpc a{text-decoration:underline}.toast-wrap.svelte-gatr8h{display:flex;position:fixed;top:var(--size-4);right:var(--size-4);flex-direction:column;align-items:end;gap:var(--size-2);z-index:var(--layer-top);width:calc(100% - var(--size-8))}@media (--screen-sm){.toast-wrap.svelte-gatr8h{width:calc(var(--size-96) + var(--size-10))}}.streaming-bar.svelte-ga0jj6{position:absolute;bottom:0;left:0;right:0;height:4px;background-color:var(--primary-600);animation:svelte-ga0jj6-countdown linear forwards;z-index:1}@keyframes svelte-ga0jj6-countdown{0%{transform:translate(0)}to{transform:translate(-100%)}}.unstyled-link.svelte-151nsdd{all:unset;cursor:pointer}.panel-container.svelte-1gf2ex4{display:flex;flex-direction:column;border:1px solid var(--border-color-primary);border-radius:0!important;background-color:var(--background-fill-primary);overflow:hidden}.log-view-container.svelte-1gf2ex4{display:flex;flex-direction:column;flex-grow:1;min-height:0}.header.svelte-1gf2ex4{border-bottom:1px solid var(--border-color-primary);background-color:var(--background-fill-secondary);display:flex;justify-content:flex-end;flex-shrink:0}.log-panel.svelte-1gf2ex4{flex-grow:1;font-family:var(--font-mono, monospace);font-size:var(--text-sm);overflow-y:auto;color:#f8f8f8}.log-line.svelte-1gf2ex4{display:flex}.line-number.svelte-1gf2ex4{opacity:.6;padding-right:var(--spacing-lg);-webkit-user-select:none;user-select:none;text-align:right;min-width:3ch}.log-content.svelte-1gf2ex4{margin:0;padding-left:5px;white-space:pre-wrap;word-break:break-word}.log-level-info.svelte-1gf2ex4{color:inherit}.log-level-debug.svelte-1gf2ex4{color:#888}.log-level-warning.svelte-1gf2ex4{color:#facc15}.log-level-error.svelte-1gf2ex4{color:#ef4444}.log-level-critical.svelte-1gf2ex4{background-color:#ef4444;color:#fff;font-weight:700;padding:0 .25rem}.log-level-success.svelte-1gf2ex4{color:#22c55e}.progress-container.svelte-1gf2ex4{padding:var(--spacing-sm) var(--spacing-md);border-top:1px solid var(--border-color-primary);background:var(--background-fill-secondary)}.progress-label-top.svelte-1gf2ex4,.progress-label-bottom.svelte-1gf2ex4{display:flex;justify-content:space-between;font-size:var(--text-sm);color:var(--body-text-color-subdued)}.progress-label-top.svelte-1gf2ex4{margin-bottom:var(--spacing-xs)}.progress-label-bottom.svelte-1gf2ex4{margin-top:var(--spacing-xs)}.progress-bar-background.svelte-1gf2ex4{width:100%;height:8px;background-color:var(--background-fill-primary);border-radius:var(--radius-full);overflow:hidden}.progress-bar-fill.svelte-1gf2ex4{height:100%;background-color:var(--color-accent);border-radius:var(--radius-full);transition:width .1s linear,background-color .3s ease}.progress-bar-fill.success.svelte-1gf2ex4{background-color:var(--color-success, #22c55e)}.progress-bar-fill.error.svelte-1gf2ex4{background-color:var(--color-error, #ef4444)}.rate-info.svelte-1gf2ex4{display:flex;align-items:center;gap:.5ch}.extra-info.svelte-1gf2ex4{color:var(--body-text-color-subdued);font-size:.9em;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;max-width:200px}.block-label-wrapper.svelte-10ojysx{padding-bottom:24px}
src/backend/gradio_livelog/utils.py CHANGED
@@ -1,10 +1,12 @@
 # backend/gradio_livelog/utils.py
 
 import logging
 import queue
 import time
 from contextlib import contextmanager
-from typing import Callable, List, Iterator, Dict, Any
 
 class _QueueLogHandler(logging.Handler):
     """A private logging handler that directs log records into a queue."""
@@ -16,119 +18,359 @@ class _QueueLogHandler(logging.Handler):
         self.log_queue.put(record)
 
 @contextmanager
-def capture_logs(disable_console: bool = False) -> Iterator[Callable[[], List[logging.LogRecord]]]:
     """
-    A context manager to capture logs from the root logger.
 
-    Temporarily attaches a handler to the root logger to intercept all log
-    messages. If `disable_console` is True, it will also temporarily remove
-    other console-based StreamHandlers to prevent duplicate output.
 
     Args:
-        disable_console: If True, prevents logs from also being printed to the console.
 
     Yields:
-        A function that, when called, returns a list of all log records captured
-        since the last call.
     """
     log_queue = queue.Queue()
     queue_handler = _QueueLogHandler(log_queue)
-    root_logger = logging.getLogger()
 
-    original_level = root_logger.level
-    root_logger.setLevel(logging.DEBUG)
-    root_logger.addHandler(queue_handler)
 
-    removed_handlers = []
-    if disable_console:
-        for handler in root_logger.handlers[:]:  # Iterate over a copy
-            if isinstance(handler, logging.StreamHandler) and handler is not queue_handler:
-                removed_handlers.append(handler)
-                root_logger.removeHandler(handler)
 
-    all_captured = []
-    last_returned = 0
 
     try:
-        def get_captured_records():
-            nonlocal last_returned
-            while True:
                 try:
-                    all_captured.append(log_queue.get_nowait())
                 except queue.Empty:
-                    break
-            new_records = all_captured[last_returned:]
-            last_returned = len(all_captured)
             return new_records
         yield get_captured_records
     finally:
-        root_logger.removeHandler(queue_handler)
-        root_logger.setLevel(original_level)
-        for handler in removed_handlers:
-            root_logger.addHandler(handler)
 
 class ProgressTracker:
     """
     A helper class to track and format progress updates for the LiveLog component.
 
-    This class mimics some of the behavior of `tqdm`, calculating the rate of
-    iterations per second and providing a structured dictionary for easy use
-    with Gradio's `yield` mechanism.
     """
-    def __init__(self, total: int, description: str = "Processing..."):
         """
         Initializes the progress tracker.
 
         Args:
-            total: The total number of iterations for the process.
-            description: A short, fixed description of the task being performed.
         """
         self.total = total
         self.description = description
         self.current = 0
         self.start_time = time.time()
         self.last_update_time = self.start_time
         self.last_update_item = 0
         self.rate = 0.0
 
-    def update(self, advance: int = 1, status: str = "running", logs: List[Dict] = None, log_content: str = None) -> Dict[str, Any]:
         """
         Advances the progress and returns a dictionary formatted for the LiveLog component.
 
         Args:
-            advance: The number of steps to advance the progress by (default is 1).
-            status: The current status of the process ("running", "success", "error").
-            logs: An optional list of all log dictionaries generated so far. If provided,
-                  this list will be passed to the frontend, allowing the log view to be
-                  updated simultaneously with the progress bar.
-            log_content: An optional string to use as the progress bar's description for this
-                         specific update, overriding the fixed description. This is useful
-                         for showing the most recent log message as the progress description.
 
         Returns:
-            A dictionary formatted for the LiveLog component's frontend.
         """
         self.current += advance
         self.current = min(self.current, self.total)
 
         now = time.time()
-        delta_time = now - self.last_update_time
-        delta_items = self.current - self.last_update_item
 
-        # Stabilize the rate calculation by updating it periodically or at the very end.
-        if delta_time > 0.1 or self.current == self.total:
-            self.rate = delta_items / delta_time if delta_time > 0 else 0.0
         self.last_update_time = now
         self.last_update_item = self.current
 
         desc = log_content if log_content is not None else self.description
 
         return {
             "type": "progress",
             "current": self.current,
             "total": self.total,
             "desc": desc,
             "rate": self.rate,
             "status": status,
             "logs": logs or [],
         }
 # backend/gradio_livelog/utils.py
 
+import io
 import logging
 import queue
+import re
 import time
 from contextlib import contextmanager
+from typing import Callable, List, Iterator, Dict, Any, Literal, Optional, Union
 
 class _QueueLogHandler(logging.Handler):
     """A private logging handler that directs log records into a queue."""
...
         self.log_queue.put(record)
 
 @contextmanager
+def capture_logs(
+    log_name: Union[str, List[str], None] = None,
+    log_level: int = logging.INFO,
+    disable_console: bool = False
+) -> Iterator[Callable[[], List[logging.LogRecord]]]:
     """
+    A context manager to capture logs from one or more specified loggers.
 
+    This function temporarily attaches a thread-safe, queue-based handler to the
+    target logger(s) to intercept all log messages. If `disable_console` is True,
+    it will also temporarily remove other console-based StreamHandlers from the
+    target loggers to prevent duplicate output to the terminal.
 
     Args:
+        log_name: The name of the logger(s) to capture.
+            - `str`: Captures logs from a single named logger.
+            - `List[str]`: Captures logs from multiple named loggers.
+            - `None` or `""`: Captures logs from the root logger.
+        log_level: The minimum level of logs to capture (e.g., `logging.INFO`).
+        disable_console: If True, prevents the captured logs from also being
+            printed to the console by other handlers on the same logger.
 
     Yields:
+        A callable function. When this function is called, it returns a list
+        of all log records captured since the last time it was called, effectively
+        acting as a "get new logs" utility.
+
+    Example:
+        >>> with capture_logs(log_name=["my_app", "my_library"]) as get_logs:
+        ...     logging.getLogger("my_app").info("Starting process.")
+        ...     new_logs = get_logs()  # Contains the first log record
+        ...     logging.getLogger("my_library").warning("A potential issue.")
+        ...     more_logs = get_logs()  # Contains only the warning record
     """
+    # Step 1: Determine the target loggers based on the `log_name` argument.
+    target_loggers: List[logging.Logger] = []
+    log_names_to_process = []
+    if log_name is None or log_name == "":
+        log_names_to_process.append(None)  # `None` is the identifier for the root logger
+    elif isinstance(log_name, list):
+        log_names_to_process.extend(log_name)
+    elif isinstance(log_name, str):
+        log_names_to_process.append(log_name)
+
+    # Get the actual logger objects from their names.
+    for name in set(log_names_to_process):  # Use set to avoid duplicates
+        target_loggers.append(logging.getLogger(name))
+
+    # Step 2: Set up the thread-safe queue and the custom handler.
     log_queue = queue.Queue()
     queue_handler = _QueueLogHandler(log_queue)
 
+    # Step 3: Store the original state of each logger to restore it later.
+    original_levels = {logger.name: logger.level for logger in target_loggers}
+    original_handlers = {logger.name: logger.handlers[:] for logger in target_loggers}
+
+    # Step 4: Modify the target loggers for the duration of the context.
+    for logger in target_loggers:
+        # Set the desired capture level.
+        logger.setLevel(log_level)
 
+        if disable_console:
+            # If disabling console, remove all existing StreamHandlers.
+            # We keep other handlers (e.g., FileHandler) intact.
+            logger.handlers = [
+                h for h in logger.handlers if not isinstance(h, logging.StreamHandler)
+            ]
+
+        # Add our custom queue handler to start capturing logs.
+        logger.addHandler(queue_handler)
 
+    # This holds all records captured during the context's lifetime.
+    all_captured: List[logging.LogRecord] = []
+    # This index tracks the last record that was returned to the caller.
+    last_returned_index = 0
 
     try:
+        def get_captured_records() -> List[logging.LogRecord]:
+            """
+            Retrieves new log records from the queue and returns them.
+            This function is what the context manager yields to the user.
+            """
+            nonlocal last_returned_index
+
+            # Drain the queue into our master list of captured records.
+            while not log_queue.empty():
                 try:
+                    record = log_queue.get_nowait()
+                    all_captured.append(record)
                 except queue.Empty:
+                    # This handles a rare race condition where the queue becomes empty
+                    # between the `empty()` check and `get_nowait()`.
+                    break
+
+            # Slice the master list to get only the new records.
+            new_records = all_captured[last_returned_index:]
+            # Update the index to the end of the list for the next call.
+            last_returned_index = len(all_captured)
+
             return new_records
+
+        # Yield the function to the `with` block.
         yield get_captured_records
+
     finally:
+        # Step 5: Restore the loggers to their original state, ensuring no side effects.
+        for logger in target_loggers:
+            # Remove our custom handler.
+            logger.removeHandler(queue_handler)
+
+            # Restore the original log level.
+            if logger.name in original_levels:
+                logger.setLevel(original_levels[logger.name])
+
+            # If we disabled the console, restore the original handlers.
+            if disable_console and logger.name in original_handlers:
+                # It's safest to clear handlers and then re-add the originals.
+                logger.handlers = []
+                for handler in original_handlers[logger.name]:
+                    logger.addHandler(handler)
+
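For orientation, here is a minimal sketch of driving the reworked `capture_logs` from a generator-style event handler. The `"worker"` logger name and the `{"type": "log", ...}` payload shape are illustrative assumptions modeled on the demo code further down, not part of this commit:

```python
import logging
from gradio_livelog.utils import capture_logs

def stream_task_logs():
    logger = logging.getLogger("worker")  # hypothetical logger name
    with capture_logs(log_name="worker", log_level=logging.DEBUG,
                      disable_console=True) as get_logs:
        logger.info("step 1 done")
        logger.debug("cache warmed")
        # get_logs() returns only the records emitted since the previous call.
        for record in get_logs():
            yield {"type": "log", "level": record.levelname,
                   "content": record.getMessage()}
```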
+class Tee(io.StringIO):
+    """
+    A file-like object that acts like the Unix 'tee' command.
+    It writes to multiple file-like objects simultaneously.
+    """
+    def __init__(self, *files):
+        """
+        Initializes the Tee object.
+        Args:
+            *files: A variable number of file-like objects (e.g., sys.stderr,
+                    a TqdmToQueueWriter instance, etc.).
+        """
+        super().__init__()
+        self.files = files
+
+    def write(self, s: str) -> int:
+        """
+        Writes the string 's' to all managed files.
+        """
+        for f in self.files:
+            f.write(s)
+            # Some file-like objects, like the console, might need to be flushed.
+            if hasattr(f, 'flush'):
+                f.flush()
+        return len(s)
+
+    def flush(self):
+        """Flushes all managed files."""
+        for f in self.files:
+            if hasattr(f, 'flush'):
+                f.flush()
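As a quick standalone illustration of `Tee` (the buffer and the written string below are invented for the example), a single write call lands in every managed file:

```python
import io
import sys
from gradio_livelog.utils import Tee

buffer = io.StringIO()
mirror = Tee(sys.stderr, buffer)        # write once, land in both places
mirror.write("42%|████      | 42/100\n")
assert "42/100" in buffer.getvalue()    # stderr received the same line
```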
+class TqdmToQueueWriter(io.StringIO):
+    """
+    A custom, thread-safe, file-like object that intercepts tqdm's output.
+
+    This class is designed to be passed to a `tqdm` instance (or a library
+    that uses `tqdm`, like `diffusers`) via its `file` argument. It uses a
+    regular expression to parse the formatted progress string in real-time.
+
+    It extracts key metrics:
+    - The iteration rate value (e.g., 2.73).
+    - The rate unit ("it/s" or "s/it").
+    - Any additional status information that follows the rate.
+
+    The extracted data is packaged into a dictionary and put onto a
+    `queue.Queue`, allowing a consumer thread (like a Gradio UI thread)
+    to receive real-time progress data from a worker thread.
+    """
+    def __init__(self, rate_queue: queue.Queue):
+        """
+        Initializes the writer with a queue for communication.
+
+        Args:
+            rate_queue (queue.Queue): The thread-safe queue to which the
+                extracted rate data will be sent.
+        """
+        super().__init__()
+        self.rate_queue = rate_queue
+        # Regex Explanation:
+        # (\d+\.?\d*): Group 1, captures the rate value (float or integer).
+        # \s*: Matches any whitespace.
+        # (it/s|s/it): Group 2, captures the rate unit, which can be "it/s" or "s/it".
+        # (?:,\s*(.*))?: Optional non-capturing group for the comma separator;
+        #                Group 3 greedily captures any remaining characters in the line.
+        self.rate_regex = re.compile(r"(\d+\.?\d*)\s*(it/s|s/it)(?:,\s*(.*))?")
+
+    def write(self, s: str) -> int:
+        """
+        This method is called by `tqdm` whenever it updates the progress bar.
+        It receives the full, formatted progress string.
+
+        Args:
+            s (str): The string output from `tqdm` (e.g., "75%|...| 2.73it/s, ...").
+
+        Returns:
+            int: The number of characters written, as required by the file-like
+                object interface.
+        """
+        match = self.rate_regex.search(s)
+        if match:
+            try:
+                rate_value = float(match.group(1))
+                rate_unit = match.group(2)
+                extra_info_raw = match.group(3)
+
+                cleaned_info = ""
+                if extra_info_raw:
+                    cleaned_info = extra_info_raw.rstrip(']').strip()
+                    cleaned_info = cleaned_info.replace('[', ' ').replace(']', ' ')
+                    cleaned_info = re.sub(r'\s+', ' ', cleaned_info).strip()
 
+                # Put a structured dictionary onto the queue.
+                self.rate_queue.put({
+                    "rate": rate_value,
+                    "unit": rate_unit,
+                    "extra": cleaned_info
+                })
+            except (ValueError, IndexError):
+                # Silently ignore parsing errors if the regex match is malformed.
+                pass
+
+        # Fulfill the file-like object contract.
+        return len(s)
+
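To see the writer in action outside of `diffusers`, here is a sketch that mirrors a plain `tqdm` loop to the console while streaming parsed rate data to a queue; the exact values depend on tqdm's formatting and the machine:

```python
import queue
import sys
import time
from tqdm import tqdm
from gradio_livelog.utils import Tee, TqdmToQueueWriter

rate_queue = queue.Queue()
writer = TqdmToQueueWriter(rate_queue)

# The bar still renders on stderr; every refresh is also parsed into the queue.
for _ in tqdm(range(50), file=Tee(sys.stderr, writer), desc="demo"):
    time.sleep(0.02)

while not rate_queue.empty():
    print(rate_queue.get_nowait())  # e.g. {'rate': 48.9, 'unit': 'it/s', 'extra': ''}
```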
 class ProgressTracker:
     """
     A helper class to track and format progress updates for the LiveLog component.
 
+    This versatile class operates in a hybrid mode for calculating iteration rates:
+    1. **Internal Calculation (Default):** It uses an Exponential Moving Average (EMA)
+       to compute a smoothed, stable rate. The unit for this internal calculation
+       (`it/s` or `s/it`) can be specified during initialization, making it flexible
+       for different types of processes.
+    2. **External Override (Preferred):** It can accept a dictionary of externally
+       captured rate data (e.g., from a `tqdm` instance). This provides the most
+       accurate possible display by sourcing the rate and its unit directly from
+       the process being monitored, overriding any internal calculations.
+
+    The tracker also intelligently "freezes" the last known rate when the process
+    status changes to 'success' or 'error', ensuring the final speed remains visible on the UI.
     """
+    def __init__(self, total: int, description: str = "Processing...",
+                 smoothing_factor: float = 0.3,
+                 rate_unit: Literal["it/s", "s/it"] = "s/it"):
         """
         Initializes the progress tracker.
 
         Args:
+            total (int): The total number of iterations for the process.
+            description (str): A short, fixed description of the task being performed.
+            smoothing_factor (float): The EMA smoothing factor used for the internal
+                rate calculation. A smaller value (e.g., 0.1) results in smoother
+                but less responsive updates.
+            rate_unit (Literal["it/s", "s/it"]): The preferred unit for the internal
+                rate calculation when no external data is provided. Defaults to "s/it".
         """
         self.total = total
         self.description = description
+        self.smoothing_factor = smoothing_factor
+        self.preferred_rate_unit = rate_unit  # Stores the user's preference for internal calculations.
+
         self.current = 0
         self.start_time = time.time()
         self.last_update_time = self.start_time
         self.last_update_item = 0
+
+        # State fields that will be updated and returned.
         self.rate = 0.0
+        self.rate_unit = self.preferred_rate_unit  # Sets the initial unit.
+        self.extra_info = ""
 
+    def update(self, advance: int = 1, status: str = "running",
+               logs: Optional[List[Dict]] = None, log_content: Optional[str] = None,
+               rate_data: Optional[Dict] = None) -> Dict[str, Any]:
         """
         Advances the progress and returns a dictionary formatted for the LiveLog component.
 
+        This method forms the core of the tracker, updating the current progress
+        and determining the iteration rate based on the provided parameters.
+
         Args:
+            advance (int): The number of steps to advance the progress by (default is 1).
+            status (str): The current status of the process ("running", "success", "error").
+            logs (Optional[List[Dict]]): An optional list of log dictionaries to pass to the frontend.
+            log_content (Optional[str]): An optional string to override the fixed description for this update.
+            rate_data (Optional[Dict]): A dictionary from an external source (like `tqdm`)
+                containing keys like 'rate', 'unit', and 'extra'. If provided, this data
+                will override all internal rate calculations.
 
         Returns:
+            Dict[str, Any]: A state dictionary formatted for the LiveLog component's frontend.
         """
         self.current += advance
         self.current = min(self.current, self.total)
 
         now = time.time()
+
+        # --- Enhanced Hybrid Rate Logic ---
+
+        # Priority 1: Use the externally provided rate data if it exists. This is the most accurate source.
+        if rate_data:
+            self.rate = rate_data.get("rate", self.rate)
+            self.rate_unit = rate_data.get("unit", self.rate_unit)
+            self.extra_info = rate_data.get("extra", self.extra_info)
+
+        # Priority 2: If no external data, perform internal calculation, but only if the
+        # process is actively running and has advanced.
+        elif status == "running" and advance > 0:
+            delta_time = now - self.last_update_time
+            delta_items = self.current - self.last_update_item
+
+            # Only calculate if time has passed and items have progressed to avoid division by zero.
+            if delta_time > 0 and delta_items > 0:
+
+                # Calculate the rate based on the user's preferred unit.
+                if self.preferred_rate_unit == "it/s":
+                    instant_rate = delta_items / delta_time
+                    self.rate_unit = "it/s"
+                else:  # "s/it"
+                    instant_rate = delta_time / delta_items
+                    self.rate_unit = "s/it"
+
+                # Apply Exponential Moving Average (EMA) for a smoother reading.
+                if self.rate == 0.0:  # Use the first measurement as the initial value.
+                    self.rate = instant_rate
+                else:
+                    self.rate = (self.smoothing_factor * instant_rate) + \
+                                ((1 - self.smoothing_factor) * self.rate)
+
+                # Internal calculations do not produce extra info.
+                self.extra_info = ""
 
         self.last_update_time = now
         self.last_update_item = self.current
 
+        # Priority 3: If status is 'success' or 'error', or if advance is 0,
+        # do nothing to the rate fields. This "freezes" them at their last known values.
+
+        # Determine the description to display for this specific update.
         desc = log_content if log_content is not None else self.description
 
+        # Assemble and return the final state dictionary for the frontend.
         return {
             "type": "progress",
             "current": self.current,
             "total": self.total,
             "desc": desc,
             "rate": self.rate,
+            "rate_unit": self.rate_unit,
+            "extra_info": self.extra_info,
             "status": status,
             "logs": logs or [],
         }
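A short sketch of the tracker's two rate modes; the numbers passed via `rate_data` are invented for illustration:

```python
from gradio_livelog.utils import ProgressTracker

tracker = ProgressTracker(total=10, description="Steps", rate_unit="it/s")

# Internal mode: the rate comes from EMA-smoothed wall-clock deltas.
update = tracker.update(advance=1, status="running")

# External override: parsed tqdm data wins over the internal estimate.
update = tracker.update(advance=1, status="running",
                        rate_data={"rate": 2.73, "unit": "s/it", "extra": "loss=0.12"})
assert update["rate_unit"] == "s/it" and update["extra_info"] == "loss=0.12"
```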
src/demo/app.py CHANGED
@@ -1,8 +1,8 @@
 # demo/app.py
 
 import gradio as gr
 import torch
-import time
 import logging
 import random
 import numpy as np
@@ -14,24 +14,28 @@ import spaces
 
 # Import the component and ALL its utilities
 from gradio_livelog import LiveLog
-from gradio_livelog.utils import ProgressTracker, capture_logs
 
 # --- 1. SETUP ---
 MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
 MAX_SEED = np.iinfo(np.int32).max
 
 # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
-logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
 
-async def run_process(disable_console: bool, run_error_case: bool):
-    with capture_logs(disable_console=disable_console) as get_logs:
         total_steps = 100
-        tracker = ProgressTracker(total=total_steps, description="Simulating a process...")
         all_logs = []
         last_log_content = None
 
         initial_log = f"Starting simulated process with {total_steps} steps..."
-        logging.info(initial_log)
         logs = [
             {
                 "type": "log",
@@ -49,14 +53,14 @@ async def run_process(disable_console: bool, run_error_case: bool):
             current_step = i + 1
 
             if current_step == 10:
-                logging.warning(f"Low disk space warning at step {current_step}.")
             elif current_step == 30:
-                logging.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
             elif current_step == 75:
-                logging.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
 
             if run_error_case and current_step == 50:
-                logging.error("A fatal simulation error occurred! Aborting.")
                 logs = [
                     {
                         "type": "log",
@@ -84,7 +88,7 @@ async def run_process(disable_console: bool, run_error_case: bool):
             yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
 
         final_log = "Process completed successfully!"
-        logging.log(logging.INFO + 5, final_log)
         logs = [
             {
                 "type": "log",
@@ -103,47 +107,60 @@ def update_livelog_properties(mode, color, lines, scroll):
 def clear_output():
     return None
 
-async def run_success_case(disable_console: bool):
     yield None
-    async for update in run_process(disable_console=disable_console, run_error_case=False):
         yield update
 
-async def run_error_case(disable_console: bool):
     yield None
-    async for update in run_process(disable_console=disable_console, run_error_case=True):
         yield update
 
 
 # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
 diffusion_pipeline = None
 def load_pipeline(on_load=True):
     """A function to load the model, ensuring it's only done once."""
     global diffusion_pipeline
-    if diffusion_pipeline is None:
-        print("Loading Stable Diffusion model for the first time...")
-        pipe = StableDiffusionXLPipeline.from_pretrained(
-            MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
-        )
-        pipe.enable_vae_tiling()
-        pipe.enable_model_cpu_offload()
-        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-        pipe.set_progress_bar_config(disable=True)
-        diffusion_pipeline = pipe
-        print("Model loaded successfully.")
-
     if not on_load:
         return diffusion_pipeline
 
 @spaces.GPU(duration=60, enable_queue=True)
-def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
     """
     This function now uses capture_logs to listen to internal diffusers logs
     while retaining the superior data structure you designed.
     """
     tracker = None
-    with capture_logs() as get_logs:
-        try:
             pipe = load_pipeline(on_load=False)
             seed = random.randint(0, MAX_SEED)
             generator = torch.Generator(device="cuda").manual_seed(seed)
             prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
@@ -151,67 +168,110 @@ def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
             num_inference_steps = 10
 
             all_logs = []
-            last_log_content = None
 
             # Helper function to process and store new logs
-            def process_and_store_logs():
-                nonlocal all_logs, last_log_content
                 new_records = get_logs()
                 if new_records:
-                    new_logs = [{"type": "log", "level": r.levelname, "content": r.getMessage()} for r in new_records]
                     all_logs.extend(new_logs)
-                    last_log_content = all_logs[-1]["content"]
-
-            logging.info(f"Using seed: {seed}")
-            process_and_store_logs()
-            update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
-
-            logging.info("Starting diffusion process...")
-            process_and_store_logs()
-            update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
-
-            tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps")
 
             def progress_callback(pipe_instance, step, timestep, callback_kwargs):
-                process_and_store_logs()  # Check for new logs from diffusers at each step
-                update_dict = tracker.update(logs=all_logs)
-                update_queue.put((None, update_dict))
                 return callback_kwargs
-
             images = pipe(
                 prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
                 guidance_scale=3.0, num_inference_steps=num_inference_steps,
                 generator=generator, callback_on_step_end=progress_callback
            ).images
 
-            logging.log(logging.INFO + 5, "Image generated successfully!")
-            process_and_store_logs()
-
-            final_update = tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
-            update_queue.put((images, final_update))
 
         except Exception as e:
-            logging.error(f"Error in diffusion thread: {e}", exc_info=True)
-            process_and_store_logs()  # Capture the final error log
-            if tracker:
-                error_update = tracker.update(advance=0, status="error", logs=all_logs, log_content=f"An error occurred: {e}")
-                update_queue.put((None, error_update))
         finally:
             update_queue.put(None)
 
 @spaces.GPU(duration=60, enable_queue=True)
 def generate(prompt):
     """This function starts the worker thread and yields updates from the queue."""
-    yield None, None
-    yield None, {"type": "log", "level": "INFO", "content": "Preparing generation..."}
     update_queue = queue.Queue()
-    diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, update_queue))
    diffusion_thread.start()
    while True:
        update = update_queue.get()
-        if update is None: break
-        yield update
-
 
 # --- 4. THE COMBINED GRADIO UI with TABS ---
 with gr.Blocks(theme=gr.themes.Ocean()) as demo:
@@ -230,17 +290,18 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
             with gr.Group():
                 gr.Markdown("### Component Properties")
                 display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
                 bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
                 line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
                 autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
             with gr.Group():
                 gr.Markdown("### Simulation Controls")
-                disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
                 start_btn = gr.Button("Run Success Case", variant="primary")
                 error_btn = gr.Button("Run Error Case")
 
-        start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox], outputs=feature_logger)
-        error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox], outputs=feature_logger)
         feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
         controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
         for control in controls:
@@ -267,7 +328,7 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
             label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
         )
 
-        run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
         prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
         livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
 # demo/app.py
 
+import sys
 import gradio as gr
 import torch
 import logging
 import random
 import numpy as np
...
 
 # Import the component and ALL its utilities
 from gradio_livelog import LiveLog
+from gradio_livelog.utils import ProgressTracker, Tee, TqdmToQueueWriter, capture_logs
 
 # --- 1. SETUP ---
 MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
 MAX_SEED = np.iinfo(np.int32).max
 
 # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
+app_logger = logging.getLogger("logging_app")
+app_logger.setLevel(logging.INFO)
+console_handler = logging.StreamHandler()
+console_handler.flush = sys.stderr.flush
+app_logger.addHandler(console_handler)
 
+async def run_process(disable_console: bool, rate_unit: str, run_error_case: bool):
+    with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs:  # You can watch more than one logger via log_name; if log_name is not passed, the root logger is used.
         total_steps = 100
+        tracker = ProgressTracker(total=total_steps, description="Simulating a process...", rate_unit=rate_unit)
         all_logs = []
         last_log_content = None
 
         initial_log = f"Starting simulated process with {total_steps} steps..."
+        app_logger.info(initial_log)
         logs = [
             {
                 "type": "log",
...
             current_step = i + 1
 
             if current_step == 10:
+                app_logger.warning(f"Low disk space warning at step {current_step}.")
             elif current_step == 30:
+                app_logger.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
             elif current_step == 75:
+                app_logger.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
 
             if run_error_case and current_step == 50:
+                app_logger.error("A fatal simulation error occurred! Aborting.")
                 logs = [
                     {
                         "type": "log",
...
             yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
 
         final_log = "Process completed successfully!"
+        app_logger.log(logging.INFO + 5, final_log)
         logs = [
             {
                 "type": "log",
...
 def clear_output():
     return None
 
+async def run_success_case(disable_console: bool, rate_unit: str):
     yield None
+    async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=False):
         yield update
 
+async def run_error_case(disable_console: bool, rate_unit: str):
     yield None
+    async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=True):
         yield update
 
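The new `rate_unit` argument implies the demo UI now feeds these wrappers a second input. The updated wiring sits outside the hunks shown here, so the following is only a plausible sketch with guessed component names, not the commit's actual code:

```python
# Hypothetical wiring; the real component names live in the UI hunk not shown here.
rate_unit_radio = gr.Radio(["it/s", "s/it"], label="Rate Unit", value="s/it")

start_btn.click(fn=run_success_case,
                inputs=[disable_console_checkbox, rate_unit_radio],
                outputs=feature_logger)
error_btn.click(fn=run_error_case,
                inputs=[disable_console_checkbox, rate_unit_radio],
                outputs=feature_logger)
```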
120
 
121
  # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
122
  diffusion_pipeline = None
123
+ pipeline_lock = threading.Lock()
124
  def load_pipeline(on_load=True):
125
  """A function to load the model, ensuring it's only done once."""
126
  global diffusion_pipeline
127
+ with pipeline_lock:
128
+ if diffusion_pipeline is None:
129
+ print("Loading Stable Diffusion model for the first time...")
130
+ pipe = StableDiffusionXLPipeline.from_pretrained(
131
+ MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
132
+ )
133
+ pipe.enable_vae_tiling()
134
+ pipe.enable_model_cpu_offload()
135
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
136
+ diffusion_pipeline = pipe
137
+ print("Model loaded successfully!")
138
+
139
  if not on_load:
140
  return diffusion_pipeline
141
 
142
  @spaces.GPU(duration=60, enable_queue=True)
143
+ def run_diffusion_in_thread(prompt: str, disable_console: bool, update_queue: queue.Queue):
144
  """
145
  This function uses capture_logs to listen for internal diffusers logs
146
  while streaming structured log and progress updates to the UI queue.
147
  """
148
  tracker = None
149
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs: # You can watch more than one logger by listing names in log_name; omit log_name to capture the default logger.
150
+ try:
151
  pipe = load_pipeline(on_load=False)
152
+
153
+ # Capture the pipeline's own tqdm rate (it/s or s/it) instead of computing one ourselves
154
+ rate_queue = queue.Queue()
155
+ tqdm_writer = TqdmToQueueWriter(rate_queue)
156
+
157
+ progress_bar_handler = Tee(sys.stderr, tqdm_writer)
158
+ pipe.set_progress_bar_config(file=progress_bar_handler, # if you don't need tqdm output on the console, pass file=tqdm_writer instead
159
+ disable=False,
160
+ ncols=100,
161
+ dynamic_ncols=True,
162
+ ascii=" █")
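+ # Tee presumably duplicates each write() to both sinks (sys.stderr and the
+ # queue-backed writer), so the rate reaches the UI while tqdm stays visible.
+ # Console-silent sketch of the same call:
+ #   pipe.set_progress_bar_config(file=tqdm_writer, disable=False)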
163
+
164
  seed = random.randint(0, MAX_SEED)
165
  generator = torch.Generator(device="cuda").manual_seed(seed)
166
  prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
 
168
  num_inference_steps = 10
169
 
170
  all_logs = []
171
+ last_known_rate_data = None
172
 
173
  # Helper to capture new logs and push a combined update (logs + progress) to the UI queue
174
+ def process_and_send_updates(status="running", advance=0, final_image_payload=None):
175
+ """
176
+ This is the core callback function. It captures new logs, formats them,
177
+ and sends a complete update object (logs + progress) to the UI queue.
178
+ Call this after each new log record so the UI stays in sync.
179
+ """
180
+ nonlocal all_logs, last_known_rate_data
181
+ new_rate_data = None
182
+ while not rate_queue.empty():
183
+ try:
184
+ new_rate_data = rate_queue.get_nowait()
185
+ except queue.Empty:
186
+ break
187
+
188
+ if new_rate_data is not None:
189
+ last_known_rate_data = new_rate_data
190
+
191
  new_records = get_logs()
192
  if new_records:
193
+ new_logs = [{
194
+ "type": "log",
195
+ "level": "SUCCESS" if r.levelno == logging.INFO + 5 else r.levelname,
196
+ "content": r.getMessage()
197
+ } for r in new_records]
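+ # logging.INFO + 5 (numeric level 25) is this demo's ad-hoc "SUCCESS" level;
+ # the mapping above renders it as a SUCCESS badge in the LiveLog UI.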
198
  all_logs.extend(new_logs)
199
+
200
+ # Use the tracker to generate the progress update dictionary if it exists.
201
+ # If not, create a preliminary update dictionary.
202
+ update_dict = {}
203
+
204
+ if tracker:
205
+ update_dict = tracker.update(
206
+ advance=advance,
207
+ status=status,
208
+ logs=all_logs,
209
+ rate_data=last_known_rate_data
210
+ )
211
+ else:
212
+ # Initial state before the tracker is created.
213
+ update_dict = {
214
+ "type": "progress",
215
+ "logs": all_logs,
216
+ "current": 0,
217
+ "total": num_inference_steps,
218
+ "desc": "Diffusion Steps" # Description is sent once
219
+ }
220
+
221
+ # Put the update on the queue. The image payload is usually None
222
+ # until the very end.
223
+ update_queue.put((final_image_payload, update_dict))
224
+
225
+ app_logger.info(f"Using seed: {seed}")
226
+ process_and_send_updates()
227
+
228
+ app_logger.info("Starting diffusion process...")
229
+ process_and_send_updates()
230
+
231
+ tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps", rate_unit='it/s')
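+ # Assumption about the ProgressTracker API: when update() receives rate_data
+ # parsed from tqdm, it reports that rate instead of its own internal estimate.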
232
 
233
  def progress_callback(pipe_instance, step, timestep, callback_kwargs):
234
+ process_and_send_updates(advance=1)
 
 
235
  return callback_kwargs
236
+
237
  images = pipe(
238
  prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
239
  guidance_scale=3.0, num_inference_steps=num_inference_steps,
240
  generator=generator, callback_on_step_end=progress_callback
241
  ).images
242
 
243
+ app_logger.log(logging.INFO + 5, "Image generated successfully!")
244
+ process_and_send_updates(status="success", final_image_payload=images)
245
+
 
 
246
 
247
  except Exception as e:
248
+ app_logger.error(f"Error in diffusion thread: {e}, process aborted!", exc_info=True)
249
+ process_and_send_updates(status="error")
 
 
 
250
  finally:
251
  update_queue.put(None)
252
 
253
+
254
  @spaces.GPU(duration=60, enable_queue=True)
255
  def generate(prompt):
256
  """This function starts the worker thread and yields updates from the queue."""
257
+ yield None, None, gr.update(interactive=False)
 
258
  update_queue = queue.Queue()
259
+ diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, False, update_queue))
260
  diffusion_thread.start()
261
+ final_images = None
262
  while True:
263
  update = update_queue.get()
264
+ if update is None:
265
+ break
266
+
267
+ images, log_update = update
268
+
269
+ if images:
270
+ final_images = images
271
+
272
+ yield final_images, log_update, gr.skip()
273
+
274
+ yield final_images, log_update, gr.update(interactive=True)
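+ # Queue protocol used above: the worker puts (images_or_None, update_dict)
+ # tuples and finally a None sentinel; this generator drains the queue and
+ # re-yields each update to the bound Gradio outputs.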
275
 
276
  # --- 4. THE COMBINED GRADIO UI with TABS ---
277
  with gr.Blocks(theme=gr.themes.Ocean()) as demo:
 
290
  with gr.Group():
291
  gr.Markdown("### Component Properties")
292
  display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
293
+ rate_unit = gr.Radio(["it/s", "s/it"], label="Progress rate unit", value="it/s")
294
  bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
295
  line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
296
  autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
297
+ disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
298
  with gr.Group():
299
  gr.Markdown("### Simulation Controls")
 
300
  start_btn = gr.Button("Run Success Case", variant="primary")
301
  error_btn = gr.Button("Run Error Case")
302
 
303
+ start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
304
+ error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
305
  feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
306
  controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
307
  for control in controls:
 
328
  label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
329
  )
330
 
331
+ run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
332
  prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
333
  livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
334
 
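The demo above reduces to one pattern: a Python generator yields `tracker.update(...)` dictionaries into a `LiveLog` output. A minimal, self-contained sketch of that pattern, using only calls shown in this commit (step count and labels are illustrative):

```python
import logging
import gradio as gr
from gradio_livelog import LiveLog
from gradio_livelog.utils import ProgressTracker, capture_logs

logger = logging.getLogger("logging_app")
logger.setLevel(logging.INFO)

def stream():
    # Capture our named logger and yield one update dict per step;
    # LiveLog renders the accumulated logs plus the progress bar.
    with capture_logs(log_level=logging.INFO, log_name=["logging_app"]) as get_logs:
        tracker = ProgressTracker(total=5, description="Demo", rate_unit="it/s")
        all_logs = []
        for i in range(5):
            logger.info(f"step {i + 1}")
            all_logs.extend(
                {"type": "log", "level": r.levelname, "content": r.getMessage()}
                for r in get_logs()
            )
            yield tracker.update(advance=1, status="running", logs=all_logs)

with gr.Blocks() as demo:
    viewer = LiveLog()
    gr.Button("Run").click(fn=stream, outputs=viewer)

demo.launch()
```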
src/demo/space.py CHANGED
@@ -9,7 +9,7 @@ abs_path = os.path.join(os.path.dirname(__file__), "css.css")
9
 
10
  with gr.Blocks(
11
  css=abs_path,
12
- theme=gr.themes.Ocean(
13
  font_mono=[
14
  gr.themes.GoogleFont("Inconsolata"),
15
  "monospace",
@@ -40,9 +40,9 @@ pip install gradio_livelog
40
  ```python
41
  # demo/app.py
42
 
 
43
  import gradio as gr
44
  import torch
45
- import time
46
  import logging
47
  import random
48
  import numpy as np
@@ -54,24 +54,28 @@ import spaces
54
 
55
  # Import the component and ALL its utilities
56
  from gradio_livelog import LiveLog
57
- from gradio_livelog.utils import ProgressTracker, capture_logs
58
 
59
  # --- 1. SETUP ---
60
  MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
61
  MAX_SEED = np.iinfo(np.int32).max
62
 
63
  # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
64
- logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
65
-
66
- async def run_process(disable_console: bool, run_error_case: bool):
67
- with capture_logs(disable_console=disable_console) as get_logs:
 
 
 
 
68
  total_steps = 100
69
- tracker = ProgressTracker(total=total_steps, description="Simulating a process...")
70
  all_logs = []
71
  last_log_content = None
72
 
73
  initial_log = f"Starting simulated process with {total_steps} steps..."
74
- logging.info(initial_log)
75
  logs = [
76
  {
77
  "type": "log",
@@ -89,14 +93,14 @@ async def run_process(disable_console: bool, run_error_case: bool):
89
  current_step = i + 1
90
 
91
  if current_step == 10:
92
- logging.warning(f"Low disk space warning at step {current_step}.")
93
  elif current_step == 30:
94
- logging.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
95
  elif current_step == 75:
96
- logging.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
97
 
98
  if run_error_case and current_step == 50:
99
- logging.error("A fatal simulation error occurred! Aborting.")
100
  logs = [
101
  {
102
  "type": "log",
@@ -124,7 +128,7 @@ async def run_process(disable_console: bool, run_error_case: bool):
124
  yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
125
 
126
  final_log = "Process completed successfully!"
127
- logging.log(logging.INFO + 5, final_log)
128
  logs = [
129
  {
130
  "type": "log",
@@ -143,47 +147,60 @@ def update_livelog_properties(mode, color, lines, scroll):
143
  def clear_output():
144
  return None
145
 
146
- async def run_success_case(disable_console: bool):
147
  yield None
148
- async for update in run_process(disable_console=disable_console, run_error_case=False):
149
  yield update
150
 
151
- async def run_error_case(disable_console: bool):
152
  yield None
153
- async for update in run_process(disable_console=disable_console, run_error_case=True):
154
  yield update
155
 
156
 
157
  # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
158
  diffusion_pipeline = None
 
159
  def load_pipeline(on_load=True):
160
  \"\"\"A function to load the model, ensuring it's only done once.\"\"\"
161
  global diffusion_pipeline
162
- if diffusion_pipeline is None:
163
- print("Loading Stable Diffusion model for the first time...")
164
- pipe = StableDiffusionXLPipeline.from_pretrained(
165
- MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
166
- )
167
- pipe.enable_vae_tiling()
168
- pipe.enable_model_cpu_offload()
169
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
170
- pipe.set_progress_bar_config(disable=True)
171
- diffusion_pipeline = pipe
172
- print("Model loaded successfully.")
173
-
174
  if not on_load:
175
  return diffusion_pipeline
176
 
177
  @spaces.GPU(duration=60, enable_queue=True)
178
- def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
179
  \"\"\"
180
  This function now uses capture_logs to listen to internal diffusers logs
181
  while retaining the superior data structure you designed.
182
  \"\"\"
183
  tracker = None
184
- with capture_logs() as get_logs:
185
- try:
186
  pipe = load_pipeline(on_load=False)
 
 
 
 
 
 
 
 
 
 
 
 
187
  seed = random.randint(0, MAX_SEED)
188
  generator = torch.Generator(device="cuda").manual_seed(seed)
189
  prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
@@ -191,67 +208,110 @@ def run_diffusion_in_thread(prompt: str, update_queue: queue.Queue):
191
  num_inference_steps = 10
192
 
193
  all_logs = []
194
- last_log_content = None
195
 
196
  # Helper function to process and store new logs
197
- def process_and_store_logs():
198
- nonlocal all_logs, last_log_content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
  new_records = get_logs()
200
  if new_records:
201
- new_logs = [{"type": "log", "level": r.levelname, "content": r.getMessage()} for r in new_records]
 
 
 
 
202
  all_logs.extend(new_logs)
203
- last_log_content = all_logs[-1]["content"]
204
-
205
- logging.info(f"Using seed: {seed}")
206
- process_and_store_logs()
207
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
208
-
209
- logging.info("Starting diffusion process...")
210
- process_and_store_logs()
211
- update_queue.put((None, {"type": "progress", "logs": all_logs, "current": 0, "total": num_inference_steps, "desc": "Diffusion Steps"}))
212
-
213
- tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
 
215
  def progress_callback(pipe_instance, step, timestep, callback_kwargs):
216
- process_and_store_logs() # Check for new logs from diffusers at each step
217
- update_dict = tracker.update(logs=all_logs)
218
- update_queue.put((None, update_dict))
219
  return callback_kwargs
220
-
221
  images = pipe(
222
  prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
223
  guidance_scale=3.0, num_inference_steps=num_inference_steps,
224
  generator=generator, callback_on_step_end=progress_callback
225
  ).images
226
 
227
- logging.log(logging.INFO + 5, "Image generated successfully!")
228
- process_and_store_logs()
229
-
230
- final_update = tracker.update(advance=0, status="success", logs=all_logs, log_content=last_log_content)
231
- update_queue.put((images, final_update))
232
 
233
  except Exception as e:
234
- logging.error(f"Error in diffusion thread: {e}", exc_info=True)
235
- process_and_store_logs() # Capture the final error log
236
- if tracker:
237
- error_update = tracker.update(advance=0, status="error", logs=all_logs, log_content=f"An error occurred: {e}")
238
- update_queue.put((None, error_update))
239
  finally:
240
  update_queue.put(None)
241
 
 
242
  @spaces.GPU(duration=60, enable_queue=True)
243
  def generate(prompt):
244
  \"\"\"This function starts the worker thread and yields updates from the queue.\"\"\"
245
- yield None, None
246
- yield None, {"type": "log", "level": "INFO", "content": "Preparing generation..."}
247
  update_queue = queue.Queue()
248
- diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, update_queue))
249
  diffusion_thread.start()
 
250
  while True:
251
  update = update_queue.get()
252
- if update is None: break
253
- yield update
254
-
 
 
 
 
 
 
 
 
255
 
256
  # --- 4. THE COMBINED GRADIO UI with TABS ---
257
  with gr.Blocks(theme=gr.themes.Ocean()) as demo:
@@ -270,17 +330,18 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
270
  with gr.Group():
271
  gr.Markdown("### Component Properties")
272
  display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
 
273
  bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
274
  line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
275
  autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
 
276
  with gr.Group():
277
  gr.Markdown("### Simulation Controls")
278
- disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
279
  start_btn = gr.Button("Run Success Case", variant="primary")
280
  error_btn = gr.Button("Run Error Case")
281
 
282
- start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox], outputs=feature_logger)
283
- error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox], outputs=feature_logger)
284
  feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
285
  controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
286
  for control in controls:
@@ -307,7 +368,7 @@ with gr.Blocks(theme=gr.themes.Ocean()) as demo:
307
  label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
308
  )
309
 
310
- run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
311
  prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer])
312
  livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
313
 
 
9
 
10
  with gr.Blocks(
11
  css=abs_path,
12
+ theme=gr.themes.Default(
13
  font_mono=[
14
  gr.themes.GoogleFont("Inconsolata"),
15
  "monospace",
 
40
  ```python
41
  # demo/app.py
42
 
43
+ import sys
44
  import gradio as gr
45
  import torch
 
46
  import logging
47
  import random
48
  import numpy as np
 
54
 
55
  # Import the component and ALL its utilities
56
  from gradio_livelog import LiveLog
57
+ from gradio_livelog.utils import ProgressTracker, Tee, TqdmToQueueWriter, capture_logs
58
 
59
  # --- 1. SETUP ---
60
  MODEL_ID = "SG161222/RealVisXL_V5.0_Lightning"
61
  MAX_SEED = np.iinfo(np.int32).max
62
 
63
  # --- 2. LOGIC FOR THE "LIVELOG FEATURE DEMO" TAB ---
64
+ app_logger = logging.getLogger("logging_app")
65
+ app_logger.setLevel(logging.INFO)
66
+ console_handler = logging.StreamHandler()
67
+ console_handler.flush = sys.stderr.flush
68
+ app_logger.addHandler(console_handler)
69
+
70
+ async def run_process(disable_console: bool, rate_unit: str, run_error_case: bool):
71
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs: # You can watch more than one logger by listing names in log_name; omit log_name to capture the default logger.
72
  total_steps = 100
73
+ tracker = ProgressTracker(total=total_steps, description="Simulating a process...", rate_unit=rate_unit)
74
  all_logs = []
75
  last_log_content = None
76
 
77
  initial_log = f"Starting simulated process with {total_steps} steps..."
78
+ app_logger.info(initial_log)
79
  logs = [
80
  {
81
  "type": "log",
 
93
  current_step = i + 1
94
 
95
  if current_step == 10:
96
+ app_logger.warning(f"Low disk space warning at step {current_step}.")
97
  elif current_step == 30:
98
+ app_logger.log(logging.INFO + 5, f"Asset pack loaded successfully at step {current_step}.")
99
  elif current_step == 75:
100
+ app_logger.critical(f"Checksum mismatch! Data may be corrupt at step {current_step}.")
101
 
102
  if run_error_case and current_step == 50:
103
+ app_logger.error("A fatal simulation error occurred! Aborting.")
104
  logs = [
105
  {
106
  "type": "log",
 
128
  yield tracker.update(advance=1, status="running", logs=all_logs, log_content=last_log_content)
129
 
130
  final_log = "Process completed successfully!"
131
+ app_logger.log(logging.INFO + 5, final_log)
132
  logs = [
133
  {
134
  "type": "log",
 
147
  def clear_output():
148
  return None
149
 
150
+ async def run_success_case(disable_console: bool, rate_unit: str):
151
  yield None
152
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=False):
153
  yield update
154
 
155
+ async def run_error_case(disable_console: bool, rate_unit: str):
156
  yield None
157
+ async for update in run_process(disable_console=disable_console, rate_unit=rate_unit, run_error_case=True):
158
  yield update
159
 
160
 
161
  # --- 3. LOGIC FOR THE "DIFFUSION PIPELINE INTEGRATION" TAB ---
162
  diffusion_pipeline = None
163
+ pipeline_lock = threading.Lock()
164
  def load_pipeline(on_load=True):
165
  \"\"\"A function to load the model, ensuring it's only done once.\"\"\"
166
  global diffusion_pipeline
167
+ with pipeline_lock:
168
+ if diffusion_pipeline is None:
169
+ print("Loading Stable Diffusion model for the first time...")
170
+ pipe = StableDiffusionXLPipeline.from_pretrained(
171
+ MODEL_ID, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False
172
+ )
173
+ pipe.enable_vae_tiling()
174
+ pipe.enable_model_cpu_offload()
175
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
176
+ diffusion_pipeline = pipe
177
+ print("Model loaded successfully!")
178
+
179
  if not on_load:
180
  return diffusion_pipeline
181
 
182
  @spaces.GPU(duration=60, enable_queue=True)
183
+ def run_diffusion_in_thread(prompt: str, disable_console: bool, update_queue: queue.Queue):
184
  \"\"\"
185
  This function uses capture_logs to listen for internal diffusers logs
186
  while streaming structured log and progress updates to the UI queue.
187
  \"\"\"
188
  tracker = None
189
+ with capture_logs(log_level=logging.INFO, log_name=["logging_app"], disable_console=disable_console) as get_logs: # You can watch more than one logger by listing names in log_name; omit log_name to capture the default logger.
190
+ try:
191
  pipe = load_pipeline(on_load=False)
192
+
193
+ # Capture the pipeline's own tqdm rate (it/s or s/it) instead of computing one ourselves
194
+ rate_queue = queue.Queue()
195
+ tqdm_writer = TqdmToQueueWriter(rate_queue)
196
+
197
+ progress_bar_handler = Tee(sys.stderr, tqdm_writer)
198
+ pipe.set_progress_bar_config(file=progress_bar_handler, # if you don't need tqdm output on the console, pass file=tqdm_writer instead
199
+ disable=False,
200
+ ncols=100,
201
+ dynamic_ncols=True,
202
+ ascii=" █")
203
+
204
  seed = random.randint(0, MAX_SEED)
205
  generator = torch.Generator(device="cuda").manual_seed(seed)
206
  prompt_style = f"hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic"
 
208
  num_inference_steps = 10
209
 
210
  all_logs = []
211
+ last_known_rate_data = None
212
 
213
  # Helper to capture new logs and push a combined update (logs + progress) to the UI queue
214
+ def process_and_send_updates(status="running", advance=0, final_image_payload=None):
215
+ \"\"\"
216
+ This is the core callback function. It captures new logs, formats them,
217
+ and sends a complete update object (logs + progress) to the UI queue.
218
+ Call this after each new log record so the UI stays in sync.
219
+ \"\"\"
220
+ nonlocal all_logs, last_known_rate_data
221
+ new_rate_data = None
222
+ while not rate_queue.empty():
223
+ try:
224
+ new_rate_data = rate_queue.get_nowait()
225
+ except queue.Empty:
226
+ break
227
+
228
+ if new_rate_data is not None:
229
+ last_known_rate_data = new_rate_data
230
+
231
  new_records = get_logs()
232
  if new_records:
233
+ new_logs = [{
234
+ "type": "log",
235
+ "level": "SUCCESS" if r.levelno == logging.INFO + 5 else r.levelname,
236
+ "content": r.getMessage()
237
+ } for r in new_records]
238
  all_logs.extend(new_logs)
239
+
240
+ # Use the tracker to generate the progress update dictionary if it exists.
241
+ # If not, create a preliminary update dictionary.
242
+ update_dict = {}
243
+
244
+ if tracker:
245
+ update_dict = tracker.update(
246
+ advance=advance,
247
+ status=status,
248
+ logs=all_logs,
249
+ rate_data=last_known_rate_data
250
+ )
251
+ else:
252
+ # Initial state before the tracker is created.
253
+ update_dict = {
254
+ "type": "progress",
255
+ "logs": all_logs,
256
+ "current": 0,
257
+ "total": num_inference_steps,
258
+ "desc": "Diffusion Steps" # Description is sent once
259
+ }
260
+
261
+ # Put the update on the queue. The image payload is usually None
262
+ # until the very end.
263
+ update_queue.put((final_image_payload, update_dict))
264
+
265
+ app_logger.info(f"Using seed: {seed}")
266
+ process_and_send_updates()
267
+
268
+ app_logger.info("Starting diffusion process...")
269
+ process_and_send_updates()
270
+
271
+ tracker = ProgressTracker(total=num_inference_steps, description="Diffusion Steps", rate_unit='it/s')
272
 
273
  def progress_callback(pipe_instance, step, timestep, callback_kwargs):
274
+ process_and_send_updates(advance=1)
 
 
275
  return callback_kwargs
276
+
277
  images = pipe(
278
  prompt=prompt_style, negative_prompt=negative_prompt_style, width=1024, height=1024,
279
  guidance_scale=3.0, num_inference_steps=num_inference_steps,
280
  generator=generator, callback_on_step_end=progress_callback
281
  ).images
282
 
283
+ app_logger.log(logging.INFO + 5, "Image generated successfully!")
284
+ process_and_send_updates(status="success", final_image_payload=images)
285
+
 
 
286
 
287
  except Exception as e:
288
+ app_logger.error(f"Error in diffusion thread: {e}, process aborted!", exc_info=True)
289
+ process_and_send_updates(status="error")
 
 
 
290
  finally:
291
  update_queue.put(None)
292
 
293
+
294
  @spaces.GPU(duration=60, enable_queue=True)
295
  def generate(prompt):
296
  \"\"\"This function starts the worker thread and yields updates from the queue.\"\"\"
297
+ yield None, None, gr.update(interactive=False)
 
298
  update_queue = queue.Queue()
299
+ diffusion_thread = threading.Thread(target=run_diffusion_in_thread, args=(prompt, False, update_queue))
300
  diffusion_thread.start()
301
+ final_images = None
302
  while True:
303
  update = update_queue.get()
304
+ if update is None:
305
+ break
306
+
307
+ images, log_update = update
308
+
309
+ if images:
310
+ final_images = images
311
+
312
+ yield final_images, log_update, gr.skip()
313
+
314
+ yield final_images, log_update, gr.update(interactive=True)
315
 
316
  # --- 4. THE COMBINED GRADIO UI with TABS ---
317
  with gr.Blocks(theme=gr.themes.Ocean()) as demo:
 
330
  with gr.Group():
331
  gr.Markdown("### Component Properties")
332
  display_mode_radio = gr.Radio(["full", "log", "progress"], label="Display Mode", value="full")
333
+ rate_unit = gr.Radio(["it/s", "s/it"], label="Progress rate unit", value="it/s")
334
  bg_color_picker = gr.ColorPicker(label="Background Color", value="#000000")
335
  line_numbers_checkbox = gr.Checkbox(label="Show Line Numbers", value=True)
336
  autoscroll_checkbox = gr.Checkbox(label="Autoscroll", value=True)
337
+ disable_console_checkbox = gr.Checkbox(label="Disable Python Console Output", value=False)
338
  with gr.Group():
339
  gr.Markdown("### Simulation Controls")
 
340
  start_btn = gr.Button("Run Success Case", variant="primary")
341
  error_btn = gr.Button("Run Error Case")
342
 
343
+ start_btn.click(fn=run_success_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
344
+ error_btn.click(fn=run_error_case, inputs=[disable_console_checkbox, rate_unit], outputs=feature_logger)
345
  feature_logger.clear(fn=clear_output, inputs=None, outputs=feature_logger)
346
  controls = [display_mode_radio, bg_color_picker, line_numbers_checkbox, autoscroll_checkbox]
347
  for control in controls:
 
368
  label="Result", columns=1, show_label=False, height=500, min_width=768, preview=True, allow_preview=True
369
  )
370
 
371
+ run_button.click(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
372
  prompt.submit(fn=generate, inputs=[prompt], outputs=[result_gallery, livelog_viewer, run_button])
373
  livelog_viewer.clear(fn=clear_output, inputs=None, outputs=[livelog_viewer])
374
 
src/frontend/shared/LiveLogPanel.svelte CHANGED
@@ -36,7 +36,7 @@
36
  // -------------------------------------------------------------------------
37
 
38
  /** Holds the current state of the progress bar. */
39
- let progress = { visible: true, current: 0, total: 100, desc: "", percentage: 0, rate: 0.0, status: "running" };
40
  /** Accumulates all received log lines. */
41
  let log_lines: { level: string; content: string }[] = [];
42
  /** A plain text representation of all logs for the utility buttons. */
@@ -66,12 +66,14 @@
66
  if (value !== null) {
67
  clearTimeout(debounceTimeout);
68
  debounceTimeout = setTimeout(async () => {
69
- if (value === null) {
70
- // A `null` value is the signal to clear the component's state.
71
- log_lines = [];
72
- progress = { visible: false, current: 0, total: 100, desc: "", percentage: 0, rate: 0.0, status: "running" };
 
 
73
  all_logs_as_text = "";
74
- initial_desc = "Processing..."; // Reset the initial description
75
  } else if (value) {
76
  if (Array.isArray(value)) {
77
  // Handles an initial state load if the backend provides a full list.
@@ -84,39 +86,38 @@
84
  progress.visible = true;
85
  progress.current = item.current;
86
  progress.total = item.total || 100;
87
- // Capture the initial fixed description on the first progress update.
88
  if (item.current === 0 && item.desc && initial_desc === "Processing...") {
89
  initial_desc = item.desc;
90
  }
91
- // In 'progress' mode, use the latest log as the description; otherwise, use the fixed one.
92
  progress.desc = display_mode === "progress" && log_lines.length > 0
93
  ? log_lines[log_lines.length - 1].content
94
  : initial_desc;
95
- progress.rate = item.rate || 0.0;
 
 
96
  progress.percentage = progress.total > 0 ? ((item.current / progress.total) * 100) : 0;
97
  progress.status = item.status || "running";
98
  }
99
  }
100
- } else if (typeof value === 'object' && value.type) {
101
- // This is the primary streaming case: handles a single new data object.
102
  if (value.type === "log") {
103
  log_lines = [...log_lines, { level: value.level || 'INFO', content: value.content }];
104
  } else if (value.type === "progress") {
105
  progress.visible = true;
106
  progress.current = value.current;
107
  progress.total = value.total || 100;
108
- // Capture the initial fixed description on the first progress update.
109
  if (value.current === 0 && value.desc && initial_desc === "Processing...") {
110
  initial_desc = value.desc;
111
  }
112
- // In 'progress' mode, use the latest log as the description; otherwise, use the fixed one.
113
  progress.desc = display_mode === "progress" && log_lines.length > 0
114
  ? log_lines[log_lines.length - 1].content
115
  : initial_desc;
116
- progress.rate = value.rate || 0.0;
117
- progress.percentage = progress.total > 0 ? ((value.current / progress.total) * 100) : 0;
 
 
118
  progress.status = value.status || "running";
119
- // The backend can send a full log history with a progress update.
120
  log_lines = Array.isArray(value.logs) ? value.logs.map(log => ({
121
  level: log.level || 'INFO',
122
  content: log.content
@@ -125,8 +126,8 @@
125
  }
126
  all_logs_as_text = log_lines.map(l => l.content).join('\n');
127
  }
128
- await tick(); // Ensure the DOM is updated before the next potential operation.
129
- }, 50); // 50ms debounce window.
130
  }
131
  }
132
 
@@ -166,7 +167,12 @@
166
  <div class="progress-container">
167
  <div class="progress-label-top">
168
  <span>{progress.desc}</span>
169
- <span>{progress.rate.toFixed(1)} it/s</span>
 
 
 
 
 
170
  </div>
171
  <div class="progress-bar-background">
172
  <!-- Conditionally apply CSS classes based on the progress status. -->
@@ -282,4 +288,17 @@
282
  .progress-bar-fill.error {
283
  background-color: var(--color-error, #ef4444);
284
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
285
  </style>
 
36
  // -------------------------------------------------------------------------
37
 
38
  /** Holds the current state of the progress bar. */
39
+ let progress = { visible: true, current: 0, total: 100, desc: "", percentage: 0, rate: 0.0, status: "running", rate_unit: 'it/s', extra_info:''};
40
  /** Accumulates all received log lines. */
41
  let log_lines: { level: string; content: string }[] = [];
42
  /** A plain text representation of all logs for the utility buttons. */
 
66
  if (value !== null) {
67
  clearTimeout(debounceTimeout);
68
  debounceTimeout = setTimeout(async () => {
69
+ if (value === null) {
70
+ log_lines = [];
71
+ progress = {
72
+ visible: false, current: 0, total: 100, desc: "", percentage: 0,
73
+ rate: 0.0, status: "running", rate_unit: 'it/s', extra_info: ''
74
+ };
75
  all_logs_as_text = "";
76
+ initial_desc = "Processing...";
77
  } else if (value) {
78
  if (Array.isArray(value)) {
79
  // Handles an initial state load if the backend provides a full list.
 
86
  progress.visible = true;
87
  progress.current = item.current;
88
  progress.total = item.total || 100;
 
89
  if (item.current === 0 && item.desc && initial_desc === "Processing...") {
90
  initial_desc = item.desc;
91
  }
 
92
  progress.desc = display_mode === "progress" && log_lines.length > 0
93
  ? log_lines[log_lines.length - 1].content
94
  : initial_desc;
95
+ progress.rate = item.rate || 0.0;
96
+ progress.rate_unit = item.rate_unit || 'it/s';
97
+ progress.extra_info = item.extra_info || '';
98
  progress.percentage = progress.total > 0 ? ((item.current / progress.total) * 100) : 0;
99
  progress.status = item.status || "running";
100
  }
101
  }
102
+ } else if (typeof value === 'object' && value.type) {
 
103
  if (value.type === "log") {
104
  log_lines = [...log_lines, { level: value.level || 'INFO', content: value.content }];
105
  } else if (value.type === "progress") {
106
  progress.visible = true;
107
  progress.current = value.current;
108
  progress.total = value.total || 100;
 
109
  if (value.current === 0 && value.desc && initial_desc === "Processing...") {
110
  initial_desc = value.desc;
111
  }
 
112
  progress.desc = display_mode === "progress" && log_lines.length > 0
113
  ? log_lines[log_lines.length - 1].content
114
  : initial_desc;
115
+ progress.rate = value.rate || 0.0;
116
+ progress.rate_unit = value.rate_unit || 'it/s';
117
+ progress.extra_info = value.extra_info || '';
118
+ progress.percentage = progress.total > 0 ? ((value.current / progress.total) * 100) : 0;
119
  progress.status = value.status || "running";
120
+
121
  log_lines = Array.isArray(value.logs) ? value.logs.map(log => ({
122
  level: log.level || 'INFO',
123
  content: log.content
 
126
  }
127
  all_logs_as_text = log_lines.map(l => l.content).join('\n');
128
  }
129
+ await tick();
130
+ }, 50);
131
  }
132
  }
133
 
 
167
  <div class="progress-container">
168
  <div class="progress-label-top">
169
  <span>{progress.desc}</span>
170
+ <span class="rate-info">
171
+ {progress.rate.toFixed(2)} {progress.rate_unit}
172
+ {#if progress.extra_info}
173
+ <span class="extra-info">({progress.extra_info})</span>
174
+ {/if}
175
+ </span>
176
  </div>
177
  <div class="progress-bar-background">
178
  <!-- Conditionally apply CSS classes based on the progress status. -->
 
288
  .progress-bar-fill.error {
289
  background-color: var(--color-error, #ef4444);
290
  }
291
+ .rate-info {
292
+ display: flex;
293
+ align-items: center;
294
+ gap: 0.5ch;
295
+ }
296
+ .extra-info {
297
+ color: var(--body-text-color-subdued);
298
+ font-size: 0.9em;
299
+ white-space: nowrap;
300
+ overflow: hidden;
301
+ text-overflow: ellipsis;
302
+ max-width: 200px;
303
+ }
304
  </style>
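For reference, the `progress` branch above consumes payloads of the following shape, with the fallbacks noted; a sketch of the corresponding Python dict (field names taken from the frontend code above, values illustrative):

```python
update = {
    "type": "progress",
    "current": 7,
    "total": 10,                # frontend falls back to 100
    "desc": "Diffusion Steps",  # captured as the fixed description at step 0
    "rate": 1.85,               # frontend falls back to 0.0
    "rate_unit": "s/it",        # frontend falls back to 'it/s'
    "extra_info": "ETA 6s",     # optional; rendered in parentheses next to the rate
    "status": "running",        # "running" | "success" | "error"
    "logs": [{"level": "INFO", "content": "step 7/10"}],
}
```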
src/pyproject.toml CHANGED
@@ -8,7 +8,7 @@ build-backend = "hatchling.build"
8
 
9
  [project]
10
  name = "gradio_livelog"
11
- version = "0.0.2"
12
  description = "A Live Log Component for Gradio Interface"
13
  readme = "README.md"
14
  license = "apache-2.0"
 
8
 
9
  [project]
10
  name = "gradio_livelog"
11
+ version = "0.0.3"
12
  description = "A Live Log Component for Gradio Interface"
13
  readme = "README.md"
14
  license = "apache-2.0"