# --- Environment Variables Used ---
# DISABLE_ZEROGPU: Set to 'true' or '1' to disable @spaces.GPU decorator (for Hugging Face Spaces).
# TRIPOSG_CODE_PATH: Absolute path to a local directory containing the checked-out TripoSG repository (scribble branch).
# GITHUB_TOKEN: A GitHub token used for cloning the TripoSG repo if TRIPOSG_CODE_PATH is not provided.
# WEIGHTS_PATH: Absolute path to a local directory containing the TripoSG-scribble model weights.
# HF_TOKEN: A Hugging Face Hub token used for downloading weights/models if local paths (WEIGHTS_PATH, WD14_CONVNEXT_PATH) are not provided.
# WD14_CONVNEXT_PATH: Absolute path to a local directory containing the WD14 ConvNeXT tagger model.onnx and selected_tags.csv.
# ----------------------------------
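#
# Example launch for local deployment (paths and script name are illustrative):
#   DISABLE_ZEROGPU=1 WEIGHTS_PATH=/models/TripoSG-scribble python app.py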

import gradio as gr
import os
import sys
import subprocess
from huggingface_hub import snapshot_download, HfFolder, hf_hub_download
import random # Import random for seed generation
import re # For WD14 tag processing
import cv2 # For WD14 preprocessing
import pandas as pd # For WD14 tags
from onnxruntime import InferenceSession # For WD14 model
from typing import Tuple, Dict # Type hints for the WD14 tagger

# --- Repo Setup --- 
DEFAULT_REPO_DIR = "./TripoSG-repo" # Directory to clone into if not using local path
REPO_GIT_URL = "github.com/VAST-AI-Research/TripoSG.git" # Base URL without schema/token
BRANCH = "scribble"

code_source_path = None

# Option 1: Use local path if TRIPOSG_CODE_PATH env var is set
local_code_path = os.environ.get("TRIPOSG_CODE_PATH")
if local_code_path:
    print(f"Attempting to use local code path specified by TRIPOSG_CODE_PATH: {local_code_path}")
    # Basic check: does it exist and seem like a git repo (has .git)?
    if os.path.isdir(local_code_path) and os.path.isdir(os.path.join(local_code_path, ".git")):
        code_source_path = os.path.abspath(local_code_path)
        print(f"Using local TripoSG code directory: {code_source_path}")
        # You might want to add a check here to verify the branch is correct, e.g.:
        # try:
        #     current_branch = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=code_source_path, check=True, capture_output=True, text=True).stdout.strip()
        #     if current_branch != BRANCH:
        #         print(f"Warning: Local repo is on branch '{current_branch}', expected '{BRANCH}'. Attempting checkout...")
        #         subprocess.run(["git", "checkout", BRANCH], cwd=code_source_path, check=True)
        # except Exception as e:
        #     print(f"Warning: Could not verify or checkout branch '{BRANCH}' in {code_source_path}: {e}")
    else:
        print(f"Warning: TRIPOSG_CODE_PATH '{local_code_path}' not found or not a valid git repository directory. Falling back to cloning.")

# Option 2: Clone from GitHub (if local path not used or invalid)
if not code_source_path:
    repo_url_to_clone = f"https://{REPO_GIT_URL}"
    github_token = os.environ.get("GITHUB_TOKEN")
    if github_token:
        print("Using GITHUB_TOKEN for repository cloning.")
        repo_url_to_clone = f"https://{github_token}@{REPO_GIT_URL}"
    else:
        print("No GITHUB_TOKEN found. Using public HTTPS for cloning.")

    repo_target_dir = os.path.abspath(DEFAULT_REPO_DIR)
    if not os.path.exists(repo_target_dir):
        print(f"Cloning TripoSG repository ({BRANCH} branch) into {repo_target_dir}...")
        try:
            subprocess.run(["git", "clone", "--branch", BRANCH, "--depth", "1", repo_url_to_clone, repo_target_dir], check=True)
            code_source_path = repo_target_dir
            print("Repository cloned successfully.")
        except subprocess.CalledProcessError as e:
            print(f"Error cloning repository: {e}")
            print("Please ensure the URL is correct, the branch '{BRANCH}' exists, and you have access rights (or provide a GITHUB_TOKEN).")
            sys.exit(1)
        except Exception as e:
            print(f"An unexpected error occurred during cloning: {e}")
            sys.exit(1)
    else:
        print(f"Directory {repo_target_dir} already exists. Assuming it contains the correct code/branch.")
        # Optional: Add checks here like git pull or verifying the branch
        code_source_path = repo_target_dir

if not code_source_path:
    print("Error: Could not determine TripoSG code source path.")
    sys.exit(1)

# Add repo to Python path
sys.path.insert(0, code_source_path) # Use the determined absolute path
print(f"Added {code_source_path} to sys.path")
# --- End Repo Setup ---

# --- ZeroGPU Setup ---
DISABLE_ZEROGPU = os.environ.get("DISABLE_ZEROGPU", "false").lower() in ("true", "1", "t")
ENABLE_ZEROGPU = not DISABLE_ZEROGPU
print(f"ZeroGPU Enabled: {ENABLE_ZEROGPU}")
# --- End ZeroGPU Setup ---

if ENABLE_ZEROGPU:
    import spaces # Import spaces for ZeroGPU
from PIL import Image
import numpy as np
import torch
from triposg.pipelines.pipeline_triposg_scribble import TripoSGScribblePipeline
import tempfile

# --- Weight Loading Logic ---
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    HfFolder.save_token(HF_TOKEN)
HUGGING_FACE_REPO_ID = "VAST-AI/TripoSG-scribble"
DEFAULT_CACHE_PATH = "./pretrained_weights/TripoSG-scribble"

# Option 1: Use local path if WEIGHTS_PATH env var is set
local_weights_path = os.environ.get("WEIGHTS_PATH")
model_load_path = None

if local_weights_path:
    print(f"Attempting to load weights from local path specified by WEIGHTS_PATH: {local_weights_path}")
    if os.path.isdir(local_weights_path):
        model_load_path = local_weights_path
        print(f"Using local weights directory: {model_load_path}")
    else:
        print(f"Warning: WEIGHTS_PATH '{local_weights_path}' not found or not a directory. Falling back to Hugging Face download.")

# Option 2: Download from Hugging Face (if local path not used or invalid)
if not model_load_path:
    hf_token = os.environ.get("HF_TOKEN")
    print(f"Attempting to download weights from Hugging Face repo: {HUGGING_FACE_REPO_ID}")
    if hf_token:
        print("Using Hugging Face token for download.")
        auth_token = hf_token
    else:
        print("No Hugging Face token found. Attempting public download.")
        auth_token = None
    try:
        model_load_path = snapshot_download(
            repo_id=HUGGING_FACE_REPO_ID,
            local_dir=DEFAULT_CACHE_PATH,
            local_dir_use_symlinks=False, # Copy real files instead of symlinks (a no-op in recent huggingface_hub)
            token=auth_token,
            # revision="main" # Specify branch/commit if needed
        )
        print(f"Weights downloaded/cached to: {model_load_path}")
    except Exception as e:
        print(f"Error downloading weights from Hugging Face: {e}")
        print("Please ensure the repository exists and is accessible, or provide a valid WEIGHTS_PATH.")
        sys.exit(1) # Exit if weights cannot be loaded

# Load the pipeline using the determined path
print(f"Loading pipeline from: {model_load_path}")
pipe = TripoSGScribblePipeline.from_pretrained(model_load_path)
pipe.to(dtype=torch.float16, device="cuda") # fp16 on CUDA; assumes an NVIDIA GPU (or ZeroGPU) is available
print("Pipeline loaded.")
# --- End Weight Loading Logic ---

# Create a white background image and a transparent layer for drawing
canvas_width, canvas_height = 512, 512
initial_background = Image.new("RGB", (canvas_width, canvas_height), color="white")
initial_layer = Image.new("RGBA", (canvas_width, canvas_height), color=(0, 0, 0, 0)) # Transparent layer
# Prepare the initial value dictionary for ImageEditor
initial_value = {
    "background": initial_background,
    "layers": [initial_layer], # Add the transparent layer
    "composite": None
}
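# gr.ImageEditor values are dicts with "background", "layers", and "composite" keys;
# the flattened "composite" is what generate_3d() reads below.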

MAX_SEED = np.iinfo(np.int32).max

def get_random_seed():
    return random.randint(0, MAX_SEED)

# --- WD14 Helper Functions ---
def make_square(img, target_size):
    # Pad with white to a square whose side is at least target_size
    old_size = img.shape[:2]
    desired_size = max(old_size)
    desired_size = max(desired_size, target_size)

    delta_w = desired_size - old_size[1]
    delta_h = desired_size - old_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)

    color = [255, 255, 255] # White padding
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)

def smart_resize(img, size):
    # INTER_AREA for downscaling, INTER_CUBIC for upscaling (better quality in each direction)
    if img.shape[0] > size:
        img = cv2.resize(img, (size, size), interpolation=cv2.INTER_AREA)
    elif img.shape[0] < size:
        img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC)
    return img

RE_SPECIAL = re.compile(r'([\()])') # Matches parentheses in tag text (defined but currently unused)

# --- WD14 Tagger Class ---
class WaifuDiffusionInterrogator:
    def __init__(
            self,
            repo: str,
            model_filename='model.onnx',
            tags_filename='selected_tags.csv',
            local_model_dir: str | None = None # Added local path option
    ) -> None:
        self.__repo = repo
        self.__model_filename = model_filename
        self.__tags_filename = tags_filename
        self.__local_model_dir = local_model_dir
        self.__initialized = False
        self._model = None
        self._tags = None

    def _init(self) -> None:
        if self.__initialized:
            return

        model_path = None
        tags_path = None

        if self.__local_model_dir:
            print(f"WD14: Attempting to load from local directory: {self.__local_model_dir}")
            potential_model_path = os.path.join(self.__local_model_dir, self.__model_filename)
            potential_tags_path = os.path.join(self.__local_model_dir, self.__tags_filename)
            if os.path.exists(potential_model_path) and os.path.exists(potential_tags_path):
                model_path = potential_model_path
                tags_path = potential_tags_path
                print("WD14: Found local model and tags file.")
            else:
                print("WD14: Local files not found. Falling back to Hugging Face download.")

        if model_path is None or tags_path is None:
            print(f"WD14: Downloading from repo: {self.__repo}")
            hf_token = os.environ.get("HF_TOKEN") # Reuse HF token if available
            try:
                model_path = hf_hub_download(self.__repo, filename=self.__model_filename, token=hf_token)
                tags_path = hf_hub_download(self.__repo, filename=self.__tags_filename, token=hf_token)
                print("WD14: Download complete.")
            except Exception as e:
                print(f"WD14: Error downloading from Hugging Face: {e}")
                # Leave the tagger uninitialized; _calculation() will return None
                # and callers fall back to a default prompt.
                return # Cannot initialize

        try:
            self._model = InferenceSession(str(model_path))
            self._tags = pd.read_csv(tags_path)
            self.__initialized = True
            print("WD14: Tagger initialized successfully.")
        except Exception as e:
            print(f"WD14: Error initializing ONNX session or reading tags: {e}")

    def _calculation(self, image: Image.Image) -> pd.DataFrame | None:
        self._init()
        if not self._model or self._tags is None:
             print("WD14: Tagger not initialized.")
             return None

        _, height, _, _ = self._model.get_inputs()[0].shape # NHWC input: (batch, height, width, channels)

        image = image.convert('RGBA')
        new_image = Image.new('RGBA', image.size, 'WHITE')
        new_image.paste(image, mask=image)
        image = new_image.convert('RGB')

        image = np.asarray(image)
        image = image[:, :, ::-1] # RGB -> BGR, as expected by the tagger

        image = make_square(image, height)
        image = smart_resize(image, height)
        image = image.astype(np.float32)
        image = np.expand_dims(image, 0)

        input_name = self._model.get_inputs()[0].name
        label_name = self._model.get_outputs()[0].name
        confidence = self._model.run([label_name], {input_name: image})[0]

        full_tags = self._tags[['name', 'category']].copy()
        full_tags['confidence'] = confidence[0]

        return full_tags

    def interrogate(self, image: Image.Image) -> Tuple[Dict[str, float], Dict[str, float]] | None:
        full_tags = self._calculation(image)
        if full_tags is None:
            return None

        ratings = dict(full_tags[full_tags['category'] == 9][['name', 'confidence']].values)
        tags = dict(full_tags[full_tags['category'] != 9][['name', 'confidence']].values)

        return ratings, tags

# --- Instantiate WD14 Tagger ---
WD14_CONVNEXT_REPO = 'SmilingWolf/wd-v1-4-convnext-tagger'
wd14_local_path = os.environ.get("WD14_CONVNEXT_PATH")
wd14_tagger = WaifuDiffusionInterrogator(repo=WD14_CONVNEXT_REPO, local_model_dir=wd14_local_path)
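# Usage sketch (tag names and scores are illustrative):
#   ratings, tags = wd14_tagger.interrogate(img)  # img: PIL.Image
#   -> ratings={'general': 0.97, ...}, tags={'cat': 0.91, 'tail': 0.55, ...}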

# --- Helper to format tags ---
def format_wd14_tags(tags: Dict[str, float], threshold: float = 0.35) -> str:
    filtered_tags = {
        tag: score for tag, score in tags.items()
        if score >= threshold and "background" not in tag and tag not in {"monochrome", "greyscale", "no_humans", "comic", "solo"}
    }
    print(filtered_tags) # Debug: tags that survived filtering
    # Sort by score descending, then alphabetically
    tags_pairs = sorted(filtered_tags.items(), key=lambda x: (-x[1], x[0]))
    text_items = [tag.replace('_', ' ') for tag, score in tags_pairs]
    return ', '.join(text_items)
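
# Example (illustrative scores):
#   format_wd14_tags({'cat': 0.9, 'long_tail': 0.5, 'simple_background': 0.8})
#   -> 'cat, long tail' (background and blacklisted tags dropped, underscores replaced)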

# Apply the ZeroGPU decorator conditionally (identity function when disabled)
gpu_decorator = spaces.GPU() if ENABLE_ZEROGPU else (lambda func: func)

@gpu_decorator
def generate_3d(scribble_image_dict, prompt, scribble_confidence, text_confidence, seed):
    print("Generating 3D model...")
    input_prompt = prompt # Keep track of original prompt for return on early exit
    if scribble_image_dict is None or scribble_image_dict.get("composite") is None:
        print("No scribble image provided.")
        return None, input_prompt # Return None for model, original prompt

    # --- Prompt Handling ---
    input_prompt = prompt.strip()
    if not input_prompt:
        print("Prompt is empty, attempting WD14 tagging...")
        try:
            # Get the user drawing (black on white) for tagging
            user_drawing_img = Image.fromarray(scribble_image_dict["composite"]).convert("RGB")
            tag_results = wd14_tagger.interrogate(user_drawing_img)
            if tag_results:
                ratings, tags = tag_results
                generated_prompt = format_wd14_tags(tags) # Use default threshold
                if generated_prompt:
                    print(f"WD14 generated prompt: {generated_prompt}")
                    input_prompt = generated_prompt
                else:
                    print("WD14 tagging did not produce tags above threshold.")
                    input_prompt = "3d object" # Fallback prompt
            else:
                print("WD14 tagging failed or tagger not initialized.")
                input_prompt = "3d object" # Fallback prompt
        except Exception as e:
            print(f"Error during WD14 tagging: {e}")
            input_prompt = "3d object" # Fallback prompt
    else:
        print(f"Using user provided prompt: {input_prompt}")
    # --- End Prompt Handling ---

    # --- Seed Handling ---
    current_seed = int(seed)
    print(f"Using seed: {current_seed}")
    # --- End Seed Handling ---

    # --- Image Preprocessing for TripoSG ---
    # Get the composite image again (safer in case dict is modified)
    # The composite might be RGBA if a layer was involved, ensure RGB for processing
    image_for_triposg = Image.fromarray(scribble_image_dict["composite"]).convert("RGB")
    # Preprocess the image: invert colors (black on white -> white on black)
    image_np = np.array(image_for_triposg)
    processed_image_np = 255 - image_np
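    # e.g. a black stroke (0, 0, 0) becomes white (255, 255, 255) on a black background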
    processed_image = Image.fromarray(processed_image_np)
    print("Image preprocessed for TripoSG.")
    # --- End Image Preprocessing ---

    # --- Generator Setup ---
    generator = torch.Generator(device='cuda').manual_seed(current_seed)
    # --- End Generator Setup ---

    # --- Run Pipeline ---
    print("Running pipeline...")
    try:
        out = pipe(
            processed_image,
            prompt=input_prompt, # Use the potentially generated prompt
            num_tokens=512, # Default value from example
            guidance_scale=0, # Default value from example
            num_inference_steps=16, # Default value from example
            attention_kwargs={
                "cross_attention_scale": text_confidence,       # prompt (text) influence
                "cross_attention_2_scale": scribble_confidence  # scribble influence
            },
            generator=generator,
            use_flash_decoder=False, # Default value from example
            dense_octree_depth=8, # Default value from example
            hierarchical_octree_depth=8 # Default value from example
        )
        print("Pipeline finished.")
    except Exception as e:
        print(f"Error during pipeline execution: {e}")
        return None, input_prompt # Return None for model, the prompt used
    # --- End Run Pipeline ---

    # --- Save Output ---
    if out.meshes and len(out.meshes) > 0:
        # Create a temporary file with .glb extension
        with tempfile.NamedTemporaryFile(suffix=".glb", delete=False) as tmpfile:
            output_path = tmpfile.name
        out.meshes[0].export(output_path)
        print(f"Mesh saved to temporary file: {output_path}")
        return output_path, input_prompt # Return model path and the prompt used
    else:
        print("Pipeline did not generate any meshes.")
        return None, input_prompt # Return None for model, the prompt used
    # --- End Save Output ---

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# TripoSG Scribble!!")
    gr.Markdown("""
### [GitHub](https://github.com/VAST-AI-Research/TripoSG) | [Paper](https://arxiv.org/abs/2502.06608) | [Project Page](https://yg256li.github.io/TripoSG-Page/)

### Fast 3D shape prototyping with simple scribble and text prompt. Presented by [Tripo](https://www.tripo3d.ai/).

- For local deployment, simply clone this space, set up the environment and run with DISABLE_ZEROGPU=1.
- Feel free to tune the scribble confidence to balance fidelity and alignment :)
""")
    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.ImageEditor(
                label="Scribble Input (Draw Black on White)",
                value=initial_value,
                image_mode="RGB",
                brush=gr.Brush(default_color="#000000", color_mode="fixed", default_size=4),
                interactive=True,
                eraser=gr.Brush(default_color="#FFFFFF", color_mode="fixed", default_size=20),
                canvas_size=(canvas_width, canvas_height),
                fixed_canvas=True,
                height=canvas_height + 128,
            )
        with gr.Column(scale=1):
            with gr.Row():
                prompt_input = gr.Textbox(label="Prompt", placeholder="e.g., a cat", scale=3)
                seed_input = gr.Number(label="Seed", value=0, precision=0, scale=1)
            with gr.Row(): # Add row for sliders
                confidence_input = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Scribble Confidence")
                prompt_confidence_input = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.05, label="Prompt Confidence")
            with gr.Row():
                submit_button = gr.Button("Generate 3D Model", variant="primary", scale=1)
                lucky_button = gr.Button("I'm Feeling Lucky", scale=1)            
            model_output = gr.Model3D(label="Generated 3D Model", interactive=False, height=384)

    # Define the inputs for the main generation function
    gen_inputs = [image_input, prompt_input, confidence_input, prompt_confidence_input, seed_input] # scribble, prompt, scribble confidence, prompt confidence, seed

    submit_button.click(
        fn=generate_3d,
        inputs=gen_inputs,
        outputs=[model_output, prompt_input] # Add prompt_input to outputs
    )

    # Define inputs for the lucky button (same as main button for the final call)
    lucky_gen_inputs = [image_input, prompt_input, confidence_input, prompt_confidence_input, seed_input] # same inputs as the main button

    lucky_button.click(
        fn=get_random_seed,
        inputs=[],
        outputs=[seed_input]
    ).then(
        fn=generate_3d,
        inputs=lucky_gen_inputs,
        outputs=[model_output, prompt_input] # Add prompt_input to outputs
    )

# Enable the request queue when running with ZeroGPU (recommended for @spaces.GPU functions)
if ENABLE_ZEROGPU:
    demo.queue()
print("Launching Gradio interface...")
demo.launch(share=False, server_name="0.0.0.0")