File size: 7,303 Bytes
3db5e69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import gradio as gr


def fn1(mesh_upload, state, y2z, y2x, z2x, upside_down):
    """Placeholder for the Step-1 mesh-processing callback.

    Stands in for the real pipeline that would load the uploaded mesh,
    apply the requested orientation transforms, and render the view/
    position/normal tensors. Returns a 9-tuple of ``None`` values, one
    per output component wired to ``step1_button.click``.
    """
    # Nine outputs: position map, normal map, position/normal/mask image
    # tensors, w2c matrix, mesh, mvp matrix, and the progress textbox.
    return tuple([None] * 9)


def fn2(position_images_tensor_path, normal_images_tensor_path, mask_images_tensor_path, w2c_tensor_path, text_prompt, selected_view, img_condition_seed, model_choice, edge_refinement):
    """Placeholder for the Step-2 image-condition generation callback.

    Stands in for the real text-to-image conditioning stage (SDXL/FLUX).
    Returns a 2-tuple of ``None`` values matching the two outputs wired
    to ``step2_button.click``: the condition image and the status textbox.
    """
    result = (None, None)  # (condition_image, step2_progress)
    return result


# Top-level Gradio UI: a three-step SeqTex texturing workflow
# (Step 1: mesh upload & view rendering -> Step 2: image-condition
# generation -> Step 3: final texture generation).
# delete_cache=(600, 600): purge cached files every 600 s, removing
# entries older than 600 s.
with gr.Blocks(delete_cache=(600, 600)) as demo:
    gr.Markdown("# 🎨 SeqTex: Generate Mesh Textures in Video Sequence")

    gr.Markdown("""
    ## πŸš€ Welcome to SeqTex!
    **SeqTex** is a cutting-edge AI system that generates high-quality textures for 3D meshes using image prompts (here we use image generator to get them from textual prompts).

    Choose to either **try our example models** below or **upload your own 3D mesh** to create stunning textures.
    """)

    gr.Markdown("---")

    gr.Markdown("## πŸ”§ Step 1: Upload & Process 3D Mesh")
    gr.Markdown("""
    **πŸ“‹ How to prepare your 3D mesh:**
    - Upload your 3D mesh in **.obj** or **.glb** format
    - **πŸ’‘ Pro Tip**:
        - For optimal results, ensure your mesh includes only one part with <span style="color:#e74c3c; font-weight:bold;">UV parameterization</span>
        - Otherwise, we'll combine all parts and generate UV parameterization using *xAtlas* (may take longer for high-poly meshes; may also fail for certain meshes)
    - **⚠️ Important**: We recommend adjusting your model using *Mesh Orientation Adjustments* to be **Z-UP oriented** for best results
    """)
    # Hidden per-session state holding artifacts produced by Step 1 and
    # consumed by Steps 2/3 (presumably filesystem paths to serialized
    # tensors, judging by the names — the stub callbacks here only ever
    # fill them with None).
    position_map_tensor_path = gr.State()
    normal_map_tensor_path = gr.State()
    position_images_tensor_path = gr.State()
    normal_images_tensor_path = gr.State()
    mask_images_tensor_path = gr.State()
    w2c_tensor_path = gr.State()
    mesh = gr.State()
    mvp_matrix_tensor_path = gr.State()

    # fixed_texture_map = Image.open("image.webp").convert("RGB")
    # Step 1
    with gr.Row():
        with gr.Column():
            mesh_upload = gr.File(label="πŸ“ Upload 3D Mesh", file_types=[".obj", ".glb"])
            # uv_tool = gr.Radio(["xAtlas", "UVAtlas"], label="UV parameterizer", value="xAtlas")

            gr.Markdown("**πŸ”„ Mesh Orientation Adjustments** (if needed):")
            # Axis-swap checkboxes fed straight into fn1 as booleans.
            y2z = gr.Checkbox(label="Y β†’ Z Transform", value=False, info="Rotate: Y becomes Z, -Z becomes Y")
            y2x = gr.Checkbox(label="Y β†’ X Transform", value=False, info="Rotate: Y becomes X, -X becomes Y")
            z2x = gr.Checkbox(label="Z β†’ X Transform", value=False, info="Rotate: Z becomes X, -X becomes Z")
            upside_down = gr.Checkbox(label="πŸ”ƒ Flip Vertically", value=False, info="Fix upside-down mesh orientation")
            step1_button = gr.Button("πŸ”„ Process Mesh & Generate Views", variant="primary")
            step1_progress = gr.Textbox(label="πŸ“Š Processing Status", interactive=False)

        with gr.Column():
            model_input = gr.Model3D(label="πŸ“ Processed 3D Model", height=500)

    with gr.Row(equal_height=True):
        rgb_views = gr.Image(label="πŸ“· Generated Views", type="pil", scale=3)
        position_map = gr.Image(label="πŸ—ΊοΈ Position Map", type="pil", scale=1)
        normal_map = gr.Image(label="🧭 Normal Map", type="pil", scale=1)

    # Wire Step 1. The UV parameterizer choice is hard-coded to "xAtlas"
    # via an inline gr.State (the interactive uv_tool radio above is
    # commented out).
    step1_button.click(
        fn=fn1,
        inputs=[mesh_upload, gr.State("xAtlas"), y2z, y2x, z2x, upside_down],
        outputs=[position_map_tensor_path, normal_map_tensor_path, position_images_tensor_path, normal_images_tensor_path, mask_images_tensor_path, w2c_tensor_path, mesh, mvp_matrix_tensor_path, step1_progress]
    )

    # Step 2
    gr.Markdown("---")
    gr.Markdown("## πŸ‘οΈ Step 2: Select View & Generate Image Condition")
    gr.Markdown("""
    **πŸ“‹ How to generate image condition:**
    - Your mesh will be rendered from **four viewpoints** (front, back, left, right)
    - Choose **one view** as your image condition
    - Enter a **descriptive text prompt** for the desired texture
    - Select your preferred AI model:
        - <span style="color:#27ae60; font-weight:bold;">🎯 SDXL</span>: Fast generation with depth + normal control, better details (often suffer from wrong highlights)
        - <span style="color:#3498db; font-weight:bold;">⚑ FLUX</span>: ~~High-quality generation with depth control (slower due to CPU offloading). Better work with **Edge Refinement**~~ (Not supported due to the memory limit of HF Space. You can try it locally)
    """)
    with gr.Row():
        with gr.Column():
            img_condition_seed = gr.Number(label="🎲 Random Seed", minimum=0, maximum=9999, step=1, value=42, info="Change for different results")
            selected_view = gr.Radio(["First View", "Second View", "Third View", "Fourth View"], label="πŸ“ Camera View", value="First View", info="Choose which viewpoint to use as reference")
            with gr.Row():
                # model_choice = gr.Radio(["SDXL", "FLUX"], label="πŸ€– AI Model", value="SDXL", info="SDXL: Fast, depth+normal control | FLUX: High-quality, slower processing")
                # FLUX intentionally removed from the choices (HF Space
                # memory limit) — only SDXL is selectable.
                model_choice = gr.Radio(["SDXL"], label="πŸ€– AI Model", value="SDXL", info="SDXL: Fast, depth+normal control | FLUX: High-quality, slower processing (Not supported due to the memory limit of HF Space)")
                edge_refinement = gr.Checkbox(label="✨ Edge Refinement", value=True, info="Smooth boundary artifacts (recommended for delightning highlights in the boundary)")
            text_prompt = gr.Textbox(label="πŸ’¬ Texture Description", placeholder="Describe the desired texture appearance (e.g., 'rustic wooden surface with weathered paint')", lines=2)
            step2_button = gr.Button("🎯 Generate Image Condition", variant="primary")
            step2_progress = gr.Textbox(label="πŸ“Š Generation Status", interactive=False)

        with gr.Column():
            condition_image = gr.Image(label="πŸ–ΌοΈ Generated Image Condition", type="pil") # , interactive=False
    # Wire Step 2: consumes the Step-1 state tensors plus the user's
    # prompt/seed/view/model choices; produces the condition image.
    step2_button.click(
        fn=fn2,
        inputs=[position_images_tensor_path, normal_images_tensor_path, mask_images_tensor_path, w2c_tensor_path, text_prompt, selected_view, img_condition_seed, model_choice, edge_refinement],
        outputs=[condition_image, step2_progress],
    )

    # Step 3
    gr.Markdown("---")
    gr.Markdown("## 🎨 Step 3: Generate Final Texture")
    gr.Markdown("""
    **πŸ“‹ How to generate final texture:**
    - The **SeqTex pipeline** will create a complete texture map for your model
    - View the results from multiple angles and download your textured 3D model (the viewport is a little bit dark)
    """)
    texture_map_tensor_path = gr.State()
    # NOTE(review): step3_button is never wired to a .click handler in
    # this file (there is no fn3) — presumably stripped for this demo,
    # or wired elsewhere; verify before shipping.
    with gr.Row():
        with gr.Column(scale=1):
            step3_button = gr.Button("🎨 Generate Final Texture", variant="primary")
            step3_progress = gr.Textbox(label="πŸ“Š Texture Generation Status", interactive=False)
            texture_map = gr.Image(label="πŸ† Generated Texture Map", interactive=False)
        with gr.Column(scale=2):
            rendered_imgs = gr.Image(label="πŸ–ΌοΈ Final Rendered Views")
            mv_branch_imgs = gr.Image(label="πŸ–ΌοΈ SeqTex Direct Output")
        # NOTE(review): Gradio documents `scale` as an int; 1.5 may be
        # coerced or rejected depending on the Gradio version — confirm.
        with gr.Column(scale=1.5):
            model_display = gr.Model3D(label="πŸ† Final Textured Model", height=500)

# Launch the app with default settings (blocking call).
demo.launch()