hysts HF Staff committed on
Commit
3db5e69
Β·
1 Parent(s): 87554e6
Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +124 -0
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: πŸƒ
4
  colorFrom: indigo
5
  colorTo: gray
6
  sdk: gradio
7
- sdk_version: 5.37.0
8
  app_file: app.py
9
  pinned: false
10
  ---
 
4
  colorFrom: indigo
5
  colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 5.35.0
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+
4
def fn1(mesh_upload, state, y2z, y2x, z2x, upside_down):
    """Placeholder for the Step-1 mesh-processing callback.

    Accepts the uploaded mesh, a UV-tool state value, and the four
    orientation-adjustment flags; yields a nine-element tuple of ``None``
    matching the nine outputs wired to ``step1_button.click``.
    """
    return (None,) * 9
6
+
7
+
8
def fn2(position_images_tensor_path, normal_images_tensor_path, mask_images_tensor_path, w2c_tensor_path, text_prompt, selected_view, img_condition_seed, model_choice, edge_refinement):
    """Placeholder for the Step-2 image-condition callback.

    Takes the cached geometry-tensor paths plus the user's prompt, view
    choice, seed, model selection, and edge-refinement flag; yields a
    two-element tuple of ``None`` for (condition image, progress text).
    """
    return (None,) * 2
10
+
11
+
12
# SeqTex demo UI: a three-step Gradio Blocks app.
#   Step 1: upload/process a 3D mesh and render geometry maps.
#   Step 2: pick a rendered view and generate an image condition from a text prompt.
#   Step 3: run the SeqTex pipeline to produce the final texture map.
# delete_cache=(600, 600): purge cached files every 600 s, deleting entries older than 600 s.
with gr.Blocks(delete_cache=(600, 600)) as demo:
    gr.Markdown("# 🎨 SeqTex: Generate Mesh Textures in Video Sequence")

    gr.Markdown("""
    ## πŸš€ Welcome to SeqTex!
    **SeqTex** is a cutting-edge AI system that generates high-quality textures for 3D meshes using image prompts (here we use image generator to get them from textual prompts).

    Choose to either **try our example models** below or **upload your own 3D mesh** to create stunning textures.
    """)

    gr.Markdown("---")

    gr.Markdown("## πŸ”§ Step 1: Upload & Process 3D Mesh")
    gr.Markdown("""
    **πŸ“‹ How to prepare your 3D mesh:**
    - Upload your 3D mesh in **.obj** or **.glb** format
    - **πŸ’‘ Pro Tip**:
        - For optimal results, ensure your mesh includes only one part with <span style="color:#e74c3c; font-weight:bold;">UV parameterization</span>
        - Otherwise, we'll combine all parts and generate UV parameterization using *xAtlas* (may take longer for high-poly meshes; may also fail for certain meshes)
    - **⚠️ Important**: We recommend adjusting your model using *Mesh Orientation Adjustments* to be **Z-UP oriented** for best results
    """)
    # Server-side state: tensor-file paths and the processed mesh produced by
    # Step 1 and consumed by Steps 2/3.
    position_map_tensor_path = gr.State()
    normal_map_tensor_path = gr.State()
    position_images_tensor_path = gr.State()
    normal_images_tensor_path = gr.State()
    mask_images_tensor_path = gr.State()
    w2c_tensor_path = gr.State()
    mesh = gr.State()
    mvp_matrix_tensor_path = gr.State()

    # fixed_texture_map = Image.open("image.webp").convert("RGB")
    # Step 1
    with gr.Row():
        with gr.Column():
            mesh_upload = gr.File(label="πŸ“ Upload 3D Mesh", file_types=[".obj", ".glb"])
            # uv_tool = gr.Radio(["xAtlas", "UVAtlas"], label="UV parameterizer", value="xAtlas")

            gr.Markdown("**πŸ”„ Mesh Orientation Adjustments** (if needed):")
            y2z = gr.Checkbox(label="Y β†’ Z Transform", value=False, info="Rotate: Y becomes Z, -Z becomes Y")
            y2x = gr.Checkbox(label="Y β†’ X Transform", value=False, info="Rotate: Y becomes X, -X becomes Y")
            z2x = gr.Checkbox(label="Z β†’ X Transform", value=False, info="Rotate: Z becomes X, -X becomes Z")
            upside_down = gr.Checkbox(label="πŸ”ƒ Flip Vertically", value=False, info="Fix upside-down mesh orientation")
            step1_button = gr.Button("πŸ”„ Process Mesh & Generate Views", variant="primary")
            step1_progress = gr.Textbox(label="πŸ“Š Processing Status", interactive=False)

        with gr.Column():
            model_input = gr.Model3D(label="πŸ“ Processed 3D Model", height=500)

    with gr.Row(equal_height=True):
        rgb_views = gr.Image(label="πŸ“· Generated Views", type="pil", scale=3)
        position_map = gr.Image(label="πŸ—ΊοΈ Position Map", type="pil", scale=1)
        normal_map = gr.Image(label="🧭 Normal Map", type="pil", scale=1)

    step1_button.click(
        fn=fn1,
        # gr.State("xAtlas") pins the UV parameterizer choice (the radio above is disabled).
        inputs=[mesh_upload, gr.State("xAtlas"), y2z, y2x, z2x, upside_down],
        outputs=[position_map_tensor_path, normal_map_tensor_path, position_images_tensor_path, normal_images_tensor_path, mask_images_tensor_path, w2c_tensor_path, mesh, mvp_matrix_tensor_path, step1_progress]
    )

    # Step 2
    gr.Markdown("---")
    gr.Markdown("## πŸ‘οΈ Step 2: Select View & Generate Image Condition")
    gr.Markdown("""
    **πŸ“‹ How to generate image condition:**
    - Your mesh will be rendered from **four viewpoints** (front, back, left, right)
    - Choose **one view** as your image condition
    - Enter a **descriptive text prompt** for the desired texture
    - Select your preferred AI model:
        - <span style="color:#27ae60; font-weight:bold;">🎯 SDXL</span>: Fast generation with depth + normal control, better details (often suffer from wrong highlights)
        - <span style="color:#3498db; font-weight:bold;">⚑ FLUX</span>: ~~High-quality generation with depth control (slower due to CPU offloading). Better work with **Edge Refinement**~~ (Not supported due to the memory limit of HF Space. You can try it locally)
    """)
    with gr.Row():
        with gr.Column():
            img_condition_seed = gr.Number(label="🎲 Random Seed", minimum=0, maximum=9999, step=1, value=42, info="Change for different results")
            selected_view = gr.Radio(["First View", "Second View", "Third View", "Fourth View"], label="πŸ“ Camera View", value="First View", info="Choose which viewpoint to use as reference")
            with gr.Row():
                # model_choice = gr.Radio(["SDXL", "FLUX"], label="πŸ€– AI Model", value="SDXL", info="SDXL: Fast, depth+normal control | FLUX: High-quality, slower processing")
                model_choice = gr.Radio(["SDXL"], label="πŸ€– AI Model", value="SDXL", info="SDXL: Fast, depth+normal control | FLUX: High-quality, slower processing (Not supported due to the memory limit of HF Space)")
                edge_refinement = gr.Checkbox(label="✨ Edge Refinement", value=True, info="Smooth boundary artifacts (recommended for delightning highlights in the boundary)")
            text_prompt = gr.Textbox(label="πŸ’¬ Texture Description", placeholder="Describe the desired texture appearance (e.g., 'rustic wooden surface with weathered paint')", lines=2)
            step2_button = gr.Button("🎯 Generate Image Condition", variant="primary")
            step2_progress = gr.Textbox(label="πŸ“Š Generation Status", interactive=False)

        with gr.Column():
            condition_image = gr.Image(label="πŸ–ΌοΈ Generated Image Condition", type="pil")  # , interactive=False

    step2_button.click(
        fn=fn2,
        inputs=[position_images_tensor_path, normal_images_tensor_path, mask_images_tensor_path, w2c_tensor_path, text_prompt, selected_view, img_condition_seed, model_choice, edge_refinement],
        outputs=[condition_image, step2_progress],
    )

    # Step 3
    gr.Markdown("---")
    gr.Markdown("## 🎨 Step 3: Generate Final Texture")
    gr.Markdown("""
    **πŸ“‹ How to generate final texture:**
    - The **SeqTex pipeline** will create a complete texture map for your model
    - View the results from multiple angles and download your textured 3D model (the viewport is a little bit dark)
    """)
    texture_map_tensor_path = gr.State()
    with gr.Row():
        # Gradio requires integer `scale` values (a float such as 1.5 triggers a
        # warning and is not honored reliably), so the former 1 : 2 : 1.5 ratio
        # is expressed as the equivalent integers 2 : 4 : 3.
        with gr.Column(scale=2):
            step3_button = gr.Button("🎨 Generate Final Texture", variant="primary")
            step3_progress = gr.Textbox(label="πŸ“Š Texture Generation Status", interactive=False)
            texture_map = gr.Image(label="πŸ† Generated Texture Map", interactive=False)
        with gr.Column(scale=4):
            rendered_imgs = gr.Image(label="πŸ–ΌοΈ Final Rendered Views")
            mv_branch_imgs = gr.Image(label="πŸ–ΌοΈ SeqTex Direct Output")
        with gr.Column(scale=3):
            model_display = gr.Model3D(label="πŸ† Final Textured Model", height=500)

demo.launch()