HF User committed on
Commit e7b9fb6 · 0 Parent(s)

🚀 Fresh deploy of Magic Articulate Enhanced MVP

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the rest.
Files changed (50):
  1. .gitattributes +37 -0
  2. DEPLOYMENT.md +197 -0
  3. README.md +132 -0
  4. app.py +704 -0
  5. data_utils/README.md +43 -0
  6. data_utils/__init__.py +8 -0
  7. data_utils/clean_skin_in_npz.py +95 -0
  8. data_utils/convert_npz_to_mesh_rig.py +107 -0
  9. data_utils/data_loader.py +121 -0
  10. data_utils/issue_data_list.txt +123 -0
  11. data_utils/pyrender_wrapper.py +135 -0
  12. data_utils/read_npz.py +43 -0
  13. data_utils/read_rig_mesh_from_glb.py +198 -0
  14. data_utils/render_data.py +61 -0
  15. data_utils/save_npz.py +252 -0
  16. data_utils/update_npz_rm_issue_data.py +59 -0
  17. download_models.py +80 -0
  18. magic_articulate_plus/__init__.py +20 -0
  19. magic_articulate_plus/articulate_api.py +899 -0
  20. requirements.txt +61 -0
  21. skeleton_models/__init__.py +9 -0
  22. skeleton_models/shape_opt.py +406 -0
  23. skeleton_models/skeletongen.py +198 -0
  24. src/config.py +90 -0
  25. src/enhanced_magic_wrapper.py +301 -0
  26. src/utils.py +290 -0
  27. third_party/Michelangelo/LICENSE +674 -0
  28. third_party/Michelangelo/README.md +113 -0
  29. third_party/Michelangelo/configs/shapevae-256.yaml +46 -0
  30. third_party/Michelangelo/encode.py +101 -0
  31. third_party/Michelangelo/inference.py +181 -0
  32. third_party/Michelangelo/michelangelo/__init__.py +1 -0
  33. third_party/Michelangelo/michelangelo/data/__init__.py +1 -0
  34. third_party/Michelangelo/michelangelo/data/templates.json +69 -0
  35. third_party/Michelangelo/michelangelo/data/transforms.py +407 -0
  36. third_party/Michelangelo/michelangelo/data/utils.py +59 -0
  37. third_party/Michelangelo/michelangelo/graphics/__init__.py +1 -0
  38. third_party/Michelangelo/michelangelo/graphics/primitives/__init__.py +9 -0
  39. third_party/Michelangelo/michelangelo/graphics/primitives/mesh.py +114 -0
  40. third_party/Michelangelo/michelangelo/graphics/primitives/volume.py +21 -0
  41. third_party/Michelangelo/michelangelo/models/__init__.py +1 -0
  42. third_party/Michelangelo/michelangelo/models/asl_diffusion/__init__.py +1 -0
  43. third_party/Michelangelo/michelangelo/models/asl_diffusion/asl_diffuser_pl_module.py +482 -0
  44. third_party/Michelangelo/michelangelo/models/asl_diffusion/asl_udt.py +104 -0
  45. third_party/Michelangelo/michelangelo/models/asl_diffusion/base.py +13 -0
  46. third_party/Michelangelo/michelangelo/models/asl_diffusion/clip_asl_diffuser_pl_module.py +393 -0
  47. third_party/Michelangelo/michelangelo/models/asl_diffusion/inference_utils.py +80 -0
  48. third_party/Michelangelo/michelangelo/models/conditional_encoders/__init__.py +3 -0
  49. third_party/Michelangelo/michelangelo/models/conditional_encoders/clip.py +89 -0
  50. third_party/Michelangelo/michelangelo/models/conditional_encoders/encoder_factory.py +562 -0
.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ data_utils/examples/0a59c5ffa4a1476bac6d540b79947f31_render_results.png filter=lfs diff=lfs merge=lfs -text
+ examples/bear.obj filter=lfs diff=lfs merge=lfs -text
DEPLOYMENT.md ADDED
@@ -0,0 +1,197 @@
+ # 🚀 MagicArticulate MVP Deployment Guide
+
+ ## Deploying to a Hugging Face Space
+
+ ### 1. Prerequisites
+
+ Make sure you have the following accounts and permissions:
+ - A Hugging Face account
+ - Git configured locally
+ - MagicArticulate model weights (optional)
+
+ ### 2. Create the HF Space
+
+ 1. Visit [Hugging Face Spaces](https://huggingface.co/spaces)
+ 2. Click "Create new Space"
+ 3. Configure the Space:
+    - **Space name**: `magic-articulate-mvp` (or any name you like)
+    - **License**: MIT
+    - **SDK**: Gradio
+    - **Hardware**: ZeroGPU (free)
+    - **Visibility**: Public
+
+ ### 3. Clone and Set Up
+
+ ```bash
+ # Clone your HF Space repository
+ git clone https://huggingface.co/spaces/YOUR_USERNAME/magic-articulate-mvp
+ cd magic-articulate-mvp
+
+ # Copy the MVP files
+ cp -r /path/to/articulate-hub/mvp-space/* .
+
+ # Set up MagicArticulate
+ git clone https://github.com/Seed3D/MagicArticulate.git
+ # or create a symbolic link
+ ln -s /path/to/MagicArticulate .
+ ```
+
+ ### 4. Configuration Files
+
+ Make sure the following files are configured correctly:
+
+ #### README.md (HF Space configuration)
+ ```yaml
+ ---
+ title: MagicArticulate MVP
+ emoji: 🎯
+ colorFrom: purple
+ colorTo: red
+ sdk: gradio
+ sdk_version: 4.44.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ hardware: zero-gpu
+ ---
+ ```
+
+ #### requirements.txt
+ All required dependencies are already listed, including:
+ - gradio==4.44.0
+ - spaces[gpu]
+ - torch==2.1.1
+ - other dependencies...
+
+ ### 5. Push to the HF Space
+
+ ```bash
+ # Stage all files
+ git add .
+
+ # Commit the changes
+ git commit -m "🎯 Initial MagicArticulate MVP deployment
+
+ Features:
+ - 3D model upload and processing
+ - Text-guided skeleton generation
+ - ZeroGPU integration
+ - Professional Gradio interface
+ - Multiple output formats
+
+ Ready for investor demonstrations!"
+
+ # Push to the HF Space
+ git push
+ ```
+
+ ### 6. Verify the Deployment
+
+ 1. Visit your HF Space URL
+ 2. Wait for the build to finish (usually 5-10 minutes)
+ 3. Test the basics:
+    - File upload
+    - Processing pipeline
+    - Result download
+
+ ### 7. Troubleshooting
+
+ #### Common issues:
+
+ **Build fails**
+ - Check the dependency versions in requirements.txt
+ - Make sure all files were uploaded correctly
+ - Inspect the Space's build logs
+
+ **ZeroGPU does not work**
+ - Confirm README.md contains `hardware: zero-gpu`
+ - Check how the `@spaces.GPU` decorator is used (see the snippet below)
+ - Verify that your HF account has ZeroGPU access
+
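For reference, ZeroGPU only provisions a GPU for calls into functions decorated with `@spaces.GPU`, which is how this Space's `app.py` wires it up. A minimal pattern (the function name and body here are illustrative, not the actual wrapper API):

```python
import spaces
import torch

@spaces.GPU(duration=120)  # request a ZeroGPU slot for up to 120 s per call
def generate_skeleton(mesh_path: str, prompt: str):
    # All CUDA work must happen inside the decorated function;
    # outside of it, the Space process only sees the CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    ...
```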
+ **MagicArticulate import fails**
+ - Make sure the MagicArticulate directory structure is correct
+ - Check the relative path configuration
+ - Verify that the dependencies are complete
+
+ **Out of memory**
+ - Reduce batch_size
+ - Optimize model loading
+ - Use fp16 precision
+
+ ### 8. Performance Optimization
+
+ #### Startup optimization:
+ ```python
+ # Cache the wrapper in app.py so the model only loads once.
+ # (This Space uses Gradio, so use a plain function cache rather than
+ # Streamlit's @st.cache_resource, which does not apply here.)
+ from functools import lru_cache
+
+ @lru_cache(maxsize=1)
+ def load_model():
+     return MagicArticulateWrapper()
+ ```
+
+ #### Memory optimization (see the sketch below):
+ - Use torch.no_grad()
+ - Clean up temporary files promptly
+ - Limit the number of concurrent requests
+
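A minimal sketch of what the memory bullets above look like in code; `model` and `inputs` are placeholders, not the actual wrapper API:

```python
import torch

def run_inference(model, inputs):
    model = model.half().eval()      # fp16 halves weight/activation memory
    with torch.no_grad():            # skip autograd bookkeeping at inference
        outputs = model(inputs.half())
    torch.cuda.empty_cache()         # release cached GPU blocks between requests
    return outputs
```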
+ ### 9. Monitoring and Maintenance
+
+ #### Key metrics:
+ - Processing success rate
+ - Average processing time
+ - GPU utilization
+ - User satisfaction
+
+ #### Routine maintenance:
+ - Update dependency versions
+ - Optimize model performance
+ - Collect user feedback
+ - Fix known issues
+
+ ### 10. Roadmap
+
+ #### Short term:
+ - Add more example models
+ - Polish the interface experience
+ - Add a processing-status display
+ - Support more file formats
+
+ #### Long term:
+ - Integrate into the main ArticulateHub platform
+ - Add user management
+ - Implement batch processing
+ - Integrate Three.js visualization
+
+ ## 📊 Deployment Checklist
+
+ - [ ] HF Space created
+ - [ ] All files uploaded correctly
+ - [ ] README.md configured correctly
+ - [ ] requirements.txt includes all dependencies
+ - [ ] MagicArticulate integrated correctly
+ - [ ] ZeroGPU configuration enabled
+ - [ ] Basic functional tests pass
+ - [ ] Error handling works
+ - [ ] Example files available
+ - [ ] Documentation up to date
+
+ ## 🎯 Success Criteria
+
+ The MVP deployment is successful when:
+ 1. ✅ The Space is reachable
+ 2. ✅ File upload works
+ 3. ✅ The processing pipeline runs without errors
+ 4. ✅ Results can be downloaded
+ 5. ✅ The interface is friendly and professional
+ 6. ✅ Processing time is reasonable (<2 minutes)
+ 7. ✅ It is ready for investor demos
+
+ ## 🔗 Useful Links
+
+ - [Hugging Face Spaces docs](https://huggingface.co/docs/hub/spaces)
+ - [ZeroGPU guide](https://huggingface.co/docs/hub/spaces-zerogpu)
+ - [Gradio docs](https://gradio.app/docs)
+ - [MagicArticulate project](https://github.com/Seed3D/MagicArticulate)
+
+ ---
+
+ **You are ready to demo your AI-powered 3D skeleton generation technology to investors!** 🎉
README.md ADDED
@@ -0,0 +1,132 @@
+ ---
+ title: Magic Articulate Enhanced
+ emoji: 🎯
+ colorFrom: purple
+ colorTo: red
+ sdk: gradio
+ sdk_version: 5.36.2
+ app_file: app.py
+ pinned: false
+ license: mit
+ hardware: zero-gpu
+ ---
+
+ # 🎯 Magic Articulate Enhanced
+
+ [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Nomad2082/Magic-plus-1)
+ [![ZeroGPU](https://img.shields.io/badge/ZeroGPU-Enabled-green)](https://huggingface.co/zero-gpu-explorer)
+
+ ## ✨ Enhanced Features
+
+ 🚀 **Revolutionary 3D Skeletal Rigging with AI**
+
+ This enhanced version of MagicArticulate provides:
+
+ ### 🔥 Core Capabilities
+ - **📁 Universal Model Support** - Upload ANY 3D model (OBJ, GLB, PLY, STL, FBX, DAE)
+ - **🤖 AI-Powered Rigging** - Automatic skeletal structure generation
+ - **🎨 Multi-Format Output** - Download as OBJ, TXT, or a complete ZIP package
+ - **👁️ Real-time 3D Preview** - Interactive Three.js visualization
+ - **⚡ ZeroGPU Acceleration** - Free GPU processing in 30-120 seconds
+
+ ### 🆕 Enhanced Features
+ - ✅ **User Upload Support** - No more demo-only limitations
+ - ✅ **Advanced Model Validation** - Automatic repair and optimization
+ - ✅ **Professional Output Formats** - Industry-standard skeletal data
+ - ✅ **Session Management** - Multi-user concurrent processing
+ - ✅ **Intelligent Preprocessing** - Format conversion and mesh optimization
+
+ ## 🎯 Perfect for Investor Demonstrations
+
+ This MVP showcases a complete AI-driven 3D workflow:
+
+ 1. **Upload** - Any 3D model from your device
+ 2. **Process** - AI generates an optimal skeletal structure
+ 3. **Preview** - Real-time 3D visualization
+ 4. **Download** - Professional multi-format outputs
+
+ ## 🚀 Quick Start
+
+ 1. **Upload your 3D model** (supports most common formats)
+ 2. **Describe your requirements** (e.g., "human skeleton for animation")
+ 3. **Click Generate** and wait 30-120 seconds
+ 4. **Preview and download** your rigged skeleton
+
+ ## 💡 Use Cases
+
+ - **Game Development** - Character rigging automation
+ - **Animation Studios** - Rapid skeleton prototyping
+ - **AR/VR Applications** - Real-time avatar creation
+ - **3D Printing** - Articulated model preparation
+ - **Research & Education** - Skeletal anatomy studies
+
+ ## 🔧 Technical Details
+
+ ### Supported Input Formats
+ - **OBJ** - Wavefront object files
+ - **GLB/GLTF** - GL Transmission Format
+ - **PLY** - Polygon file format
+ - **STL** - Stereolithography format
+ - **FBX** - Filmbox format
+ - **DAE** - COLLADA format
+
+ ### Output Formats
+ - **OBJ** - 3D geometric representation of the skeleton
+ - **TXT** - Traditional rigging format for animation software
+ - **ZIP** - Complete package with all formats and a processing report
+
+ ### Processing Pipeline (see the sketch below)
+ 1. **Model Validation** - File format and mesh integrity checks
+ 2. **Automatic Repair** - Fix common mesh issues (holes, normals, duplicates)
+ 3. **Optimization** - Simplify complex models for faster processing
+ 4. **AI Generation** - Neural-network skeletal structure prediction
+ 5. **Post-processing** - Joint optimization and bone hierarchy construction
+
+ ## 🎮 Example Use Cases
86
+
87
+ ### Game Character Rigging
88
+ ```
89
+ Input: Character.fbx (game asset)
90
+ Prompt: "humanoid skeleton for game animation with proper joint hierarchy"
91
+ Output: Complete rigging data ready for Unity/Unreal
92
+ ```
93
+
94
+ ### Animal Animation
95
+ ```
96
+ Input: Dog.obj (3D scan)
97
+ Prompt: "quadruped skeleton with spine and tail bones"
98
+ Output: Anatomically correct animal rig
99
+ ```
100
+
101
+ ### Mechanical Rigging
102
+ ```
103
+ Input: Robot.glb (CAD model)
104
+ Prompt: "mechanical joints for robotic movement"
105
+ Output: Engineering-ready joint structure
106
+ ```
107
+
108
+ ## 🧬 Powered by Advanced AI
109
+
110
+ - **MagicArticulate Neural Network** - State-of-the-art skeletal generation
111
+ - **Hugging Face ZeroGPU** - Free high-performance computing
112
+ - **Advanced Preprocessing** - Intelligent model optimization
113
+ - **Multi-User Architecture** - Concurrent processing support
114
+
115
+ ## 📊 Performance
116
+
117
+ - **Processing Time**: 30-120 seconds (depending on model complexity)
118
+ - **Max File Size**: 100MB
119
+ - **Max Vertices**: 100,000 (auto-simplified if needed)
120
+ - **Concurrent Users**: Multi-session support
121
+ - **Accuracy**: State-of-the-art AI skeletal prediction
122
+
123
+ ## Citation
124
+
125
+ If you use this work, please cite:
126
+ ```
127
+ @article{magicarticulate2024,
128
+ title={MagicArticulate: Automatic Skeletal Rigging for 3D Models},
129
+ author={ByteDance Research},
130
+ year={2024}
131
+ }
132
+ ```
app.py ADDED
@@ -0,0 +1,704 @@
+ """
+ MagicArticulate MVP - enhanced Gradio app.
+ Supports multi-format file downloads and a skeleton preview.
+ """
+
+ import os
+ import sys
+ import time
+ import json  # needed by create_visualization_html (json.dumps)
+ import logging
+ import tempfile
+ import traceback
+ from pathlib import Path
+ from typing import Optional, Dict, Any, List, Tuple
+ import shutil
+ import zipfile
+
+ import gradio as gr
+ import spaces
+ import torch
+ import numpy as np
+
+ # Add the src directory to the import path
+ sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
+
+ from enhanced_magic_wrapper import EnhancedMagicWrapper
+ from config import get_config, DEMO_PROMPTS, EXAMPLE_MODELS
+ from src.utils import (
+     validate_file, get_model_info, cleanup_temp_files,
+     format_processing_time, get_prompt_suggestions,
+     create_processing_status, estimate_processing_time,
+     generate_download_filename, safe_json_serialize
+ )
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Load the configuration
+ config = get_config()
+
+ # Global state
+ magic_wrapper = None
+ processing_status = {}
+ session_results = {}  # stores processing results per session
+
+ def initialize_app():
+     """Initialize the application."""
+     global magic_wrapper
+
+     try:
+         logger.info("🚀 Initializing MagicArticulate MVP...")
+         logger.info(f"🔍 Current working directory: {os.getcwd()}")
+         logger.info(f"🔍 Script directory: {os.path.dirname(__file__)}")
+
+         # Check the key directory structure
+         directories = ['src', 'utils', 'skeleton_models', 'magic_articulate_plus', 'third_party']
+         for dir_name in directories:
+             dir_path = os.path.join(os.getcwd(), dir_name)
+             exists = os.path.exists(dir_path)
+             logger.info(f"🔍 Directory {dir_name}: exists={exists}")
+             if exists and os.path.isdir(dir_path):
+                 try:
+                     contents = os.listdir(dir_path)[:5]  # show only the first 5 entries
+                     logger.info(f"🔍 Contents (first 5): {contents}")
+                 except Exception as e:
+                     logger.warning(f"🔍 Could not list contents: {e}")
+
+         # First, download the required model files
+         try:
+             logger.info("📥 Starting model download...")
+             from download_models import download_models
+             download_models()
+         except Exception as e:
+             logger.warning(f"⚠️ Problem during model download: {e}")
+             logger.warning(f"⚠️ Download traceback: {traceback.format_exc()}")
+
+         # Create the enhanced wrapper instance (supports real 3D model processing)
+         logger.info("🔧 Creating EnhancedMagicWrapper instance...")
+         magic_wrapper = EnhancedMagicWrapper()
+
+         # Initialize the wrapper
+         logger.info("🔧 Initializing wrapper...")
+         if magic_wrapper.initialize():
+             logger.info("✅ MagicArticulate MVP initialized successfully")
+             return True
+         else:
+             logger.error("❌ Failed to initialize MagicArticulate wrapper")
+             return False
+
+     except Exception as e:
+         logger.error(f"💥 App initialization failed: {str(e)}")
+         logger.error(traceback.format_exc())
+         return False
+
+ def create_download_package(output_files: Dict[str, str], session_id: str) -> str:
+     """
+     Create a ZIP package containing all output files.
+
+     Args:
+         output_files: Dict of output file paths
+         session_id: Session ID
+
+     Returns:
+         Path to the ZIP file
+     """
+     try:
+         # Create a temporary directory
+         temp_dir = Path(tempfile.mkdtemp())
+         zip_path = temp_dir / f"skeleton_results_{session_id}.zip"
+
+         # Create the ZIP file
+         with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
+             for file_type, file_path in output_files.items():
+                 if os.path.exists(file_path):
+                     # Use descriptive archive names
+                     if 'skeleton_json' in file_type:
+                         arcname = "skeleton_data.json"
+                     elif 'skeleton_obj' in file_type:
+                         arcname = "skeleton_model.obj"
+                     elif 'skeleton_txt' in file_type:
+                         arcname = "skeleton_rig.txt"
+                     elif 'processed_mesh' in file_type:
+                         arcname = "processed_mesh.obj"
+                     elif 'report' in file_type:
+                         arcname = "processing_report.json"
+                     else:
+                         arcname = os.path.basename(file_path)
+
+                     zipf.write(file_path, arcname)
+                     logger.info(f"Added {arcname} to ZIP")
+
+         logger.info(f"Created download package: {zip_path}")
+         return str(zip_path)
+
+     except Exception as e:
+         logger.error(f"Failed to create download package: {str(e)}")
+         return None
+
+ @spaces.GPU(duration=120)
+ def process_3d_model_gpu(
+     model_file: gr.File,
+     prompt: str,
+     confidence_threshold: float,
+     generate_preview: bool
+ ) -> Tuple[str, str, Any, Any, Any, Any, str, str]:
+     """
+     GPU processing function (runs on ZeroGPU).
+     Returns several files for download.
+
+     Returns:
+         (status, text preview, OBJ download, TXT download, ZIP download,
+          processing info, error message, skeleton data)
+     """
+     global magic_wrapper, session_results
+
+     start_time = time.time()
+     session_id = f"session_{int(start_time)}"
+
+     try:
+         logger.info(f"🔄 Starting GPU processing for session: {session_id}")
+
+         # Validate inputs
+         if model_file is None:
+             return "❌ Error", "", None, None, None, None, "", "Please upload a 3D model file"
+
+         if not prompt.strip():
+             prompt = DEMO_PROMPTS['generic']
+             logger.info(f"Using default prompt: {prompt}")
+
+         # Validate the file
+         file_path = model_file.name
+         is_valid, error_msg = validate_file(file_path, config['file_limits']['max_size_mb'])
+         if not is_valid:
+             return "❌ Error", "", None, None, None, None, "", f"File validation failed: {error_msg}"
+
+         # Get model info
+         model_info = get_model_info(file_path)
+         logger.info(f"📊 Model info: {model_info}")
+
+         # Estimate the processing time
+         estimated_time = estimate_processing_time(model_info)
+         logger.info(f"⏱️ Estimated processing time: {estimated_time:.1f}s")
+
+         # Update the processing status
+         processing_status[session_id] = create_processing_status(
+             "preparing", 0.1, "Preparing the 3D model..."
+         )
+
+         # Call MagicArticulate
+         if magic_wrapper is None:
+             logger.error("MagicArticulate wrapper not initialized")
+             return "❌ Error", "", None, None, None, None, "", "AI model not initialized"
+
+         processing_status[session_id] = create_processing_status(
+             "processing", 0.3, "Generating the skeleton structure..."
+         )
+
+         # Run the processing
+         result = magic_wrapper.process_3d_model(
+             model_file_path=file_path,
+             prompt=prompt,
+             confidence_threshold=confidence_threshold,
+             generate_preview=generate_preview
+         )
+
+         processing_status[session_id] = create_processing_status(
+             "finalizing", 0.9, "Preparing output files..."
+         )
+
+         # Handle the result
+         if not result['success']:
+             error_msg = result.get('error', 'Unknown error')
+             logger.error(f"Processing failed: {error_msg}")
+             return "❌ Processing failed", "", None, None, None, None, "", error_msg
+
+         # Save the result for this session
+         session_results[session_id] = result
+
+         # Prepare the output data
+         skeleton_data = result['skeleton_data']
+         output_files = result['output_files']
+         processing_info = result['processing_info']
+
+         # Format the skeleton data as a text preview
+         skeleton_json = f"""Skeleton data preview
+ ===================
+
+ Joint count: {skeleton_data.get('joint_count', 0)}
+ Bone count: {skeleton_data.get('bone_count', 0)}
+ Root index: {skeleton_data.get('root_index', 0)}
+
+ Joint coordinates (first 10):
+ {str(skeleton_data.get('joints', [])[:10])}
+
+ Bone connections (first 10):
+ {str(skeleton_data.get('bones', [])[:10])}
+
+ User prompt: {skeleton_data.get('user_prompt', 'N/A')}
+ """
+
+         # Prepare the individual files for download
+         obj_file = output_files.get('skeleton_obj', None)
+         txt_file = output_files.get('skeleton_txt', None)
+
+         # Create a ZIP containing all files
+         zip_file = create_download_package(output_files, session_id)
+
+         # Processing time
+         processing_time = time.time() - start_time
+
+         # Prepare the processing info
+         info_text = f"""
+ ## Processing complete! ✅
+
+ ### 📊 Processing statistics
+ - **Input file**: {processing_info.get('input_file', 'Unknown')}
+ - **Processing time**: {format_processing_time(processing_time)}
+ - **Prompt**: {processing_info.get('prompt', 'None')}
+
+ ### 🦴 Skeleton data
+ - **Joint count**: {processing_info.get('joint_count', 0)}
+ - **Bone count**: {processing_info.get('bone_count', 0)}
+ - **Root index**: {skeleton_data.get('root_index', 0)}
+
+ ### 📁 Downloadable files
+ 1. **Skeleton model (OBJ)** - geometric representation of the 3D skeleton, viewable in 3D software
+ 2. **Rigging data (TXT)** - traditional rigging format, suitable for import into animation software
+ 3. **Complete package (ZIP)** - archive containing all output files
+
+ ### 💡 Tips
+ - The OBJ file opens directly in Blender, Maya, and other 3D software
+ - The TXT file follows the traditional rigging convention, so it integrates into existing workflows
+ - The ZIP contains all files plus a processing report, convenient for archiving and sharing
+ """
+
+         processing_status[session_id] = create_processing_status(
+             "completed", 1.0, "Processing complete!"
+         )
+
+         logger.info(f"✅ Processing completed successfully in {processing_time:.1f}s")
+
+         return (
+             "✅ Processing complete",
+             skeleton_json,
+             obj_file,
+             txt_file,
+             zip_file,
+             info_text,
+             "",
+             skeleton_data  # raw skeleton_data for the 3D preview
+         )
+
+     except Exception as e:
+         processing_time = time.time() - start_time
+         error_msg = f"An error occurred during processing: {str(e)}"
+         logger.error(f"💥 Processing error: {error_msg}")
+         logger.error(traceback.format_exc())
+
+         processing_status[session_id] = create_processing_status(
+             "error", 0.0, error_msg
+         )
+
+         return (
+             "❌ Processing failed",
+             "",
+             None,
+             None,
+             None,
+             f"Processing time: {format_processing_time(processing_time)}",
+             error_msg,
+             None  # empty skeleton_data
+         )
+
+ def create_visualization_html(skeleton_data: Dict[str, Any]) -> str:
+     """
+     Build the HTML for the skeleton visualization.
+     Uses Three.js for a simple 3D display.
+     """
+     joints = skeleton_data.get('joints', [])
+     bones = skeleton_data.get('bones', [])
+
+     html_content = f"""
+     <div id="skeleton-viewer" style="width: 100%; height: 400px; border: 1px solid #ddd;">
+         <canvas id="three-canvas" style="width: 100%; height: 100%;"></canvas>
+     </div>
+     <script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
+     <script>
+         // Simple Three.js skeleton visualization
+         const scene = new THREE.Scene();
+         scene.background = new THREE.Color(0xf0f0f0);
+
+         const camera = new THREE.PerspectiveCamera(75, 1, 0.1, 1000);
+         camera.position.set(2, 2, 2);
+         camera.lookAt(0, 0, 0);
+
+         const renderer = new THREE.WebGLRenderer({{canvas: document.getElementById('three-canvas')}});
+         renderer.setSize(400, 400);
+
+         // Add a light source
+         const light = new THREE.DirectionalLight(0xffffff, 1);
+         light.position.set(1, 1, 1);
+         scene.add(light);
+
+         // Add a ground grid
+         const gridHelper = new THREE.GridHelper(4, 10);
+         scene.add(gridHelper);
+
+         // Skeleton data
+         const joints = {json.dumps(joints)};
+         const bones = {json.dumps(bones)};
+
+         // Create a sphere for each joint
+         joints.forEach((joint, index) => {{
+             const geometry = new THREE.SphereGeometry(0.05);
+             const material = new THREE.MeshPhongMaterial({{color: 0xff0000}});
+             const sphere = new THREE.Mesh(geometry, material);
+             sphere.position.set(joint[0], joint[1], joint[2]);
+             scene.add(sphere);
+         }});
+
+         // Create a line for each bone
+         bones.forEach(bone => {{
+             const start = joints[bone[0]];
+             const end = joints[bone[1]];
+
+             const points = [];
+             points.push(new THREE.Vector3(start[0], start[1], start[2]));
+             points.push(new THREE.Vector3(end[0], end[1], end[2]));
+
+             const geometry = new THREE.BufferGeometry().setFromPoints(points);
+             const material = new THREE.LineBasicMaterial({{color: 0x0000ff}});
+             const line = new THREE.Line(geometry, material);
+             scene.add(line);
+         }});
+
+         // Animation loop
+         function animate() {{
+             requestAnimationFrame(animate);
+             scene.rotation.y += 0.01;
+             renderer.render(scene, camera);
+         }}
+         animate();
+     </script>
+     """
+
+     return html_content
+
+ def create_demo_interface():
+     """Create the enhanced Gradio interface."""
+
+     # Custom CSS
+     custom_css = """
+     .gradio-container {
+         max-width: 1400px;
+         margin: 0 auto;
+     }
+
+     .download-section {
+         border: 2px solid #e0e0e0;
+         border-radius: 10px;
+         padding: 20px;
+         margin: 10px 0;
+         background-color: #f9f9f9;
+     }
+
+     .status-box {
+         border: 1px solid #ddd;
+         border-radius: 8px;
+         padding: 15px;
+         margin: 10px 0;
+         background-color: #f8f9fa;
+     }
+
+     .success-status {
+         border-color: #28a745;
+         background-color: #d4edda;
+     }
+
+     .error-status {
+         border-color: #dc3545;
+         background-color: #f8d7da;
+     }
+
+     .info-panel {
+         font-family: monospace;
+         font-size: 14px;
+         line-height: 1.4;
+     }
+
+     .file-download-btn {
+         margin: 5px;
+         min-width: 200px;
+     }
+     """
+
+     # Build the interface
+     with gr.Blocks(
+         title=config['ui']['title'] + " Enhanced",
+         theme=gr.themes.Soft(),
+         css=custom_css
+     ) as demo:
+
+         # Title and description
+         gr.Markdown(f"""
+         # {config['ui']['title']} - Enhanced
+
+         {config['ui']['description']}
+
+         ### ✨ Enhanced features
+         - 📁 **Multi-format downloads** - OBJ, TXT, ZIP
+         - 👁️ **Skeleton preview** - 3D visualization
+         - 📊 **Detailed statistics** - complete processing information
+         - 🚀 **Batch download** - grab all files in one click
+         """)
+
+         # Main layout
+         with gr.Row():
+             # Left column - inputs
+             with gr.Column(scale=1):
+                 gr.Markdown("### 📤 Input settings")
+
+                 # File upload
+                 model_file = gr.File(
+                     label="Upload 3D model",
+                     file_types=['.obj', '.glb', '.ply', '.stl'],
+                     file_count="single"
+                 )
+
+                 # Prompt input
+                 prompt_input = gr.Textbox(
+                     label="Prompt",
+                     placeholder="Describe the skeleton you want, e.g.: realistic human skeleton for animation",
+                     lines=3,
+                     value=DEMO_PROMPTS['generic']
+                 )
+
+                 # Prompt suggestions
+                 with gr.Accordion("💡 Prompt suggestions", open=False):
+                     for key, prompt in DEMO_PROMPTS.items():
+                         gr.Button(
+                             f"{key.title()}: {prompt}",
+                             size="sm"
+                         ).click(
+                             lambda p=prompt: p,
+                             outputs=prompt_input
+                         )
+
+                 # Advanced options
+                 with gr.Accordion("⚙️ Advanced options", open=False):
+                     confidence_threshold = gr.Slider(
+                         label="Confidence threshold",
+                         minimum=0.1,
+                         maximum=1.0,
+                         value=0.8,
+                         step=0.1
+                     )
+
+                     generate_preview = gr.Checkbox(
+                         label="Generate preview image",
+                         value=True
+                     )
+
+                 # Process button
+                 process_btn = gr.Button(
+                     "🎯 Generate skeleton",
+                     variant="primary",
+                     size="lg"
+                 )
+
+             # Right column - outputs
+             with gr.Column(scale=2):
+                 gr.Markdown("### 📥 Results")
+
+                 # Status display
+                 status_text = gr.Textbox(
+                     label="Processing status",
+                     value="Waiting for input...",
+                     interactive=False
+                 )
+
+                 # Organize the outputs into tabs
+                 with gr.Tabs():
+                     # Data tab
+                     with gr.TabItem("📊 Skeleton data"):
+                         skeleton_data_json = gr.Textbox(
+                             label="Skeleton data preview",
+                             lines=15,
+                             interactive=False,
+                             show_copy_button=True
+                         )
+
+                     # 3D preview tab
+                     with gr.TabItem("👁️ 3D preview"):
+                         skeleton_preview = gr.HTML(
+                             label="Skeleton visualization",
+                             value="<p>Waiting for processing...</p>"
+                         )
+
+                     # Downloads tab
+                     with gr.TabItem("📁 Downloads"):
+                         gr.Markdown("### Download the skeleton files")
+
+                         with gr.Row():
+                             download_obj = gr.File(
+                                 label="🎨 OBJ format",
+                                 visible=True
+                             )
+                             download_txt = gr.File(
+                                 label="📝 TXT format",
+                                 visible=True
+                             )
+
+                         with gr.Row():
+                             download_zip = gr.File(
+                                 label="📦 Complete package (ZIP)",
+                                 visible=True
+                             )
+
+                     # Processing-info tab
+                     with gr.TabItem("ℹ️ Processing info"):
+                         processing_info = gr.Markdown(
+                             value="Waiting for processing..."
+                         )
+
+                 # Error message (hidden by default)
+                 error_info = gr.Textbox(
+                     label="Error message",
+                     visible=False,
+                     interactive=False
+                 )
+
+         # Example models
+         with gr.Accordion("📂 Example models", open=False):
+             gr.Examples(
+                 examples=[
+                     ["examples/boy.obj", "realistic human skeleton for animation"],
+                     ["examples/dog.obj", "four-legged animal with spine and tail"],
+                     ["examples/bird.obj", "bird skeleton with wing bones"],
+                 ],
+                 inputs=[model_file, prompt_input],
+                 label="Click to load an example"
+             )
+
+         # Usage notes
+         with gr.Accordion("📖 Instructions", open=False):
+             gr.Markdown("""
+             ## 🎯 How to use
+
+             1. **Upload a model** - OBJ, GLB, PLY, and STL are supported
+             2. **Enter a prompt** - describe the desired skeleton type
+             3. **Click Generate** - wait 30-120 seconds
+             4. **Review the results** - data, preview, and downloads live in the tabs
+
+             ## 📁 Output files
+
+             - **OBJ** - skeleton model viewable in 3D software
+             - **TXT** - traditional rigging format
+             - **ZIP** - archive containing all files
+
+             ## 💡 Tips
+
+             - Watertight meshes give the best results
+             - Complex models may take longer to process
+             - Specific prompts produce better skeletons
+             """)
+
+         # Event wiring
+         def process_and_update_ui(model_file, prompt, confidence, preview):
+             # Run the model
+             status, json_data, obj_file, txt_file, zip_file, info, error, skeleton_data = process_3d_model_gpu(
+                 model_file, prompt, confidence, preview
+             )
+
+             # Build the 3D preview
+             preview_html = "<p>No preview available</p>"
+             if status == "✅ Processing complete" and skeleton_data:
+                 try:
+                     preview_html = create_visualization_html(skeleton_data)
+                 except Exception as e:
+                     preview_html = f"<p>Preview generation failed: {str(e)}</p>"
+
+             # Toggle the error box
+             error_visible = status.startswith("❌")
+
+             return (
+                 status,        # status
+                 json_data,     # text preview
+                 obj_file,      # OBJ download
+                 txt_file,      # TXT download
+                 zip_file,      # ZIP download
+                 preview_html,  # 3D preview
+                 info,          # processing info
+                 # Error text and visibility in a single update; the original
+                 # listed error_info twice in the outputs, which this avoids.
+                 gr.update(value=error, visible=error_visible),
+             )
+
+         # Bind the process button
+         process_btn.click(
+             fn=process_and_update_ui,
+             inputs=[
+                 model_file,
+                 prompt_input,
+                 confidence_threshold,
+                 generate_preview
+             ],
+             outputs=[
+                 status_text,
+                 skeleton_data_json,
+                 download_obj,
+                 download_txt,
+                 download_zip,
+                 skeleton_preview,
+                 processing_info,
+                 error_info
+             ]
+         )
+
+         # Footer
+         gr.Markdown("""
+         ---
+
+         ## 🔗 Links
+         - [MagicArticulate Paper](https://github.com/Seed3D/MagicArticulate)
+         - [ArticulateHub Project](https://github.com/your-repo)
+         - [Hugging Face Spaces](https://huggingface.co/spaces)
+
+         **Made with ❤️ using Gradio and ZeroGPU**
+         """)
+
+     return demo
+
+ def main():
+     """Main entry point."""
+     try:
+         logger.info("🚀 Starting Enhanced MagicArticulate MVP...")
+
+         # Initialize the app
+         if not initialize_app():
+             logger.error("❌ Failed to initialize app")
+             return
+
+         # Build the interface
+         demo = create_demo_interface()
+
+         # Launch
+         logger.info("🌟 Launching Enhanced Gradio interface...")
+         demo.launch(
+             server_name="0.0.0.0",
+             server_port=7860,
+             show_api=False,
+             share=False,
+             debug=False
+         )
+
+     except Exception as e:
+         logger.error(f"💥 Main function failed: {str(e)}")
+         logger.error(traceback.format_exc())
+
+ if __name__ == "__main__":
+     main()
data_utils/README.md ADDED
@@ -0,0 +1,43 @@
+ ## Preprocessed data
+ We provide preprocessed data saved in NPZ files, which contain the following keys:
+ ```
+ 'vertices', 'faces', 'normals', 'joints', 'bones', 'root_index', 'uuid', 'pc_w_norm', 'joint_names', 'skinning_weights_value', 'skinning_weights_row', 'skinning_weights_col', 'skinning_weights_shape'
+ ```
+ You can check `read_npz.py` for how to read the NPZ files and `save_npz.py` for how we save them.
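For a quick look without the helper scripts, the NPZ files load like this (the list of per-model dicts lives under the default `arr_0` key, as in `convert_npz_to_mesh_rig.py`):

```python
import numpy as np

data_list = np.load("articulation_xlv2_test.npz", allow_pickle=True)["arr_0"]
entry = data_list[0]
print(entry["vertices"].shape)   # (n_vertex, 3)
print(entry["joints"].shape)     # (n_joints, 3)
print(entry["joint_names"][:5])  # first few joint names
```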
+
+ Before saving them into NPZ files, we extract the mesh (.obj) and rig (.txt) from 3D models downloaded from Objaverse-XL using Blender. The rig file follows the format in [RigNet](https://github.com/zhan-xu/RigNet), which includes the following entries:
+ ```
+ joints [joint_name] [x] [y] [z]
+ root [root_joint_name]
+ skin [vertex_index] [joint_name1] [skinning_weight1] [joint_name2] [skinning_weight2] ...
+ hier [parent_joint_name] [child_joint_name]
+ ```
+ For an example, please see `examples/0a59c5ffa4a1476bac6d540b79947f31.txt`.
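As a concrete illustration (values made up), a minimal two-joint rig in this format would read:

```
joints pelvis 0.0 0.5 0.0
joints spine 0.0 0.8 0.0
root pelvis
skin 0 pelvis 0.7 spine 0.3
hier pelvis spine
```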
+
+ To convert an NPZ file back to OBJ and TXT files, see the example script:
+ ```
+ python convert_npz_to_mesh_rig.py
+ ```
+
+ ## Visualization
+ We provide a method for visualizing 3D models with skeletons using [Pyrender](https://github.com/mmatl/pyrender), modified from [Lab4D](https://github.com/lab4d-org/lab4d/tree/ppr/). This visualization also serves as input to the VLM for skeleton quality rating. Make sure you have installed the following packages before running the visualization:
+ ```
+ pip install trimesh opencv-python pyrender
+ ```
+
+ We provide an example to demonstrate the process. For this example, we prepare an OBJ file along with a TXT file containing the rigging information. Then, run:
+ ```
+ python render_data.py
+ ```
+ You will obtain the following outputs:
+
+ <p align="center">
+   <img width="80%" src="examples/0a59c5ffa4a1476bac6d540b79947f31_render_results.png"/>
+ </p>
+
+ ### Reading rig and mesh from GLBs
+ We provide the script we use for reading the rig (.txt) and mesh (.obj) from GLB files. You can run:
+ ```
+ python read_rig_mesh_from_glb.py
+ ```
+ Remember to download Blender (we use 4.2.0) and install bpy in your conda environment.
data_utils/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ MagicArticulate Data Utils Package
+ Utility functions for data processing, rendering, and loading.
+ """
+
+ from .pyrender_wrapper import PyRenderWrapper
+
+ __all__ = ['PyRenderWrapper']
data_utils/clean_skin_in_npz.py ADDED
@@ -0,0 +1,95 @@
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import numpy as np
+ import scipy.sparse as sp
+ import os
+
+ def check_and_clean_skinning_weights(file_path, output_path, tolerance=0.1):
+     """
+     Check that every row of the skinning weights sums to 1 for each item in the NPZ file.
+     Remove invalid items and save a cleaned version.
+
+     Args:
+         file_path: Path to the input NPZ file
+         output_path: Path for the cleaned NPZ file
+         tolerance: Tolerance for the floating-point comparison
+
+     Returns:
+         tuple: (cleaned_data_list, removed_items)
+     """
+     data_list = np.load(file_path, allow_pickle=True)['arr_0']
+
+     invalid_items = []
+     valid_data_list = []
+
+     for idx, data in enumerate(data_list):
+         is_valid = True
+
+         weights_data = data['skinning_weights_value']
+         weights_row = data['skinning_weights_row']
+         weights_col = data['skinning_weights_col']
+         weights_shape = data['skinning_weights_shape']
+
+         # Rebuild the sparse (n_vertex, n_joints) skinning matrix
+         skinning_sparse = sp.coo_matrix(
+             (weights_data, (weights_row, weights_col)),
+             shape=weights_shape
+         )
+
+         skinning_csr = skinning_sparse.tocsr()
+         row_sums = np.array(skinning_csr.sum(axis=1)).flatten()
+
+         invalid_rows = np.where(np.abs(row_sums - 1.0) > tolerance)[0]
+
+         if len(invalid_rows) > 0:
+             min_sum = np.min(row_sums)
+             max_sum = np.max(row_sums)
+             invalid_items.append((data['uuid'], f"{len(invalid_rows)} rows, range: [{min_sum:.6f}, {max_sum:.6f}]"))
+             is_valid = False
+
+         if is_valid:
+             valid_data_list.append(data)
+
+     # Save the cleaned data (savez_compressed takes no allow_pickle argument;
+     # object arrays are pickled automatically)
+     if valid_data_list:
+         np.savez_compressed(output_path, np.asarray(valid_data_list, dtype=object))
+         print(f"Saved {len(valid_data_list)} valid items to {output_path}")
+
+     return valid_data_list, invalid_items
+
+ def main():
+     # File paths
+     file_path = "articulation_xlv2_train.npz"  # "articulation_xlv2_test.npz"
+     log_file = "invalid_skinning_weights_intrain.txt"  # "invalid_skinning_weights_intest.txt"
+     output_path = "articulation_xlv2_train_updated.npz"  # "articulation_xlv2_test_updated.npz"
+
+     # Clean the data
+     valid_data, invalid_items = check_and_clean_skinning_weights(file_path, output_path)
+
+     # Log the results
+     with open(log_file, "w") as f:
+         f.write(f"Original file: {file_path}\n")
+         f.write(f"Cleaned file: {output_path}\n")
+         f.write(f"Total items: {len(np.load(file_path, allow_pickle=True)['arr_0'])}\n")
+         f.write(f"Valid items: {len(valid_data)}\n")
+         f.write(f"Removed items: {len(invalid_items)}\n\n")
+
+         if invalid_items:
+             f.write("Details of removed items:\n")
+             for uuid_str, details in invalid_items:
+                 f.write(f"  UUID {uuid_str}: {details}\n")
+
+     print(f"Cleaning complete. Results written to {log_file}")
+
+ if __name__ == "__main__":
+     main()
data_utils/convert_npz_to_mesh_rig.py ADDED
@@ -0,0 +1,107 @@
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Convert an NPZ entry back to OBJ (mesh) and TXT (rig) files.
+ """
+ import os
+ import numpy as np
+ import scipy.sparse as sp
+
+ def export_obj(vertices, faces, normals, output_path):
+     with open(output_path, 'w') as f:
+         for v in vertices:
+             f.write(f"v {v[0]} {v[1]} {v[2]}\n")
+         for n in normals:
+             f.write(f"vn {n[0]} {n[1]} {n[2]}\n")
+         for face in faces:
+             # OBJ indices are 1-based, so add 1 to all indices
+             f.write(f"f {face[0]+1}//{face[0]+1} {face[1]+1}//{face[1]+1} {face[2]+1}//{face[2]+1}\n")
+
+ def export_rig_txt(joints, bones, root_index, joint_names, skinning_weights, output_path):
+     """
+     joints [joint_name] [x] [y] [z]
+     root [root_joint_name]
+     skin [vertex_index] [joint_name1] [weight1] [joint_name2] [weight2] ...
+     hier [parent_joint_name] [child_joint_name]
+     """
+     n_joints = len(joints)
+     n_verts = skinning_weights.shape[0]  # (n_vertex, n_joints)
+
+     with open(output_path, 'w') as f:
+         # 1) joints
+         for i in range(n_joints):
+             x, y, z = joints[i]
+             jn = joint_names[i]
+             f.write(f"joints {jn} {x} {y} {z}\n")
+
+         # 2) root
+         root_name = joint_names[root_index]
+         f.write(f"root {root_name}\n")
+
+         # 3) skin
+         for vidx in range(n_verts):
+             row_weights = skinning_weights[vidx]
+             non_zero_indices = np.where(row_weights != 0)[0]
+             if len(non_zero_indices) == 0:
+                 continue
+
+             line_parts = [f"skin {vidx}"]  # vertex index
+             for jidx in non_zero_indices:
+                 w = row_weights[jidx]
+                 jn = joint_names[jidx]
+                 line_parts.append(jn)
+                 line_parts.append(str(w))
+
+             f.write(" ".join(line_parts) + "\n")
+
+         # 4) hier
+         for p_idx, c_idx in bones:
+             p_name = joint_names[p_idx]
+             c_name = joint_names[c_idx]
+             f.write(f"hier {p_name} {c_name}\n")
+
+ if __name__ == "__main__":
+
+     data = np.load('articulation_xlv2_test.npz', allow_pickle=True)
+     data_list = data['arr_0']
+
+     print(f"Loaded {len(data_list)} data entries")
+
+     model_data = data_list[0]
+     print("Data keys:", model_data.keys())
+     # 'vertices', 'faces', 'normals', 'joints', 'bones', 'root_index', 'uuid', 'joint_names',
+     # 'skinning_weights_value', 'skinning_weights_row', 'skinning_weights_col', 'skinning_weights_shape'
+
+     vertices = model_data['vertices']        # (n_vertex, 3)
+     faces = model_data['faces']              # (n_faces, 3)
+     normals = model_data['normals']          # (n_vertex, 3)
+     joints = model_data['joints']            # (n_joints, 3)
+     bones = model_data['bones']              # (n_bones, 2)
+     root_index = model_data['root_index']    # int
+     joint_names = model_data['joint_names']  # list of str
+     uuid_str = model_data['uuid']
+
+     skin_val = model_data['skinning_weights_value']
+     skin_row = model_data['skinning_weights_row']
+     skin_col = model_data['skinning_weights_col']
+     skin_shape = model_data['skinning_weights_shape']
+     skin_sparse = sp.coo_matrix((skin_val, (skin_row, skin_col)), shape=skin_shape)
+     skinning_weights = skin_sparse.toarray()  # (n_vertex, n_joints)
+
+     obj_path = f"{uuid_str}.obj"
+     export_obj(vertices, faces, normals, obj_path)
+     rig_txt_path = f"{uuid_str}.txt"
+     export_rig_txt(joints, bones, root_index, joint_names, skinning_weights, rig_txt_path)
+
+     print("Done!")
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import json
15
+ import glob
16
+ import numpy as np
17
+ import trimesh
18
+
19
+ class DataLoader:
20
+ def __init__(self):
21
+ self.joint_name_to_idx = {}
22
+
23
+ def load_rig_data(self, rig_path):
24
+ joints = []
25
+ joints_names = []
26
+ bones = []
27
+
28
+ with open(rig_path, 'r') as f:
29
+ for line in f:
30
+ parts = line.strip().split()
31
+ if parts[0] == 'joints':
32
+ joint_name = parts[1]
33
+ joint_pos = [float(parts[2]), float(parts[3]), float(parts[4])]
34
+ self.joint_name_to_idx[joint_name] = len(joints)
35
+ joints.append(joint_pos)
36
+ joints_names.append(joint_name)
37
+ elif parts[0] == 'root':
38
+ self.root_name = parts[1]
39
+ elif parts[0] == 'hier':
40
+ parent_joint = self.joint_name_to_idx[parts[1]]
41
+ child_joint = self.joint_name_to_idx[parts[2]]
42
+ bones.append([parent_joint, child_joint])
43
+
44
+ self.joints = np.array(joints)
45
+ self.bones = np.array(bones)
46
+ self.joints_names = joints_names
47
+ self.root_idx = None
48
+ if self.root_name is not None:
49
+ self.root_idx = self.joint_name_to_idx[self.root_name]
50
+
51
+ def load_mesh(self, mesh_path):
52
+ mesh = trimesh.load(mesh_path, process=False)
53
+ mesh.visual.vertex_colors[:, 3] = 100 # set transparency
54
+ self.mesh = mesh
55
+
56
+ # Compute the centroid normal of the mesh
57
+ v = self.mesh.vertices
58
+ xmin, ymin, zmin = v.min(axis=0)
59
+ xmax, ymax, zmax = v.max(axis=0)
60
+ self.bbox_center = np.array([(xmax + xmin)/2, (ymax + ymin)/2, (zmax + zmin)/2])
61
+ self.bbox_size = np.array([xmax - xmin, ymax - ymin, zmax - zmin])
62
+ self.bbox_scale = max(xmax - xmin, ymax - ymin, zmax - zmin)
63
+
64
+ normal = mesh.center_mass - self.bbox_center
65
+ normal = normal / (np.linalg.norm(normal)+1e-5)
66
+
67
+ # Choose axis order based on normal direction
68
+ if abs(normal[1]) > abs(normal[2]): # if Y component is dominant
69
+ self.axis_order = [0, 1, 2] # swapping Y and Z
70
+ else:
71
+ self.axis_order =[0, 2, 1] # keep default order
72
+
73
+ self.mesh.vertices = self.mesh.vertices[:, self.axis_order]
74
+ self.joints = self.joints[:, self.axis_order]
75
+ self.normalize_coordinates()
76
+
77
+ def normalize_coordinates(self):
78
+
79
+ # Compute scale and offset
80
+ scale = 1.0 / (self.bbox_scale+1e-5)
81
+ offset = -self.bbox_center
82
+
83
+ self.mesh.vertices = (self.mesh.vertices + offset) * scale
84
+ self.joints = (self.joints + offset) * scale
85
+
86
+ # Calculate appropriate radii based on the mean size
87
+ self.joint_radius = 0.01
88
+ self.bone_radius = 0.005
89
+
90
+ def query_mesh_rig(self):
91
+
92
+ input_dict = {"shape": self.mesh}
93
+
94
+ # Create joints as spheres
95
+ joint_meshes = []
96
+ for i, joint in enumerate(self.joints):
97
+
98
+ sphere = trimesh.creation.icosphere(
99
+ radius=self.joint_radius, subdivisions=2
100
+ )
101
+ sphere.apply_translation(joint)
102
+ if i == self.root_idx:
103
+ # root green
104
+ sphere.visual.vertex_colors = [0, 255, 0, 255]
105
+ else:
106
+ sphere.visual.vertex_colors = [0, 0, 255, 255]
107
+
108
+ joint_meshes.append(sphere)
109
+ input_dict["joint_meshes"] = trimesh.util.concatenate(joint_meshes)
110
+
111
+ # Create bones as cylinders
112
+ bone_meshes = []
113
+ for bone in self.bones:
114
+ start, end = self.joints[bone[0]], self.joints[bone[1]]
115
+ cylinder = trimesh.creation.cylinder(radius=self.bone_radius, segment=np.array([[0, 0, 0], end - start]))
116
+ cylinder.apply_translation(start)
117
+ cylinder.visual.vertex_colors = [255, 0, 0, 255] #[0, 0, 255, 255] # blue
118
+ bone_meshes.append(cylinder)
119
+ input_dict["bone_meshes"] = trimesh.util.concatenate(bone_meshes)
120
+
121
+ return input_dict
data_utils/issue_data_list.txt ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0b1f1ccb-db41-5689-b363-fd8ca0145041
2
+ d4705a2d-2dbf-5175-9fd0-b0cc538b9c4d
3
+ 12b3d88d-2845-57b7-b483-d3a766beeb0e
4
+ 778505b7-63da-5c08-bad7-6935fcd73cec
5
+ 35ed271f-e9d7-528f-b165-e25004ef802b
6
+ 0096279cc46c4d1d8e8611e611e2418b
7
+ 00ea25ccad8344cbaedc89d70bb75a49
8
+ 08b617be44b6466584ba9624f857222c
9
+ 0998722861ba489695ad8bd4456e76e6
10
+ 0bd786e936774176ac474694b0f6f876
11
+ 0c1a7657bea0421dadef56e2080f0297
12
+ 1073c44309524810b6cd4cef2d6e8008
13
+ 10b9c6e9bf214dc39476161dfe2eaa8a
14
+ 147df2ee69df488eb6cb2f88f2f703bb
15
+ 18ff6fa66b0d483a8758e4602e5b70b0
16
+ 1cf88736c59a43c88ba7dac44c929dab
17
+ 1e9544eea98d417db87347dcc16cb69e
18
+ 21a4bc038cbd415b8e09566148c87c46
19
+ 2809e172066d4140b1ddc9356490191a
20
+ 28483d55555f433d8fde4ba141ad5271
21
+ 31829af6c72146519d348a6d4d2bcc8b
22
+ 32202338cd5c40beace31deeacd598e5
23
+ 37fe21828c37413986a07a1bf8c75c93
24
+ 3857965c400c47c9a846c01eb1f36ed5
25
+ 404e622bdfd14ab693640ff86c131973
26
+ 44f8486a0b2c4f9489fc3912b2dcf880
27
+ 49580a36b07d47808aa91db6e2b9fcdd
28
+ 4db51555e8fd48a0905ecee93730f863
29
+ 57a9d6f9fec7430bae67d7d7a9bfdd2c
30
+ 593eeb44d67c49499d3580d908b9f5cd
31
+ 5a571bea2d0c4ad5b2cc912c3dc37a59
32
+ 5cd1f275bdb34d939ffaa07a641a2eef
33
+ 60ab9787fde64199ab59b728276b5cd8
34
+ 63453d744e3844d48bc9a7bedfe586a7
35
+ 6caf784e33084b1389fdea4043560d3f
36
+ 725ce5eae96b4602a3b8a30f73dcbc4c
37
+ 7f9c3d9ccbd949449f25f3711780c1e7
38
+ 80ff2e88de2144bbb21d231db5a02000
39
+ 835174fcce4a4969851ca1846b92036a
40
+ 85b73c92393e453faf0f7ec82d40720e
41
+ 860911c447744c0396b618db994c535e
42
+ 86d6d90704ff4e9c8fc0f0751bd837a2
43
+ 934b27da5e4249978bfa9c190ec01f9a
44
+ 968aecc8c38246f8af3d0d7fa169ca8f
45
+ 9fc1cb45c8404517aa8cee3bb47c14fd
46
+ a65a935fd54b4159a2687bffef7cbf81
47
+ af2f7b1678ea4194a9b8235e7dfd23b3
48
+ b4cd213509ec4dcba41a280b4b013e63
49
+ be7a64227e1f4f13b86389edc4926dfa
50
+ bff3cd47d0574f73980b3af9f7790c58
51
+ c8ac24a9bf2647fb9e7565eaf3a28558
52
+ cc1f905b148c4378ad46a40da72e839f
53
+ ce50fe2e6a654a3bafab950c0f101e59
54
+ d270505df059467e8fa17974f075f3cf
55
+ d476d6bfc0364001a6cc73877a59ca65
56
+ d9a5b67b5c9142e984f76b1afec1939b
57
+ da9cb8ac53274b9bbd9467b7d83c85fb
58
+ dc48f3ab2b2844eba788898509a52806
59
+ e1817fcc5d614723bcb1f49491fe3ed0
60
+ f1fbc33234374c3a911148a453399186
61
+ faab16de19484746a4716cb00b738f8e
62
+ fdb767e69a0748c6bcdfe8764772c0d4
63
+ ff8ec56b0c664b438d36e84882b304f4
64
+ 03ea3bf9d47e4e5789d027279e6edbbb
65
+ 064a05ca3df84e3fbf900f9a1df75577
66
+ 0ada42e959504b47ba58ca331a8d8549
67
+ 112ae8160af54eeea6b2483b903634f4
68
+ 156d6ab3d495476c997887c092aff781
69
+ 1c92543b1e9245e0a2c1e3770a0e3d11
70
+ 1e041df547e64db9aaa8d79218d880a8
71
+ 1e34fd79cbb24db4952db6e9642881d3
72
+ 1ec08e1e74d04354ac7085c004b01c2c
73
+ 20dd7f7bdc9a4c36aef491f12afa14d8
74
+ 242e99d9fe2f4eec91841fd3e8b01021
75
+ 27dbf22159a5464687f4ed9b347257d3
76
+ 28647ae054d74d2e9cac4a3dda31bb55
77
+ 29ff70f5772747f89b0db4aae9c0ade6
78
+ 2b03620bba824c1ea67945abd5c043f2
79
+ 314d74658df6431ea50bede8512882cc
80
+ 38f052a2027346e2943b4c76d2572415
81
+ 3dbaadb244e44f59b5a6b7490aac6883
82
+ 400dbd97e4e6429cab24fab8b5a3d845
83
+ 41790f8edba642ffa281a0660f318db4
84
+ 4c60ff4ebef241deae699ec8d2de86b5
85
+ 5de63c02a4374605acb69691450e6653
86
+ 65df530434624400b030da4579baa4b6
87
+ 66c66c960e1c4b3aab5f2792f5e71add
88
+ 6abf66991f584f1ba45d7297f3a128d4
89
+ 6dd6b05e20604f478d9fd868528b275f
90
+ 6f76008a68074d2bb59a0189f558ae34
91
+ 8bb433dfbef3479cbaa3bcdf63b5b6a2
92
+ 9338c7dbf4054c608c17353358cdb7c6
93
+ 9544bb7b09874f13a5ecd0429379cbd8
94
+ 95d2df27650f4beb8d208a21db7366d9
95
+ 96d50c0f7f6a40ad9e5ae39537d1062e
96
+ 9e7e71c08e5b4ff9b510afbfb2067152
97
+ a6cce2749dfb4b4d89c0dc3460ea9d3b
98
+ ab7e81a8a26d43ecb3131729a999ddcd
99
+ adae06ba4b7a4cbeab892957bc40331b
100
+ ba46772fa0234625832da0582c2f615c
101
+ c4f57ce4bc2b4c46a32414515ba991e9
102
+ cf09886dc98f4666bed77d6b51a4ef67
103
+ cfde2bfa5c634a788c2c4c4480f53ba7
104
+ d0008363ca6c4ea9976494eff45e90bb
105
+ d403eef8a45d485e905b968cc0a1670a
106
+ dc8d45c7ae7f453e9f861c79a40d9265
107
+ eb8e71b3a22f4e719d8157831c408a6e
108
+ ed896088728f4779b2fd9aa7f527e880
109
+ f06a196aea294b0fa05dee4be971a12c
110
+ f3e1bd29da234c8e89e0f208487fe31c
111
+ f84ffc38cbb9400ca31be98fe89abb01
112
+ fa31faff8ec04fa49e72e6266dc14cc4
113
+ fb6bd558e5ff4d3b8709a39d6280460b
114
+ 808f9ffa-c14a-5d78-b8bf-197bc1f0b29c
115
+ e1740d44-9be4-58cf-a3e6-f8208b9cdfc6
116
+ 4acf0253-00b8-5cca-be94-1f2af5bd72ba
117
+ 0c94fe68-2983-52db-822e-6ea63bd54f65
118
+ ff9b4de9-a702-5221-bc26-f0c7ec8c4c51
119
+ b927ce627b6841a688067331853302d6
120
+ ccfad91e-e66d-5cc3-aff8-99f5b3a824fd
121
+ 25434b7c-4ab4-58cd-900f-aa1bfcf53233
122
+ 23d9764b-5035-5025-aae1-2788c1942a7c
123
+ ecbc08ea-5f9d-5d2f-a496-77ec128bd3fe
data_utils/pyrender_wrapper.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Modified from https://github.com/lab4d-org/lab4d
2
+
3
+ import os
4
+ import numpy as np
5
+ import cv2
6
+ import pyrender
7
+ import trimesh
8
+ from pyrender import (
9
+ IntrinsicsCamera,
10
+ Mesh,
11
+ Node,
12
+ Scene,
13
+ OffscreenRenderer,
14
+ MetallicRoughnessMaterial,
15
+ RenderFlags
16
+ )
17
+
18
+ os.environ["PYOPENGL_PLATFORM"] = "egl"
19
+
20
+ def look_at(eye, center, up):
21
+ """Create a look-at (view) matrix."""
22
+ f = np.array(center, dtype=np.float32) - np.array(eye, dtype=np.float32)
23
+ f /= np.linalg.norm(f)
24
+
25
+ u = np.array(up, dtype=np.float32)
26
+ u /= np.linalg.norm(u)
27
+
28
+ s = np.cross(f, u)
29
+ u = np.cross(s, f)
30
+
31
+ m = np.identity(4, dtype=np.float32)
32
+ m[0, :3] = s
33
+ m[1, :3] = u
34
+ m[2, :3] = -f
35
+ m[:3, 3] = -np.matmul(m[:3, :3], np.array(eye, dtype=np.float32))
36
+
37
+ return m
38
+
39
+ class PyRenderWrapper:
40
+ def __init__(self, image_size=(1024, 1024)) -> None:
41
+ # renderer
42
+ self.image_size = image_size
43
+ render_size = max(image_size)
44
+ self.r = OffscreenRenderer(render_size, render_size)
45
+ self.intrinsics = IntrinsicsCamera(
46
+ render_size, render_size, render_size / 2, render_size / 2
47
+ )
48
+ # light
49
+ self.light_pose = np.eye(4)
50
+ self.set_light_topdown()
51
+ self.direc_l = pyrender.DirectionalLight(color=np.ones(3), intensity=5.0)
52
+ self.material = MetallicRoughnessMaterial(
53
+ roughnessFactor=0.75, metallicFactor=0.75, alphaMode="BLEND"
54
+ )
55
+ self.init_camera()
56
+
57
+ def init_camera(self):
58
+ self.flip_pose = np.eye(4)
59
+ self.set_camera(np.eye(4))
60
+
61
+ def set_camera(self, scene_to_cam):
62
+ # object to camera transforms
63
+ self.scene_to_cam = self.flip_pose @ scene_to_cam
64
+
65
+ def set_light_topdown(self, gl=False):
66
+ # top down light, slightly closer to the camera
67
+ if gl:
68
+ rot = cv2.Rodrigues(np.asarray([-np.pi / 2, 0, 0]))[0]
69
+ else:
70
+ rot = cv2.Rodrigues(np.asarray([np.pi / 2, 0, 0]))[0]
71
+ self.light_pose[:3, :3] = rot
72
+
73
+ def align_light_to_camera(self):
74
+ self.light_pose = np.linalg.inv(self.scene_to_cam)
75
+
76
+ def set_intrinsics(self, intrinsics):
77
+ """
78
+ Args:
79
+ intrinsics: (4,) fx,fy,px,py
80
+ """
81
+ self.intrinsics = IntrinsicsCamera(
82
+ intrinsics[0], intrinsics[1], intrinsics[2], intrinsics[3]
83
+ )
84
+
85
+ def get_cam_to_scene(self):
86
+ cam_to_scene = np.eye(4)
87
+ cam_to_scene[:3, :3] = self.scene_to_cam[:3, :3].T
88
+ cam_to_scene[:3, 3] = -self.scene_to_cam[:3, :3].T @ self.scene_to_cam[:3, 3]
89
+ return cam_to_scene
90
+
91
+ def set_camera_view(self, angle, bbox_center, distance=2.0):
92
+ # Calculate camera position based on angle and distance from bounding box center
93
+ camera_position = bbox_center + distance * np.array([np.sin(angle), 0, np.cos(angle)], dtype=np.float32)
94
+ look_at_matrix = look_at(camera_position, bbox_center, [0, 1, 0])
95
+ self.scene_to_cam = look_at_matrix @ self.flip_pose
96
+
97
+ def render(self, input_dict):
98
+ # Create separate scenes for transparent objects (mesh) and solid objects (joints and bones)
99
+ scene_transparent = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]) * 0.1)
100
+ scene_solid = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]) * 0.1)
101
+
102
+ mesh_pyrender = Mesh.from_trimesh(input_dict["shape"], smooth=False)
103
+ mesh_pyrender.primitives[0].material = self.material
104
+ scene_transparent.add(mesh_pyrender, pose=np.eye(4), name="shape")
105
+
106
+ if "joint_meshes" in input_dict:
107
+ joints_pyrender = Mesh.from_trimesh(input_dict["joint_meshes"], smooth=False)
108
+ joints_pyrender.primitives[0].material = self.material
109
+ scene_solid.add(joints_pyrender, pose=np.eye(4), name="joints")
110
+
111
+ if "bone_meshes" in input_dict:
112
+ bones_pyrender = Mesh.from_trimesh(input_dict["bone_meshes"], smooth=False)
113
+ bones_pyrender.primitives[0].material = self.material
114
+ scene_solid.add(bones_pyrender, pose=np.eye(4), name="bones")
115
+
116
+ # Camera for both scenes
117
+ scene_transparent.add(self.intrinsics, pose=self.get_cam_to_scene())
118
+ scene_solid.add(self.intrinsics, pose=self.get_cam_to_scene())
119
+
120
+ # Light for both scenes
121
+ scene_transparent.add(self.direc_l, pose=self.light_pose)
122
+ scene_solid.add(self.direc_l, pose=self.light_pose)
123
+
124
+ # Render transparent scene first
125
+ color_transparent, depth_transparent = self.r.render(scene_transparent)
126
+
127
+ # Render solid scene on top
128
+ color_solid, depth_solid = self.r.render(scene_solid)
129
+
130
+ # Combine the two scenes
131
+ color_combined = np.where(depth_solid[..., np.newaxis] == 0, color_transparent, color_solid)
132
+
133
+ return color_combined, depth_solid
134
+ def delete(self):
135
+ self.r.delete()
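
A minimal usage sketch for the wrapper above (not part of the file; the primitive, image size, and camera settings are illustrative assumptions, and an EGL-capable headless environment is required):

    import trimesh
    from pyrender_wrapper import PyRenderWrapper

    renderer = PyRenderWrapper((512, 512))
    sphere = trimesh.creation.icosphere(radius=0.5)  # stand-in for a real mesh
    renderer.set_camera_view(angle=0.0, bbox_center=sphere.bounding_box.centroid, distance=2.0)
    renderer.align_light_to_camera()
    color, depth = renderer.render({"shape": sphere})  # RGB image plus depth of the solid pass
    renderer.delete()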
data_utils/read_npz.py ADDED
@@ -0,0 +1,43 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import numpy as np
15
+ import scipy.sparse as sp
16
+
17
+ # Load the NPZ file
18
+ data = np.load('articulation_xlv2_test.npz', allow_pickle=True)
19
+ data_list = data['arr_0']
20
+
21
+ print(f"Loaded {len(data_list)} data entries")
22
+ print(f"Data keys: {data_list[0].keys()}")
23
+ # 'vertices', 'faces', 'normals', 'joints', 'bones', 'root_index', 'uuid', 'pc_w_norm', 'joint_names', 'skinning_weights_value',
24
+ # 'skinning_weights_row', 'skinning_weights_col', 'skinning_weights_shape'
25
+
26
+ data = data_list[0] # check the first data
27
+
28
+ vertices = data['vertices'] # (n_vertex, 3)
29
+ faces = data['faces'] # (n_faces, 3)
30
+ normals = data['normals'] # (n_vertex, 3)
31
+ joints = data['joints'] # (n_joints, 3)
32
+ bones = data['bones'] # (n_bones, 2)
33
+ pc_w_norm = data['pc_w_norm'] # (8192, 6)
34
+
35
+ # Extract the sparse skinning weights components
36
+ skinning_data = data['skinning_weights_value']
37
+ skinning_rows = data['skinning_weights_row']
38
+ skinning_cols = data['skinning_weights_col']
39
+ skinning_shape = data['skinning_weights_shape']
40
+
41
+ skinning_sparse = sp.coo_matrix((skinning_data, (skinning_rows, skinning_cols)), shape=skinning_shape)
42
+ skinning_weights = skinning_sparse.toarray() # (n_vertex, n_joints)
43
+
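
A common follow-up, sketched here rather than part of the original script: keep only the four largest weights per vertex (as glTF-style skinning expects) and renormalize the rows.

    k = 4
    top_idx = np.argsort(-skinning_weights, axis=1)[:, :k]         # (n_vertex, k) joint indices
    top_w = np.take_along_axis(skinning_weights, top_idx, axis=1)  # (n_vertex, k) weights
    top_w = top_w / (top_w.sum(axis=1, keepdims=True) + 1e-8)      # renormalize per vertex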
data_utils/read_rig_mesh_from_glb.py ADDED
@@ -0,0 +1,198 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Blender script for extracting a rig (.txt) and mesh (.obj) from GLB files.
17
+ It currently supports GLB input only, but can be adapted to other formats (e.g., FBX, DAE) with minimal changes.
18
+ """
19
+
20
+ import bpy
21
+ import os
22
+ import re
23
+ import json
24
+ import pickle
25
+
26
+ def get_hierarchy_root_joint(joint):
27
+ """
28
+ Function to find the top parent joint node from the given
29
+ 'joint' Blender node (armature bone).
30
+ """
31
+ root_joint = joint
32
+ while root_joint.parent is not None:
33
+ root_joint = root_joint.parent
34
+ return root_joint
35
+
36
+ def get_meshes_and_armatures():
37
+ """
38
+ Function to get all meshes and armatures in the scene
39
+ """
40
+ default_objects = ['Cube', 'Light', 'Camera', 'Icosphere']
41
+ for obj_name in default_objects:
42
+ if obj_name in bpy.data.objects:
43
+ bpy.data.objects.remove(bpy.data.objects[obj_name], do_unlink=True)
44
+
45
+ meshes = [obj for obj in bpy.context.scene.objects if obj.type == 'MESH']
46
+ armatures = [obj for obj in bpy.context.scene.objects if obj.type == 'ARMATURE']
47
+ return meshes, armatures
48
+
49
+ def get_joint_dict(root):
50
+ """
51
+ Function to create a dictionary of joints from the root joint
52
+ """
53
+ joint_pos = {}
54
+ def traverse_bone(bone):
55
+ joint_pos[bone.name] = {
56
+ 'pos': bone.head_local,
57
+ 'pa': bone.parent.name if bone.parent else 'None',
58
+ 'ch': [child.name for child in bone.children]
59
+ }
60
+ for child in bone.children:
61
+ traverse_bone(child)
62
+
63
+ traverse_bone(root)
64
+ return joint_pos
65
+
66
+ def record_info(root, joint_dict, meshes, mesh_vert_offsets, file_info):
67
+ """
68
+ - root: root joint (a Blender bone)
69
+ - joint_dict: joint name -> {pos, parent, children}, as built by get_joint_dict
70
+ - meshes: mesh objects in the scene
71
+ - mesh_vert_offsets: per-mesh vertex index offsets (for multi-geometry files)
72
+ - file_info: open handle of the rig .txt being written
73
+ """
74
+ skin_records = {}
75
+
76
+ def replace_special_characters(name):
77
+ return re.sub(r'\W+', '_', name)
78
+
79
+ for key, val in joint_dict.items():
80
+ modified_key = replace_special_characters(key)
81
+ file_info.write(f'joints {modified_key} {val["pos"][0]:.8f} {val["pos"][1]:.8f} {val["pos"][2]:.8f}\n')
82
+ file_info.write(f'root {replace_special_characters(root.name)}\n')
83
+
84
+ for mesh_index, mesh in enumerate(meshes):
85
+ vert_offset = mesh_vert_offsets[mesh_index]
86
+ if mesh.type == 'MESH':
87
+ for vtx in mesh.data.vertices:
88
+ weights = {}
89
+ for group in vtx.groups:
90
+ bone_name = replace_special_characters(mesh.vertex_groups[group.group].name)
91
+ weights[bone_name] = group.weight
92
+
93
+ global_vertex_index = vert_offset + vtx.index
94
+
95
+ skin_record = f"skin {global_vertex_index} " + " ".join(f"{bone} {weight:.4f}" for bone, weight in weights.items())
96
+
97
+ if global_vertex_index not in skin_records:
98
+ skin_records[global_vertex_index] = skin_record
99
+ file_info.write(skin_record + "\n")
100
+
101
+ for key, val in joint_dict.items():
102
+ if val['pa'] != 'None':
103
+ parent_name = replace_special_characters(val['pa'])
104
+ child_name = replace_special_characters(key)
105
+ file_info.write(f'hier {parent_name} {child_name}\n')
106
+
107
+
108
+ def record_obj(meshes, file_obj):
109
+ vert_offset = 0
110
+ norm_offset = 0
111
+ mesh_vert_offsets = []
112
+
113
+ for mesh in meshes:
114
+ mesh_vert_offsets.append(vert_offset)
115
+ bpy.context.view_layer.objects.active = mesh
116
+ bpy.ops.object.mode_set(mode='OBJECT')
117
+
118
+ # vertex
119
+ for v in mesh.data.vertices:
120
+ file_obj.write(f"v {v.co[0]} {v.co[1]} {v.co[2]}\n")
121
+ file_obj.write("\n")
122
+
123
+ # normal
124
+ for vn in mesh.data.vertices:
125
+ normal = vn.normal
126
+ file_obj.write(f"vn {normal[0]} {normal[1]} {normal[2]}\n")
127
+ file_obj.write("\n")
128
+
129
+ # face
130
+ for poly in mesh.data.polygons:
131
+ verts = [v + 1 + vert_offset for v in poly.vertices]
132
+ file_obj.write(f"f {verts[0]}//{verts[0]} {verts[1]}//{verts[1]} {verts[2]}//{verts[2]}\n")
133
+
134
+ vert_count = len(mesh.data.vertices)
135
+ vert_offset += vert_count
136
+ norm_offset += vert_count
137
+
138
+ return mesh_vert_offsets
139
+
140
+ def process_glb(glb_path, rigs_dir, meshes_dir):
141
+ base_name = os.path.splitext(os.path.basename(glb_path))[0]
142
+
143
+ obj_name = os.path.join(meshes_dir, f'{base_name}.obj')
144
+ info_name = os.path.join(rigs_dir, f'{base_name}.txt')
145
+
146
+ # Skip processing if rig info file already exists
147
+ if os.path.exists(info_name):
148
+ print(f"{info_name} already exists. Skipping...")
149
+ return
150
+
151
+ if os.path.exists(obj_name):
152
+ print(f"{obj_name} already exists. Skipping...")
153
+ return
154
+
155
+ bpy.ops.wm.read_factory_settings(use_empty=True)
156
+ bpy.ops.import_scene.gltf(filepath=glb_path)
157
+
158
+ meshes, armatures = get_meshes_and_armatures()
159
+
160
+ if not armatures:
161
+ print(f"No armatures found in {glb_path}. Skipping...")
162
+ return
163
+
164
+ first_bone = armatures[0].data.bones[0]
165
+ root_bone = get_hierarchy_root_joint(first_bone) # the top-most parent bone (a bone object, not a name)
166
+ joint_dict = get_joint_dict(root_bone)
167
+
168
+ # save meshes
169
+ with open(obj_name, 'w') as file_obj:
170
+ mesh_vert_offsets = record_obj(meshes, file_obj)
171
+
172
+ # save rigs
173
+ with open(info_name, 'w') as file_info:
174
+ record_info(root_bone, joint_dict, meshes, mesh_vert_offsets, file_info)
175
+
176
+ print(f"Processed {glb_path}")
177
+
178
+ if __name__ == '__main__':
179
+
180
+ src_dir = 'glbs'
181
+ rigs_dir = 'rigs'
182
+ meshes_dir = 'meshes'
183
+ # Ensure rigs directory exists
184
+ if not os.path.exists(rigs_dir):
185
+ os.makedirs(rigs_dir)
186
+ if not os.path.exists(meshes_dir):
187
+ os.makedirs(meshes_dir)
188
+
189
+ glb_paths = [os.path.join(src_dir, file) for file in os.listdir(src_dir) if file.endswith('.glb')]
190
+
191
+ print(len(glb_paths))
192
+
193
+ for glb_path in glb_paths:
194
+ try:
195
+ process_glb(glb_path, rigs_dir, meshes_dir)
196
+ except Exception as e:
197
+ with open('error.txt', 'a') as error_file:
198
+ error_file.write(f"{glb_path}: {str(e)}\n")
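
For reference, the rig .txt that record_info writes follows the RigNet convention described above; a hypothetical two-joint file (values are illustrative only) would look like:

    joints Hips 0.00000000 0.95000000 0.00000000
    joints Spine 0.00000000 1.10000000 0.01000000
    root Hips
    skin 0 Hips 0.7500 Spine 0.2500
    hier Hips Spine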
data_utils/render_data.py ADDED
@@ -0,0 +1,61 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ import numpy as np
16
+ import cv2
17
+
18
+ from pyrender_wrapper import PyRenderWrapper
19
+ from data_loader import DataLoader
20
+
21
+ def main():
22
+ loader = DataLoader()
23
+
24
+ raw_size = (960, 960)
25
+ renderer = PyRenderWrapper(raw_size)
26
+
27
+ output_dir = 'render_results'
28
+ os.makedirs(output_dir, exist_ok=True)
29
+
30
+ rig_path = 'examples/0a59c5ffa4a1476bac6d540b79947f31.txt'
31
+ mesh_path = rig_path.replace('.txt', '.obj')
32
+
33
+ filename = os.path.splitext(os.path.basename(rig_path))[0]
34
+
35
+ loader.load_rig_data(rig_path)
36
+ loader.load_mesh(mesh_path)
37
+ input_dict = loader.query_mesh_rig()
38
+
39
+ angles = [0, np.pi/2, np.pi, 3*np.pi/2]
40
+
41
+ bbox_center = loader.mesh.bounding_box.centroid
42
+ bbox_size = loader.mesh.bounding_box.extents
43
+ distance = np.max(bbox_size) * 2
44
+
45
+ subfolder_path = os.path.join(output_dir, filename)
46
+
47
+ os.makedirs(subfolder_path, exist_ok=True)
48
+
49
+ for i, angle in enumerate(angles):
50
+ print(f"Rendering view at {np.degrees(angle)} degrees")
51
+
52
+ renderer.set_camera_view(angle, bbox_center, distance)
53
+ renderer.align_light_to_camera()
54
+
55
+ color = renderer.render(input_dict)[0]
56
+
57
+ output_filename = f"{filename}_view{i+1}.png"
58
+ output_filepath = os.path.join(subfolder_path, output_filename)
59
+ cv2.imwrite(output_filepath, cv2.cvtColor(color, cv2.COLOR_RGB2BGR)) # pyrender returns RGB; cv2 expects BGR
60
+ if __name__ == "__main__":
61
+ main()
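
If a denser turntable sweep is wanted instead of four stills, the loop in main() could be extended as below (a sketch reusing the same variables; pyrender returns RGB, hence the conversion before cv2.imwrite):

    for i, angle in enumerate(np.linspace(0, 2 * np.pi, 36, endpoint=False)):
        renderer.set_camera_view(angle, bbox_center, distance)
        renderer.align_light_to_camera()
        color = renderer.render(input_dict)[0]
        out_path = os.path.join(subfolder_path, f"{filename}_turn{i:02d}.png")
        cv2.imwrite(out_path, cv2.cvtColor(color, cv2.COLOR_RGB2BGR))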
data_utils/save_npz.py ADDED
@@ -0,0 +1,252 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ This Python script shows how the meshes and rigs in the input folders are processed and saved into a compressed npz file.
16
+ """
17
+ import os
18
+ import numpy as np
19
+ import glob
20
+ import pickle
21
+ from concurrent.futures import ProcessPoolExecutor
22
+ import skimage.measure
23
+ import trimesh
24
+ import mesh2sdf.core
25
+ import scipy.sparse as sp
26
+
27
+ def read_obj_file(file_path):
28
+ vertices = []
29
+ faces = []
30
+ normals = [] # Added normals list
31
+
32
+ with open(file_path, 'r') as file:
33
+ for line in file:
34
+ if line.startswith('v '):
35
+ parts = line.split()[1:]
36
+ vertices.append([float(parts[0]), float(parts[1]), float(parts[2])])
37
+ elif line.startswith('vn '): # Added reading normals
38
+ parts = line.split()[1:]
39
+ normals.append([float(parts[0]), float(parts[1]), float(parts[2])])
40
+ elif line.startswith('f '):
41
+ parts = line.split()[1:]
42
+ # OBJ indices are 1-based; convert to 0-based for the npz
43
+ face = [int(part.split('//')[0]) - 1 for part in parts]
44
+ faces.append(face)
45
+
46
+ return np.array(vertices), np.array(faces), np.array(normals)
47
+
48
+ def read_rig_file(file_path):
49
+ """
50
+ Read rig from txt file, our format is the same as RigNet:
51
+ joints joint_name x y z
52
+ root root_joint_name
53
+ skin vertex_idx joint_name weight joint_name weight ...
54
+ hier parent_joint_name child_joint_name
55
+ """
56
+ joints = []
57
+ bones = []
58
+ joint_names = []
59
+
60
+ joint_mapping = {}
61
+ joint_index = 0
62
+
63
+ skinning_data = {} # Dictionary to store vertex index -> [(joint_idx, weight), ...]
64
+
65
+ with open(file_path, 'r') as file:
66
+ lines = file.readlines()
67
+
68
+ for line in lines:
69
+ parts = line.split()
70
+ if line.startswith('joints'):
71
+ name = parts[1]
72
+ position = [float(parts[2]), float(parts[3]), float(parts[4])]
73
+ joints.append(position)
74
+ joint_names.append(name)
75
+ joint_mapping[name] = joint_index
76
+ joint_index += 1
77
+ elif line.startswith('hier'):
78
+ parent_joint = joint_mapping[parts[1]]
79
+ child_joint = joint_mapping[parts[2]]
80
+ bones.append([parent_joint, child_joint])
81
+ elif line.startswith('root'):
82
+ root = joint_mapping[parts[1]]
83
+ elif line.startswith('skin'):
84
+ vertex_idx = int(parts[1])
85
+
86
+ if vertex_idx not in skinning_data:
87
+ skinning_data[vertex_idx] = []
88
+
89
+ for i in range(2, len(parts), 2):
90
+ if i+1 < len(parts):
91
+ joint_name = parts[i]
92
+ weight = float(parts[i+1])
93
+
94
+ if joint_name in joint_mapping:
95
+ joint_idx = joint_mapping[joint_name]
96
+ skinning_data[vertex_idx].append((joint_idx, weight))
97
+
98
+ return np.array(joints), np.array(bones), root, joint_names, skinning_data
99
+
100
+ def convert_to_sparse_skinning(skinning_data, num_vertices, num_joints):
101
+ """Convert skinning weights to sparse matrix format."""
102
+ rows = []
103
+ cols = []
104
+ data = []
105
+
106
+ for vertex_idx, weights in skinning_data.items():
107
+ for joint_idx, weight in weights:
108
+ rows.append(vertex_idx)
109
+ cols.append(joint_idx)
110
+ data.append(weight)
111
+
112
+ sparse_skinning = sp.coo_matrix((data, (rows, cols)), shape=(num_vertices, num_joints))
113
+
114
+ # Return as tuple of arrays which can be serialized
115
+ return (sparse_skinning.data, sparse_skinning.row, sparse_skinning.col, sparse_skinning.shape)
116
+
117
+ def normalize_to_unit_cube(vertices, normals=None, scale_factor=1.0):
118
+ min_coords = vertices.min(axis=0)
119
+ max_coords = vertices.max(axis=0)
120
+ center = (max_coords + min_coords) / 2.0
121
+
122
+ vertices -= center
123
+ scale = 1.0 / np.abs(vertices).max() * scale_factor
124
+ vertices *= scale
125
+
126
+ if normals is not None:
127
+ # Normalize each normal vector to unit length
128
+ norms = np.linalg.norm(normals, axis=1, keepdims=True)
129
+ normals = normals / (norms+1e-8)
130
+
131
+ return vertices, normals, center, scale
132
+ else:
133
+ return vertices, center, scale
134
+
135
+ def normalize_vertices(vertices, scale=0.9):
136
+ bbmin, bbmax = vertices.min(0), vertices.max(0)
137
+ center = (bbmin + bbmax) * 0.5
138
+ scale = 2.0 * scale / (bbmax - bbmin).max()
139
+ vertices = (vertices - center) * scale
140
+ return vertices, center, scale
141
+
142
+ def export_to_watertight(normalized_mesh, octree_depth: int = 7):
143
+ """
144
+ Convert the non-watertight mesh to watertight.
145
+
146
+ Args:
147
+ normalized_mesh (trimesh.Trimesh): the input mesh (assumed already normalized)
148
+ octree_depth (int): SDF resolution exponent; the grid size is 2**octree_depth
149
+
150
+ Returns:
151
+ mesh(trimesh.Trimesh): watertight mesh
152
+
153
+ """
154
+ size = 2 ** octree_depth
155
+ level = 2 / size
156
+
157
+ scaled_vertices, to_orig_center, to_orig_scale = normalize_vertices(normalized_mesh.vertices)
158
+
159
+ sdf = mesh2sdf.core.compute(scaled_vertices, normalized_mesh.faces, size=size)
160
+
161
+ vertices, faces, normals, _ = skimage.measure.marching_cubes(np.abs(sdf), level)
162
+
163
+ # watertight mesh
164
+ vertices = vertices / size * 2 - 1 # -1 to 1
165
+ vertices = vertices / to_orig_scale + to_orig_center
166
+ mesh = trimesh.Trimesh(vertices, faces, normals=normals)
167
+
168
+ return mesh
169
+
170
+ def process_mesh_to_pc(mesh, marching_cubes = True, sample_num = 8192):
171
+ if marching_cubes:
172
+ mesh = export_to_watertight(mesh)
173
+ return_mesh = mesh
174
+ points, face_idx = mesh.sample(sample_num, return_index=True)
175
+ points, _, _ = normalize_to_unit_cube(points, scale_factor=0.9995)
176
+ normals = mesh.face_normals[face_idx]
177
+
178
+ pc_normal = np.concatenate([points, normals], axis=-1, dtype=np.float16)
179
+ return pc_normal, return_mesh
180
+
181
+ def process_single_file(args):
182
+ mesh_file, rig_file = args
183
+ mesh_name = os.path.basename(mesh_file).split('.')[0]
184
+ rig_name = os.path.basename(rig_file).split('.')[0]
185
+
186
+ if mesh_name != rig_name:
187
+ print(f"Skipping files {mesh_file} and {rig_file} because their names do not match.")
188
+ return None
189
+
190
+ vertices, faces, normals = read_obj_file(mesh_file)
191
+
192
+ joints, bones, root, joint_names, skinning_data = read_rig_file(rig_file)
193
+
194
+ # Normalize the mesh to the unit cube centered at the origin
195
+ vertices, normals, center, scale = normalize_to_unit_cube(vertices, normals, scale_factor=0.5)
196
+
197
+ # Apply the same transformation to joints
198
+ joints -= center
199
+ joints *= scale
200
+
201
+ # Create trimesh object for processing
202
+ mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
203
+
204
+ # Process into point cloud with normals
205
+ pc_normal, _ = process_mesh_to_pc(mesh)
206
+
207
+ # Convert skinning data to sparse format
208
+ sparse_skinning = convert_to_sparse_skinning(skinning_data, len(vertices), len(joints))
209
+
210
+ return {
211
+ 'vertices': vertices,
212
+ 'faces': faces,
213
+ 'normals': normals,
214
+ 'joints': joints,
215
+ 'bones': bones,
216
+ 'root_index': root,
217
+ 'uuid': mesh_name,
218
+ 'pc_w_norm': pc_normal,
219
+ 'joint_names': joint_names,
220
+ 'skinning_weights_value': sparse_skinning[0], # values
221
+ 'skinning_weights_row': sparse_skinning[1], # row indices (singular key, matching read_npz.py)
222
+ 'skinning_weights_col': sparse_skinning[2], # column indices
223
+ 'skinning_weights_shape': sparse_skinning[3] # shape of matrix
224
+ }
225
+
226
+ def process_files(mesh_folder, rig_folder, output_file, num_workers=8):
227
+ file_pairs = []
228
+
229
+ for root, _, files in os.walk(rig_folder):
230
+ for file in files:
231
+ if file.endswith('.txt'):
232
+ rig_file = os.path.join(root, file)
233
+ obj_base_name = os.path.splitext(file)[0]
234
+ mesh_file = os.path.join(mesh_folder, obj_base_name + '.obj')
235
+ if os.path.exists(mesh_file):
236
+ file_pairs.append((mesh_file, rig_file))
237
+ else:
238
+ print(f"Mesh file not found: {mesh_file}")
239
+
240
+ with ProcessPoolExecutor(max_workers=num_workers) as executor:
241
+ data_list = list(executor.map(process_single_file, file_pairs))
242
+
243
+ data_list = [data for data in data_list if data is not None]
244
+
245
+ np.savez_compressed(output_file, data_list) # savez_compressed has no allow_pickle argument; object arrays are pickled on save
246
+
247
+ # Example usage
248
+ mesh_folder = 'meshes/'
249
+ rig_folder = 'rigs/'
250
+ output_file = 'results.npz'
251
+
252
+ process_files(mesh_folder, rig_folder, output_file)
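
A quick round-trip sanity check on the archive written above (a sketch, not part of the script; it uses the singular skinning key names that read_npz.py expects):

    check_list = np.load(output_file, allow_pickle=True)['arr_0']
    entry = check_list[0]
    w = sp.coo_matrix(
        (entry['skinning_weights_value'],
         (entry['skinning_weights_row'], entry['skinning_weights_col'])),
        shape=entry['skinning_weights_shape']).toarray()
    # Rows should sum to ~1 if the source rig's weights were normalized
    print("normalized skinning rows:", np.allclose(w.sum(axis=1), 1.0, atol=1e-3))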
data_utils/update_npz_rm_issue_data.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import numpy as np
15
+ import os
16
+
17
+ def filter_npz_by_filenames(npz_path, txt_path, output_path):
18
+
19
+ data_list = np.load(npz_path, allow_pickle=True)['arr_0']
20
+
21
+ with open(txt_path, 'r') as f:
22
+ exclude_filenames = set(line.strip() for line in f if line.strip())
23
+
24
+ # Filter the data list
25
+ filtered_data = []
26
+ excluded_count = 0
27
+
28
+ for item in data_list:
29
+
30
+ filename = item['uuid']
31
+
32
+ if filename in exclude_filenames:
33
+ excluded_count += 1
34
+ print(filename)
35
+ else:
36
+ filtered_data.append(item)
37
+
38
+ # Save the filtered data
39
+ kept_count = len(filtered_data)
40
+ total_count = len(data_list)
41
+ print(f"Original items: {total_count}")
42
+ print(f"Kept items: {kept_count}")
43
+ print(f"Removed items: {excluded_count}")
44
+
45
+ print("Saving filtered data")
46
+ np.savez_compressed(output_path, filtered_data) # savez_compressed has no allow_pickle argument
47
+
48
+ def main():
49
+ issue_list = "data_utils/issue_data_list.txt" # Change this to your text file path
50
+ npz_path_train = "articulation_xlv2_train.npz" # Change this to your NPZ file path
51
+ output_path_train = "articulation_xlv2_train_update.npz"
52
+ npz_path_test = "articulation_xlv2_test.npz" # Change this to your NPZ file path
53
+ output_path_test = "articulation_xlv2_test_update.npz"
54
+
55
+ filter_npz_by_filenames(npz_path_train, issue_list, output_path_train)
56
+ filter_npz_by_filenames(npz_path_test, issue_list, output_path_test)
57
+
58
+ if __name__ == "__main__":
59
+ main()
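
A small verification pass (sketch) confirming that no excluded uuid survived the filtering:

    def verify_filtered(npz_path, txt_path):
        data_list = np.load(npz_path, allow_pickle=True)['arr_0']
        with open(txt_path, 'r') as f:
            excluded = set(line.strip() for line in f if line.strip())
        leaked = [item['uuid'] for item in data_list if item['uuid'] in excluded]
        print(f"{npz_path}: {len(leaked)} excluded uuids remain")

    # e.g. verify_filtered("articulation_xlv2_train_update.npz", "data_utils/issue_data_list.txt")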
download_models.py ADDED
@@ -0,0 +1,80 @@
1
+ """
2
+ Automatically downloads the model files required by MagicArticulate and Michelangelo.
3
+ Called when the HF Space starts up.
4
+ """
5
+
6
+ import os
7
+ import logging
8
+ from pathlib import Path
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+ def download_models():
13
+ """Download all required model files"""
14
+ try:
15
+ from huggingface_hub import hf_hub_download
16
+
17
+ logger.info("🔄 Starting model downloads...")
18
+
19
+ # 1. Download the Michelangelo model
20
+ michelangelo_path = "third_party/Michelangelo/checkpoints/aligned_shape_latents/shapevae-256.ckpt"
21
+ if not os.path.exists(michelangelo_path):
22
+ logger.info("📥 Downloading Michelangelo model...")
23
+ try:
24
+ file_path = hf_hub_download(
25
+ repo_id="Maikou/Michelangelo",
26
+ filename="checkpoints/aligned_shape_latents/shapevae-256.ckpt",
27
+ local_dir="third_party/Michelangelo"
28
+ )
29
+ logger.info(f"✅ Michelangelo model downloaded: {file_path}")
30
+ except Exception as e:
31
+ logger.error(f"❌ Michelangelo model download failed: {e}")
32
+ else:
33
+ logger.info("✅ Michelangelo model already present")
34
+
35
+ # 2. Download the MagicArticulate hierarchical-order model
36
+ hier_path = "skeleton_ckpt/checkpoint_trainonv2_hier.pth"
37
+ if not os.path.exists(hier_path):
38
+ logger.info("📥 Downloading MagicArticulate hierarchical model...")
39
+ try:
40
+ os.makedirs("skeleton_ckpt", exist_ok=True)
41
+ file_path = hf_hub_download(
42
+ repo_id="Seed3D/MagicArticulate",
43
+ filename="skeleton_ckpt/checkpoint_trainonv2_hier.pth",
44
+ local_dir=""
45
+ )
46
+ logger.info(f"✅ MagicArticulate hierarchical model downloaded: {file_path}")
47
+ except Exception as e:
48
+ logger.error(f"❌ MagicArticulate hierarchical model download failed: {e}")
49
+ else:
50
+ logger.info("✅ MagicArticulate hierarchical model already present")
51
+
52
+ # 3. Download the MagicArticulate spatial-order model
53
+ spatial_path = "skeleton_ckpt/checkpoint_trainonv2_spatial.pth"
54
+ if not os.path.exists(spatial_path):
55
+ logger.info("📥 Downloading MagicArticulate spatial model...")
56
+ try:
57
+ os.makedirs("skeleton_ckpt", exist_ok=True)
58
+ file_path = hf_hub_download(
59
+ repo_id="Seed3D/MagicArticulate",
60
+ filename="skeleton_ckpt/checkpoint_trainonv2_spatial.pth",
61
+ local_dir=""
62
+ )
63
+ logger.info(f"✅ MagicArticulate spatial model downloaded: {file_path}")
64
+ except Exception as e:
65
+ logger.error(f"❌ MagicArticulate spatial model download failed: {e}")
66
+ else:
67
+ logger.info("✅ MagicArticulate spatial model already present")
68
+
69
+ logger.info("🎯 Model download pass complete")
70
+ return True
71
+
72
+ except ImportError:
73
+ logger.error("❌ huggingface_hub is not installed; cannot download models")
74
+ return False
75
+ except Exception as e:
76
+ logger.error(f"💥 Error during model download: {e}")
77
+ return False
78
+
79
+ if __name__ == "__main__":
80
+ download_models()
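
How this is expected to be wired into the Space entry point (a sketch; the exact call site inside app.py is an assumption):

    # near the top of app.py, before building the Gradio UI
    from download_models import download_models

    if not download_models():
        raise SystemExit("Required model checkpoints could not be downloaded")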
magic_articulate_plus/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ """
2
+ MagicArticulate-Plus Integration
3
+ Integrates user-upload support and enhanced features
4
+ """
5
+
6
+ from .articulate_api import (
7
+ MagicArticulateAPI,
8
+ ModelValidator,
9
+ ModelPreprocessor,
10
+ UserSessionManager,
11
+ process_model_file
12
+ )
13
+
14
+ __all__ = [
15
+ 'MagicArticulateAPI',
16
+ 'ModelValidator',
17
+ 'ModelPreprocessor',
18
+ 'UserSessionManager',
19
+ 'process_model_file'
20
+ ]
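
Intended import surface (a sketch; the file path and prompt are hypothetical):

    from magic_articulate_plus import process_model_file

    result = process_model_file("examples/chair.obj", user_prompt="an office chair")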
magic_articulate_plus/articulate_api.py ADDED
@@ -0,0 +1,899 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ MagicArticulate API - Enhanced Version
4
+ Supports user-uploaded 3D model files and multi-user result management
5
+ """
6
+
7
+ import os
8
+ import sys
9
+ import uuid
10
+ import json
11
+ import time
12
+ import shutil
13
+ import logging
14
+ import tempfile
15
+ import traceback
16
+ from pathlib import Path
17
+ from datetime import datetime
18
+ from typing import Dict, Any, List, Optional, Tuple
19
+
20
+ import torch
21
+ import trimesh
22
+ import numpy as np
23
+ from tqdm import tqdm
24
+
25
+ from accelerate import Accelerator
26
+ from accelerate.utils import set_seed, DistributedDataParallelKwargs
27
+
28
+ # Add the parent directory to the path so modules import correctly
29
+ parent_dir = str(Path(__file__).parent.parent)
30
+ if parent_dir not in sys.path:
31
+ sys.path.insert(0, parent_dir)
32
+
33
+ print(f"🔍 ARTICULATE_API DEBUG: Current working directory: {os.getcwd()}")
34
+ print(f"🔍 ARTICULATE_API DEBUG: Script file path: {__file__}")
35
+ print(f"🔍 ARTICULATE_API DEBUG: Parent directory: {parent_dir}")
36
+ print(f"🔍 ARTICULATE_API DEBUG: sys.path includes:")
37
+ for i, path in enumerate(sys.path[:10]): # show only the first 10 entries to keep output short
38
+ print(f" {i}: {path}")
39
+
40
+ # Check the directory layout
41
+ utils_path = os.path.join(parent_dir, 'utils')
42
+ skeleton_path = os.path.join(parent_dir, 'skeleton_models')
43
+ print(f"🔍 ARTICULATE_API DEBUG: utils path exists: {os.path.exists(utils_path)}")
44
+ print(f"🔍 ARTICULATE_API DEBUG: skeleton_models path exists: {os.path.exists(skeleton_path)}")
45
+
46
+ if os.path.exists(utils_path):
47
+ print(f"🔍 ARTICULATE_API DEBUG: utils contents: {os.listdir(utils_path)}")
48
+
49
+ from skeleton_models.skeletongen import SkeletonGPT
50
+ from utils.mesh_to_pc import MeshProcessor
51
+ from utils.save_utils import (
52
+ save_mesh, pred_joints_and_bones, save_skeleton_to_txt,
53
+ save_args, remove_duplicate_joints, save_skeleton_obj,
54
+ render_mesh_with_skeleton
55
+ )
56
+
57
+ # Configure logging
58
+ logging.basicConfig(
59
+ level=logging.INFO,
60
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
61
+ )
62
+ logger = logging.getLogger(__name__)
63
+
64
+ class ModelValidator:
65
+ """Validates and repairs 3D models"""
66
+
67
+ SUPPORTED_FORMATS = {'.obj', '.glb', '.gltf', '.ply', '.stl', '.fbx', '.dae'}
68
+ MAX_VERTICES = 100000 # maximum vertex count
69
+ MIN_VERTICES = 100 # minimum vertex count
70
+ MAX_FILE_SIZE_MB = 100 # maximum file size in MB
71
+
72
+ @staticmethod
73
+ def validate_file(file_path: str) -> Tuple[bool, str, Dict[str, Any]]:
74
+ """
75
+ Validate a 3D model file.
76
+
77
+ Returns:
78
+ (is_valid, error_message, model_info)
79
+ """
80
+ try:
81
+ # Check that the file exists
82
+ if not os.path.exists(file_path):
83
+ return False, "File does not exist", {}
84
+
85
+ # Check the file size
86
+ file_size_mb = os.path.getsize(file_path) / (1024 * 1024)
87
+ if file_size_mb > ModelValidator.MAX_FILE_SIZE_MB:
88
+ return False, f"File too large: {file_size_mb:.1f}MB > {ModelValidator.MAX_FILE_SIZE_MB}MB", {}
89
+
90
+ # Check the file format
91
+ file_ext = Path(file_path).suffix.lower()
92
+ if file_ext not in ModelValidator.SUPPORTED_FORMATS:
93
+ return False, f"Unsupported file format: {file_ext}", {}
94
+
95
+ # Try to load the model
96
+ mesh = trimesh.load(file_path, force='mesh')
97
+
98
+ # Check that it is a valid mesh
99
+ if not hasattr(mesh, 'vertices') or not hasattr(mesh, 'faces'):
100
+ return False, "File contains no valid mesh data", {}
101
+
102
+ # Check the vertex count
103
+ vertex_count = len(mesh.vertices)
104
+ if vertex_count < ModelValidator.MIN_VERTICES:
105
+ return False, f"Too few vertices: {vertex_count} < {ModelValidator.MIN_VERTICES}", {}
106
+ if vertex_count > ModelValidator.MAX_VERTICES:
107
+ return False, f"Too many vertices: {vertex_count} > {ModelValidator.MAX_VERTICES}", {}
108
+
109
+ # Collect model info
110
+ model_info = {
111
+ 'file_name': os.path.basename(file_path),
112
+ 'file_size_mb': file_size_mb,
113
+ 'format': file_ext,
114
+ 'vertex_count': vertex_count,
115
+ 'face_count': len(mesh.faces) if hasattr(mesh, 'faces') else 0,
116
+ 'bounds': mesh.bounds.tolist() if hasattr(mesh, 'bounds') else None,
117
+ 'is_watertight': mesh.is_watertight if hasattr(mesh, 'is_watertight') else False,
118
+ 'volume': float(mesh.volume) if hasattr(mesh, 'volume') else 0.0,
119
+ 'area': float(mesh.area) if hasattr(mesh, 'area') else 0.0,
120
+ }
121
+
122
+ return True, "Validation passed", model_info
123
+
124
+ except Exception as e:
125
+ return False, f"Model validation failed: {str(e)}", {}
126
+
127
+ @staticmethod
128
+ def auto_repair_mesh(mesh: trimesh.Trimesh) -> Tuple[trimesh.Trimesh, List[str]]:
129
+ """
130
+ Automatically repair common mesh issues.
131
+
132
+ Returns:
133
+ (repaired_mesh, repair_log)
134
+ """
135
+ repair_log = []
136
+
137
+ try:
138
+ # Merge duplicate vertices
139
+ if mesh.is_volume:
140
+ original_vertices = len(mesh.vertices)
141
+ mesh.merge_vertices()
142
+ if len(mesh.vertices) < original_vertices:
143
+ repair_log.append(f"Removed {original_vertices - len(mesh.vertices)} duplicate vertices")
144
+
145
+ # Fix normals
146
+ if not hasattr(mesh, 'vertex_normals') or mesh.vertex_normals is None:
147
+ mesh.fix_normals()
148
+ repair_log.append("Fixed vertex normals")
149
+
150
+ # Remove degenerate faces
151
+ original_faces = len(mesh.faces)
152
+ mesh.remove_degenerate_faces()
153
+ if len(mesh.faces) < original_faces:
154
+ repair_log.append(f"Removed {original_faces - len(mesh.faces)} degenerate faces")
155
+
156
+ # Fill holes (if needed)
157
+ if not mesh.is_watertight and hasattr(mesh, 'fill_holes'):
158
+ try:
159
+ mesh.fill_holes()
160
+ repair_log.append("Filled mesh holes")
161
+ except Exception:
162
+ repair_log.append("Hole filling failed; continuing anyway")
163
+
164
+ return mesh, repair_log
165
+
166
+ except Exception as e:
167
+ logger.warning(f"Error during mesh repair: {str(e)}")
168
+ return mesh, repair_log + [f"Repair error: {str(e)}"]
169
+
170
+ class ModelPreprocessor:
171
+ """Model preprocessing utilities"""
172
+
173
+ @staticmethod
174
+ def convert_format(input_path: str, output_format: str = '.obj') -> str:
175
+ """
176
+ Convert the model to another format.
177
+
178
+ Args:
179
+ input_path: path of the input file
180
+ output_format: output format (defaults to .obj)
181
+
182
+ Returns:
183
+ Path of the converted file
184
+ """
185
+ try:
186
+ mesh = trimesh.load(input_path, force='mesh')
187
+
188
+ # Build the output path
189
+ base_name = os.path.splitext(os.path.basename(input_path))[0]
190
+ output_path = os.path.join(
191
+ os.path.dirname(input_path),
192
+ f"{base_name}_converted{output_format}"
193
+ )
194
+
195
+ # Export in the requested format
196
+ mesh.export(output_path)
197
+
198
+ logger.info(f"Format conversion complete: {input_path} -> {output_path}")
199
+ return output_path
200
+
201
+ except Exception as e:
202
+ logger.error(f"Format conversion failed: {str(e)}")
203
+ raise
204
+
205
+ @staticmethod
206
+ def simplify_mesh(mesh: trimesh.Trimesh, target_faces: int = 5000) -> trimesh.Trimesh:
207
+ """
208
+ Simplify the mesh.
209
+
210
+ Args:
211
+ mesh: input mesh
212
+ target_faces: target face count
213
+
214
+ Returns:
215
+ The simplified mesh
216
+ """
217
+ try:
218
+ if len(mesh.faces) <= target_faces:
219
+ return mesh
220
+
221
+ # Simplify via quadric decimation
222
+ simplified = mesh.simplify_quadratic_decimation(target_faces)
223
+
224
+ logger.info(f"Mesh simplified: {len(mesh.faces)} -> {len(simplified.faces)} faces")
225
+ return simplified
226
+
227
+ except Exception as e:
228
+ logger.warning(f"Mesh simplification failed: {str(e)}; using the original mesh")
229
+ return mesh
230
+
231
+ @staticmethod
232
+ def normalize_mesh(mesh: trimesh.Trimesh, scale_factor: float = 0.95) -> Tuple[trimesh.Trimesh, Dict[str, Any]]:
233
+ """
234
+ Normalize the mesh into a canonical coordinate space.
235
+
236
+ Args:
237
+ mesh: input mesh
238
+ scale_factor: scaling factor
239
+
240
+ Returns:
241
+ (normalized_mesh, transform_info)
242
+ """
243
+ try:
244
+ # Compute the bounding box
245
+ bounds = mesh.bounds
246
+ center = (bounds[0] + bounds[1]) / 2
247
+ size = bounds[1] - bounds[0]
248
+ max_size = size.max()
249
+
250
+ # Compute the transform parameters
251
+ scale = (2.0 * scale_factor) / max_size
252
+ translation = -center
253
+
254
+ # Apply the transform
255
+ vertices = mesh.vertices.copy()
256
+ vertices = (vertices + translation) * scale
257
+
258
+ # Build the normalized mesh
259
+ normalized_mesh = trimesh.Trimesh(vertices=vertices, faces=mesh.faces)
260
+
261
+ # Record the transform info
262
+ transform_info = {
263
+ 'original_center': center.tolist(),
264
+ 'original_size': size.tolist(),
265
+ 'scale': float(scale),
266
+ 'translation': translation.tolist()
267
+ }
268
+
269
+ logger.info(f"Mesh normalization complete: scale={scale:.4f}")
270
+ return normalized_mesh, transform_info
271
+
272
+ except Exception as e:
273
+ logger.error(f"Mesh normalization failed: {str(e)}")
274
+ raise
275
+
276
+ class UserSessionManager:
277
+ """Manages per-user sessions"""
278
+
279
+ def __init__(self, base_dir: str = "user_sessions"):
280
+ self.base_dir = Path(base_dir)
281
+ self.base_dir.mkdir(exist_ok=True)
282
+
283
+ # Session metadata file
284
+ self.metadata_file = self.base_dir / "sessions_metadata.json"
285
+ self.load_metadata()
286
+
287
+ def load_metadata(self):
288
+ """Load session metadata"""
289
+ if self.metadata_file.exists():
290
+ with open(self.metadata_file, 'r', encoding='utf-8') as f:
291
+ self.sessions = json.load(f)
292
+ else:
293
+ self.sessions = {}
294
+
295
+ def save_metadata(self):
296
+ """Persist session metadata"""
297
+ with open(self.metadata_file, 'w', encoding='utf-8') as f:
298
+ json.dump(self.sessions, f, indent=2, ensure_ascii=False)
299
+
300
+ def create_session(self, user_id: Optional[str] = None) -> str:
301
+ """
302
+ Create a new user session.
303
+
304
+ Args:
305
+ user_id: user ID (optional)
306
+
307
+ Returns:
308
+ session_id
309
+ """
310
+ session_id = str(uuid.uuid4())
311
+ session_dir = self.base_dir / session_id
312
+ session_dir.mkdir(exist_ok=True)
313
+
314
+ # Create subdirectories
315
+ (session_dir / "uploads").mkdir(exist_ok=True)
316
+ (session_dir / "outputs").mkdir(exist_ok=True)
317
+ (session_dir / "temp").mkdir(exist_ok=True)
318
+
319
+ # Record the session info
320
+ self.sessions[session_id] = {
321
+ 'user_id': user_id,
322
+ 'created_at': datetime.now().isoformat(),
323
+ 'status': 'active',
324
+ 'processed_models': [],
325
+ 'last_activity': datetime.now().isoformat()
326
+ }
327
+
328
+ self.save_metadata()
329
+ logger.info(f"Created new session: {session_id}")
330
+ return session_id
331
+
332
+ def get_session_dir(self, session_id: str) -> Path:
333
+ """Return the session directory"""
334
+ session_dir = self.base_dir / session_id
335
+ if not session_dir.exists():
336
+ raise ValueError(f"Session does not exist: {session_id}")
337
+ return session_dir
338
+
339
+ def update_activity(self, session_id: str):
340
+ """Refresh the session's last-activity timestamp"""
341
+ if session_id in self.sessions:
342
+ self.sessions[session_id]['last_activity'] = datetime.now().isoformat()
343
+ self.save_metadata()
344
+
345
+ def add_processed_model(self, session_id: str, model_info: Dict[str, Any]):
346
+ """Record a processed model in the session"""
347
+ if session_id in self.sessions:
348
+ self.sessions[session_id]['processed_models'].append(model_info)
349
+ self.update_activity(session_id)
350
+
351
+ def cleanup_old_sessions(self, max_age_days: int = 7):
352
+ """Remove sessions older than max_age_days"""
353
+ cutoff_time = datetime.now().timestamp() - (max_age_days * 24 * 3600)
354
+
355
+ sessions_to_remove = []
356
+ for session_id, session_info in self.sessions.items():
357
+ last_activity = datetime.fromisoformat(session_info['last_activity'])
358
+ if last_activity.timestamp() < cutoff_time:
359
+ sessions_to_remove.append(session_id)
360
+
361
+ for session_id in sessions_to_remove:
362
+ try:
363
+ session_dir = self.base_dir / session_id
364
+ if session_dir.exists():
365
+ shutil.rmtree(session_dir)
366
+ del self.sessions[session_id]
367
+ logger.info(f"Cleaned up stale session: {session_id}")
368
+ except Exception as e:
369
+ logger.error(f"Failed to clean up session {session_id}: {str(e)}")
370
+
371
+ if sessions_to_remove:
372
+ self.save_metadata()
373
+
374
+ class MagicArticulateAPI:
375
+ """Main MagicArticulate API class"""
376
+
377
+ def __init__(self,
378
+ model_weights_path: Optional[str] = None,
379
+ device: str = "auto",
380
+ session_base_dir: str = "user_sessions"):
381
+
382
+ self.device = self._setup_device(device)
383
+ self.model = None
384
+ self.accelerator = None
385
+ self.model_weights_path = model_weights_path
386
+
387
+ # Initialize the session manager
388
+ self.session_manager = UserSessionManager(session_base_dir)
389
+
390
+ # Default processing parameters - matching the original demo.py settings
391
+ self.default_args = {
392
+ 'input_pc_num': 8192,
393
+ 'num_beams': 1,
394
+ 'n_discrete_size': 128,
395
+ 'n_max_bones': 100,
396
+ 'pad_id': -1,
397
+ 'precision': 'fp16',
398
+ 'batchsize_per_gpu': 1,
399
+ 'apply_marching_cubes': False,
400
+ 'octree_depth': 7,
401
+ 'hier_order': False, # matches the demo.py default
402
+ 'save_render': False,
403
+ 'llm': 'facebook/opt-350m' # matches the demo.py default
404
+ }
405
+
406
+ self.initialized = False
407
+ logger.info("MagicArticulate API initialized")
408
+
409
+ def _setup_device(self, device: str) -> torch.device:
410
+ """Select the compute device"""
411
+ if device == "auto":
412
+ if torch.cuda.is_available():
413
+ device = "cuda"
414
+ logger.info(f"Using GPU: {torch.cuda.get_device_name()}")
415
+ else:
416
+ device = "cpu"
417
+ logger.info("Using CPU")
418
+
419
+ return torch.device(device)
420
+
421
+ def initialize_model(self) -> bool:
422
+ """Initialize the model"""
423
+ try:
424
+ if self.initialized:
425
+ return True
426
+
427
+ logger.info("Initializing the MagicArticulate model...")
428
+
429
+ # Set up the accelerator
430
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
431
+ self.accelerator = Accelerator(
432
+ kwargs_handlers=[kwargs],
433
+ mixed_precision=self.default_args['precision'],
434
+ )
435
+
436
+ # Build the model
437
+ args = self._create_args_object()
438
+ self.model = SkeletonGPT(args)
439
+
440
+ if self.device.type == "cuda":
441
+ self.model = self.model.cuda()
442
+
443
+ # Load pretrained weights
444
+ if self.model_weights_path and os.path.exists(self.model_weights_path):
445
+ logger.info(f"Loading model weights: {self.model_weights_path}")
446
+ pkg = torch.load(self.model_weights_path, map_location=self.device)
447
+ self.model.load_state_dict(pkg["model"])
448
+ else:
449
+ error_msg = "Pretrained weights must be provided! Running with random initialization; results will be inaccurate."
450
+ logger.error(error_msg)
451
+ # Do not raise, but warn loudly
452
+ logger.error("⚠️ WARNING: no pretrained weights; the generated skeletons will be inaccurate!")
453
+
454
+ self.model.eval()
455
+ set_seed(0)
456
+
457
+ # Prepare the model
458
+ if self.accelerator:
459
+ self.model = self.accelerator.prepare(self.model)
460
+
461
+ self.initialized = True
462
+ logger.info("✅ Model initialized successfully")
463
+ return True
464
+
465
+ except Exception as e:
466
+ logger.error(f"❌ Model initialization failed: {str(e)}")
467
+ logger.error(traceback.format_exc())
468
+ return False
469
+
470
+ def process_uploaded_model(self,
471
+ file_path: str,
472
+ session_id: Optional[str] = None,
473
+ user_prompt: str = "",
474
+ processing_options: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
475
+ """
476
+ Process a user-uploaded 3D model.
477
+
478
+ Args:
479
+ file_path: path of the model file
480
+ session_id: session ID (optional)
481
+ user_prompt: user prompt
482
+ processing_options: processing options
483
+
484
+ Returns:
485
+ A result dictionary
486
+ """
487
+ start_time = time.time()
488
+
489
+ try:
490
+ # Create a session if none was provided
491
+ if not session_id:
492
+ session_id = self.session_manager.create_session()
493
+
494
+ logger.info(f"Processing model: {file_path}, session: {session_id}")
495
+
496
+ # Step 1: validate the model file
497
+ is_valid, error_msg, model_info = ModelValidator.validate_file(file_path)
498
+ if not is_valid:
499
+ return self._create_error_result(error_msg, session_id, start_time)
500
+
501
+ logger.info(f"Model validated: {model_info}")
502
+
503
+ # Step 2: copy the file into the session directory
504
+ session_dir = self.session_manager.get_session_dir(session_id)
505
+ uploaded_file = session_dir / "uploads" / os.path.basename(file_path)
506
+ shutil.copy2(file_path, uploaded_file)
507
+
508
+ # Step 3: preprocess the model
509
+ processed_mesh, preprocessing_log = self._preprocess_model(
510
+ str(uploaded_file),
511
+ processing_options or {}
512
+ )
513
+
514
+ # Step 4: generate the skeleton
515
+ if not self.initialized:
516
+ if not self.initialize_model():
517
+ return self._create_error_result("Model initialization failed", session_id, start_time)
518
+
519
+ skeleton_result = self._generate_skeleton(
520
+ processed_mesh,
521
+ model_info['file_name'],
522
+ user_prompt
523
+ )
524
+
525
+ # Step 5: save the results
526
+ output_files = self._save_results(
527
+ skeleton_result,
528
+ processed_mesh,
529
+ model_info,
530
+ session_dir,
531
+ user_prompt
532
+ )
533
+
534
+ # Step 6: record the processing result
535
+ processing_time = time.time() - start_time
536
+ result = {
537
+ 'success': True,
538
+ 'session_id': session_id,
539
+ 'processing_time': processing_time,
540
+ 'model_info': model_info,
541
+ 'preprocessing_log': preprocessing_log,
542
+ 'skeleton_data': skeleton_result,
543
+ 'output_files': output_files,
544
+ 'user_prompt': user_prompt,
545
+ 'timestamp': datetime.now().isoformat()
546
+ }
547
+
548
+ # Update the session record
549
+ self.session_manager.add_processed_model(session_id, {
550
+ 'file_name': model_info['file_name'],
551
+ 'processing_time': processing_time,
552
+ 'timestamp': datetime.now().isoformat(),
553
+ 'success': True
554
+ })
555
+
556
+ logger.info(f"✅ Model processed in {processing_time:.2f}s")
557
+ return result
558
+
559
+ except Exception as e:
560
+ processing_time = time.time() - start_time
561
+ error_msg = f"Error during processing: {str(e)}"
562
+ logger.error(f"❌ {error_msg}")
563
+ logger.error(traceback.format_exc())
564
+
565
+ return self._create_error_result(error_msg, session_id, start_time)
566
+
567
+ def _preprocess_model(self, file_path: str, options: Dict[str, Any]) -> Tuple[trimesh.Trimesh, List[str]]:
568
+ """Preprocess the model"""
569
+ preprocessing_log = []
570
+
571
+ try:
572
+ # Load the model
573
+ mesh = trimesh.load(file_path, force='mesh')
574
+ preprocessing_log.append(f"Loaded model: {len(mesh.vertices)} vertices, {len(mesh.faces)} faces")
575
+
576
+ # Auto-repair
577
+ if options.get('auto_repair', True):
578
+ mesh, repair_log = ModelValidator.auto_repair_mesh(mesh)
579
+ preprocessing_log.extend(repair_log)
580
+
581
+ # Simplify the mesh if needed
582
+ target_faces = options.get('target_faces', 10000)
583
+ if len(mesh.faces) > target_faces:
584
+ mesh = ModelPreprocessor.simplify_mesh(mesh, target_faces)
585
+ preprocessing_log.append(f"Simplified mesh to {len(mesh.faces)} faces")
586
+
587
+ # Normalize the mesh
588
+ mesh, transform_info = ModelPreprocessor.normalize_mesh(mesh)
589
+ preprocessing_log.append(f"Normalized mesh: scale={transform_info['scale']:.4f}")
590
+
591
+ return mesh, preprocessing_log
592
+
593
+ except Exception as e:
594
+ error_msg = f"Preprocessing failed: {str(e)}"
595
+ logger.error(error_msg)
596
+ raise RuntimeError(error_msg)
597
+
598
+ def _generate_skeleton(self, mesh: trimesh.Trimesh, file_name: str, user_prompt: str) -> Dict[str, Any]:
599
+ """Generate the skeleton structure"""
600
+ try:
601
+ # Convert the mesh to a point cloud
602
+ points_per_mesh = self.default_args['input_pc_num']
603
+ apply_marching_cubes = self.default_args['apply_marching_cubes']
604
+ octree_depth = self.default_args['octree_depth']
605
+
606
+ point_clouds = MeshProcessor.convert_meshes_to_point_clouds(
607
+ [mesh],
608
+ points_per_mesh,
609
+ apply_marching_cubes,
610
+ octree_depth
611
+ )
612
+
613
+ pc_data = point_clouds[0]
614
+
615
+ # Normalize exactly as the original demo does
616
+ pc_coor = pc_data[:, :3]
617
+ normals = pc_data[:, 3:]
618
+ bounds = np.array([pc_coor.min(axis=0), pc_coor.max(axis=0)])
619
+
620
+ # Store the transform so results can be denormalized later
621
+ trans = (bounds[0] + bounds[1])[None, :] / 2
622
+ scale = ((bounds[1] - bounds[0]).max() + 1e-5)
623
+
624
+ # Normalize coordinates - identical to the original demo
625
+ pc_coor = pc_coor - (bounds[0] + bounds[1])[None, :] / 2
626
+ pc_coor = pc_coor / np.abs(pc_coor).max() * 0.9995
627
+
628
+ # Combine coordinates and normals
629
+ pc_coor = pc_coor.astype(np.float32)
630
+ normals = normals.astype(np.float32)
631
+ pc_normal_data = np.concatenate([pc_coor, normals], axis=-1, dtype=np.float16)
632
+
633
+ # Prepare the input tensors
634
+ pc_normal = torch.from_numpy(pc_normal_data).unsqueeze(0)
635
+ if self.device.type == "cuda":
636
+ pc_normal = pc_normal.cuda()
637
+
638
+ # Compute the mesh's transform info
639
+ mesh_bounds = np.array([mesh.vertices.min(axis=0), mesh.vertices.max(axis=0)])
640
+ mesh_trans = (mesh_bounds[0] + mesh_bounds[1])[None, :] / 2
641
+ mesh_scale = ((mesh_bounds[1] - mesh_bounds[0]).max() + 1e-5)
642
+
643
+ batch_data = {
644
+ 'pc_normal': pc_normal,
645
+ 'file_name': [os.path.splitext(file_name)[0]],
646
+ 'trans': torch.from_numpy(mesh_trans).unsqueeze(0),
647
+ 'scale': torch.tensor(mesh_scale).unsqueeze(0),
648
+ 'vertices': torch.from_numpy(mesh.vertices).unsqueeze(0),
649
+ 'faces': torch.from_numpy(mesh.faces).unsqueeze(0)
650
+ }
651
+
652
+ # 生成骨骼
653
+ with torch.no_grad():
654
+ if self.accelerator:
655
+ with self.accelerator.autocast():
656
+ pred_bone_coords = self.model.generate(batch_data)
657
+ else:
658
+ pred_bone_coords = self.model.generate(batch_data)
659
+
660
+ # 处理��出 - 完全按照原始demo的流程
661
+ trans = batch_data['trans'][0].cpu().numpy()
662
+ scale = batch_data['scale'][0].cpu().numpy()
663
+ vertices = batch_data['vertices'][0].cpu().numpy()
664
+ faces = batch_data['faces'][0].cpu().numpy()
665
+
666
+ skeleton = pred_bone_coords[0].cpu().numpy().squeeze()
667
+ pred_joints, pred_bones = pred_joints_and_bones(skeleton)
668
+
669
+ # 去重处理
670
+ if self.default_args['hier_order']:
671
+ pred_joints, pred_bones, pred_root_index = remove_duplicate_joints(
672
+ pred_joints, pred_bones, root_index=pred_bones[0][0]
673
+ )
674
+ else:
675
+ pred_joints, pred_bones = remove_duplicate_joints(pred_joints, pred_bones)
676
+ pred_root_index = 0
677
+
678
+ # 重要:去标准化骨骼关节到原始模型坐标系
679
+ pred_joints_denorm = pred_joints * scale + trans
680
+
681
+ return {
682
+ 'joints': pred_joints_denorm.tolist(), # 使用去标准化后的关节
683
+ 'joints_normalized': pred_joints.tolist(), # 保留标准化的关节用于可视化
684
+ 'bones': pred_bones,
685
+ 'root_index': pred_root_index,
686
+ 'joint_count': len(pred_joints),
687
+ 'bone_count': len(pred_bones),
688
+ 'raw_skeleton': skeleton.tolist(),
689
+ 'user_prompt': user_prompt,
690
+ 'transform_info': {
691
+ 'trans': trans.tolist(),
692
+ 'scale': float(scale)
693
+ }
694
+ }
695
+
696
+ except Exception as e:
697
+ error_msg = f"骨骼生成失败: {str(e)}"
698
+ logger.error(error_msg)
699
+ raise RuntimeError(error_msg)
700
+
701
+ def _save_results(self,
702
+ skeleton_result: Dict[str, Any],
703
+ mesh: trimesh.Trimesh,
704
+ model_info: Dict[str, Any],
705
+ session_dir: Path,
706
+ user_prompt: str) -> Dict[str, str]:
707
+ """保存处理结果"""
708
+ """Save the processing results"""
709
+ output_dir = session_dir / "outputs"
710
+ base_name = os.path.splitext(model_info['file_name'])[0]
711
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
712
+
713
+ output_files = {}
714
+
715
+ # 移除JSON格式输出以避免序列化问题
716
+
717
+ # 保存骨骼OBJ - 使用去标准化后的关节
718
+ obj_file = output_dir / f"{base_name}_{timestamp}_skeleton.obj"
719
+ save_skeleton_obj(
720
+ np.array(skeleton_result['joints']),
721
+ skeleton_result['bones'],
722
+ str(obj_file),
723
+ skeleton_result.get('root_index', 0),
724
+ use_cone=self.default_args['hier_order']
725
+ )
726
+ output_files['skeleton_obj'] = str(obj_file)
727
+
728
+ # 保存骨骼TXT
729
+ txt_file = output_dir / f"{base_name}_{timestamp}_rig.txt"
730
+ save_skeleton_to_txt(
731
+ np.array(skeleton_result['joints']),
732
+ skeleton_result['bones'],
733
+ skeleton_result.get('root_index', 0),
734
+ self.default_args['hier_order'],
735
+ mesh.vertices,
736
+ str(txt_file)
737
+ )
738
+ output_files['skeleton_txt'] = str(txt_file)
739
+
740
+ # Save the processed mesh
741
+ mesh_file = output_dir / f"{base_name}_{timestamp}_processed.obj"
742
+ mesh.export(str(mesh_file))
743
+ output_files['processed_mesh'] = str(mesh_file)
744
+
745
+ # Save a plain-text processing report
746
+ report_file = output_dir / f"{base_name}_{timestamp}_report.txt"
747
+ report_content = f"""MagicArticulate Processing Report
748
+ =====================================
749
+
750
+ File: {model_info['file_name']}
751
+ Processing Time: {datetime.now().isoformat()}
752
+ User Prompt: {user_prompt}
753
+
754
+ Model Information:
755
+ - Vertices: {model_info.get('vertex_count', 'N/A')}
756
+ - Faces: {model_info.get('face_count', 'N/A')}
757
+ - File Size: {model_info.get('file_size_mb', 'N/A')} MB
758
+ - Format: {model_info.get('format', 'N/A')}
759
+
760
+ Skeleton Results:
761
+ - Joints: {skeleton_result.get('joint_count', 'N/A')}
762
+ - Bones: {skeleton_result.get('bone_count', 'N/A')}
763
+ - Root Index: {skeleton_result.get('root_index', 'N/A')}
764
+
765
+ Generated Files:
766
+ - Skeleton OBJ: {base_name}_{timestamp}_skeleton.obj
767
+ - Skeleton TXT: {base_name}_{timestamp}_rig.txt
768
+ - Processed Mesh: {base_name}_{timestamp}_processed.obj
769
+ """
770
+
771
+ with open(report_file, 'w', encoding='utf-8') as f:
772
+ f.write(report_content)
773
+ output_files['report'] = str(report_file)
774
+
775
+ logger.info(f"Saved {len(output_files)} result files")
776
+ return output_files
777
+
778
+ except Exception as e:
779
+ error_msg = f"Failed to save results: {str(e)}"
780
+ logger.error(error_msg)
781
+ raise RuntimeError(error_msg)
782
+
783
+ def _create_error_result(self, error_message: str, session_id: str, start_time: float) -> Dict[str, Any]:
784
+ """Build an error result"""
785
+ processing_time = time.time() - start_time
786
+
787
+ return {
788
+ 'success': False,
789
+ 'session_id': session_id,
790
+ 'error': error_message,
791
+ 'processing_time': processing_time,
792
+ 'timestamp': datetime.now().isoformat()
793
+ }
794
+
795
+ def _make_json_serializable(self, obj):
796
+ """Convert an object into a JSON-serializable form"""
797
+ if isinstance(obj, np.ndarray):
798
+ return obj.tolist()
799
+ elif isinstance(obj, np.integer):
800
+ return int(obj)
801
+ elif isinstance(obj, np.floating):
802
+ return float(obj)
803
+ elif isinstance(obj, dict):
804
+ return {key: self._make_json_serializable(value) for key, value in obj.items()}
805
+ elif isinstance(obj, list):
806
+ return [self._make_json_serializable(item) for item in obj]
807
+ else:
808
+ return obj
809
+
810
+ def _create_args_object(self):
811
+ """Build the args object"""
812
+ class Args:
813
+ def __init__(self, **kwargs):
814
+ for key, value in kwargs.items():
815
+ setattr(self, key, value)
816
+
817
+ return Args(**self.default_args)
818
+
819
+ def get_session_info(self, session_id: str) -> Dict[str, Any]:
820
+ """获取会话信息"""
821
+ if session_id not in self.session_manager.sessions:
822
+ raise ValueError(f"会话不存在: {session_id}")
823
+
824
+ return self.session_manager.sessions[session_id].copy()
825
+
826
+ def list_user_sessions(self, user_id: Optional[str] = None) -> List[Dict[str, Any]]:
827
+ """列出用户会话"""
828
+ sessions = []
829
+ for session_id, session_info in self.session_manager.sessions.items():
830
+ if user_id is None or session_info.get('user_id') == user_id:
831
+ sessions.append({
832
+ 'session_id': session_id,
833
+ **session_info
834
+ })
835
+
836
+ return sorted(sessions, key=lambda x: x['created_at'], reverse=True)
837
+
838
+ def cleanup_sessions(self, max_age_days: int = 7):
839
+ """清理旧会话"""
840
+ self.session_manager.cleanup_old_sessions(max_age_days)
841
+
842
+ # Simplified usage interface
843
+ def process_model_file(file_path: str,
844
+ user_prompt: str = "",
845
+ model_weights_path: Optional[str] = None,
846
+ output_dir: Optional[str] = None) -> Dict[str, Any]:
847
+ """
848
+ Simplified model-processing interface.
849
+
850
+ Args:
851
+ file_path: path to the model file
852
+ user_prompt: user prompt text
853
+ model_weights_path: path to the model weights
854
+ output_dir: output directory
855
+
856
+ Returns:
857
+ The processing result dict
858
+ """
859
+ api = MagicArticulateAPI(
860
+ model_weights_path=model_weights_path,
861
+ session_base_dir=output_dir or "temp_sessions"
862
+ )
863
+
864
+ result = api.process_uploaded_model(
865
+ file_path=file_path,
866
+ user_prompt=user_prompt
867
+ )
868
+
869
+ return result
870
+
871
+ if __name__ == "__main__":
872
+ import argparse
873
+
874
+ parser = argparse.ArgumentParser(description="MagicArticulate API test")
875
+ parser.add_argument("--input", required=True, help="输入模型文件路径")
876
+ parser.add_argument("--prompt", default="", help="用户提示词")
877
+ parser.add_argument("--weights", help="模型权重路径")
878
+ parser.add_argument("--output", default="api_outputs", help="输出目录")
879
+
880
+ args = parser.parse_args()
881
+
882
+ # Smoke-test the API
883
+ result = process_model_file(
884
+ file_path=args.input,
885
+ user_prompt=args.prompt,
886
+ model_weights_path=args.weights,
887
+ output_dir=args.output
888
+ )
889
+
890
+ if result['success']:
891
+ print("✅ 处理成功!")
892
+ print(f"会话ID: {result['session_id']}")
893
+ print(f"处理时间: {result['processing_time']:.2f}秒")
894
+ print(f"关节数量: {result['skeleton_data']['joint_count']}")
895
+ print(f"骨骼数量: {result['skeleton_data']['bone_count']}")
896
+ print(f"输出文件: {len(result['output_files'])} 个")
897
+ else:
898
+ print("❌ 处理失败!")
899
+ print(f"错误: {result['error']}")
requirements.txt ADDED
@@ -0,0 +1,61 @@
1
+ # HuggingFace Space requirements - fixed version
2
+ # Resolves the dependency-conflict issues
3
+
4
+ # Required for the HuggingFace Space
5
+ gradio==5.36.2
6
+ spaces
7
+ huggingface-hub
8
+
9
+ # Note: the HF Space installs a suitable PyTorch version automatically
10
+
11
+ # Acceleration and transformers
12
+ accelerate==0.28.0
13
+ transformers==4.39.3
14
+ flash-attn>=2.3.0
15
+
16
+ # 3D processing libraries (core functionality)
17
+ trimesh==4.2.3
18
+ scikit-image==0.21.0
19
+
20
+ # Scientific computing
21
+ numpy==1.26.4
22
+ scipy==1.11.4
23
+
24
+ # Image processing
25
+ Pillow==10.0.1
26
+ matplotlib==3.7.2
27
+ opencv-python==4.8.1.78
28
+
29
+ # 3D-related
30
+ imageio==2.31.5
31
+ networkx==3.2.1
32
+
33
+ # Utilities
34
+ tqdm==4.66.1
35
+ scikit-learn==1.3.2
36
+
37
+ # Web features (user-upload support)
38
+ fastapi>=0.115.2
39
+ pydantic>=2.5.0
40
+ python-multipart>=0.0.9 # 修复: 使用与gradio兼容的版本
41
+ aiofiles>=23.2.1
42
+
43
+ # File and data handling
44
+ orjson==3.9.10
45
+ python-dotenv==1.0.0
46
+ pyyaml==6.0.1
47
+
48
+ # Performance and monitoring
49
+ psutil==5.9.6
50
+ loguru==0.7.2
51
+
52
+ # Michelangelo dependencies
53
+ omegaconf==2.3.0
54
+ einops==0.6.0
55
+ xatlas==0.0.7
56
+
57
+ # MagicArticulate dependencies
58
+ mesh2sdf==1.1.0
59
+ pyrender==0.1.45
60
+ pytorch-lightning==1.9.3
61
+ pythreejs==2.4.2
skeleton_models/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ """
2
+ MagicArticulate Skeleton Models Package
3
+ Contains the skeleton generation model and shape optimization functionality
4
+ """
5
+
6
+ from .skeletongen import SkeletonGPT
7
+ from .shape_opt import ShapeOPTConfig
8
+
9
+ __all__ = ['SkeletonGPT', 'ShapeOPTConfig']
skeleton_models/shape_opt.py ADDED
@@ -0,0 +1,406 @@
1
+ # Modified from https://github.com/buaacyw/MeshAnything
2
+ from transformers import AutoModelForCausalLM, AutoConfig, OPTConfig
3
+ from transformers.models.opt.modeling_opt import OPTForCausalLM, OPTModel, OPTDecoder, OPTLearnedPositionalEmbedding, OPTDecoderLayer
4
+ from typing import List, Optional, Tuple, Union
5
+ from transformers.modeling_outputs import (
6
+ CausalLMOutputWithPast,
7
+ )
8
+ import torch
9
+ from torch import nn
10
+ from torch.nn import CrossEntropyLoss
11
+ from transformers.utils import replace_return_docstrings
12
+ from transformers.modeling_outputs import BaseModelOutputWithPast
13
+
14
+ class ShapeOPTConfig(OPTConfig):
15
+ model_type = "shape_opt"
16
+
17
+ class ShapeOPT(OPTForCausalLM):
18
+ config_class = ShapeOPTConfig
19
+ def __init__(self, config: ShapeOPTConfig):
20
+ super(OPTForCausalLM, self).__init__(config)
21
+ self.model = ShapeOPTModel(config)
22
+ self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False)
23
+ # Initialize weights and apply final processing
24
+ self.post_init()
25
+
26
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class="OPTConfig")
27
+ def forward(
28
+ self,
29
+ input_ids: torch.LongTensor = None,
30
+ bone_ids: torch.LongTensor = None,
31
+ attention_mask: Optional[torch.Tensor] = None,
32
+ head_mask: Optional[torch.Tensor] = None,
33
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
34
+ inputs_embeds: Optional[torch.FloatTensor] = None,
35
+ labels: Optional[torch.LongTensor] = None,
36
+ use_cache: Optional[bool] = None,
37
+ output_attentions: Optional[bool] = None,
38
+ output_hidden_states: Optional[bool] = None,
39
+ return_dict: Optional[bool] = None,
40
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
41
+ r"""
42
+ Args:
43
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
44
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
45
+ provide it.
46
+
47
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
48
+ [`PreTrainedTokenizer.__call__`] for details.
49
+
50
+ [What are input IDs?](../glossary#input-ids)
51
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
52
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
53
+
54
+ - 1 for tokens that are **not masked**,
55
+ - 0 for tokens that are **masked**.
56
+
57
+ [What are attention masks?](../glossary#attention-mask)
58
+ head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
59
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
60
+
61
+ - 1 indicates the head is **not masked**,
62
+ - 0 indicates the head is **masked**.
63
+
64
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
65
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
66
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
67
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
68
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
69
+
70
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
71
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
72
+
73
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
74
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
75
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
76
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
77
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
78
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
79
+ than the model's internal embedding lookup matrix.
80
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
81
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
82
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
83
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
84
+ use_cache (`bool`, *optional*):
85
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
86
+ (see `past_key_values`).
87
+ output_attentions (`bool`, *optional*):
88
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
89
+ returned tensors for more detail.
90
+ output_hidden_states (`bool`, *optional*):
91
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
92
+ for more detail.
93
+ return_dict (`bool`, *optional*):
94
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
95
+
96
+ Returns:
97
+
98
+ Example:
99
+
100
+ ```python
101
+ >>> from transformers import AutoTokenizer, OPTForCausalLM
102
+
103
+ >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
104
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
105
+
106
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
107
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
108
+
109
+ >>> # Generate
110
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
111
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
112
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
113
+ ```"""
114
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
115
+ output_hidden_states = (
116
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
117
+ )
118
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
119
+
120
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
121
+ outputs = self.model.decoder(
122
+ input_ids = input_ids,
123
+ bone_ids = bone_ids,
124
+ attention_mask=attention_mask,
125
+ head_mask=head_mask,
126
+ past_key_values=past_key_values,
127
+ inputs_embeds=inputs_embeds,
128
+ use_cache=use_cache,
129
+ output_attentions=output_attentions,
130
+ output_hidden_states=output_hidden_states,
131
+ return_dict=return_dict,
132
+ )
133
+
134
+ logits = self.lm_head(outputs[0]).contiguous()
135
+
136
+ loss = None
137
+ if labels is not None:
138
+ # move labels to correct device to enable model parallelism
139
+ labels = labels.to(logits.device)
140
+ # Shift so that tokens < n predict n
141
+ shift_logits = logits[..., :-1, :].contiguous()
142
+ shift_labels = labels[..., 1:].contiguous()
143
+ # Flatten the tokens
144
+ loss_fct = CrossEntropyLoss()
145
+ loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))
146
+
147
+ if not return_dict:
148
+ output = (logits,) + outputs[1:]
149
+ return (loss,) + output if loss is not None else output
150
+
151
+ return CausalLMOutputWithPast(
152
+ loss=loss,
153
+ logits=logits,
154
+ past_key_values=outputs.past_key_values,
155
+ hidden_states=outputs.hidden_states,
156
+ attentions=outputs.attentions,
157
+ )
158
+
159
+ class ShapeOPTModel(OPTModel):
160
+ config_class = ShapeOPTConfig
161
+ def __init__(self, config: ShapeOPTConfig):
162
+ super(OPTModel,self).__init__(config)
163
+ self.decoder = ShapeOPTDecoder(config)
164
+ # Initialize weights and apply final processing
165
+ self.post_init()
166
+
167
+ class ShapeOPTDecoder(OPTDecoder):
168
+ config_class = ShapeOPTConfig
169
+ def __init__(self, config: ShapeOPTConfig):
170
+ super(OPTDecoder,self).__init__(config)
171
+ self.config = config
172
+ self.dropout = config.dropout
173
+ self.layerdrop = config.layerdrop
174
+ self.padding_idx = config.pad_token_id
175
+ self.vocab_size = config.vocab_size
176
+ assert config.word_embed_proj_dim == config.hidden_size
177
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx)
178
+ self.hidden_size = config.hidden_size
179
+ self.word_embed_proj_dim = config.word_embed_proj_dim
180
+ self.n_discrete_size = config.n_discrete_size
181
+
182
+ self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)
183
+ self.token_embed_positions = OPTBonePositionalEmbedding(config.bone_per_token+3, config.word_embed_proj_dim)
184
+
185
+ self.bone_per_token = config.bone_per_token
186
+ self.cond_length = config.cond_length
187
+ self.cond_embed = nn.Embedding(2, config.word_embed_proj_dim)
188
+ # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
189
+ # with checkpoints that have been fine-tuned before transformers v4.20.1
190
+ # see https://github.com/facebookresearch/metaseq/pull/164
191
+ if config.do_layer_norm_before and not config._remove_final_layer_norm:
192
+ self.final_layer_norm = nn.LayerNorm(
193
+ config.hidden_size, elementwise_affine=config.layer_norm_elementwise_affine
194
+ )
195
+ else:
196
+ self.final_layer_norm = None
197
+
198
+ self.layers = nn.ModuleList([OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)])
199
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
200
+
201
+ self.gradient_checkpointing = False
202
+ # Initialize weights and apply final processing
203
+ self.post_init()
204
+
205
+ def forward(
206
+ self,
207
+ input_ids: torch.LongTensor = None,
208
+ bone_ids: torch.LongTensor = None,
209
+ attention_mask: Optional[torch.Tensor] = None,
210
+ head_mask: Optional[torch.Tensor] = None,
211
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
212
+ inputs_embeds: Optional[torch.FloatTensor] = None,
213
+ use_cache: Optional[bool] = None,
214
+ output_attentions: Optional[bool] = None,
215
+ output_hidden_states: Optional[bool] = None,
216
+ return_dict: Optional[bool] = None,
217
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
218
+ r"""
219
+ Args:
220
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
221
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
222
+ provide it.
223
+
224
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
225
+ [`PreTrainedTokenizer.__call__`] for details.
226
+
227
+ [What are input IDs?](../glossary#input-ids)
228
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
229
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
230
+
231
+ - 1 for tokens that are **not masked**,
232
+ - 0 for tokens that are **masked**.
233
+
234
+ [What are attention masks?](../glossary#attention-mask)
235
+ head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
236
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
237
+
238
+ - 1 indicates the head is **not masked**,
239
+ - 0 indicates the head is **masked**.
240
+
241
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
242
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
243
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
244
+
245
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
246
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
247
+
248
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
249
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
250
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
251
+
252
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
253
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
254
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
255
+ than the model's internal embedding lookup matrix.
256
+ output_attentions (`bool`, *optional*):
257
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
258
+ returned tensors for more detail.
259
+ output_hidden_states (`bool`, *optional*):
260
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
261
+ for more detail.
262
+ return_dict (`bool`, *optional*):
263
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
264
+ """
265
+ # OPT Decoder
266
+ # print("used my Trans")
267
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
268
+ output_hidden_states = (
269
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
270
+ )
271
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
272
+
273
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
274
+ # Transformer Decoder
275
+ if input_ids is not None and inputs_embeds is not None: # when training
276
+ pass
277
+ elif input_ids is not None: # when inference
278
+ assert not self.training
279
+ input_shape = input_ids.size()
280
+ input_ids = input_ids.view(-1, input_shape[-1])
281
+ inputs_embeds = self.embed_tokens(input_ids)
282
+ bone_embeds = self.token_embed_positions(attention_mask[:, self.cond_length:], bone_ids, input_ids,
283
+ self.bone_per_token)
284
+ inputs_embeds += bone_embeds
285
+ cond_embed_query = torch.ones((inputs_embeds.shape[0], inputs_embeds.shape[1]), device=inputs_embeds.device,
286
+ dtype=inputs_embeds.dtype).long()
287
+ inputs_embeds = inputs_embeds + self.cond_embed(cond_embed_query)
288
+
289
+ elif inputs_embeds is not None: # when generate first skeleton token
290
+ assert not self.training
291
+ total_length = inputs_embeds.shape[1]
292
+ cond_embed_query = torch.zeros((inputs_embeds.shape[0], total_length), device=inputs_embeds.device,
293
+ dtype=inputs_embeds.dtype).long()
294
+ inputs_embeds = inputs_embeds + self.cond_embed(cond_embed_query)
295
+ else:
296
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
297
+
298
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
299
+ # embed positions
300
+ if self._use_flash_attention_2:
301
+ # 2d mask is passed through the layers
302
+ assert attention_mask is not None
303
+ causal_attention_mask = attention_mask if 0 in attention_mask else None
304
+ else:
305
+ raise ValueError("Only flash_attention_2 is supported")
306
+
307
+ pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
308
+
309
+ hidden_states = inputs_embeds + pos_embeds
310
+
311
+ # decoder layers
312
+ all_hidden_states = () if output_hidden_states else None
313
+ all_self_attns = () if output_attentions else None
314
+ next_decoder_cache = () if use_cache else None
315
+
316
+ # check if head_mask has a correct number of layers specified if desired
317
+ for attn_mask, mask_name in zip([head_mask], ["head_mask"]):
318
+ if attn_mask is not None:
319
+ if attn_mask.size()[0] != (len(self.layers)):
320
+ raise ValueError(
321
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
322
+ f" {head_mask.size()[0]}."
323
+ )
324
+
325
+ for idx, decoder_layer in enumerate(self.layers):
326
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
327
+ if output_hidden_states:
328
+ all_hidden_states += (hidden_states,)
329
+
330
+ if self.training:
331
+ dropout_probability = torch.rand([])
332
+ if dropout_probability < self.layerdrop:
333
+ continue
334
+
335
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
336
+
337
+ if self.gradient_checkpointing and self.training:
338
+ layer_outputs = self._gradient_checkpointing_func(
339
+ decoder_layer.__call__,
340
+ hidden_states,
341
+ causal_attention_mask,
342
+ head_mask[idx] if head_mask is not None else None,
343
+ None,
344
+ output_attentions,
345
+ use_cache,
346
+ )
347
+ else:
348
+ layer_outputs = decoder_layer(
349
+ hidden_states,
350
+ attention_mask=causal_attention_mask,
351
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
352
+ past_key_value=past_key_value,
353
+ output_attentions=output_attentions,
354
+ use_cache=use_cache,
355
+ )
356
+
357
+ hidden_states = layer_outputs[0]
358
+
359
+ if use_cache:
360
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
361
+
362
+ if output_attentions:
363
+ all_self_attns += (layer_outputs[1],)
364
+
365
+ if self.final_layer_norm is not None:
366
+ hidden_states = self.final_layer_norm(hidden_states)
367
+
368
+ # add hidden states from the last decoder layer
369
+ if output_hidden_states:
370
+ all_hidden_states += (hidden_states,)
371
+
372
+ next_cache = next_decoder_cache if use_cache else None
373
+ if not return_dict:
374
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
375
+ return BaseModelOutputWithPast(
376
+ last_hidden_state=hidden_states,
377
+ past_key_values=next_cache,
378
+ hidden_states=all_hidden_states,
379
+ attentions=all_self_attns,
380
+ )
381
+
382
+ class OPTBonePositionalEmbedding(nn.Embedding):
383
+ """
384
+ This module learns positional embeddings up to a fixed maximum size.
385
+ """
386
+
387
+ def __init__(self, num_embeddings: int, embedding_dim: int):
388
+ super().__init__(num_embeddings, embedding_dim)
389
+
390
+ def forward(self, attention_mask=None, bone_ids = None, input_ids = None, bone_per_token = None):
391
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
392
+ if bone_ids is not None:
393
+ return super().forward(bone_ids)
394
+
395
+ assert input_ids.shape[1] == 1
396
+ idx_in_extra = torch.isin(input_ids, torch.LongTensor([0, 1, 2]).to(input_ids.device))
397
+ cur_ids = input_ids.clone().detach()
398
+
399
+ cur_index = (attention_mask.sum(dim=1, keepdim=True) - 2) % bone_per_token + 3
400
+ cur_ids[~idx_in_extra] = cur_index[~idx_in_extra]
401
+
402
+ return super().forward(cur_ids)
403
+
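+ # A worked example of the index arithmetic above (a hedged reading, not from
+ # upstream docs): the embedding table has bone_per_token + 3 = 9 rows; rows
+ # 0-2 are reserved for the bos/eos/pad tokens (kept via idx_in_extra), and
+ # rows 3-8 cycle through the six coordinate slots of the current bone:
+ #   import torch
+ #   attention_mask = torch.ones(1, 5, dtype=torch.long)  # bos + 4 coords attended
+ #   (attention_mask.sum(dim=1, keepdim=True) - 2) % 6 + 3  # -> tensor([[6]])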
404
+ AutoConfig.register("shape_opt", ShapeOPTConfig)
405
+ AutoModelForCausalLM.register(ShapeOPTConfig, ShapeOPT)
406
+
skeleton_models/skeletongen.py ADDED
@@ -0,0 +1,198 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+ from torch import nn
16
+ from transformers import AutoModelForCausalLM
17
+ from third_party.Michelangelo.encode import load_model
18
+ from skeleton_models.shape_opt import ShapeOPTConfig
19
+
20
+ def undiscretize(t, low, high, num_discrete):
21
+ assert (t >= 0).all() and (t <= num_discrete-1).all()
22
+ assert high > low
23
+ t = t.float()
24
+ t /= num_discrete
25
+ t = t * (high - low) + low
26
+ assert (t < high).all() and (t >= low).all()
27
+ return t
28
+
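+ # Sanity check of undiscretize (values follow from the formula above;
+ # num_discrete = 128 is an assumed setting, not a confirmed default):
+ #   undiscretize(torch.tensor([0, 64, 127]), -0.5, 0.5, 128)
+ #   # -> tensor([-0.5000, 0.0000, 0.4922]); token k maps to k/128 - 0.5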
29
+ class SkeletonGPT(nn.Module):
30
+ def __init__(self, args):
31
+ super().__init__()
32
+
33
+ self.args = args
34
+ self.point_encoder = load_model()
35
+
36
+ self.cond_length = 257
37
+ self.cond_dim = 768
38
+
39
+ self.n_discrete_size = args.n_discrete_size
40
+
41
+ self.bone_per_token = 6 # 6 tokens per bone: 2 joints x 3 coordinates
42
+ self.max_length = int(args.n_max_bones * self.bone_per_token + 2 + self.cond_length)
43
+ self.pad_id = -1
44
+
45
+ self.coor_continuous_range = (-0.5, 0.5)
46
+
47
+ vocab_size = self.n_discrete_size + 3 # 3 for bos, eos, pad
48
+ self.config = ShapeOPTConfig.from_pretrained(
49
+ args.llm,
50
+ n_positions=self.max_length,
51
+ max_position_embeddings=self.max_length,
52
+ vocab_size = vocab_size,
53
+ _attn_implementation="flash_attention_2"
54
+ )
55
+
56
+ self.bos_token_id = 0
57
+ self.eos_token_id = 1
58
+ self.pad_token_id = 2
59
+
60
+ self.config.bos_token_id = self.bos_token_id
61
+ self.config.eos_token_id = self.eos_token_id
62
+ self.config.pad_token_id = self.pad_token_id
63
+ self.config._attn_implementation = "flash_attention_2"
64
+ self.config.n_discrete_size = self.n_discrete_size
65
+ self.config.bone_per_token = self.bone_per_token
66
+ self.config.cond_length = self.cond_length
67
+
68
+ self.config.word_embed_proj_dim = self.config.hidden_size # 1024
69
+
70
+
71
+ self.transformer = AutoModelForCausalLM.from_config(
72
+ config=self.config, attn_implementation="flash_attention_2")
73
+
74
+ self.cond_head_proj = nn.Linear(self.cond_dim, self.config.word_embed_proj_dim)
75
+ self.cond_proj = nn.Linear(self.cond_dim, self.config.word_embed_proj_dim)
76
+
77
+ self.eval()
78
+
79
+ def detokenize(self, input_ids):
80
+ # input_ids: torch.Tensor of shape (batch_size, seq_length)
81
+ batch_size = input_ids.size(0)
82
+
83
+ continuous_coors_list = []
84
+ num_bones_list = []
85
+
86
+ for i in range(batch_size):
87
+ cur_ids = input_ids[i] # Shape: (seq_length,)
88
+
89
+ # Remove padding tokens
90
+ cur_ids = cur_ids[cur_ids != self.pad_id] # Shape: (effective_seq_length,)
91
+
92
+ # Check if length is a multiple of 6 (2 joints * 3 coordinates)
93
+ if cur_ids.numel() % 6 != 0:
94
+ return None
95
+ # raise ValueError(f"Invalid length of input_ids in sample {i}. It should be a multiple of 6.")
96
+
97
+ num_bones = cur_ids.numel() // 6
98
+ num_bones_list.append(num_bones)
99
+
100
+ # Reshape into (num_bones, 6)
101
+ bone_coords = cur_ids.view(num_bones, 6) # Shape: (num_bones, 6)
102
+
103
+ # Undiscretize the coordinates
104
+ # Initialize tensor to hold bone coordinates
105
+ bones_coors = torch.zeros((num_bones, 2, 3), dtype=torch.float16, device=cur_ids.device)
106
+
107
+ for j in range(num_bones):
108
+ bone_coord = bone_coords[j] # Shape: (6,)
109
+
110
+ # Split into two joints
111
+ joint1_ids = bone_coord[:3]
112
+ joint2_ids = bone_coord[3:]
113
+
114
+ # Undiscretize joint coordinates
115
+ joint1_coords = undiscretize(joint1_ids, self.coor_continuous_range[0], self.coor_continuous_range[1], self.n_discrete_size)
116
+ joint2_coords = undiscretize(joint2_ids, self.coor_continuous_range[0], self.coor_continuous_range[1], self.n_discrete_size)
117
+
118
+ # Assign to bones_coors
119
+ bones_coors[j, 0, :] = joint1_coords
120
+ bones_coors[j, 1, :] = joint2_coords
121
+
122
+ continuous_coors_list.append(bones_coors)
123
+
124
+ max_num_bones = max(num_bones_list)
125
+
126
+ # Initialize the continuous_coors tensor with NaNs
127
+ continuous_coors = torch.full(
128
+ (batch_size, max_num_bones, 2, 3),
129
+ float('nan'),
130
+ dtype=torch.float16,
131
+ device=input_ids.device
132
+ )
133
+
134
+ # Place the bones_coors into continuous_coors
135
+ for i in range(batch_size):
136
+ num_bones = num_bones_list[i]
137
+ continuous_coors[i, :num_bones, :, :] = continuous_coors_list[i]
138
+
139
+ return continuous_coors # Shape: (batch_size, max_num_bones, 2, 3)
140
+
141
+
142
+ # def forward(self, data_dict: dict, is_eval: bool = False) -> dict:
143
+ # return self.generate(data_dict)
144
+
145
+ def process_point_feature(self, point_feature):
146
+
147
+ encode_feature = torch.zeros(self.args.batchsize_per_gpu, self.cond_length, self.config.word_embed_proj_dim,
148
+ device=self.cond_head_proj.weight.device, dtype=self.cond_head_proj.weight.dtype)
149
+ encode_feature[:, 0] = self.cond_head_proj(point_feature[:, 0])
150
+ shape_latents = self.point_encoder.to_shape_latents(point_feature[:, 1:])
151
+
152
+ encode_feature[:, 1:] = self.cond_proj(shape_latents)
153
+
154
+ return encode_feature
155
+
156
+ @torch.no_grad()
157
+ def generate(self, data_dict) -> dict:
158
+
159
+ point_feature = self.point_encoder.encode_latents(data_dict["pc_normal"])
160
+ processed_point_feature = self.process_point_feature(point_feature=point_feature)
161
+ generate_length = self.max_length - self.cond_length
162
+ net_device = next(self.parameters()).device
163
+ outputs = torch.ones(self.args.batchsize_per_gpu, generate_length).long().to(net_device) * self.eos_token_id
164
+ # batch x ntokens
165
+ if self.args.num_beams is not None and "pc_normal" in data_dict:
166
+ results = self.transformer.generate(
167
+ inputs_embeds=processed_point_feature,
168
+ max_new_tokens=generate_length, # all bones plus bos/eos
169
+ num_beams=self.args.num_beams,
170
+ bos_token_id=self.bos_token_id,
171
+ eos_token_id=self.eos_token_id,
172
+ pad_token_id=self.pad_token_id,
173
+ )
174
+ else:
175
+ results = self.transformer.generate(
176
+ inputs_embeds = processed_point_feature,
177
+ max_new_tokens = generate_length, # all bones plus bos/eos
178
+ do_sample=True,
179
+ top_k=50,
180
+ top_p=0.95,
181
+ bos_token_id = self.bos_token_id,
182
+ eos_token_id = self.eos_token_id,
183
+ pad_token_id = self.pad_token_id,
184
+ )
185
+ assert results.shape[1] <= generate_length # B x ID bos is not included since it's predicted
186
+ outputs[:, :results.shape[1]] = results
187
+ # batch x ntokens ====> batch x ntokens x D
188
+ outputs = outputs[:, 1: -1] # eos and bos removed
189
+
190
+ outputs[outputs == self.bos_token_id] = self.pad_id
191
+ outputs[outputs == self.eos_token_id] = self.pad_id
192
+ outputs[outputs == self.pad_token_id] = self.pad_id
193
+
194
+ outputs[outputs != self.pad_id] -= 3
195
+
196
+ gen_joints = self.detokenize(outputs)
197
+
198
+ return gen_joints
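For intuition on the token budget in generate(): each bone costs bone_per_token = 6 coordinate tokens, plus bos/eos, plus the 257 shape-conditioning tokens. A small arithmetic sketch (n_max_bones = 100 is an assumed value, not a confirmed default):

    n_max_bones, bone_per_token, cond_length = 100, 6, 257
    max_length = n_max_bones * bone_per_token + 2 + cond_length  # 859 positions
    generate_length = max_length - cond_length                   # up to 602 new tokens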
src/config.py ADDED
@@ -0,0 +1,90 @@
1
+ """
2
+ Configuration file
3
+ """
4
+
5
+ import os
6
+ from typing import Dict, Any
7
+
8
+ # File limits
9
+ MAX_FILE_SIZE_MB = 50
10
+ SUPPORTED_FORMATS = ['.obj', '.glb', '.ply', '.stl']
11
+
12
+ # Processing parameters
13
+ DEFAULT_PROCESSING_PARAMS = {
14
+ 'input_pc_num': 8192,
15
+ 'confidence_threshold': 0.8,
16
+ 'generate_preview': True,
17
+ 'timeout_seconds': 120,
18
+ }
19
+
20
+ # Demo prompt templates
21
+ DEMO_PROMPTS = {
22
+ 'human': "realistic human skeleton for walking and animation",
23
+ 'animal': "four-legged animal with spine and tail bones for natural movement",
24
+ 'robot': "mechanical robot with joint articulation for industrial movements",
25
+ 'bird': "bird skeleton with wing bones for flight animation",
26
+ 'generic': "articulated skeleton suitable for animation"
27
+ }
28
+
29
+ # Example model descriptions
30
+ EXAMPLE_MODELS = [
31
+ {
32
+ 'name': 'Boy Character',
33
+ 'file': 'boy.obj',
34
+ 'prompt': DEMO_PROMPTS['human'],
35
+ 'description': 'Human character suitable for walk cycle and basic animations'
36
+ },
37
+ {
38
+ 'name': 'Dog Model',
39
+ 'file': 'dog.obj',
40
+ 'prompt': DEMO_PROMPTS['animal'],
41
+ 'description': 'Quadruped animal with natural bone structure'
42
+ },
43
+ {
44
+ 'name': 'Bird Model',
45
+ 'file': 'bird.obj',
46
+ 'prompt': DEMO_PROMPTS['bird'],
47
+ 'description': 'Bird with wing bones for flight animations'
48
+ },
49
+ {
50
+ 'name': 'Robot/Mech',
51
+ 'file': 'ironman.obj',
52
+ 'prompt': DEMO_PROMPTS['robot'],
53
+ 'description': 'Mechanical character with joint-based movement'
54
+ }
55
+ ]
56
+
57
+ # UI configuration
58
+ UI_CONFIG = {
59
+ 'title': '🎯 MagicArticulate MVP',
60
+ 'description': """
61
+ AI-powered 3D model articulation using skeletal generation.
62
+ Upload a 3D model and get an automatically generated skeleton for animation.
63
+ """,
64
+ 'theme': 'soft',
65
+ 'show_tips': True,
66
+ 'max_examples': 4
67
+ }
68
+
69
+ # Performance configuration
70
+ PERFORMANCE_CONFIG = {
71
+ 'use_gpu': True,
72
+ 'mixed_precision': 'fp16',
73
+ 'batch_size': 1,
74
+ 'max_concurrent_requests': 2,
75
+ 'cleanup_temp_files': True
76
+ }
77
+
78
+ def get_config() -> Dict[str, Any]:
79
+ """获取完整配置"""
80
+ return {
81
+ 'file_limits': {
82
+ 'max_size_mb': MAX_FILE_SIZE_MB,
83
+ 'supported_formats': SUPPORTED_FORMATS
84
+ },
85
+ 'processing': DEFAULT_PROCESSING_PARAMS,
86
+ 'prompts': DEMO_PROMPTS,
87
+ 'examples': EXAMPLE_MODELS,
88
+ 'ui': UI_CONFIG,
89
+ 'performance': PERFORMANCE_CONFIG
90
+ }
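A minimal usage sketch of get_config():

    from src.config import get_config

    cfg = get_config()
    print(cfg['file_limits']['supported_formats'])  # ['.obj', '.glb', '.ply', '.stl']
    print(cfg['processing']['input_pc_num'])        # 8192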
src/enhanced_magic_wrapper.py ADDED
@@ -0,0 +1,301 @@
1
+ """
2
+ Enhanced MagicArticulate wrapper
3
+ Integrates MagicArticulate-Plus user-upload support
4
+ Built on our finalized articulate_api.py
5
+ """
6
+
7
+ import os
8
+ import sys
9
+ import time
10
+ import logging
11
+ import tempfile
12
+ from pathlib import Path
13
+ from typing import Optional, Dict, Any, Tuple, List
14
+
15
+ # Add the required paths
16
+ parent_dir = os.path.join(os.path.dirname(__file__), '..')
17
+ sys.path.append(parent_dir) # add the mvp-space root directory
18
+ sys.path.append(os.path.join(parent_dir, 'magic_articulate_plus'))
19
+
20
+ # Verbose debug information
21
+ print(f"🔍 DEBUG: Current working directory: {os.getcwd()}")
22
+ print(f"🔍 DEBUG: Script directory: {os.path.dirname(__file__)}")
23
+ print(f"🔍 DEBUG: Parent directory: {parent_dir}")
24
+ print(f"🔍 DEBUG: Python path includes:")
25
+ for i, path in enumerate(sys.path):
26
+ print(f" {i}: {path}")
27
+
28
+ # Check that the key directories exist
29
+ utils_dir = os.path.join(parent_dir, 'utils')
30
+ magic_plus_dir = os.path.join(parent_dir, 'magic_articulate_plus')
31
+ skeleton_dir = os.path.join(parent_dir, 'skeleton_models')
32
+
33
+ print(f"🔍 DEBUG: Directory existence:")
34
+ print(f" utils directory exists: {os.path.exists(utils_dir)}")
35
+ print(f" magic_articulate_plus directory exists: {os.path.exists(magic_plus_dir)}")
36
+ print(f" skeleton_models directory exists: {os.path.exists(skeleton_dir)}")
37
+
38
+ if os.path.exists(utils_dir):
39
+ print(f"🔍 DEBUG: utils directory contents: {os.listdir(utils_dir)}")
40
+ if os.path.exists(magic_plus_dir):
41
+ print(f"🔍 DEBUG: magic_articulate_plus directory contents: {os.listdir(magic_plus_dir)}")
42
+
43
+ # Import the finalized MagicArticulate-Plus functionality
44
+ try:
45
+ print("🔍 DEBUG: Attempting to import magic_articulate_plus.articulate_api...")
46
+ from magic_articulate_plus.articulate_api import (
47
+ MagicArticulateAPI,
48
+ ModelValidator,
49
+ process_model_file
50
+ )
51
+ print("✅ DEBUG: Successfully imported MagicArticulate-Plus components")
52
+ ENHANCED_AVAILABLE = True
53
+ except ImportError as e:
54
+ print(f"❌ DEBUG: Import failed with error: {e}")
55
+ print(f"❌ DEBUG: Error type: {type(e)}")
56
+ import traceback
57
+ print(f"❌ DEBUG: Full traceback:")
58
+ traceback.print_exc()
59
+ logging.warning(f"MagicArticulate-Plus not available: {e}")
60
+ ENHANCED_AVAILABLE = False
61
+
62
+ # Configure logging
63
+ logging.basicConfig(level=logging.INFO)
64
+ logger = logging.getLogger(__name__)
65
+
66
+ class EnhancedMagicWrapper:
67
+ """
68
+ Enhanced MagicArticulate wrapper.
69
+ Supports arbitrary user-uploaded 3D model files.
70
+ """
71
+
72
+ def __init__(self, model_weights_path: Optional[str] = None):
73
+ # If no weights path is given, use the default spatial model (matches demo.py hier_order=False)
74
+ if model_weights_path is None:
75
+ model_weights_path = "skeleton_ckpt/checkpoint_trainonv2_spatial.pth"
76
+
77
+ self.model_weights_path = model_weights_path
78
+ self.initialized = False
79
+
80
+ if ENHANCED_AVAILABLE:
81
+ # Use our finalized MagicArticulate-Plus API
82
+ self.api = MagicArticulateAPI(
83
+ model_weights_path=model_weights_path,
84
+ device="auto",
85
+ session_base_dir="hf_user_sessions"
86
+ )
87
+ logger.info(f"✅ 使用增强版MagicArticulate-Plus API (weights: {model_weights_path})")
88
+ else:
89
+ # Fall back to the original wrapper
90
+ logger.error("❌ MagicArticulate-Plus不可用,请检查集成")
91
+ self.api = None
92
+
93
+ def initialize(self) -> bool:
94
+ """初始化API"""
95
+ try:
96
+ if not ENHANCED_AVAILABLE:
97
+ logger.error("增强版API不可用")
98
+ return False
99
+
100
+ logger.info("🚀 初始化增强版MagicArticulate...")
101
+
102
+ # Use our finalized initialization logic
103
+ success = self.api.initialize_model()
104
+
105
+ if success:
106
+ self.initialized = True
107
+ logger.info("✅ 增强版MagicArticulate初始化成功")
108
+ else:
109
+ logger.error("❌ 增强版MagicArticulate初始化失败")
110
+
111
+ return success
112
+
113
+ except Exception as e:
114
+ logger.error(f"💥 初始化失败: {str(e)}")
115
+ return False
116
+
117
+ def validate_uploaded_file(self, file_path: str) -> Tuple[bool, str, Dict[str, Any]]:
118
+ """
119
+ Validate a user-uploaded file.
120
+ Uses our finalized ModelValidator.
121
+ """
122
+ try:
123
+ if not ENHANCED_AVAILABLE:
124
+ return False, "增强功能不可用", {}
125
+
126
+ # Use our finalized validation logic
127
+ is_valid, error_msg, model_info = ModelValidator.validate_file(file_path)
128
+
129
+ if is_valid:
130
+ logger.info(f"✅ 文件验证通过: {model_info.get('file_name', 'Unknown')}")
131
+ else:
132
+ logger.warning(f"⚠️ 文件验证失败: {error_msg}")
133
+
134
+ return is_valid, error_msg, model_info
135
+
136
+ except Exception as e:
137
+ error_msg = f"文件验证过程出错: {str(e)}"
138
+ logger.error(error_msg)
139
+ return False, error_msg, {}
140
+
141
+ def process_3d_model(self,
142
+ model_file_path: str,
143
+ prompt: str = "",
144
+ confidence_threshold: float = 0.8,
145
+ generate_preview: bool = True,
146
+ **kwargs) -> Dict[str, Any]:
147
+ """
148
+ Process a 3D model, with user uploads supported.
149
+ Uses our finalized processing pipeline.
150
+ """
151
+ try:
152
+ if not self.initialized:
153
+ return {
154
+ 'success': False,
155
+ 'error': 'API not initialized',
156
+ 'skeleton_data': None,
157
+ 'output_files': None,
158
+ 'processing_info': None
159
+ }
160
+
161
+ if not ENHANCED_AVAILABLE:
162
+ return {
163
+ 'success': False,
164
+ 'error': 'Enhanced features unavailable',
165
+ 'skeleton_data': None,
166
+ 'output_files': None,
167
+ 'processing_info': None
168
+ }
169
+
170
+ logger.info(f"🔄 开始处理用户上传的模型: {model_file_path}")
171
+
172
+ # Validate the file first
173
+ is_valid, error_msg, model_info = self.validate_uploaded_file(model_file_path)
174
+ if not is_valid:
175
+ return {
176
+ 'success': False,
177
+ 'error': f'File validation failed: {error_msg}',
178
+ 'skeleton_data': None,
179
+ 'output_files': None,
180
+ 'processing_info': model_info
181
+ }
182
+
183
+ # Prepare processing options
184
+ processing_options = {
185
+ 'auto_repair': kwargs.get('auto_repair', True),
186
+ 'target_faces': kwargs.get('target_faces', 10000),
187
+ 'confidence_threshold': confidence_threshold,
188
+ 'generate_preview': generate_preview
189
+ }
190
+
191
+ # Use our finalized processing API
192
+ result = self.api.process_uploaded_model(
193
+ file_path=model_file_path,
194
+ user_prompt=prompt,
195
+ processing_options=processing_options
196
+ )
197
+
198
+ # Convert to the format the MVP expects
199
+ if result['success']:
200
+ logger.info("✅ 模型处理完成")
201
+
202
+ # Attach processing info
203
+ processing_info = {
204
+ 'input_file': model_info.get('file_name', 'Unknown'),
205
+ 'prompt': prompt,
206
+ 'joint_count': result['skeleton_data'].get('joint_count', 0),
207
+ 'bone_count': result['skeleton_data'].get('bone_count', 0),
208
+ 'confidence_threshold': confidence_threshold,
209
+ 'vertex_count': model_info.get('vertex_count', 0),
210
+ 'face_count': model_info.get('face_count', 0),
211
+ 'file_size_mb': model_info.get('file_size_mb', 0),
212
+ 'preprocessing_log': result.get('preprocessing_log', [])
213
+ }
214
+
215
+ return {
216
+ 'success': True,
217
+ 'skeleton_data': result['skeleton_data'],
218
+ 'output_files': result['output_files'],
219
+ 'processing_info': processing_info
220
+ }
221
+ else:
222
+ logger.error(f"❌ 处理失败: {result.get('error', 'Unknown error')}")
223
+ return {
224
+ 'success': False,
225
+ 'error': result.get('error', 'Unknown error'),
226
+ 'skeleton_data': None,
227
+ 'output_files': None,
228
+ 'processing_info': None
229
+ }
230
+
231
+ except Exception as e:
232
+ error_msg = f"处理过程中发生错误: {str(e)}"
233
+ logger.error(f"💥 {error_msg}")
234
+
235
+ return {
236
+ 'success': False,
237
+ 'error': error_msg,
238
+ 'skeleton_data': None,
239
+ 'output_files': None,
240
+ 'processing_info': None
241
+ }
242
+
243
+ def get_supported_formats(self) -> List[str]:
244
+ """获取支持的文件格式"""
245
+ if ENHANCED_AVAILABLE:
246
+ # Return our finalized format list
247
+ return list(ModelValidator.SUPPORTED_FORMATS)
248
+ else:
249
+ # Fall back to the basic formats
250
+ return ['.obj', '.glb', '.ply', '.stl']
251
+
252
+ def get_session_info(self, session_id: str) -> Dict[str, Any]:
253
+ """获取会话信息"""
254
+ try:
255
+ if self.api and hasattr(self.api, 'get_session_info'):
256
+ return self.api.get_session_info(session_id)
257
+ else:
258
+ return {}
259
+ except Exception as e:
260
+ logger.error(f"获取会话信息��败: {str(e)}")
261
+ return {}
262
+
263
+ def cleanup_sessions(self, max_age_days: int = 1):
264
+ """清理旧会话(HF Space内存限制)"""
265
+ try:
266
+ if self.api and hasattr(self.api, 'cleanup_sessions'):
267
+ self.api.cleanup_sessions(max_age_days)
268
+ logger.info(f"✅ 清理了超过 {max_age_days} 天的旧会话")
269
+ except Exception as e:
270
+ logger.error(f"清理会话失败: {str(e)}")
271
+
272
+ # Alias to the original class name for compatibility
273
+ MagicArticulateWrapper = EnhancedMagicWrapper
274
+
275
+ # Simplified processing function that uses our finalized API directly
276
+ def process_user_model(file_path: str,
277
+ prompt: str = "",
278
+ model_weights_path: Optional[str] = None) -> Dict[str, Any]:
279
+ """
280
+ Simplified user-model processing interface.
281
+ Calls our finalized process_model_file function directly.
282
+ """
283
+ try:
284
+ if ENHANCED_AVAILABLE:
285
+ # Use our finalized simplified interface
286
+ return process_model_file(
287
+ file_path=file_path,
288
+ user_prompt=prompt,
289
+ model_weights_path=model_weights_path,
290
+ output_dir="hf_temp_sessions"
291
+ )
292
+ else:
293
+ return {
294
+ 'success': False,
295
+ 'error': 'Enhanced API not available'
296
+ }
297
+ except Exception as e:
298
+ return {
299
+ 'success': False,
300
+ 'error': f'Processing failed: {str(e)}'
301
+ }
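A minimal end-to-end usage sketch of the wrapper above (the upload path is a placeholder):

    from src.enhanced_magic_wrapper import EnhancedMagicWrapper

    wrapper = EnhancedMagicWrapper()  # uses the default spatial checkpoint
    if wrapper.initialize():
        result = wrapper.process_3d_model(
            "uploads/boy.obj",  # hypothetical uploaded file
            prompt="realistic human skeleton for walking and animation",
        )
        if result['success']:
            info = result['processing_info']
            print(info['joint_count'], info['bone_count'])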
src/utils.py ADDED
@@ -0,0 +1,290 @@
1
+ """
2
+ Utility functions
3
+ """
4
+
5
+ import os
6
+ import shutil
7
+ import tempfile
8
+ import logging
9
+ from pathlib import Path
10
+ from typing import Optional, Dict, Any, List, Tuple
11
+ import numpy as np
12
+ import trimesh
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+ def validate_file(file_path: str, max_size_mb: int = 50) -> Tuple[bool, str]:
17
+ """
18
+ Validate an uploaded file.
19
+
20
+ Args:
21
+ file_path: path to the file
22
+ max_size_mb: maximum file size in MB
23
+
24
+ Returns:
25
+ (is_valid, error_message)
26
+ """
27
+ try:
28
+ if not os.path.exists(file_path):
29
+ return False, "文件不存在"
30
+
31
+ # Check the file size
32
+ file_size_mb = os.path.getsize(file_path) / (1024 * 1024)
33
+ if file_size_mb > max_size_mb:
34
+ return False, f"文件太大: {file_size_mb:.1f}MB > {max_size_mb}MB"
35
+
36
+ # Check the file extension
37
+ file_ext = Path(file_path).suffix.lower()
38
+ supported_formats = ['.obj', '.glb', '.ply', '.stl']
39
+ if file_ext not in supported_formats:
40
+ return False, f"不支持的文件格式: {file_ext}"
41
+
42
+ # Try to load the file
43
+ try:
44
+ mesh = trimesh.load(file_path, force='mesh')
45
+ if not hasattr(mesh, 'vertices') or len(mesh.vertices) == 0:
46
+ return False, "文件无法解析为有效的3D模型"
47
+ except Exception as e:
48
+ return False, f"文件格式错误: {str(e)}"
49
+
50
+ return True, "文件有效"
51
+
52
+ except Exception as e:
53
+ return False, f"文件验证失败: {str(e)}"
54
+
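+ # Usage sketch of validate_file (the path is a placeholder):
+ #   ok, msg = validate_file("uploads/boy.obj", max_size_mb=50)
+ #   if not ok:
+ #       raise ValueError(msg)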
55
+ def get_model_info(file_path: str) -> Dict[str, Any]:
56
+ """
57
+ Get model information.
58
+
59
+ Args:
60
+ file_path: path to the model file
61
+
62
+ Returns:
63
+ Model info dict
64
+ """
65
+ try:
66
+ mesh = trimesh.load(file_path, force='mesh')
67
+
68
+ # Compute basic info
69
+ vertex_count = len(mesh.vertices) if hasattr(mesh, 'vertices') else 0
70
+ face_count = len(mesh.faces) if hasattr(mesh, 'faces') else 0
71
+
72
+ # Compute the bounding box
73
+ if vertex_count > 0:
74
+ bounds = mesh.bounds
75
+ size = bounds[1] - bounds[0]
76
+ center = (bounds[0] + bounds[1]) / 2
77
+ else:
78
+ size = np.array([0, 0, 0])
79
+ center = np.array([0, 0, 0])
80
+
81
+ # Compute surface area and volume
82
+ surface_area = mesh.area if hasattr(mesh, 'area') else 0
83
+ volume = mesh.volume if hasattr(mesh, 'volume') else 0
84
+
85
+ return {
86
+ 'file_name': os.path.basename(file_path),
87
+ 'file_size_mb': os.path.getsize(file_path) / (1024 * 1024),
88
+ 'vertex_count': vertex_count,
89
+ 'face_count': face_count,
90
+ 'bounding_box': {
91
+ 'min': bounds[0].tolist() if vertex_count > 0 else [0, 0, 0],
92
+ 'max': bounds[1].tolist() if vertex_count > 0 else [0, 0, 0],
93
+ 'size': size.tolist(),
94
+ 'center': center.tolist()
95
+ },
96
+ 'surface_area': float(surface_area),
97
+ 'volume': float(volume),
98
+ 'is_watertight': mesh.is_watertight if hasattr(mesh, 'is_watertight') else False,
99
+ 'is_closed': mesh.is_closed if hasattr(mesh, 'is_closed') else False
100
+ }
101
+
102
+ except Exception as e:
103
+ logger.error(f"Failed to get model info: {str(e)}")
104
+ return {
105
+ 'file_name': os.path.basename(file_path),
106
+ 'error': str(e)
107
+ }
108
+
109
+ def cleanup_temp_files(temp_dir: str, keep_files: Optional[List[str]] = None):
110
+ """
111
+ Clean up temporary files.
112
+
113
+ Args:
114
+ temp_dir: temporary directory
115
+ keep_files: list of file names to keep
116
+ """
117
+ try:
118
+ if not os.path.exists(temp_dir):
119
+ return
120
+
121
+ for file_name in os.listdir(temp_dir):
122
+ file_path = os.path.join(temp_dir, file_name)
123
+
124
+ if keep_files and file_name in keep_files:
125
+ continue
126
+
127
+ try:
128
+ if os.path.isfile(file_path):
129
+ os.remove(file_path)
130
+ elif os.path.isdir(file_path):
131
+ shutil.rmtree(file_path)
132
+ except Exception as e:
133
+ logger.warning(f"Failed to remove {file_path}: {str(e)}")
134
+
135
+ except Exception as e:
136
+ logger.error(f"Cleanup failed: {str(e)}")
137
+
138
+ def format_processing_time(seconds: float) -> str:
139
+ """
140
+ Format a processing time.
141
+
142
+ Args:
143
+ seconds: number of seconds
144
+
145
+ Returns:
146
+ Formatted time string
147
+ """
148
+ if seconds < 60:
149
+ return f"{seconds:.1f}秒"
150
+ elif seconds < 3600:
151
+ minutes = seconds / 60
152
+ return f"{minutes:.1f}分钟"
153
+ else:
154
+ hours = seconds / 3600
155
+ return f"{hours:.1f}小时"
156
+
157
+ def get_prompt_suggestions(model_info: Dict[str, Any]) -> List[str]:
158
+ """
159
+ Get prompt suggestions based on the model info.
160
+
161
+ Args:
162
+ model_info: model info dict
163
+
164
+ Returns:
165
+ List of prompt suggestions
166
+ """
167
+ suggestions = []
168
+
169
+ # Suggestions based on the file name
170
+ file_name = model_info.get('file_name', '').lower()
171
+
172
+ if any(keyword in file_name for keyword in ['human', 'person', 'character', 'boy', 'girl']):
173
+ suggestions.extend([
174
+ "realistic human skeleton for walking animations",
175
+ "character with full body rig for game animation",
176
+ "human bone structure suitable for motion capture"
177
+ ])
178
+ elif any(keyword in file_name for keyword in ['dog', 'cat', 'animal', 'pet']):
179
+ suggestions.extend([
180
+ "four-legged animal with spine and tail bones",
181
+ "quadruped skeleton for natural movement",
182
+ "animal bone structure with flexible spine"
183
+ ])
184
+ elif any(keyword in file_name for keyword in ['bird', 'eagle', 'chicken']):
185
+ suggestions.extend([
186
+ "bird skeleton with wing bones for flight",
187
+ "avian bone structure with hollow bones",
188
+ "bird with articulated wings and tail"
189
+ ])
190
+ elif any(keyword in file_name for keyword in ['robot', 'mech', 'mechanical']):
191
+ suggestions.extend([
192
+ "mechanical robot with joint articulation",
193
+ "industrial robot with precise joint control",
194
+ "mech suit with hydraulic joint system"
195
+ ])
196
+ else:
197
+ suggestions.extend([
198
+ "articulated skeleton suitable for animation",
199
+ "flexible bone structure for general movement",
200
+ "skeleton with natural joint hierarchy"
201
+ ])
202
+
203
+ # Suggestions based on model complexity
204
+ vertex_count = model_info.get('vertex_count', 0)
205
+ if vertex_count > 10000:
206
+ suggestions.append("detailed skeleton for high-poly model")
207
+ elif vertex_count < 1000:
208
+ suggestions.append("simple skeleton for low-poly model")
209
+
210
+ return suggestions[:5] # cap the number of suggestions
211
+
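+ # Example (a sketch): a file named "dog.obj" hits the quadruped branch above,
+ # and a vertex count over 10000 appends the high-poly suggestion:
+ #   get_prompt_suggestions({'file_name': 'dog.obj', 'vertex_count': 15000})
+ #   # -> ['four-legged animal with spine and tail bones',
+ #   #     'quadruped skeleton for natural movement',
+ #   #     'animal bone structure with flexible spine',
+ #   #     'detailed skeleton for high-poly model']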
212
+ def create_processing_status(stage: str, progress: float, message: str) -> Dict[str, Any]:
213
+ """
214
+ Create processing-status info.
215
+
216
+ Args:
217
+ stage: processing stage
218
+ progress: progress in [0, 1]
219
+ message: status message
220
+
221
+ Returns:
222
+ Status info dict
223
+ """
224
+ return {
225
+ 'stage': stage,
226
+ 'progress': min(max(progress, 0.0), 1.0),
227
+ 'message': message,
228
+ 'timestamp': __import__('time').time()
229
+ }
230
+
231
+ def estimate_processing_time(model_info: Dict[str, Any]) -> float:
232
+ """
233
+ Estimate the processing time.
234
+
235
+ Args:
236
+ model_info: model info dict
237
+
238
+ Returns:
239
+ Estimated processing time in seconds
240
+ """
241
+ try:
242
+ vertex_count = model_info.get('vertex_count', 1000)
243
+ face_count = model_info.get('face_count', 1000)
244
+
245
+ # Simple estimate based on model complexity
246
+ complexity_factor = (vertex_count + face_count) / 10000
247
+ base_time = 30 # base processing time: 30 seconds
248
+
249
+ estimated_time = base_time * (1 + complexity_factor * 0.5)
250
+ return min(estimated_time, 120) # cap at 120 seconds
251
+
252
+ except Exception:
253
+ return 60 # default: 60 seconds
254
+
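+ # Worked example of the estimate above: 8,000 vertices + 12,000 faces gives
+ # complexity_factor = 20000 / 10000 = 2.0, so 30 * (1 + 2.0 * 0.5) = 60.0
+ # seconds, well under the 120-second cap:
+ #   estimate_processing_time({'vertex_count': 8000, 'face_count': 12000})  # -> 60.0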
255
+ def generate_download_filename(original_name: str, suffix: str) -> str:
256
+ """
257
+ Generate a download file name.
258
+
259
+ Args:
260
+ original_name: original file name
261
+ suffix: suffix to append
262
+
263
+ Returns:
264
+ New file name
265
+ """
266
+ base_name = os.path.splitext(original_name)[0]
267
+ return f"{base_name}_{suffix}"
268
+
269
+ def safe_json_serialize(obj: Any) -> Any:
270
+ """
271
+ Safe JSON serialization.
272
+
273
+ Args:
274
+ obj: object to serialize
275
+
276
+ Returns:
277
+ A JSON-serializable object
278
+ """
279
+ if isinstance(obj, np.ndarray):
280
+ return obj.tolist()
281
+ elif isinstance(obj, np.floating):
282
+ return float(obj)
283
+ elif isinstance(obj, np.integer):
284
+ return int(obj)
285
+ elif isinstance(obj, dict):
286
+ return {k: safe_json_serialize(v) for k, v in obj.items()}
287
+ elif isinstance(obj, list):
288
+ return [safe_json_serialize(item) for item in obj]
289
+ else:
290
+ return obj
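A quick sketch of safe_json_serialize in action:

    import json
    import numpy as np

    payload = {'joints': np.zeros((2, 3)), 'joint_count': np.int64(2)}
    # json.dumps(payload) would raise TypeError (ndarray is not JSON serializable);
    # after conversion the payload round-trips cleanly:
    print(json.dumps(safe_json_serialize(payload)))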
third_party/Michelangelo/LICENSE ADDED
@@ -0,0 +1,674 @@
1
+ GNU GENERAL PUBLIC LICENSE
2
+ Version 3, 29 June 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU General Public License is a free, copyleft license for
11
+ software and other kinds of works.
12
+
13
+ The licenses for most software and other practical works are designed
14
+ to take away your freedom to share and change the works. By contrast,
15
+ the GNU General Public License is intended to guarantee your freedom to
16
+ share and change all versions of a program--to make sure it remains free
17
+ software for all its users. We, the Free Software Foundation, use the
18
+ GNU General Public License for most of our software; it applies also to
19
+ any other work released this way by its authors. You can apply it to
20
+ your programs, too.
21
+
22
+ When we speak of free software, we are referring to freedom, not
23
+ price. Our General Public Licenses are designed to make sure that you
24
+ have the freedom to distribute copies of free software (and charge for
25
+ them if you wish), that you receive source code or can get it if you
26
+ want it, that you can change the software or use pieces of it in new
27
+ free programs, and that you know you can do these things.
28
+
29
+ To protect your rights, we need to prevent others from denying you
30
+ these rights or asking you to surrender the rights. Therefore, you have
31
+ certain responsibilities if you distribute copies of the software, or if
32
+ you modify it: responsibilities to respect the freedom of others.
33
+
34
+ For example, if you distribute copies of such a program, whether
35
+ gratis or for a fee, you must pass on to the recipients the same
36
+ freedoms that you received. You must make sure that they, too, receive
37
+ or can get the source code. And you must show them these terms so they
38
+ know their rights.
39
+
40
+ Developers that use the GNU GPL protect your rights with two steps:
41
+ (1) assert copyright on the software, and (2) offer you this License
42
+ giving you legal permission to copy, distribute and/or modify it.
43
+
44
+ For the developers' and authors' protection, the GPL clearly explains
45
+ that there is no warranty for this free software. For both users' and
46
+ authors' sake, the GPL requires that modified versions be marked as
47
+ changed, so that their problems will not be attributed erroneously to
48
+ authors of previous versions.
49
+
50
+ Some devices are designed to deny users access to install or run
51
+ modified versions of the software inside them, although the manufacturer
52
+ can do so. This is fundamentally incompatible with the aim of
53
+ protecting users' freedom to change the software. The systematic
54
+ pattern of such abuse occurs in the area of products for individuals to
55
+ use, which is precisely where it is most unacceptable. Therefore, we
56
+ have designed this version of the GPL to prohibit the practice for those
57
+ products. If such problems arise substantially in other domains, we
58
+ stand ready to extend this provision to those domains in future versions
59
+ of the GPL, as needed to protect the freedom of users.
60
+
61
+ Finally, every program is threatened constantly by software patents.
62
+ States should not allow patents to restrict development and use of
63
+ software on general-purpose computers, but in those that do, we wish to
64
+ avoid the special danger that patents applied to a free program could
65
+ make it effectively proprietary. To prevent this, the GPL assures that
66
+ patents cannot be used to render the program non-free.
67
+
68
+ The precise terms and conditions for copying, distribution and
69
+ modification follow.
70
+
71
+ TERMS AND CONDITIONS
72
+
73
+ 0. Definitions.
74
+
75
+ "This License" refers to version 3 of the GNU General Public License.
76
+
77
+ "Copyright" also means copyright-like laws that apply to other kinds of
78
+ works, such as semiconductor masks.
79
+
80
+ "The Program" refers to any copyrightable work licensed under this
81
+ License. Each licensee is addressed as "you". "Licensees" and
82
+ "recipients" may be individuals or organizations.
83
+
84
+ To "modify" a work means to copy from or adapt all or part of the work
85
+ in a fashion requiring copyright permission, other than the making of an
86
+ exact copy. The resulting work is called a "modified version" of the
87
+ earlier work or a work "based on" the earlier work.
88
+
89
+ A "covered work" means either the unmodified Program or a work based
90
+ on the Program.
91
+
92
+ To "propagate" a work means to do anything with it that, without
93
+ permission, would make you directly or secondarily liable for
94
+ infringement under applicable copyright law, except executing it on a
95
+ computer or modifying a private copy. Propagation includes copying,
96
+ distribution (with or without modification), making available to the
97
+ public, and in some countries other activities as well.
98
+
99
+ To "convey" a work means any kind of propagation that enables other
100
+ parties to make or receive copies. Mere interaction with a user through
101
+ a computer network, with no transfer of a copy, is not conveying.
102
+
103
+ An interactive user interface displays "Appropriate Legal Notices"
104
+ to the extent that it includes a convenient and prominently visible
105
+ feature that (1) displays an appropriate copyright notice, and (2)
106
+ tells the user that there is no warranty for the work (except to the
107
+ extent that warranties are provided), that licensees may convey the
108
+ work under this License, and how to view a copy of this License. If
109
+ the interface presents a list of user commands or options, such as a
110
+ menu, a prominent item in the list meets this criterion.
111
+
112
+ 1. Source Code.
113
+
114
+ The "source code" for a work means the preferred form of the work
115
+ for making modifications to it. "Object code" means any non-source
116
+ form of a work.
117
+
118
+ A "Standard Interface" means an interface that either is an official
119
+ standard defined by a recognized standards body, or, in the case of
120
+ interfaces specified for a particular programming language, one that
121
+ is widely used among developers working in that language.
122
+
123
+ The "System Libraries" of an executable work include anything, other
124
+ than the work as a whole, that (a) is included in the normal form of
125
+ packaging a Major Component, but which is not part of that Major
126
+ Component, and (b) serves only to enable use of the work with that
127
+ Major Component, or to implement a Standard Interface for which an
128
+ implementation is available to the public in source code form. A
129
+ "Major Component", in this context, means a major essential component
130
+ (kernel, window system, and so on) of the specific operating system
131
+ (if any) on which the executable work runs, or a compiler used to
132
+ produce the work, or an object code interpreter used to run it.
133
+
134
+ The "Corresponding Source" for a work in object code form means all
135
+ the source code needed to generate, install, and (for an executable
136
+ work) run the object code and to modify the work, including scripts to
137
+ control those activities. However, it does not include the work's
138
+ System Libraries, or general-purpose tools or generally available free
139
+ programs which are used unmodified in performing those activities but
140
+ which are not part of the work. For example, Corresponding Source
141
+ includes interface definition files associated with source files for
142
+ the work, and the source code for shared libraries and dynamically
143
+ linked subprograms that the work is specifically designed to require,
144
+ such as by intimate data communication or control flow between those
145
+ subprograms and other parts of the work.
146
+
147
+ The Corresponding Source need not include anything that users
148
+ can regenerate automatically from other parts of the Corresponding
149
+ Source.
150
+
151
+ The Corresponding Source for a work in source code form is that
152
+ same work.
153
+
154
+ 2. Basic Permissions.
155
+
156
+ All rights granted under this License are granted for the term of
157
+ copyright on the Program, and are irrevocable provided the stated
158
+ conditions are met. This License explicitly affirms your unlimited
159
+ permission to run the unmodified Program. The output from running a
160
+ covered work is covered by this License only if the output, given its
161
+ content, constitutes a covered work. This License acknowledges your
162
+ rights of fair use or other equivalent, as provided by copyright law.
163
+
164
+ You may make, run and propagate covered works that you do not
165
+ convey, without conditions so long as your license otherwise remains
166
+ in force. You may convey covered works to others for the sole purpose
167
+ of having them make modifications exclusively for you, or provide you
168
+ with facilities for running those works, provided that you comply with
169
+ the terms of this License in conveying all material for which you do
170
+ not control copyright. Those thus making or running the covered works
171
+ for you must do so exclusively on your behalf, under your direction
172
+ and control, on terms that prohibit them from making any copies of
173
+ your copyrighted material outside their relationship with you.
174
+
175
+ Conveying under any other circumstances is permitted solely under
176
+ the conditions stated below. Sublicensing is not allowed; section 10
177
+ makes it unnecessary.
178
+
179
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180
+
181
+ No covered work shall be deemed part of an effective technological
182
+ measure under any applicable law fulfilling obligations under article
183
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184
+ similar laws prohibiting or restricting circumvention of such
185
+ measures.
186
+
187
+ When you convey a covered work, you waive any legal power to forbid
188
+ circumvention of technological measures to the extent such circumvention
189
+ is effected by exercising rights under this License with respect to
190
+ the covered work, and you disclaim any intention to limit operation or
191
+ modification of the work as a means of enforcing, against the work's
192
+ users, your or third parties' legal rights to forbid circumvention of
193
+ technological measures.
194
+
195
+ 4. Conveying Verbatim Copies.
196
+
197
+ You may convey verbatim copies of the Program's source code as you
198
+ receive it, in any medium, provided that you conspicuously and
199
+ appropriately publish on each copy an appropriate copyright notice;
200
+ keep intact all notices stating that this License and any
201
+ non-permissive terms added in accord with section 7 apply to the code;
202
+ keep intact all notices of the absence of any warranty; and give all
203
+ recipients a copy of this License along with the Program.
204
+
205
+ You may charge any price or no price for each copy that you convey,
206
+ and you may offer support or warranty protection for a fee.
207
+
208
+ 5. Conveying Modified Source Versions.
209
+
210
+ You may convey a work based on the Program, or the modifications to
211
+ produce it from the Program, in the form of source code under the
212
+ terms of section 4, provided that you also meet all of these conditions:
213
+
214
+ a) The work must carry prominent notices stating that you modified
215
+ it, and giving a relevant date.
216
+
217
+ b) The work must carry prominent notices stating that it is
218
+ released under this License and any conditions added under section
219
+ 7. This requirement modifies the requirement in section 4 to
220
+ "keep intact all notices".
221
+
222
+ c) You must license the entire work, as a whole, under this
223
+ License to anyone who comes into possession of a copy. This
224
+ License will therefore apply, along with any applicable section 7
225
+ additional terms, to the whole of the work, and all its parts,
226
+ regardless of how they are packaged. This License gives no
227
+ permission to license the work in any other way, but it does not
228
+ invalidate such permission if you have separately received it.
229
+
230
+ d) If the work has interactive user interfaces, each must display
231
+ Appropriate Legal Notices; however, if the Program has interactive
232
+ interfaces that do not display Appropriate Legal Notices, your
233
+ work need not make them do so.
234
+
235
+ A compilation of a covered work with other separate and independent
236
+ works, which are not by their nature extensions of the covered work,
237
+ and which are not combined with it such as to form a larger program,
238
+ in or on a volume of a storage or distribution medium, is called an
239
+ "aggregate" if the compilation and its resulting copyright are not
240
+ used to limit the access or legal rights of the compilation's users
241
+ beyond what the individual works permit. Inclusion of a covered work
242
+ in an aggregate does not cause this License to apply to the other
243
+ parts of the aggregate.
244
+
245
+ 6. Conveying Non-Source Forms.
246
+
247
+ You may convey a covered work in object code form under the terms
248
+ of sections 4 and 5, provided that you also convey the
249
+ machine-readable Corresponding Source under the terms of this License,
250
+ in one of these ways:
251
+
252
+ a) Convey the object code in, or embodied in, a physical product
253
+ (including a physical distribution medium), accompanied by the
254
+ Corresponding Source fixed on a durable physical medium
255
+ customarily used for software interchange.
256
+
257
+ b) Convey the object code in, or embodied in, a physical product
258
+ (including a physical distribution medium), accompanied by a
259
+ written offer, valid for at least three years and valid for as
260
+ long as you offer spare parts or customer support for that product
261
+ model, to give anyone who possesses the object code either (1) a
262
+ copy of the Corresponding Source for all the software in the
263
+ product that is covered by this License, on a durable physical
264
+ medium customarily used for software interchange, for a price no
265
+ more than your reasonable cost of physically performing this
266
+ conveying of source, or (2) access to copy the
267
+ Corresponding Source from a network server at no charge.
268
+
269
+ c) Convey individual copies of the object code with a copy of the
270
+ written offer to provide the Corresponding Source. This
271
+ alternative is allowed only occasionally and noncommercially, and
272
+ only if you received the object code with such an offer, in accord
273
+ with subsection 6b.
274
+
275
+ d) Convey the object code by offering access from a designated
276
+ place (gratis or for a charge), and offer equivalent access to the
277
+ Corresponding Source in the same way through the same place at no
278
+ further charge. You need not require recipients to copy the
279
+ Corresponding Source along with the object code. If the place to
280
+ copy the object code is a network server, the Corresponding Source
281
+ may be on a different server (operated by you or a third party)
282
+ that supports equivalent copying facilities, provided you maintain
283
+ clear directions next to the object code saying where to find the
284
+ Corresponding Source. Regardless of what server hosts the
285
+ Corresponding Source, you remain obligated to ensure that it is
286
+ available for as long as needed to satisfy these requirements.
287
+
288
+ e) Convey the object code using peer-to-peer transmission, provided
289
+ you inform other peers where the object code and Corresponding
290
+ Source of the work are being offered to the general public at no
291
+ charge under subsection 6d.
292
+
293
+ A separable portion of the object code, whose source code is excluded
294
+ from the Corresponding Source as a System Library, need not be
295
+ included in conveying the object code work.
296
+
297
+ A "User Product" is either (1) a "consumer product", which means any
298
+ tangible personal property which is normally used for personal, family,
299
+ or household purposes, or (2) anything designed or sold for incorporation
300
+ into a dwelling. In determining whether a product is a consumer product,
301
+ doubtful cases shall be resolved in favor of coverage. For a particular
302
+ product received by a particular user, "normally used" refers to a
303
+ typical or common use of that class of product, regardless of the status
304
+ of the particular user or of the way in which the particular user
305
+ actually uses, or expects or is expected to use, the product. A product
306
+ is a consumer product regardless of whether the product has substantial
307
+ commercial, industrial or non-consumer uses, unless such uses represent
308
+ the only significant mode of use of the product.
309
+
310
+ "Installation Information" for a User Product means any methods,
311
+ procedures, authorization keys, or other information required to install
312
+ and execute modified versions of a covered work in that User Product from
313
+ a modified version of its Corresponding Source. The information must
314
+ suffice to ensure that the continued functioning of the modified object
315
+ code is in no case prevented or interfered with solely because
316
+ modification has been made.
317
+
318
+ If you convey an object code work under this section in, or with, or
319
+ specifically for use in, a User Product, and the conveying occurs as
320
+ part of a transaction in which the right of possession and use of the
321
+ User Product is transferred to the recipient in perpetuity or for a
322
+ fixed term (regardless of how the transaction is characterized), the
323
+ Corresponding Source conveyed under this section must be accompanied
324
+ by the Installation Information. But this requirement does not apply
325
+ if neither you nor any third party retains the ability to install
326
+ modified object code on the User Product (for example, the work has
327
+ been installed in ROM).
328
+
329
+ The requirement to provide Installation Information does not include a
330
+ requirement to continue to provide support service, warranty, or updates
331
+ for a work that has been modified or installed by the recipient, or for
332
+ the User Product in which it has been modified or installed. Access to a
333
+ network may be denied when the modification itself materially and
334
+ adversely affects the operation of the network or violates the rules and
335
+ protocols for communication across the network.
336
+
337
+ Corresponding Source conveyed, and Installation Information provided,
338
+ in accord with this section must be in a format that is publicly
339
+ documented (and with an implementation available to the public in
340
+ source code form), and must require no special password or key for
341
+ unpacking, reading or copying.
342
+
343
+ 7. Additional Terms.
344
+
345
+ "Additional permissions" are terms that supplement the terms of this
346
+ License by making exceptions from one or more of its conditions.
347
+ Additional permissions that are applicable to the entire Program shall
348
+ be treated as though they were included in this License, to the extent
349
+ that they are valid under applicable law. If additional permissions
350
+ apply only to part of the Program, that part may be used separately
351
+ under those permissions, but the entire Program remains governed by
352
+ this License without regard to the additional permissions.
353
+
354
+ When you convey a copy of a covered work, you may at your option
355
+ remove any additional permissions from that copy, or from any part of
356
+ it. (Additional permissions may be written to require their own
357
+ removal in certain cases when you modify the work.) You may place
358
+ additional permissions on material, added by you to a covered work,
359
+ for which you have or can give appropriate copyright permission.
360
+
361
+ Notwithstanding any other provision of this License, for material you
362
+ add to a covered work, you may (if authorized by the copyright holders of
363
+ that material) supplement the terms of this License with terms:
364
+
365
+ a) Disclaiming warranty or limiting liability differently from the
366
+ terms of sections 15 and 16 of this License; or
367
+
368
+ b) Requiring preservation of specified reasonable legal notices or
369
+ author attributions in that material or in the Appropriate Legal
370
+ Notices displayed by works containing it; or
371
+
372
+ c) Prohibiting misrepresentation of the origin of that material, or
373
+ requiring that modified versions of such material be marked in
374
+ reasonable ways as different from the original version; or
375
+
376
+ d) Limiting the use for publicity purposes of names of licensors or
377
+ authors of the material; or
378
+
379
+ e) Declining to grant rights under trademark law for use of some
380
+ trade names, trademarks, or service marks; or
381
+
382
+ f) Requiring indemnification of licensors and authors of that
383
+ material by anyone who conveys the material (or modified versions of
384
+ it) with contractual assumptions of liability to the recipient, for
385
+ any liability that these contractual assumptions directly impose on
386
+ those licensors and authors.
387
+
388
+ All other non-permissive additional terms are considered "further
389
+ restrictions" within the meaning of section 10. If the Program as you
390
+ received it, or any part of it, contains a notice stating that it is
391
+ governed by this License along with a term that is a further
392
+ restriction, you may remove that term. If a license document contains
393
+ a further restriction but permits relicensing or conveying under this
394
+ License, you may add to a covered work material governed by the terms
395
+ of that license document, provided that the further restriction does
396
+ not survive such relicensing or conveying.
397
+
398
+ If you add terms to a covered work in accord with this section, you
399
+ must place, in the relevant source files, a statement of the
400
+ additional terms that apply to those files, or a notice indicating
401
+ where to find the applicable terms.
402
+
403
+ Additional terms, permissive or non-permissive, may be stated in the
404
+ form of a separately written license, or stated as exceptions;
405
+ the above requirements apply either way.
406
+
407
+ 8. Termination.
408
+
409
+ You may not propagate or modify a covered work except as expressly
410
+ provided under this License. Any attempt otherwise to propagate or
411
+ modify it is void, and will automatically terminate your rights under
412
+ this License (including any patent licenses granted under the third
413
+ paragraph of section 11).
414
+
415
+ However, if you cease all violation of this License, then your
416
+ license from a particular copyright holder is reinstated (a)
417
+ provisionally, unless and until the copyright holder explicitly and
418
+ finally terminates your license, and (b) permanently, if the copyright
419
+ holder fails to notify you of the violation by some reasonable means
420
+ prior to 60 days after the cessation.
421
+
422
+ Moreover, your license from a particular copyright holder is
423
+ reinstated permanently if the copyright holder notifies you of the
424
+ violation by some reasonable means, this is the first time you have
425
+ received notice of violation of this License (for any work) from that
426
+ copyright holder, and you cure the violation prior to 30 days after
427
+ your receipt of the notice.
428
+
429
+ Termination of your rights under this section does not terminate the
430
+ licenses of parties who have received copies or rights from you under
431
+ this License. If your rights have been terminated and not permanently
432
+ reinstated, you do not qualify to receive new licenses for the same
433
+ material under section 10.
434
+
435
+ 9. Acceptance Not Required for Having Copies.
436
+
437
+ You are not required to accept this License in order to receive or
438
+ run a copy of the Program. Ancillary propagation of a covered work
439
+ occurring solely as a consequence of using peer-to-peer transmission
440
+ to receive a copy likewise does not require acceptance. However,
441
+ nothing other than this License grants you permission to propagate or
442
+ modify any covered work. These actions infringe copyright if you do
443
+ not accept this License. Therefore, by modifying or propagating a
444
+ covered work, you indicate your acceptance of this License to do so.
445
+
446
+ 10. Automatic Licensing of Downstream Recipients.
447
+
448
+ Each time you convey a covered work, the recipient automatically
449
+ receives a license from the original licensors, to run, modify and
450
+ propagate that work, subject to this License. You are not responsible
451
+ for enforcing compliance by third parties with this License.
452
+
453
+ An "entity transaction" is a transaction transferring control of an
454
+ organization, or substantially all assets of one, or subdividing an
455
+ organization, or merging organizations. If propagation of a covered
456
+ work results from an entity transaction, each party to that
457
+ transaction who receives a copy of the work also receives whatever
458
+ licenses to the work the party's predecessor in interest had or could
459
+ give under the previous paragraph, plus a right to possession of the
460
+ Corresponding Source of the work from the predecessor in interest, if
461
+ the predecessor has it or can get it with reasonable efforts.
462
+
463
+ You may not impose any further restrictions on the exercise of the
464
+ rights granted or affirmed under this License. For example, you may
465
+ not impose a license fee, royalty, or other charge for exercise of
466
+ rights granted under this License, and you may not initiate litigation
467
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
468
+ any patent claim is infringed by making, using, selling, offering for
469
+ sale, or importing the Program or any portion of it.
470
+
471
+ 11. Patents.
472
+
473
+ A "contributor" is a copyright holder who authorizes use under this
474
+ License of the Program or a work on which the Program is based. The
475
+ work thus licensed is called the contributor's "contributor version".
476
+
477
+ A contributor's "essential patent claims" are all patent claims
478
+ owned or controlled by the contributor, whether already acquired or
479
+ hereafter acquired, that would be infringed by some manner, permitted
480
+ by this License, of making, using, or selling its contributor version,
481
+ but do not include claims that would be infringed only as a
482
+ consequence of further modification of the contributor version. For
483
+ purposes of this definition, "control" includes the right to grant
484
+ patent sublicenses in a manner consistent with the requirements of
485
+ this License.
486
+
487
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
488
+ patent license under the contributor's essential patent claims, to
489
+ make, use, sell, offer for sale, import and otherwise run, modify and
490
+ propagate the contents of its contributor version.
491
+
492
+ In the following three paragraphs, a "patent license" is any express
493
+ agreement or commitment, however denominated, not to enforce a patent
494
+ (such as an express permission to practice a patent or covenant not to
495
+ sue for patent infringement). To "grant" such a patent license to a
496
+ party means to make such an agreement or commitment not to enforce a
497
+ patent against the party.
498
+
499
+ If you convey a covered work, knowingly relying on a patent license,
500
+ and the Corresponding Source of the work is not available for anyone
501
+ to copy, free of charge and under the terms of this License, through a
502
+ publicly available network server or other readily accessible means,
503
+ then you must either (1) cause the Corresponding Source to be so
504
+ available, or (2) arrange to deprive yourself of the benefit of the
505
+ patent license for this particular work, or (3) arrange, in a manner
506
+ consistent with the requirements of this License, to extend the patent
507
+ license to downstream recipients. "Knowingly relying" means you have
508
+ actual knowledge that, but for the patent license, your conveying the
509
+ covered work in a country, or your recipient's use of the covered work
510
+ in a country, would infringe one or more identifiable patents in that
511
+ country that you have reason to believe are valid.
512
+
513
+ If, pursuant to or in connection with a single transaction or
514
+ arrangement, you convey, or propagate by procuring conveyance of, a
515
+ covered work, and grant a patent license to some of the parties
516
+ receiving the covered work authorizing them to use, propagate, modify
517
+ or convey a specific copy of the covered work, then the patent license
518
+ you grant is automatically extended to all recipients of the covered
519
+ work and works based on it.
520
+
521
+ A patent license is "discriminatory" if it does not include within
522
+ the scope of its coverage, prohibits the exercise of, or is
523
+ conditioned on the non-exercise of one or more of the rights that are
524
+ specifically granted under this License. You may not convey a covered
525
+ work if you are a party to an arrangement with a third party that is
526
+ in the business of distributing software, under which you make payment
527
+ to the third party based on the extent of your activity of conveying
528
+ the work, and under which the third party grants, to any of the
529
+ parties who would receive the covered work from you, a discriminatory
530
+ patent license (a) in connection with copies of the covered work
531
+ conveyed by you (or copies made from those copies), or (b) primarily
532
+ for and in connection with specific products or compilations that
533
+ contain the covered work, unless you entered into that arrangement,
534
+ or that patent license was granted, prior to 28 March 2007.
535
+
536
+ Nothing in this License shall be construed as excluding or limiting
537
+ any implied license or other defenses to infringement that may
538
+ otherwise be available to you under applicable patent law.
539
+
540
+ 12. No Surrender of Others' Freedom.
541
+
542
+ If conditions are imposed on you (whether by court order, agreement or
543
+ otherwise) that contradict the conditions of this License, they do not
544
+ excuse you from the conditions of this License. If you cannot convey a
545
+ covered work so as to satisfy simultaneously your obligations under this
546
+ License and any other pertinent obligations, then as a consequence you may
547
+ not convey it at all. For example, if you agree to terms that obligate you
548
+ to collect a royalty for further conveying from those to whom you convey
549
+ the Program, the only way you could satisfy both those terms and this
550
+ License would be to refrain entirely from conveying the Program.
551
+
552
+ 13. Use with the GNU Affero General Public License.
553
+
554
+ Notwithstanding any other provision of this License, you have
555
+ permission to link or combine any covered work with a work licensed
556
+ under version 3 of the GNU Affero General Public License into a single
557
+ combined work, and to convey the resulting work. The terms of this
558
+ License will continue to apply to the part which is the covered work,
559
+ but the special requirements of the GNU Affero General Public License,
560
+ section 13, concerning interaction through a network will apply to the
561
+ combination as such.
562
+
563
+ 14. Revised Versions of this License.
564
+
565
+ The Free Software Foundation may publish revised and/or new versions of
566
+ the GNU General Public License from time to time. Such new versions will
567
+ be similar in spirit to the present version, but may differ in detail to
568
+ address new problems or concerns.
569
+
570
+ Each version is given a distinguishing version number. If the
571
+ Program specifies that a certain numbered version of the GNU General
572
+ Public License "or any later version" applies to it, you have the
573
+ option of following the terms and conditions either of that numbered
574
+ version or of any later version published by the Free Software
575
+ Foundation. If the Program does not specify a version number of the
576
+ GNU General Public License, you may choose any version ever published
577
+ by the Free Software Foundation.
578
+
579
+ If the Program specifies that a proxy can decide which future
580
+ versions of the GNU General Public License can be used, that proxy's
581
+ public statement of acceptance of a version permanently authorizes you
582
+ to choose that version for the Program.
583
+
584
+ Later license versions may give you additional or different
585
+ permissions. However, no additional obligations are imposed on any
586
+ author or copyright holder as a result of your choosing to follow a
587
+ later version.
588
+
589
+ 15. Disclaimer of Warranty.
590
+
591
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599
+
600
+ 16. Limitation of Liability.
601
+
602
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610
+ SUCH DAMAGES.
611
+
612
+ 17. Interpretation of Sections 15 and 16.
613
+
614
+ If the disclaimer of warranty and limitation of liability provided
615
+ above cannot be given local legal effect according to their terms,
616
+ reviewing courts shall apply local law that most closely approximates
617
+ an absolute waiver of all civil liability in connection with the
618
+ Program, unless a warranty or assumption of liability accompanies a
619
+ copy of the Program in return for a fee.
620
+
621
+ END OF TERMS AND CONDITIONS
622
+
623
+ How to Apply These Terms to Your New Programs
624
+
625
+ If you develop a new program, and you want it to be of the greatest
626
+ possible use to the public, the best way to achieve this is to make it
627
+ free software which everyone can redistribute and change under these terms.
628
+
629
+ To do so, attach the following notices to the program. It is safest
630
+ to attach them to the start of each source file to most effectively
631
+ state the exclusion of warranty; and each file should have at least
632
+ the "copyright" line and a pointer to where the full notice is found.
633
+
634
+ <one line to give the program's name and a brief idea of what it does.>
635
+ Copyright (C) <year> <name of author>
636
+
637
+ This program is free software: you can redistribute it and/or modify
638
+ it under the terms of the GNU General Public License as published by
639
+ the Free Software Foundation, either version 3 of the License, or
640
+ (at your option) any later version.
641
+
642
+ This program is distributed in the hope that it will be useful,
643
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
644
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645
+ GNU General Public License for more details.
646
+
647
+ You should have received a copy of the GNU General Public License
648
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
649
+
650
+ Also add information on how to contact you by electronic and paper mail.
651
+
652
+ If the program does terminal interaction, make it output a short
653
+ notice like this when it starts in an interactive mode:
654
+
655
+ <program> Copyright (C) <year> <name of author>
656
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657
+ This is free software, and you are welcome to redistribute it
658
+ under certain conditions; type `show c' for details.
659
+
660
+ The hypothetical commands `show w' and `show c' should show the appropriate
661
+ parts of the General Public License. Of course, your program's commands
662
+ might be different; for a GUI interface, you would use an "about box".
663
+
664
+ You should also get your employer (if you work as a programmer) or school,
665
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
666
+ For more information on this, and how to apply and follow the GNU GPL, see
667
+ <https://www.gnu.org/licenses/>.
668
+
669
+ The GNU General Public License does not permit incorporating your program
670
+ into proprietary programs. If your program is a subroutine library, you
671
+ may consider it more useful to permit linking proprietary applications with
672
+ the library. If this is what you want to do, use the GNU Lesser General
673
+ Public License instead of this License. But first, please read
674
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
third_party/Michelangelo/README.md ADDED
@@ -0,0 +1,113 @@
1
+ # Michelangelo
2
+
3
+ ## [Conditional 3D Shape Generation based on Shape-Image-Text Aligned Latent Representation](https://neuralcarver.github.io/michelangelo)<br/>
4
+ [Zibo Zhao](https://github.com/Maikouuu),
5
+ [Wen Liu](https://github.com/StevenLiuWen),
6
+ [Xin Chen](https://chenxin.tech/),
7
+ [Xianfang Zeng](https://github.com/Zzlongjuanfeng),
8
+ [Rui Wang](https://wrong.wang/),
9
+ [Pei Cheng](https://neuralcarver.github.io/michelangelo),
10
+ [Bin Fu](https://neuralcarver.github.io/michelangelo),
11
+ [Tao Chen](https://eetchen.github.io),
12
+ [Gang Yu](https://www.skicyyu.org),
13
+ [Shenghua Gao](https://sist.shanghaitech.edu.cn/sist_en/2020/0814/c7582a54772/page.htm)<br/>
14
+ ### [Hugging Face Demo](https://huggingface.co/spaces/Maikou/Michelangelo) | [Project Page](https://neuralcarver.github.io/michelangelo/) | [Arxiv](https://arxiv.org/abs/2306.17115) | [Paper](https://openreview.net/pdf?id=xmxgMij3LY)<br/>
15
+
16
+ https://github.com/NeuralCarver/Michelangelo/assets/37449470/123bae2c-fbb1-4d63-bd13-0e300a550868
17
+
18
+ Visualization of 3D shapes produced by our framework, shown as triplets with the conditional input on the left, a normal map in the middle, and the generated triangle mesh on the right. The generated 3D shapes semantically conform to the visual or textual conditional inputs.<br/>
19
+
20
+ ## 🔆 Features
21
+ **Michelangelo** possesses three capabilities:
22
+
23
+ 1. Representing a shape into shape-image-text aligned space;
24
+ 2. Image-conditioned Shape Generation;
25
+ 3. Text-conditioned Shape Generation.
26
+
27
+ <details>
28
+ <summary><b> Techniques </b></summary>
29
+
30
+ We present a novel _alignment-before-generation_ approach to tackle the challenging task of generating general 3D shapes based on 2D images or texts. Directly learning a conditional generative model from images or texts to 3D shapes is prone to producing results inconsistent with the conditions, because 3D shapes have an additional dimension whose distribution significantly differs from that of 2D images and texts. To bridge the domain gap among the three modalities and facilitate multi-modal-conditioned 3D shape generation, we explore representing 3D shapes in a shape-image-text-aligned space. Our framework comprises two models: a Shape-Image-Text-Aligned Variational Auto-Encoder (SITA-VAE) and a conditional Aligned Shape Latent Diffusion Model (ASLDM). The former model encodes the 3D shapes into the shape latent space aligned to the image and text and reconstructs the fine-grained 3D neural fields corresponding to given shape embeddings via the transformer-based decoder. The latter model learns a probabilistic mapping function from the image or text space to the latent shape space. Our extensive experiments demonstrate that our proposed approach can generate higher-quality and more diverse 3D shapes that better semantically conform to the visual or textual conditional inputs, validating the effectiveness of the shape-image-text-aligned space for cross-modality 3D shape generation.
31
+
32
+ ![newnetwork](https://github.com/NeuralCarver/Michelangelo/assets/16475892/d5231fb7-7768-45ee-92e1-3599a4c43a2c)
33
+ </details>
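+ As a quick orientation, here is a minimal sketch of the reconstruction path through the SITA-VAE, condensed from this repo's `inference.py`. The config, checkpoint, and example-data paths are assumptions following the checkpoint layout described below; `extract_geometry` is the octree-based mesh extraction helper that `inference.py` itself uses.
+ 
+ ```python
+ # Minimal sketch (assumed paths) of the SITA-VAE round trip from inference.py.
+ from functools import partial
+ 
+ import numpy as np
+ import torch
+ import trimesh
+ 
+ from michelangelo.models.tsal.inference_utils import extract_geometry
+ from michelangelo.utils.misc import get_config_from_file, instantiate_from_config
+ 
+ cfg = get_config_from_file("configs/shapevae-256.yaml")  # assumed config path
+ model = instantiate_from_config(
+     cfg.model, ckpt_path="checkpoints/aligned_shape_latents/shapevae-256.ckpt"
+ ).cuda().eval()
+ 
+ # Sample 4096 surface points with normals, as load_surface() does.
+ pc = np.load("example_data/surface.npz")
+ ind = np.random.default_rng().choice(pc["points"].shape[0], 4096, replace=False)
+ surface = torch.cat(
+     [torch.FloatTensor(pc["points"][ind]), torch.FloatTensor(pc["normals"][ind])],
+     dim=-1,
+ ).unsqueeze(0).cuda()
+ 
+ # SITA-VAE: encode into the shape-image-text aligned latent space and decode
+ # back to a neural field; the conditional ASLDM would instead sample shape_zq
+ # from an image or text prompt (see image2mesh/text2mesh in inference.py).
+ shape_embed, shape_latents = model.model.encode_shape_embed(surface, return_latents=True)
+ shape_zq, posterior = model.model.shape_model.encode_kl_embed(shape_latents)
+ latents = model.model.shape_model.decode(shape_zq)
+ 
+ # Query the decoded field over an octree grid and run marching cubes.
+ geometric_func = partial(model.model.shape_model.query_geometry, latents=latents)
+ mesh_v_f, has_surface = extract_geometry(
+     geometric_func=geometric_func, device=surface.device, batch_size=1,
+     bounds=(-1.25, -1.25, -1.25, 1.25, 1.25, 1.25), octree_depth=7, num_chunks=10000,
+ )
+ trimesh.Trimesh(mesh_v_f[0][0], mesh_v_f[0][1]).export("reconstruction.obj")
+ ```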
34
+
35
+ ## 📰 News
36
+ - [2024/1/23] Set up the <a href="https://huggingface.co/spaces/Maikou/Michelangelo">Hugging Face Demo</a> and release the code
37
+ - [2023/09/22] **Michelangelo got accepted by NeurIPS 2023!**
38
+ - [2023/6/29] Upload paper and init project
39
+
40
+ ## ⚙️ Setup
41
+
42
+ ### Installation
43
+ Follow the commands below to set up the environment. We have tested the installation on Tesla V100 and Tesla T4 GPUs.
44
+ ```
45
+ git clone https://github.com/NeuralCarver/Michelangelo.git
46
+ cd Michelangelo
47
+ conda create --name Michelangelo python=3.9
48
+ conda activate Michelangelo
49
+ pip install -r requirements.txt
50
+ ```
51
+
52
+ ### Checkpoints
53
+ Please download the weights from the <a href="https://huggingface.co/Maikou/Michelangelo/tree/main/checkpoints">Hugging Face model space</a> and put them in the root folder. We have also uploaded the CLIP-related weights to facilitate quick usage.
54
+
55
+ <details>
56
+ <summary><b>
57
+ Tips for debugging the configuration
58
+ </b></summary>
59
+
60
+ - If something unfortunately goes wrong during environment configuration, consider skipping the problematic packages, such as pysdf, torch-cluster, and torch-scatter. These packages do not affect the execution of the commands we provide.
61
+ - If you encounter any issues while downloading CLIP, consider downloading it from [CLIP's Hugging Face page](https://huggingface.co/openai/clip-vit-large-patch14). Once the download is complete, remember to modify line [26](https://github.com/NeuralCarver/Michelangelo/blob/b53fa004cd4aeb0f4eb4d159ecec8489a4450dab/configs/text_cond_diffuser_asl/text-ASLDM-256.yaml#L26C1-L26C76) and line [34](https://github.com/NeuralCarver/Michelangelo/blob/b53fa004cd4aeb0f4eb4d159ecec8489a4450dab/configs/text_cond_diffuser_asl/text-ASLDM-256.yaml#L34) in the config file to point at the correct CLIP path (see the config fragment after this list).
62
+ - From [issue 6](https://github.com/NeuralCarver/Michelangelo/issues/6#issuecomment-1913513382): Windows users running WSL2 + Ubuntu 22.04 may hit issues. As discussed in [this WSL issue](https://github.com/microsoft/WSL/issues/8587), it is just a matter of adding this to your .bashrc:
63
+ ```
64
+ export LD_LIBRARY_PATH=/usr/lib/wsl/lib:$LD_LIBRARY_PATH
65
+ ```
66
+ </details>
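+ 
+ For reference, the CLIP path lives under the aligned-module section of the model configs. This is the relevant fragment of `configs/shapevae-256.yaml` from this repo (the text-conditioned ASLDM config exposes the same field at the lines linked above); point `clip_model_version` at your locally downloaded copy if the automatic fetch fails:
+ 
+ ```yaml
+ aligned_module_cfg:
+   target: third_party.Michelangelo.michelangelo.models.tsal.clip_asl_module.CLIPAlignedShapeAsLatentModule
+   params:
+     # set this to a local clip-vit-large-patch14 directory if needed
+     clip_model_version: "./checkpoints/clip/clip-vit-large-patch14"
+ ```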
67
+
68
+ ## ⚡ Quick Start
69
+
70
+ ### Inference
71
+
72
+ #### Reconstruct a 3D shape
73
+ ```
74
+ ./scripts/inference/reconstruction.sh
75
+ ```
76
+
77
+ #### Image-conditioned shape generation
78
+ ```
79
+ ./scripts/inference/image2mesh.sh
80
+ ```
81
+
82
+ #### Text-conditioned shape generation
83
+ ```
84
+ ./scripts/inference/text2mesh.sh
85
+ ```
86
+
87
+ #### Simply run all the scripts
88
+ ```
89
+ ./scripts/infer.sh
90
+ ```
91
+
92
+
93
+ ## ❓ FAQ
94
+
95
+ ## Citation
96
+
97
+ If you find our code or paper helpful, please consider citing:
98
+
99
+ ```bibtex
100
+ @inproceedings{
101
+ zhao2023michelangelo,
102
+ title={Michelangelo: Conditional 3D Shape Generation based on Shape-Image-Text Aligned Latent Representation},
103
+ author={Zibo Zhao and Wen Liu and Xin Chen and Xianfang Zeng and Rui Wang and Pei Cheng and BIN FU and Tao Chen and Gang YU and Shenghua Gao},
104
+ booktitle={Thirty-seventh Conference on Neural Information Processing Systems},
105
+ year={2023},
106
+ url={https://openreview.net/forum?id=xmxgMij3LY}
107
+ }
108
+ ```
109
+
110
+ ## License
111
+
112
+ This code is distributed under a [GPL-3.0 license](LICENSE).
113
+
third_party/Michelangelo/configs/shapevae-256.yaml ADDED
@@ -0,0 +1,46 @@
1
+ model:
2
+ target: third_party.Michelangelo.michelangelo.models.tsal.asl_pl_module.AlignedShapeAsLatentPLModule
3
+ params:
4
+ shape_module_cfg:
5
+ target: third_party.Michelangelo.michelangelo.models.tsal.sal_perceiver.AlignedShapeLatentPerceiver
6
+ params:
7
+ num_latents: 256
8
+ embed_dim: 64
9
+ point_feats: 3 # normal
10
+ num_freqs: 8
11
+ include_pi: false
12
+ heads: 12
13
+ width: 768
14
+ num_encoder_layers: 8
15
+ num_decoder_layers: 16
16
+ use_ln_post: true
17
+ init_scale: 0.25
18
+ qkv_bias: false
19
+ use_checkpoint: true
20
+ aligned_module_cfg:
21
+ target: third_party.Michelangelo.michelangelo.models.tsal.clip_asl_module.CLIPAlignedShapeAsLatentModule
22
+ params:
23
+ clip_model_version: "./checkpoints/clip/clip-vit-large-patch14"
24
+
25
+ loss_cfg:
26
+ target: third_party.Michelangelo.michelangelo.models.tsal.loss.ContrastKLNearFar
27
+ params:
28
+ contrast_weight: 0.1
29
+ near_weight: 0.1
30
+ kl_weight: 0.001
31
+
32
+ optimizer_cfg:
33
+ optimizer:
34
+ target: torch.optim.AdamW
35
+ params:
36
+ betas: [0.9, 0.99]
37
+ eps: 1.e-6
38
+ weight_decay: 1.e-2
39
+
40
+ scheduler:
41
+ target: third_party.Michelangelo.michelangelo.utils.trainings.lr_scheduler.LambdaWarmUpCosineFactorScheduler
42
+ params:
43
+ warm_up_steps: 5000
44
+ f_start: 1.e-6
45
+ f_min: 1.e-3
46
+ f_max: 1.0
third_party/Michelangelo/encode.py ADDED
@@ -0,0 +1,101 @@
1
+ # -*- coding: utf-8 -*-
2
+ import os
3
+ import argparse
4
+ from omegaconf import OmegaConf, DictConfig, ListConfig
5
+ import numpy as np
6
+ import torch
7
+ from .michelangelo.utils.misc import instantiate_from_config
8
+
9
+ def load_surface(fp):
10
+
11
+ with np.load(fp) as input_pc:
12
+ surface = input_pc['points']
13
+ normal = input_pc['normals']
14
+
15
+ rng = np.random.default_rng()
16
+ ind = rng.choice(surface.shape[0], 4096, replace=False)
17
+ surface = torch.FloatTensor(surface[ind])
18
+ normal = torch.FloatTensor(normal[ind])
19
+
20
+ surface = torch.cat([surface, normal], dim=-1).unsqueeze(0).cuda()
21
+
22
+ return surface
23
+
24
+ def reconstruction(args, model, bounds=(-1.25, -1.25, -1.25, 1.25, 1.25, 1.25), octree_depth=7, num_chunks=10000):
25
+
26
+ surface = load_surface(args.pointcloud_path)
27
+ # old_surface = surface.clone()
28
+
29
+ # surface[0,:,0]*=-1
30
+ # surface[0,:,1]*=-1
31
+ surface[0,:,2]*=-1
32
+
33
+ # encoding
34
+ shape_embed, shape_latents = model.model.encode_shape_embed(surface, return_latents=True)
35
+ shape_zq, posterior = model.model.shape_model.encode_kl_embed(shape_latents)
36
+
37
+ # decoding
38
+ latents = model.model.shape_model.decode(shape_zq)
39
+ # geometric_func = partial(model.model.shape_model.query_geometry, latents=latents)
40
+
41
+ return 0
42
+
43
+ def load_model(ckpt_path="third_party/Michelangelo/checkpoints/aligned_shape_latents/shapevae-256.ckpt"):
44
+ import urllib.request
45
+ from pathlib import Path
46
+
47
+ # Automatically download the checkpoint file if it does not exist
48
+ if not os.path.exists(ckpt_path):
49
+ print(f"Downloading checkpoint to {ckpt_path}...")
50
+ os.makedirs(os.path.dirname(ckpt_path), exist_ok=True)
51
+
52
+ # HuggingFace直接下载链接
53
+ download_url = "https://huggingface.co/Maikou/Michelangelo/resolve/main/checkpoints/aligned_shape_latents/shapevae-256.ckpt"
54
+
55
+ try:
56
+ print("正在从HuggingFace下载模型文件...")
57
+ urllib.request.urlretrieve(download_url, ckpt_path)
58
+ print(f"✅ 模型文件下载完成: {ckpt_path}")
59
+ except Exception as e:
60
+ print(f"❌ 模型文件下载失败: {e}")
61
+ # If the download fails, fall back to a simplified dummy model
62
+ import torch.nn as nn
63
+ class DummyModel(nn.Module):
64
+ def __init__(self):
65
+ super().__init__()
66
+ self.dummy = nn.Linear(1, 1)
67
+ def forward(self, x):
68
+ return x
69
+ def encode(self, x):
70
+ return torch.randn(1, 768)  # return the expected feature dimension
71
+ print("⚠️ 使用简化模型替代")
72
+ return DummyModel()
73
+
74
+ model_config = OmegaConf.load("third_party/Michelangelo/configs/shapevae-256.yaml")
75
+ if hasattr(model_config, "model"):
76
+ model_config = model_config.model
77
+
78
+ model = instantiate_from_config(model_config, ckpt_path=ckpt_path)
79
+
80
+ return model
81
+ if __name__ == "__main__":
82
+ '''
83
+ 1. Reconstruct point cloud
84
+ 2. Image-conditioned generation
85
+ 3. Text-conditioned generation
86
+ '''
87
+ parser = argparse.ArgumentParser()
88
+ parser.add_argument("--config_path", type=str, required=True)
89
+ parser.add_argument("--ckpt_path", type=str, required=True)
90
+ parser.add_argument("--pointcloud_path", type=str, default='./example_data/surface.npz', help='Path to the input point cloud')
91
+ parser.add_argument("--image_path", type=str, help='Path to the input image')
92
+ parser.add_argument("--text", type=str, help='Input text within a format: A 3D model of motorcar; Porsche 911.')
93
+ parser.add_argument("--output_dir", type=str, default='./output')
94
+ parser.add_argument("-s", "--seed", type=int, default=0)
95
+ args = parser.parse_args()
96
+
97
+ print(f'-----------------------------------------------------------------------------')
98
+ print(f'>>> Output directory: {args.output_dir}')
99
+ print(f'-----------------------------------------------------------------------------')
100
+
101
+ reconstruction(args, load_model(args))
third_party/Michelangelo/inference.py ADDED
@@ -0,0 +1,181 @@
1
+ # -*- coding: utf-8 -*-
2
+ import os
3
+ import time
4
+ from collections import OrderedDict
5
+ from typing import Optional, List
6
+ import argparse
7
+ from functools import partial
8
+
9
+ from einops import repeat, rearrange
10
+ import numpy as np
11
+ from PIL import Image
12
+ import trimesh
13
+ import cv2
14
+
15
+ import torch
16
+ import pytorch_lightning as pl
17
+
18
+ from michelangelo.models.tsal.tsal_base import Latent2MeshOutput
19
+ from michelangelo.models.tsal.inference_utils import extract_geometry
20
+ from michelangelo.utils.misc import get_config_from_file, instantiate_from_config
21
+ from michelangelo.utils.visualizers.pythreejs_viewer import PyThreeJSViewer
22
+ from michelangelo.utils.visualizers import html_util
23
+
24
+ def load_model(args):
25
+
26
+ model_config = get_config_from_file(args.config_path)
27
+ if hasattr(model_config, "model"):
28
+ model_config = model_config.model
29
+
30
+ model = instantiate_from_config(model_config, ckpt_path=args.ckpt_path)
31
+ model = model.cuda()
32
+ model = model.eval()
33
+
34
+ return model
35
+
36
+ def load_surface(fp):
37
+
38
+ with np.load(fp) as input_pc:
39
+ surface = input_pc['points']
40
+ normal = input_pc['normals']
41
+
42
+ rng = np.random.default_rng()
43
+ ind = rng.choice(surface.shape[0], 4096, replace=False)
44
+ surface = torch.FloatTensor(surface[ind])
45
+ normal = torch.FloatTensor(normal[ind])
46
+
47
+ surface = torch.cat([surface, normal], dim=-1).unsqueeze(0).cuda()
48
+
49
+ return surface
50
+
51
+ def prepare_image(args, number_samples=2):
52
+
53
+ image = cv2.imread(f"{args.image_path}")
54
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
55
+
56
+ image_pt = torch.tensor(image).float()
57
+ image_pt = image_pt / 255 * 2 - 1
58
+ image_pt = rearrange(image_pt, "h w c -> c h w")
59
+
60
+ image_pt = repeat(image_pt, "c h w -> b c h w", b=number_samples)
61
+
62
+ return image_pt
63
+
64
+ def save_output(args, mesh_outputs):
65
+
66
+ os.makedirs(args.output_dir, exist_ok=True)
67
+ for i, mesh in enumerate(mesh_outputs):
68
+ mesh.mesh_f = mesh.mesh_f[:, ::-1]
69
+ mesh_output = trimesh.Trimesh(mesh.mesh_v, mesh.mesh_f)
70
+
71
+ name = str(i) + "_out_mesh.obj"
72
+ mesh_output.export(os.path.join(args.output_dir, name), include_normals=True)
73
+
74
+ print(f'-----------------------------------------------------------------------------')
75
+ print(f'>>> Finished and mesh saved in {args.output_dir}')
76
+ print(f'-----------------------------------------------------------------------------')
77
+
78
+ return 0
79
+
80
+ def reconstruction(args, model, bounds=(-1.25, -1.25, -1.25, 1.25, 1.25, 1.25), octree_depth=7, num_chunks=10000):
81
+
82
+ surface = load_surface(args.pointcloud_path)
83
+
84
+ # encoding
85
+ shape_embed, shape_latents = model.model.encode_shape_embed(surface, return_latents=True)
86
+ shape_zq, posterior = model.model.shape_model.encode_kl_embed(shape_latents)
87
+
88
+ # decoding
89
+ latents = model.model.shape_model.decode(shape_zq)
90
+ geometric_func = partial(model.model.shape_model.query_geometry, latents=latents)
91
+
92
+ # reconstruction
93
+ mesh_v_f, has_surface = extract_geometry(
94
+ geometric_func=geometric_func,
95
+ device=surface.device,
96
+ batch_size=surface.shape[0],
97
+ bounds=bounds,
98
+ octree_depth=octree_depth,
99
+ num_chunks=num_chunks,
100
+ )
101
+ recon_mesh = trimesh.Trimesh(mesh_v_f[0][0], mesh_v_f[0][1])
102
+
103
+ # save
104
+ os.makedirs(args.output_dir, exist_ok=True)
105
+ recon_mesh.export(os.path.join(args.output_dir, 'reconstruction.obj'))
106
+
107
+ print(f'-----------------------------------------------------------------------------')
108
+ print(f'>>> Finished and mesh saved in {os.path.join(args.output_dir, "reconstruction.obj")}')
109
+ print(f'-----------------------------------------------------------------------------')
110
+
111
+ return 0
112
+
113
+ def image2mesh(args, model, guidance_scale=7.5, box_v=1.1, octree_depth=7):
114
+
115
+ sample_inputs = {
116
+ "image": prepare_image(args)
117
+ }
118
+
119
+ mesh_outputs = model.sample(
120
+ sample_inputs,
121
+ sample_times=1,
122
+ guidance_scale=guidance_scale,
123
+ return_intermediates=False,
124
+ bounds=[-box_v, -box_v, -box_v, box_v, box_v, box_v],
125
+ octree_depth=octree_depth,
126
+ )[0]
127
+
128
+ save_output(args, mesh_outputs)
129
+
130
+ return 0
131
+
132
+ def text2mesh(args, model, num_samples=2, guidance_scale=7.5, box_v=1.1, octree_depth=7):
133
+
134
+ sample_inputs = {
135
+ "text": [args.text] * num_samples
136
+ }
137
+ mesh_outputs = model.sample(
138
+ sample_inputs,
139
+ sample_times=1,
140
+ guidance_scale=guidance_scale,
141
+ return_intermediates=False,
142
+ bounds=[-box_v, -box_v, -box_v, box_v, box_v, box_v],
143
+ octree_depth=octree_depth,
144
+ )[0]
145
+
146
+ save_output(args, mesh_outputs)
147
+
148
+ return 0
149
+
150
+ task_dick = {
151
+ 'reconstruction': reconstruction,
152
+ 'image2mesh': image2mesh,
153
+ 'text2mesh': text2mesh,
154
+ }
155
+
156
+ if __name__ == "__main__":
157
+ '''
158
+ 1. Reconstruct point cloud
159
+ 2. Image-conditioned generation
160
+ 3. Text-conditioned generation
161
+ '''
162
+ parser = argparse.ArgumentParser()
163
+ parser.add_argument("--task", type=str, choices=['reconstruction', 'image2mesh', 'text2mesh'], required=True)
164
+ parser.add_argument("--config_path", type=str, required=True)
165
+ parser.add_argument("--ckpt_path", type=str, required=True)
166
+ parser.add_argument("--pointcloud_path", type=str, default='./example_data/surface.npz', help='Path to the input point cloud')
167
+ parser.add_argument("--image_path", type=str, help='Path to the input image')
168
+ parser.add_argument("--text", type=str, help='Input text within a format: A 3D model of motorcar; Porsche 911.')
169
+ parser.add_argument("--output_dir", type=str, default='./output')
170
+ parser.add_argument("-s", "--seed", type=int, default=0)
171
+ args = parser.parse_args()
172
+
173
+ pl.seed_everything(args.seed)
174
+
175
+ print(f'-----------------------------------------------------------------------------')
176
+ print(f'>>> Running {args.task}')
177
+ args.output_dir = os.path.join(args.output_dir, args.task)
178
+ print(f'>>> Output directory: {args.output_dir}')
179
+ print(f'-----------------------------------------------------------------------------')
180
+
181
+ task_dict[args.task](args, load_model(args))
third_party/Michelangelo/michelangelo/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # -*- coding: utf-8 -*-
third_party/Michelangelo/michelangelo/data/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # -*- coding: utf-8 -*-
third_party/Michelangelo/michelangelo/data/templates.json ADDED
@@ -0,0 +1,69 @@
1
+ {
2
+ "shape": [
3
+ "a point cloud model of {}.",
4
+ "There is a {} in the scene.",
5
+ "There is the {} in the scene.",
6
+ "a photo of a {} in the scene.",
7
+ "a photo of the {} in the scene.",
8
+ "a photo of one {} in the scene.",
9
+ "itap of a {}.",
10
+ "itap of my {}.",
11
+ "itap of the {}.",
12
+ "a photo of a {}.",
13
+ "a photo of my {}.",
14
+ "a photo of the {}.",
15
+ "a photo of one {}.",
16
+ "a photo of many {}.",
17
+ "a good photo of a {}.",
18
+ "a good photo of the {}.",
19
+ "a bad photo of a {}.",
20
+ "a bad photo of the {}.",
21
+ "a photo of a nice {}.",
22
+ "a photo of the nice {}.",
23
+ "a photo of a cool {}.",
24
+ "a photo of the cool {}.",
25
+ "a photo of a weird {}.",
26
+ "a photo of the weird {}.",
27
+ "a photo of a small {}.",
28
+ "a photo of the small {}.",
29
+ "a photo of a large {}.",
30
+ "a photo of the large {}.",
31
+ "a photo of a clean {}.",
32
+ "a photo of the clean {}.",
33
+ "a photo of a dirty {}.",
34
+ "a photo of the dirty {}.",
35
+ "a bright photo of a {}.",
36
+ "a bright photo of the {}.",
37
+ "a dark photo of a {}.",
38
+ "a dark photo of the {}.",
39
+ "a photo of a hard to see {}.",
40
+ "a photo of the hard to see {}.",
41
+ "a low resolution photo of a {}.",
42
+ "a low resolution photo of the {}.",
43
+ "a cropped photo of a {}.",
44
+ "a cropped photo of the {}.",
45
+ "a close-up photo of a {}.",
46
+ "a close-up photo of the {}.",
47
+ "a jpeg corrupted photo of a {}.",
48
+ "a jpeg corrupted photo of the {}.",
49
+ "a blurry photo of a {}.",
50
+ "a blurry photo of the {}.",
51
+ "a pixelated photo of a {}.",
52
+ "a pixelated photo of the {}.",
53
+ "a black and white photo of the {}.",
54
+ "a black and white photo of a {}",
55
+ "a plastic {}.",
56
+ "the plastic {}.",
57
+ "a toy {}.",
58
+ "the toy {}.",
59
+ "a plushie {}.",
60
+ "the plushie {}.",
61
+ "a cartoon {}.",
62
+ "the cartoon {}.",
63
+ "an embroidered {}.",
64
+ "the embroidered {}.",
65
+ "a painting of the {}.",
66
+ "a painting of a {}."
67
+ ]
68
+
69
+ }
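
The templates above mirror CLIP's zero-shot prompt set; each "{}" placeholder is filled with a shape category before text encoding. A minimal sketch of consuming the file (the category label "chair" is illustrative):

import json
import random

with open("third_party/Michelangelo/michelangelo/data/templates.json") as f:
    templates = json.load(f)["shape"]

# Fill one template with a category label to build a CLIP text prompt.
prompt = random.choice(templates).format("chair")
print(prompt)  # e.g. "a photo of a chair."
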
third_party/Michelangelo/michelangelo/data/transforms.py ADDED
@@ -0,0 +1,407 @@
1
+ # -*- coding: utf-8 -*-
2
+ import os
3
+ import time
4
+ import numpy as np
5
+ import warnings
6
+ import random
7
+ from omegaconf.listconfig import ListConfig
8
+ from webdataset import pipelinefilter
9
+ import torch
10
+ import torchvision.transforms.functional as TVF
11
+ from torchvision.transforms import InterpolationMode
12
+ from torchvision.transforms.transforms import _interpolation_modes_from_int
13
+ from typing import Sequence
14
+
15
+ from third_party.Michelangelo.michelangelo.utils import instantiate_from_config
16
+
17
+
18
+ def _uid_buffer_pick(buf_dict, rng):
19
+ uid_keys = list(buf_dict.keys())
20
+ selected_uid = rng.choice(uid_keys)
21
+ buf = buf_dict[selected_uid]
22
+
23
+ k = rng.randint(0, len(buf) - 1)
24
+ sample = buf[k]
25
+ buf[k] = buf[-1]
26
+ buf.pop()
27
+
28
+ if len(buf) == 0:
29
+ del buf_dict[selected_uid]
30
+
31
+ return sample
32
+
33
+
34
+ def _add_to_buf_dict(buf_dict, sample):
35
+ key = sample["__key__"]
36
+ uid, uid_sample_id = key.split("_")
37
+ if uid not in buf_dict:
38
+ buf_dict[uid] = []
39
+ buf_dict[uid].append(sample)
40
+
41
+ return buf_dict
42
+
43
+
44
+ def _uid_shuffle(data, bufsize=1000, initial=100, rng=None, handler=None):
45
+ """Shuffle the data in the stream.
46
+
47
+ This uses a buffer of size `bufsize`. Shuffling at
48
+ startup is less random; this is traded off against
49
+ yielding samples quickly.
50
+
51
+ data: iterator
52
+ bufsize: buffer size for shuffling
53
+ returns: iterator
54
+ rng: either random module or random.Random instance
55
+
56
+ """
57
+ if rng is None:
58
+ rng = random.Random(int((os.getpid() + time.time()) * 1e9))
59
+ initial = min(initial, bufsize)
60
+ buf_dict = dict()
61
+ current_samples = 0
62
+ for sample in data:
63
+ _add_to_buf_dict(buf_dict, sample)
64
+ current_samples += 1
65
+
66
+ if current_samples < bufsize:
67
+ try:
68
+ _add_to_buf_dict(buf_dict, next(data)) # skipcq: PYL-R1708
69
+ current_samples += 1
70
+ except StopIteration:
71
+ pass
72
+
73
+ if current_samples >= initial:
74
+ current_samples -= 1
75
+ yield _uid_buffer_pick(buf_dict, rng)
76
+
77
+ while current_samples > 0:
78
+ current_samples -= 1
79
+ yield _uid_buffer_pick(buf_dict, rng)
80
+
81
+
82
+ uid_shuffle = pipelinefilter(_uid_shuffle)
83
+
84
+
85
+ class RandomSample(object):
86
+ def __init__(self,
87
+ num_volume_samples: int = 1024,
88
+ num_near_samples: int = 1024):
89
+
90
+ super().__init__()
91
+
92
+ self.num_volume_samples = num_volume_samples
93
+ self.num_near_samples = num_near_samples
94
+
95
+ def __call__(self, sample):
96
+ rng = np.random.default_rng()
97
+
98
+ # 1. sample surface input
99
+ total_surface = sample["surface"]
100
+ ind = rng.choice(total_surface.shape[0], replace=False)
101
+ surface = total_surface[ind]
102
+
103
+ # 2. sample volume/near geometric points
104
+ vol_points = sample["vol_points"]
105
+ vol_label = sample["vol_label"]
106
+ near_points = sample["near_points"]
107
+ near_label = sample["near_label"]
108
+
109
+ ind = rng.choice(vol_points.shape[0], self.num_volume_samples, replace=False)
110
+ vol_points = vol_points[ind]
111
+ vol_label = vol_label[ind]
112
+ vol_points_labels = np.concatenate([vol_points, vol_label[:, np.newaxis]], axis=1)
113
+
114
+ ind = rng.choice(near_points.shape[0], self.num_near_samples, replace=False)
115
+ near_points = near_points[ind]
116
+ near_label = near_label[ind]
117
+ near_points_labels = np.concatenate([near_points, near_label[:, np.newaxis]], axis=1)
118
+
119
+ # concat sampled volume and near points
120
+ geo_points = np.concatenate([vol_points_labels, near_points_labels], axis=0)
121
+
122
+ sample = {
123
+ "surface": surface,
124
+ "geo_points": geo_points
125
+ }
126
+
127
+ return sample
128
+
129
+
130
+ class SplitRandomSample(object):
131
+ def __init__(self,
132
+ use_surface_sample: bool = False,
133
+ num_surface_samples: int = 4096,
134
+ num_volume_samples: int = 1024,
135
+ num_near_samples: int = 1024):
136
+
137
+ super().__init__()
138
+
139
+ self.use_surface_sample = use_surface_sample
140
+ self.num_surface_samples = num_surface_samples
141
+ self.num_volume_samples = num_volume_samples
142
+ self.num_near_samples = num_near_samples
143
+
144
+ def __call__(self, sample):
145
+
146
+ rng = np.random.default_rng()
147
+
148
+ # 1. sample surface input
149
+ surface = sample["surface"]
150
+
151
+ if self.use_surface_sample:
152
+ replace = surface.shape[0] < self.num_surface_samples
153
+ ind = rng.choice(surface.shape[0], self.num_surface_samples, replace=replace)
154
+ surface = surface[ind]
155
+
156
+ # 2. sample volume/near geometric points
157
+ vol_points = sample["vol_points"]
158
+ vol_label = sample["vol_label"]
159
+ near_points = sample["near_points"]
160
+ near_label = sample["near_label"]
161
+
162
+ ind = rng.choice(vol_points.shape[0], self.num_volume_samples, replace=False)
163
+ vol_points = vol_points[ind]
164
+ vol_label = vol_label[ind]
165
+ vol_points_labels = np.concatenate([vol_points, vol_label[:, np.newaxis]], axis=1)
166
+
167
+ ind = rng.choice(near_points.shape[0], self.num_near_samples, replace=False)
168
+ near_points = near_points[ind]
169
+ near_label = near_label[ind]
170
+ near_points_labels = np.concatenate([near_points, near_label[:, np.newaxis]], axis=1)
171
+
172
+ # concat sampled volume and near points
173
+ geo_points = np.concatenate([vol_points_labels, near_points_labels], axis=0)
174
+
175
+ sample = {
176
+ "surface": surface,
177
+ "geo_points": geo_points
178
+ }
179
+
180
+ return sample
181
+
182
+
183
+ class FeatureSelection(object):
184
+
185
+ VALID_SURFACE_FEATURE_DIMS = {
186
+ "none": [0, 1, 2], # xyz
187
+ "watertight_normal": [0, 1, 2, 3, 4, 5], # xyz, normal
188
+ "normal": [0, 1, 2, 6, 7, 8]
189
+ }
190
+
191
+ def __init__(self, surface_feature_type: str):
192
+
193
+ self.surface_feature_type = surface_feature_type
194
+ self.surface_dims = self.VALID_SURFACE_FEATURE_DIMS[surface_feature_type]
195
+
196
+ def __call__(self, sample):
197
+ sample["surface"] = sample["surface"][:, self.surface_dims]
198
+ return sample
199
+
200
+
201
+ class AxisScaleTransform(object):
202
+ def __init__(self, interval=(0.75, 1.25), jitter=True, jitter_scale=0.005):
203
+ assert isinstance(interval, (tuple, list, ListConfig))
204
+ self.interval = interval
205
+ self.min_val = interval[0]
206
+ self.max_val = interval[1]
207
+ self.inter_size = interval[1] - interval[0]
208
+ self.jitter = jitter
209
+ self.jitter_scale = jitter_scale
210
+
211
+ def __call__(self, sample):
212
+
213
+ surface = sample["surface"][..., 0:3]
214
+ geo_points = sample["geo_points"][..., 0:3]
215
+
216
+ scaling = torch.rand(1, 3) * self.inter_size + self.min_val
217
+ # print(scaling)
218
+ surface = surface * scaling
219
+ geo_points = geo_points * scaling
220
+
221
+ scale = (1 / torch.abs(surface).max().item()) * 0.999999
222
+ surface *= scale
223
+ geo_points *= scale
224
+
225
+ if self.jitter:
226
+ surface += self.jitter_scale * torch.randn_like(surface)
227
+ surface.clamp_(min=-1.015, max=1.015)
228
+
229
+ sample["surface"][..., 0:3] = surface
230
+ sample["geo_points"][..., 0:3] = geo_points
231
+
232
+ return sample
233
+
234
+
235
+ class ToTensor(object):
236
+
237
+ def __init__(self, tensor_keys=("surface", "geo_points", "tex_points")):
238
+ self.tensor_keys = tensor_keys
239
+
240
+ def __call__(self, sample):
241
+ for key in self.tensor_keys:
242
+ if key not in sample:
243
+ continue
244
+
245
+ sample[key] = torch.tensor(sample[key], dtype=torch.float32)
246
+
247
+ return sample
248
+
249
+
250
+ class AxisScale(object):
251
+ def __init__(self, interval=(0.75, 1.25), jitter=True, jitter_scale=0.005):
252
+ assert isinstance(interval, (tuple, list, ListConfig))
253
+ self.interval = interval
254
+ self.jitter = jitter
255
+ self.jitter_scale = jitter_scale
256
+
257
+ def __call__(self, surface, *args):
258
+ scaling = torch.rand(1, 3) * (self.interval[1] - self.interval[0]) + self.interval[0]
259
+ # print(scaling)
260
+ surface = surface * scaling
261
+ scale = (1 / torch.abs(surface).max().item()) * 0.999999
262
+ surface *= scale
263
+
264
+ args_outputs = []
265
+ for _arg in args:
266
+ _arg = _arg * scaling * scale
267
+ args_outputs.append(_arg)
268
+
269
+ if self.jitter:
270
+ surface += self.jitter_scale * torch.randn_like(surface)
271
+ surface.clamp_(min=-1, max=1)
272
+
273
+ if len(args) == 0:
274
+ return surface
275
+ else:
276
+ return surface, *args_outputs
277
+
278
+
279
+ class RandomResize(torch.nn.Module):
280
+ """Apply randomly Resize with a given probability."""
281
+
282
+ def __init__(
283
+ self,
284
+ size,
285
+ resize_radio=(0.5, 1),
286
+ allow_resize_interpolations=(InterpolationMode.BICUBIC, InterpolationMode.BILINEAR, InterpolationMode.BILINEAR),
287
+ interpolation=InterpolationMode.BICUBIC,
288
+ max_size=None,
289
+ antialias=None,
290
+ ):
291
+ super().__init__()
292
+ if not isinstance(size, (int, Sequence)):
293
+ raise TypeError(f"Size should be int or sequence. Got {type(size)}")
294
+ if isinstance(size, Sequence) and len(size) not in (1, 2):
295
+ raise ValueError("If size is a sequence, it should have 1 or 2 values")
296
+
297
+ self.size = size
298
+ self.max_size = max_size
299
+ # Backward compatibility with integer value
300
+ if isinstance(interpolation, int):
301
+ warnings.warn(
302
+ "Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
303
+ "Please use InterpolationMode enum."
304
+ )
305
+ interpolation = _interpolation_modes_from_int(interpolation)
306
+
307
+ self.interpolation = interpolation
308
+ self.antialias = antialias
309
+
310
+ self.resize_radio = resize_radio
311
+ self.allow_resize_interpolations = allow_resize_interpolations
312
+
313
+ def random_resize_params(self):
314
+ radio = torch.rand(1) * (self.resize_radio[1] - self.resize_radio[0]) + self.resize_radio[0]
315
+
316
+ if isinstance(self.size, int):
317
+ size = int(self.size * radio)
318
+ elif isinstance(self.size, Sequence):
319
+ size = list(self.size)
320
+ size = (int(size[0] * radio), int(size[1] * radio))
321
+ else:
322
+ raise RuntimeError()
323
+
324
+ interpolation = self.allow_resize_interpolations[
325
+ torch.randint(low=0, high=len(self.allow_resize_interpolations), size=(1,))
326
+ ]
327
+ return size, interpolation
328
+
329
+ def forward(self, img):
330
+ size, interpolation = self.random_resize_params()
331
+ img = TVF.resize(img, size, interpolation, self.max_size, self.antialias)
332
+ img = TVF.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
333
+ return img
334
+
335
+ def __repr__(self) -> str:
336
+ detail = f"(size={self.size}, interpolation={self.interpolation.value},"
337
+ detail += f"max_size={self.max_size}, antialias={self.antialias}), resize_radio={self.resize_radio}"
338
+ return f"{self.__class__.__name__}{detail}"
339
+
340
+
341
+ class Compose(object):
342
+ """Composes several transforms together. This transform does not support torchscript.
343
+ Please, see the note below.
344
+
345
+ Args:
346
+ transforms (list of ``Transform`` objects): list of transforms to compose.
347
+
348
+ Example:
349
+ >>> transforms.Compose([
350
+ >>> transforms.CenterCrop(10),
351
+ >>> transforms.ToTensor(),
352
+ >>> ])
353
+
354
+ .. note::
355
+ In order to script the transformations, please use ``torch.nn.Sequential`` as below.
356
+
357
+ >>> transforms = torch.nn.Sequential(
358
+ >>> transforms.CenterCrop(10),
359
+ >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
360
+ >>> )
361
+ >>> scripted_transforms = torch.jit.script(transforms)
362
+
363
+ Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require
364
+ `lambda` functions or ``PIL.Image``.
365
+
366
+ """
367
+
368
+ def __init__(self, transforms):
369
+ self.transforms = transforms
370
+
371
+ def __call__(self, *args):
372
+ for t in self.transforms:
373
+ out = t(*args)
+ # re-wrap single return values so the next transform receives one argument
+ args = out if isinstance(out, tuple) else (out,)
374
+ return args[0] if len(args) == 1 else args
375
+
376
+ def __repr__(self):
377
+ format_string = self.__class__.__name__ + '('
378
+ for t in self.transforms:
379
+ format_string += '\n'
380
+ format_string += ' {0}'.format(t)
381
+ format_string += '\n)'
382
+ return format_string
383
+
384
+
385
+ def identity(*args, **kwargs):
386
+ if len(args) == 1:
387
+ return args[0]
388
+ else:
389
+ return args
390
+
391
+
392
+ def build_transforms(cfg):
393
+
394
+ if cfg is None:
395
+ return identity
396
+
397
+ transforms = []
398
+
399
+ for transform_name, cfg_instance in cfg.items():
400
+ transform_instance = instantiate_from_config(cfg_instance)
401
+ transforms.append(transform_instance)
402
+ print(f"Build transform: {transform_instance}")
403
+
404
+ transforms = Compose(transforms)
405
+
406
+ return transforms
407
+
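
A minimal sketch of chaining two of the transforms above on a synthetic sample. The 9-channel surface layout (xyz, watertight normal, normal) is assumed from FeatureSelection.VALID_SURFACE_FEATURE_DIMS, and the point counts are illustrative:

import numpy as np
import torch  # required by ToTensor

sample = {
    "surface": np.random.rand(4096, 9).astype(np.float32),
    "geo_points": np.random.rand(2048, 4).astype(np.float32),
}

select = FeatureSelection("normal")  # keep xyz plus the second normal triplet
to_tensor = ToTensor()

sample = to_tensor(select(sample))
print(sample["surface"].shape)  # torch.Size([4096, 6])
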
third_party/Michelangelo/michelangelo/data/utils.py ADDED
@@ -0,0 +1,59 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import torch
4
+ import numpy as np
5
+
6
+
7
+ def worker_init_fn(_):
8
+ worker_info = torch.utils.data.get_worker_info()
9
+ worker_id = worker_info.id
10
+
11
+ # dataset = worker_info.dataset
12
+ # split_size = dataset.num_records // worker_info.num_workers
13
+ # # reset num_records to the true number to retain reliable length information
14
+ # dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
15
+ # current_id = np.random.choice(len(np.random.get_state()[1]), 1)
16
+ # return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
17
+
18
+ return np.random.seed(np.random.get_state()[1][0] + worker_id)
19
+
20
+
21
+ def collation_fn(samples, combine_tensors=True, combine_scalars=True):
22
+ """
23
+
24
+ Args:
25
+ samples (list[dict]):
26
+ combine_tensors:
27
+ combine_scalars:
28
+
29
+ Returns:
30
+
31
+ """
32
+
33
+ result = {}
34
+
35
+ keys = samples[0].keys()
36
+
37
+ for key in keys:
38
+ result[key] = []
39
+
40
+ for sample in samples:
41
+ for key in keys:
42
+ val = sample[key]
43
+ result[key].append(val)
44
+
45
+ for key in keys:
46
+ val_list = result[key]
47
+ if isinstance(val_list[0], (int, float)):
48
+ if combine_scalars:
49
+ result[key] = np.array(result[key])
50
+
51
+ elif isinstance(val_list[0], torch.Tensor):
52
+ if combine_tensors:
53
+ result[key] = torch.stack(val_list)
54
+
55
+ elif isinstance(val_list[0], np.ndarray):
56
+ if combine_tensors:
57
+ result[key] = np.stack(val_list)
58
+
59
+ return result
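
collation_fn groups per-key values across samples and stacks them by type, which keeps mixed tensor/array/scalar batches intact. A minimal sketch on a hand-built batch:

import numpy as np
import torch

samples = [
    {"surface": torch.zeros(8, 3), "label": 1, "uv": np.zeros((8, 2))},
    {"surface": torch.ones(8, 3), "label": 0, "uv": np.ones((8, 2))},
]
batch = collation_fn(samples)
print(batch["surface"].shape)  # torch.Size([2, 8, 3]): tensors are stacked
print(batch["label"])          # array([1, 0]): scalars become one array
print(batch["uv"].shape)       # (2, 8, 2): ndarrays are stacked
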
third_party/Michelangelo/michelangelo/graphics/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # -*- coding: utf-8 -*-
third_party/Michelangelo/michelangelo/graphics/primitives/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from .volume import generate_dense_grid_points
4
+
5
+ from .mesh import (
6
+ MeshOutput,
7
+ save_obj,
8
+ savemeshtes2
9
+ )
third_party/Michelangelo/michelangelo/graphics/primitives/mesh.py ADDED
@@ -0,0 +1,114 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import os
4
+ import cv2
5
+ import numpy as np
6
+ import PIL.Image
7
+ from typing import Optional
8
+
9
+ import trimesh
10
+
11
+
12
+ def save_obj(pointnp_px3, facenp_fx3, fname):
13
+ fid = open(fname, "w")
14
+ write_str = ""
15
+ for pidx, p in enumerate(pointnp_px3):
16
+ pp = p
17
+ write_str += "v %f %f %f\n" % (pp[0], pp[1], pp[2])
18
+
19
+ for i, f in enumerate(facenp_fx3):
20
+ f1 = f + 1
21
+ write_str += "f %d %d %d\n" % (f1[0], f1[1], f1[2])
22
+ fid.write(write_str)
23
+ fid.close()
24
+ return
25
+
26
+
27
+ def savemeshtes2(pointnp_px3, tcoords_px2, facenp_fx3, facetex_fx3, tex_map, fname):
28
+ fol, na = os.path.split(fname)
29
+ na, _ = os.path.splitext(na)
30
+
31
+ matname = "%s/%s.mtl" % (fol, na)
32
+ fid = open(matname, "w")
33
+ fid.write("newmtl material_0\n")
34
+ fid.write("Kd 1 1 1\n")
35
+ fid.write("Ka 0 0 0\n")
36
+ fid.write("Ks 0.4 0.4 0.4\n")
37
+ fid.write("Ns 10\n")
38
+ fid.write("illum 2\n")
39
+ fid.write("map_Kd %s.png\n" % na)
40
+ fid.close()
41
+ ####
42
+
43
+ fid = open(fname, "w")
44
+ fid.write("mtllib %s.mtl\n" % na)
45
+
46
+ for pidx, p in enumerate(pointnp_px3):
47
+ pp = p
48
+ fid.write("v %f %f %f\n" % (pp[0], pp[1], pp[2]))
49
+
50
+ for pidx, p in enumerate(tcoords_px2):
51
+ pp = p
52
+ fid.write("vt %f %f\n" % (pp[0], pp[1]))
53
+
54
+ fid.write("usemtl material_0\n")
55
+ for i, f in enumerate(facenp_fx3):
56
+ f1 = f + 1
57
+ f2 = facetex_fx3[i] + 1
58
+ fid.write("f %d/%d %d/%d %d/%d\n" % (f1[0], f2[0], f1[1], f2[1], f1[2], f2[2]))
59
+ fid.close()
60
+
61
+ PIL.Image.fromarray(np.ascontiguousarray(tex_map), "RGB").save(
62
+ os.path.join(fol, "%s.png" % na))
63
+
64
+ return
65
+
66
+
67
+ class MeshOutput(object):
68
+
69
+ def __init__(self,
70
+ mesh_v: np.ndarray,
71
+ mesh_f: np.ndarray,
72
+ vertex_colors: Optional[np.ndarray] = None,
73
+ uvs: Optional[np.ndarray] = None,
74
+ mesh_tex_idx: Optional[np.ndarray] = None,
75
+ tex_map: Optional[np.ndarray] = None):
76
+
77
+ self.mesh_v = mesh_v
78
+ self.mesh_f = mesh_f
79
+ self.vertex_colors = vertex_colors
80
+ self.uvs = uvs
81
+ self.mesh_tex_idx = mesh_tex_idx
82
+ self.tex_map = tex_map
83
+
84
+ def contain_uv_texture(self):
85
+ return (self.uvs is not None) and (self.mesh_tex_idx is not None) and (self.tex_map is not None)
86
+
87
+ def contain_vertex_colors(self):
88
+ return self.vertex_colors is not None
89
+
90
+ def export(self, fname):
91
+
92
+ if self.contain_uv_texture():
93
+ savemeshtes2(
94
+ self.mesh_v,
95
+ self.uvs,
96
+ self.mesh_f,
97
+ self.mesh_tex_idx,
98
+ self.tex_map,
99
+ fname
100
+ )
101
+
102
+ elif self.contain_vertex_colors():
103
+ mesh_obj = trimesh.Trimesh(vertices=self.mesh_v, faces=self.mesh_f, vertex_colors=self.vertex_colors)
104
+ mesh_obj.export(fname)
105
+
106
+ else:
107
+ save_obj(
108
+ self.mesh_v,
109
+ self.mesh_f,
110
+ fname
111
+ )
112
+
113
+
114
+
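
MeshOutput.export dispatches on whichever attributes are present: UV texture first, then vertex colors, then the bare save_obj writer. A minimal sketch exporting a single untextured triangle (the output filename is illustrative):

import numpy as np

verts = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])

# No UVs or vertex colors, so export() falls back to save_obj.
MeshOutput(mesh_v=verts, mesh_f=faces).export("triangle.obj")
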
third_party/Michelangelo/michelangelo/graphics/primitives/volume.py ADDED
@@ -0,0 +1,21 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import numpy as np
4
+
5
+
6
+ def generate_dense_grid_points(bbox_min: np.ndarray,
7
+ bbox_max: np.ndarray,
8
+ octree_depth: int,
9
+ indexing: str = "ij"):
10
+ length = bbox_max - bbox_min
11
+ num_cells = np.exp2(octree_depth)
12
+ x = np.linspace(bbox_min[0], bbox_max[0], int(num_cells) + 1, dtype=np.float32)
13
+ y = np.linspace(bbox_min[1], bbox_max[1], int(num_cells) + 1, dtype=np.float32)
14
+ z = np.linspace(bbox_min[2], bbox_max[2], int(num_cells) + 1, dtype=np.float32)
15
+ [xs, ys, zs] = np.meshgrid(x, y, z, indexing=indexing)
16
+ xyz = np.stack((xs, ys, zs), axis=-1)
17
+ xyz = xyz.reshape(-1, 3)
18
+ grid_size = [int(num_cells) + 1, int(num_cells) + 1, int(num_cells) + 1]
19
+
20
+ return xyz, grid_size, length
21
+
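
generate_dense_grid_points builds a (2^octree_depth + 1)^3 lattice spanning the bounding box, typically used to query a shape decoder on a dense grid. A quick check at depth 3:

import numpy as np

xyz, grid_size, length = generate_dense_grid_points(
    bbox_min=np.array([-1.0, -1.0, -1.0]),
    bbox_max=np.array([1.0, 1.0, 1.0]),
    octree_depth=3,
)
print(xyz.shape)   # (729, 3): a 9 x 9 x 9 grid of query points
print(grid_size)   # [9, 9, 9]
print(length)      # [2. 2. 2.]
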
third_party/Michelangelo/michelangelo/models/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # -*- coding: utf-8 -*-
third_party/Michelangelo/michelangelo/models/asl_diffusion/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # -*- coding: utf-8 -*-
third_party/Michelangelo/michelangelo/models/asl_diffusion/asl_diffuser_pl_module.py ADDED
@@ -0,0 +1,482 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from omegaconf import DictConfig
4
+ from typing import List, Tuple, Dict, Optional, Union
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from torch.optim import lr_scheduler
10
+ import pytorch_lightning as pl
11
+ from pytorch_lightning.utilities import rank_zero_only
12
+
13
+ from einops import rearrange
14
+
15
+ from diffusers.schedulers import (
16
+ DDPMScheduler,
17
+ DDIMScheduler,
18
+ KarrasVeScheduler,
19
+ DPMSolverMultistepScheduler
20
+ )
21
+
22
+ from third_party.Michelangelo.michelangelo.utils import instantiate_from_config
23
+ from third_party.Michelangelo.michelangelo.models.tsal.tsal_base import AlignedShapeAsLatentPLModule
24
+ from third_party.Michelangelo.michelangelo.models.asl_diffusion.inference_utils import ddim_sample
25
+
26
+ SchedulerType = Union[DDIMScheduler, KarrasVeScheduler, DPMSolverMultistepScheduler]
27
+
28
+
29
+ def disabled_train(self, mode=True):
30
+ """Overwrite model.train with this function to make sure train/eval mode
31
+ does not change anymore."""
32
+ return self
33
+
34
+
35
+ class ASLDiffuser(pl.LightningModule):
36
+ first_stage_model: Optional[AlignedShapeAsLatentPLModule]
37
+ # cond_stage_model: Optional[Union[nn.Module, pl.LightningModule]]
38
+ model: nn.Module
39
+
40
+ def __init__(self, *,
41
+ first_stage_config,
42
+ denoiser_cfg,
43
+ scheduler_cfg,
44
+ optimizer_cfg,
45
+ loss_cfg,
46
+ first_stage_key: str = "surface",
47
+ cond_stage_key: str = "image",
48
+ cond_stage_trainable: bool = True,
49
+ scale_by_std: bool = False,
50
+ z_scale_factor: float = 1.0,
51
+ ckpt_path: Optional[str] = None,
52
+ ignore_keys: Union[Tuple[str], List[str]] = ()):
53
+
54
+ super().__init__()
55
+
56
+ self.first_stage_key = first_stage_key
57
+ self.cond_stage_key = cond_stage_key
58
+ self.cond_stage_trainable = cond_stage_trainable
59
+
60
+ # 1. initialize first stage.
61
+ # Note: the condition model contained in the first stage model.
62
+ self.first_stage_config = first_stage_config
63
+ self.first_stage_model = None
64
+ # self.instantiate_first_stage(first_stage_config)
65
+
66
+ # 2. initialize conditional stage
67
+ # self.instantiate_cond_stage(cond_stage_config)
68
+ self.cond_stage_model = {
69
+ "image": self.encode_image,
70
+ "image_unconditional_embedding": self.empty_img_cond,
71
+ "text": self.encode_text,
72
+ "text_unconditional_embedding": self.empty_text_cond,
73
+ "surface": self.encode_surface,
74
+ "surface_unconditional_embedding": self.empty_surface_cond,
75
+ }
76
+
77
+ # 3. diffusion model
78
+ self.model = instantiate_from_config(
79
+ denoiser_cfg, device=None, dtype=None
80
+ )
81
+
82
+ self.optimizer_cfg = optimizer_cfg
83
+
84
+ # 4. scheduling strategy
85
+ self.scheduler_cfg = scheduler_cfg
86
+
87
+ self.noise_scheduler: DDPMScheduler = instantiate_from_config(scheduler_cfg.noise)
88
+ self.denoise_scheduler: SchedulerType = instantiate_from_config(scheduler_cfg.denoise)
89
+
90
+ # 5. loss configures
91
+ self.loss_cfg = loss_cfg
92
+
93
+ self.scale_by_std = scale_by_std
94
+ if scale_by_std:
95
+ self.register_buffer("z_scale_factor", torch.tensor(z_scale_factor))
96
+ else:
97
+ self.z_scale_factor = z_scale_factor
98
+
99
+ self.ckpt_path = ckpt_path
100
+ if ckpt_path is not None:
101
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
102
+
103
+ def instantiate_first_stage(self, config):
104
+ model = instantiate_from_config(config)
105
+ self.first_stage_model = model.eval()
106
+ self.first_stage_model.train = disabled_train
107
+ for param in self.first_stage_model.parameters():
108
+ param.requires_grad = False
109
+
110
+ self.first_stage_model = self.first_stage_model.to(self.device)
111
+
112
+ # def instantiate_cond_stage(self, config):
113
+ # if not self.cond_stage_trainable:
114
+ # if config == "__is_first_stage__":
115
+ # print("Using first stage also as cond stage.")
116
+ # self.cond_stage_model = self.first_stage_model
117
+ # elif config == "__is_unconditional__":
118
+ # print(f"Training {self.__class__.__name__} as an unconditional model.")
119
+ # self.cond_stage_model = None
120
+ # # self.be_unconditional = True
121
+ # else:
122
+ # model = instantiate_from_config(config)
123
+ # self.cond_stage_model = model.eval()
124
+ # self.cond_stage_model.train = disabled_train
125
+ # for param in self.cond_stage_model.parameters():
126
+ # param.requires_grad = False
127
+ # else:
128
+ # assert config != "__is_first_stage__"
129
+ # assert config != "__is_unconditional__"
130
+ # model = instantiate_from_config(config)
131
+ # self.cond_stage_model = model
132
+
133
+ def init_from_ckpt(self, path, ignore_keys=()):
134
+ state_dict = torch.load(path, map_location="cpu")["state_dict"]
135
+
136
+ keys = list(state_dict.keys())
137
+ for k in keys:
138
+ for ik in ignore_keys:
139
+ if k.startswith(ik):
140
+ print("Deleting key {} from state_dict.".format(k))
141
+ del state_dict[k]
142
+
143
+ missing, unexpected = self.load_state_dict(state_dict, strict=False)
144
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
145
+ if len(missing) > 0:
146
+ print(f"Missing Keys: {missing}")
147
+ print(f"Unexpected Keys: {unexpected}")
148
+
149
+ @property
150
+ def zero_rank(self):
151
+ if self._trainer:
152
+ zero_rank = self.trainer.local_rank == 0
153
+ else:
154
+ zero_rank = True
155
+
156
+ return zero_rank
157
+
158
+ def configure_optimizers(self) -> Tuple[List, List]:
159
+
160
+ lr = self.learning_rate
161
+
162
+ trainable_parameters = list(self.model.parameters())
163
+ # if the conditional encoder is trainable
164
+
165
+ # if self.cond_stage_trainable:
166
+ # conditioner_params = [p for p in self.cond_stage_model.parameters() if p.requires_grad]
167
+ # trainable_parameters += conditioner_params
168
+ # print(f"number of trainable conditional parameters: {len(conditioner_params)}.")
169
+
170
+ if self.optimizer_cfg is None:
171
+ optimizers = [torch.optim.AdamW(trainable_parameters, lr=lr, betas=(0.9, 0.99), weight_decay=1e-3)]
172
+ schedulers = []
173
+ else:
174
+ optimizer = instantiate_from_config(self.optimizer_cfg.optimizer, params=trainable_parameters)
175
+ scheduler_func = instantiate_from_config(
176
+ self.optimizer_cfg.scheduler,
177
+ max_decay_steps=self.trainer.max_steps,
178
+ lr_max=lr
179
+ )
180
+ scheduler = {
181
+ "scheduler": lr_scheduler.LambdaLR(optimizer, lr_lambda=scheduler_func.schedule),
182
+ "interval": "step",
183
+ "frequency": 1
184
+ }
185
+ optimizers = [optimizer]
186
+ schedulers = [scheduler]
187
+
188
+ return optimizers, schedulers
189
+
190
+ @torch.no_grad()
191
+ def encode_text(self, text):
192
+
193
+ b = text.shape[0]
194
+ text_tokens = rearrange(text, "b t l -> (b t) l")
195
+ text_embed = self.first_stage_model.model.encode_text_embed(text_tokens)
196
+ text_embed = rearrange(text_embed, "(b t) d -> b t d", b=b)
197
+ text_embed = text_embed.mean(dim=1)
198
+ text_embed = text_embed / text_embed.norm(dim=-1, keepdim=True)
199
+
200
+ return text_embed
201
+
202
+ @torch.no_grad()
203
+ def encode_image(self, img):
204
+
205
+ return self.first_stage_model.model.encode_image_embed(img)
206
+
207
+ @torch.no_grad()
208
+ def encode_surface(self, surface):
209
+
210
+ return self.first_stage_model.model.encode_shape_embed(surface, return_latents=False)
211
+
212
+ @torch.no_grad()
213
+ def empty_text_cond(self, cond):
214
+
215
+ return torch.zeros_like(cond, device=cond.device)
216
+
217
+ @torch.no_grad()
218
+ def empty_img_cond(self, cond):
219
+
220
+ return torch.zeros_like(cond, device=cond.device)
221
+
222
+ @torch.no_grad()
223
+ def empty_surface_cond(self, cond):
224
+
225
+ return torch.zeros_like(cond, device=cond.device)
226
+
227
+ @torch.no_grad()
228
+ def encode_first_stage(self, surface: torch.FloatTensor, sample_posterior=True):
229
+
230
+ z_q = self.first_stage_model.encode(surface, sample_posterior)
231
+ z_q = self.z_scale_factor * z_q
232
+
233
+ return z_q
234
+
235
+ @torch.no_grad()
236
+ def decode_first_stage(self, z_q: torch.FloatTensor, **kwargs):
237
+
238
+ z_q = 1. / self.z_scale_factor * z_q
239
+ latents = self.first_stage_model.decode(z_q, **kwargs)
240
+ return latents
241
+
242
+ @rank_zero_only
243
+ @torch.no_grad()
244
+ def on_train_batch_start(self, batch, batch_idx):
245
+ # only for very first batch
246
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 \
247
+ and batch_idx == 0 and self.ckpt_path is None:
248
+ # set rescale weight to 1./std of encodings
249
+ print("### USING STD-RESCALING ###")
250
+
251
+ z_q = self.encode_first_stage(batch[self.first_stage_key])
252
+ z = z_q.detach()
253
+
254
+ del self.z_scale_factor
255
+ self.register_buffer("z_scale_factor", 1. / z.flatten().std())
256
+ print(f"setting self.z_scale_factor to {self.z_scale_factor}")
257
+
258
+ print("### USING STD-RESCALING ###")
259
+
260
+ def compute_loss(self, model_outputs, split):
261
+ """
262
+
263
+ Args:
264
+ model_outputs (dict):
265
+ - x_0:
266
+ - noise:
267
+ - noise_prior:
268
+ - noise_pred:
269
+ - noise_pred_prior:
270
+
271
+ split (str):
272
+
273
+ Returns:
274
+
275
+ """
276
+
277
+ pred = model_outputs["pred"]
278
+
279
+ if self.noise_scheduler.prediction_type == "epsilon":
280
+ target = model_outputs["noise"]
281
+ elif self.noise_scheduler.prediction_type == "sample":
282
+ target = model_outputs["x_0"]
283
+ else:
284
+ raise NotImplementedError(f"Prediction Type: {self.noise_scheduler.prediction_type} not yet supported.")
285
+
286
+ if self.loss_cfg.loss_type == "l1":
287
+ simple = F.l1_loss(pred, target, reduction="mean")
288
+ elif self.loss_cfg.loss_type in ["mse", "l2"]:
289
+ simple = F.mse_loss(pred, target, reduction="mean")
290
+ else:
291
+ raise NotImplementedError(f"Loss Type: {self.loss_cfg.loss_type} not yet supported.")
292
+
293
+ total_loss = simple
294
+
295
+ loss_dict = {
296
+ f"{split}/total_loss": total_loss.clone().detach(),
297
+ f"{split}/simple": simple.detach(),
298
+ }
299
+
300
+ return total_loss, loss_dict
301
+
302
+ def forward(self, batch):
303
+ """
304
+
305
+ Args:
306
+ batch:
307
+
308
+ Returns:
309
+
310
+ """
311
+
312
+ if self.first_stage_model is None:
313
+ self.instantiate_first_stage(self.first_stage_config)
314
+
315
+ latents = self.encode_first_stage(batch[self.first_stage_key])
316
+
317
+ # conditions = self.cond_stage_model.encode(batch[self.cond_stage_key])
318
+
319
+ conditions = self.cond_stage_model[self.cond_stage_key](batch[self.cond_stage_key]).unsqueeze(1)
320
+
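+ # classifier-free guidance training: randomly zero out ~10% of the condition embeddings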
321
+ mask = torch.rand((len(conditions), 1, 1), device=conditions.device, dtype=conditions.dtype) >= 0.1
322
+ conditions = conditions * mask.to(conditions)
323
+
324
+ # Sample noise that we'll add to the latents
325
+ # [batch_size, n_token, latent_dim]
326
+ noise = torch.randn_like(latents)
327
+ bs = latents.shape[0]
328
+ # Sample a random timestep for each motion
329
+ timesteps = torch.randint(
330
+ 0,
331
+ self.noise_scheduler.config.num_train_timesteps,
332
+ (bs,),
333
+ device=latents.device,
334
+ )
335
+ timesteps = timesteps.long()
336
+ # Add noise to the latents according to the noise magnitude at each timestep
337
+ noisy_z = self.noise_scheduler.add_noise(latents, noise, timesteps)
338
+
339
+ # diffusion model forward
340
+ noise_pred = self.model(noisy_z, timesteps, conditions)
341
+
342
+ diffusion_outputs = {
343
+ "x_0": noisy_z,
344
+ "noise": noise,
345
+ "pred": noise_pred
346
+ }
347
+
348
+ return diffusion_outputs
349
+
350
+ def training_step(self, batch: Dict[str, Union[torch.FloatTensor, List[str]]],
351
+ batch_idx: int, optimizer_idx: int = 0) -> torch.FloatTensor:
352
+ """
353
+
354
+ Args:
355
+ batch (dict): the batch sample, and it contains:
356
+ - surface (torch.FloatTensor):
357
+ - image (torch.FloatTensor): if provided, [bs, 3, h, w], item range [0, 1]
358
+ - depth (torch.FloatTensor): if provided, [bs, 1, h, w], item range [-1, 1]
359
+ - normal (torch.FloatTensor): if provided, [bs, 3, h, w], item range [-1, 1]
360
+ - text (list of str):
361
+
362
+ batch_idx (int):
363
+
364
+ optimizer_idx (int):
365
+
366
+ Returns:
367
+ loss (torch.FloatTensor):
368
+
369
+ """
370
+
371
+ diffusion_outputs = self(batch)
372
+
373
+ loss, loss_dict = self.compute_loss(diffusion_outputs, "train")
374
+ self.log_dict(loss_dict, prog_bar=True, logger=True, sync_dist=False, rank_zero_only=True)
375
+
376
+ return loss
377
+
378
+ def validation_step(self, batch: Dict[str, torch.FloatTensor],
379
+ batch_idx: int, optimizer_idx: int = 0) -> torch.FloatTensor:
380
+ """
381
+
382
+ Args:
383
+ batch (dict): the batch sample, and it contains:
384
+ - surface_pc (torch.FloatTensor): [n_pts, 4]
385
+ - surface_feats (torch.FloatTensor): [n_pts, c]
386
+ - text (list of str):
387
+
388
+ batch_idx (int):
389
+
390
+ optimizer_idx (int):
391
+
392
+ Returns:
393
+ loss (torch.FloatTensor):
394
+
395
+ """
396
+
397
+ diffusion_outputs = self(batch)
398
+
399
+ loss, loss_dict = self.compute_loss(diffusion_outputs, "val")
400
+ self.log_dict(loss_dict, prog_bar=True, logger=True, sync_dist=False, rank_zero_only=True)
401
+
402
+ return loss
403
+
404
+ @torch.no_grad()
405
+ def sample(self,
406
+ batch: Dict[str, Union[torch.FloatTensor, List[str]]],
407
+ sample_times: int = 1,
408
+ steps: Optional[int] = None,
409
+ guidance_scale: Optional[float] = None,
410
+ eta: float = 0.0,
411
+ return_intermediates: bool = False, **kwargs):
412
+
413
+ if self.first_stage_model is None:
414
+ self.instantiate_first_stage(self.first_stage_config)
415
+
416
+ if steps is None:
417
+ steps = self.scheduler_cfg.num_inference_steps
418
+
419
+ if guidance_scale is None:
420
+ guidance_scale = self.scheduler_cfg.guidance_scale
421
+ do_classifier_free_guidance = guidance_scale > 0
422
+
423
+ # conditional encode
424
+ xc = batch[self.cond_stage_key]
425
+ # cond = self.cond_stage_model[self.cond_stage_key](xc)
426
+ cond = self.cond_stage_model[self.cond_stage_key](xc).unsqueeze(1)
427
+
428
+ if do_classifier_free_guidance:
429
+ """
430
+ Note: There are two kinds of uncond for text.
431
+ 1: using "" as uncond text; (in SAL diffusion)
432
+ 2: zeros_like(cond) as uncond text; (in MDM)
433
+ """
434
+ # un_cond = self.cond_stage_model.unconditional_embedding(batch_size=len(xc))
435
+ un_cond = self.cond_stage_model[f"{self.cond_stage_key}_unconditional_embedding"](cond)
436
+ # un_cond = torch.zeros_like(cond, device=cond.device)
437
+ cond = torch.cat([un_cond, cond], dim=0)
438
+
439
+ outputs = []
440
+ latents = None
441
+
442
+ if not return_intermediates:
443
+ for _ in range(sample_times):
444
+ sample_loop = ddim_sample(
445
+ self.denoise_scheduler,
446
+ self.model,
447
+ shape=self.first_stage_model.latent_shape,
448
+ cond=cond,
449
+ steps=steps,
450
+ guidance_scale=guidance_scale,
451
+ do_classifier_free_guidance=do_classifier_free_guidance,
452
+ device=self.device,
453
+ eta=eta,
454
+ disable_prog=not self.zero_rank
455
+ )
456
+ for sample, t in sample_loop:
457
+ latents = sample
458
+ outputs.append(self.decode_first_stage(latents, **kwargs))
459
+ else:
460
+
461
+ sample_loop = ddim_sample(
462
+ self.denoise_scheduler,
463
+ self.model,
464
+ shape=self.first_stage_model.latent_shape,
465
+ cond=cond,
466
+ steps=steps,
467
+ guidance_scale=guidance_scale,
468
+ do_classifier_free_guidance=do_classifier_free_guidance,
469
+ device=self.device,
470
+ eta=eta,
471
+ disable_prog=not self.zero_rank
472
+ )
473
+
474
+ iter_size = steps // sample_times
475
+ i = 0
476
+ for sample, t in sample_loop:
477
+ latents = sample
478
+ if i % iter_size == 0 or i == steps - 1:
479
+ outputs.append(self.decode_first_stage(latents, **kwargs))
480
+ i += 1
481
+
482
+ return outputs
third_party/Michelangelo/michelangelo/models/asl_diffusion/asl_udt.py ADDED
@@ -0,0 +1,104 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from typing import Optional
6
+ from diffusers.models.embeddings import Timesteps
7
+ import math
8
+
9
+ from third_party.Michelangelo.michelangelo.models.modules.transformer_blocks import MLP
10
+ from third_party.Michelangelo.michelangelo.models.modules.diffusion_transformer import UNetDiffusionTransformer
11
+
12
+
13
+ class ConditionalASLUDTDenoiser(nn.Module):
14
+
15
+ def __init__(self, *,
16
+ device: Optional[torch.device],
17
+ dtype: Optional[torch.dtype],
18
+ input_channels: int,
19
+ output_channels: int,
20
+ n_ctx: int,
21
+ width: int,
22
+ layers: int,
23
+ heads: int,
24
+ context_dim: int,
25
+ context_ln: bool = True,
26
+ skip_ln: bool = False,
27
+ init_scale: float = 0.25,
28
+ flip_sin_to_cos: bool = False,
29
+ use_checkpoint: bool = False):
30
+ super().__init__()
31
+
32
+ self.use_checkpoint = use_checkpoint
33
+
34
+ init_scale = init_scale * math.sqrt(1.0 / width)
35
+
36
+ self.backbone = UNetDiffusionTransformer(
37
+ device=device,
38
+ dtype=dtype,
39
+ n_ctx=n_ctx,
40
+ width=width,
41
+ layers=layers,
42
+ heads=heads,
43
+ skip_ln=skip_ln,
44
+ init_scale=init_scale,
45
+ use_checkpoint=use_checkpoint
46
+ )
47
+ self.ln_post = nn.LayerNorm(width, device=device, dtype=dtype)
48
+ self.input_proj = nn.Linear(input_channels, width, device=device, dtype=dtype)
49
+ self.output_proj = nn.Linear(width, output_channels, device=device, dtype=dtype)
50
+
51
+ # timestep embedding
52
+ self.time_embed = Timesteps(width, flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=0)
53
+ self.time_proj = MLP(
54
+ device=device, dtype=dtype, width=width, init_scale=init_scale
55
+ )
56
+
62
+ if context_ln:
63
+ self.context_embed = nn.Sequential(
64
+ nn.LayerNorm(context_dim, device=device, dtype=dtype),
65
+ nn.Linear(context_dim, width, device=device, dtype=dtype),
66
+ )
67
+ else:
68
+ self.context_embed = nn.Linear(context_dim, width, device=device, dtype=dtype)
69
+
70
+ def forward(self,
71
+ model_input: torch.FloatTensor,
72
+ timestep: torch.LongTensor,
73
+ context: torch.FloatTensor):
74
+
75
+ r"""
76
+ Args:
77
+ model_input (torch.FloatTensor): [bs, n_data, c]
78
+ timestep (torch.LongTensor): [bs,]
79
+ context (torch.FloatTensor): [bs, context_tokens, c]
80
+
81
+ Returns:
82
+ sample (torch.FloatTensor): [bs, n_data, c]
83
+
84
+ """
85
+
86
+ _, n_data, _ = model_input.shape
87
+
88
+ # 1. time
89
+ t_emb = self.time_proj(self.time_embed(timestep)).unsqueeze(dim=1)
90
+
91
+ # 2. conditions projector
92
+ context = self.context_embed(context)
93
+
94
+ # 3. denoiser
95
+ x = self.input_proj(model_input)
96
+ x = torch.cat([t_emb, context, x], dim=1)
97
+ x = self.backbone(x)
98
+ x = self.ln_post(x)
99
+ x = x[:, -n_data:]
100
+ sample = self.output_proj(x)
101
+
102
+ return sample
103
+
104
+
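
A minimal smoke test for the denoiser above. The sizes are illustrative rather than a shipped configuration, and it assumes the repo's UNetDiffusionTransformer backbone handles the extra time/condition tokens prepended to the n_ctx latent tokens:

import torch

denoiser = ConditionalASLUDTDenoiser(
    device=None, dtype=None,
    input_channels=64, output_channels=64,
    n_ctx=256, width=768, layers=6, heads=12,
    context_dim=1024,
)
x = torch.randn(2, 256, 64)       # noisy latent tokens [bs, n_data, c]
t = torch.randint(0, 1000, (2,))  # one diffusion timestep per sample
ctx = torch.randn(2, 1, 1024)     # a single conditioning token per sample
print(denoiser(x, t, ctx).shape)  # expected: torch.Size([2, 256, 64])
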
third_party/Michelangelo/michelangelo/models/asl_diffusion/base.py ADDED
@@ -0,0 +1,13 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+
7
+ class BaseDenoiser(nn.Module):
8
+
9
+ def __init__(self):
10
+ super().__init__()
11
+
12
+ def forward(self, x, t, context):
13
+ raise NotImplementedError
third_party/Michelangelo/michelangelo/models/asl_diffusion/clip_asl_diffuser_pl_module.py ADDED
@@ -0,0 +1,393 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from omegaconf import DictConfig
4
+ from typing import List, Tuple, Dict, Optional, Union
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from torch.optim import lr_scheduler
10
+ import pytorch_lightning as pl
11
+ from pytorch_lightning.utilities import rank_zero_only
12
+
13
+ from diffusers.schedulers import (
14
+ DDPMScheduler,
15
+ DDIMScheduler,
16
+ KarrasVeScheduler,
17
+ DPMSolverMultistepScheduler
18
+ )
19
+
20
+ from third_party.Michelangelo.michelangelo.utils import instantiate_from_config
21
+ from third_party.Michelangelo.michelangelo.models.tsal.tsal_base import AlignedShapeAsLatentPLModule
22
+ from third_party.Michelangelo.michelangelo.models.asl_diffusion.inference_utils import ddim_sample
23
+
24
+ SchedulerType = Union[DDIMScheduler, KarrasVeScheduler, DPMSolverMultistepScheduler]
25
+
26
+
27
+ def disabled_train(self, mode=True):
28
+ """Overwrite model.train with this function to make sure train/eval mode
29
+ does not change anymore."""
30
+ return self
31
+
32
+
33
+ class ClipASLDiffuser(pl.LightningModule):
34
+ first_stage_model: Optional[AlignedShapeAsLatentPLModule]
35
+ cond_stage_model: Optional[Union[nn.Module, pl.LightningModule]]
36
+ model: nn.Module
37
+
38
+ def __init__(self, *,
39
+ first_stage_config,
40
+ cond_stage_config,
41
+ denoiser_cfg,
42
+ scheduler_cfg,
43
+ optimizer_cfg,
44
+ loss_cfg,
45
+ first_stage_key: str = "surface",
46
+ cond_stage_key: str = "image",
47
+ scale_by_std: bool = False,
48
+ z_scale_factor: float = 1.0,
49
+ ckpt_path: Optional[str] = None,
50
+ ignore_keys: Union[Tuple[str], List[str]] = ()):
51
+
52
+ super().__init__()
53
+
54
+ self.first_stage_key = first_stage_key
55
+ self.cond_stage_key = cond_stage_key
56
+
57
+ # 1. lazy initialize first stage
58
+ self.instantiate_first_stage(first_stage_config)
59
+
60
+ # 2. initialize conditional stage
61
+ self.instantiate_cond_stage(cond_stage_config)
62
+
63
+ # 3. diffusion model
64
+ self.model = instantiate_from_config(
65
+ denoiser_cfg, device=None, dtype=None
66
+ )
67
+
68
+ self.optimizer_cfg = optimizer_cfg
69
+
70
+ # 4. scheduling strategy
71
+ self.scheduler_cfg = scheduler_cfg
72
+
73
+ self.noise_scheduler: DDPMScheduler = instantiate_from_config(scheduler_cfg.noise)
74
+ self.denoise_scheduler: SchedulerType = instantiate_from_config(scheduler_cfg.denoise)
75
+
76
+ # 5. loss configures
77
+ self.loss_cfg = loss_cfg
78
+
79
+ self.scale_by_std = scale_by_std
80
+ if scale_by_std:
81
+ self.register_buffer("z_scale_factor", torch.tensor(z_scale_factor))
82
+ else:
83
+ self.z_scale_factor = z_scale_factor
84
+
85
+ self.ckpt_path = ckpt_path
86
+ if ckpt_path is not None:
87
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
88
+
89
+ def instantiate_non_trainable_model(self, config):
90
+ model = instantiate_from_config(config)
91
+ model = model.eval()
92
+ model.train = disabled_train
93
+ for param in model.parameters():
94
+ param.requires_grad = False
95
+
96
+ return model
97
+
98
+ def instantiate_first_stage(self, first_stage_config):
99
+ self.first_stage_model = self.instantiate_non_trainable_model(first_stage_config)
100
+ self.first_stage_model.set_shape_model_only()
101
+
102
+ def instantiate_cond_stage(self, cond_stage_config):
103
+ self.cond_stage_model = self.instantiate_non_trainable_model(cond_stage_config)
104
+
105
+ def init_from_ckpt(self, path, ignore_keys=()):
106
+ state_dict = torch.load(path, map_location="cpu")["state_dict"]
107
+
108
+ keys = list(state_dict.keys())
109
+ for k in keys:
110
+ for ik in ignore_keys:
111
+ if k.startswith(ik):
112
+ print("Deleting key {} from state_dict.".format(k))
113
+ del state_dict[k]
114
+
115
+ missing, unexpected = self.load_state_dict(state_dict, strict=False)
116
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
117
+ if len(missing) > 0:
118
+ print(f"Missing Keys: {missing}")
119
+ print(f"Unexpected Keys: {unexpected}")
120
+
121
+ @property
122
+ def zero_rank(self):
123
+ if self._trainer:
124
+ zero_rank = self.trainer.local_rank == 0
125
+ else:
126
+ zero_rank = True
127
+
128
+ return zero_rank
129
+
130
+ def configure_optimizers(self) -> Tuple[List, List]:
131
+
132
+ lr = self.learning_rate
133
+
134
+ trainable_parameters = list(self.model.parameters())
135
+ if self.optimizer_cfg is None:
136
+ optimizers = [torch.optim.AdamW(trainable_parameters, lr=lr, betas=(0.9, 0.99), weight_decay=1e-3)]
137
+ schedulers = []
138
+ else:
139
+ optimizer = instantiate_from_config(self.optimizer_cfg.optimizer, params=trainable_parameters)
140
+ scheduler_func = instantiate_from_config(
141
+ self.optimizer_cfg.scheduler,
142
+ max_decay_steps=self.trainer.max_steps,
143
+ lr_max=lr
144
+ )
145
+ scheduler = {
146
+ "scheduler": lr_scheduler.LambdaLR(optimizer, lr_lambda=scheduler_func.schedule),
147
+ "interval": "step",
148
+ "frequency": 1
149
+ }
150
+ optimizers = [optimizer]
151
+ schedulers = [scheduler]
152
+
153
+ return optimizers, schedulers
154
+
155
+ @torch.no_grad()
156
+ def encode_first_stage(self, surface: torch.FloatTensor, sample_posterior=True):
157
+
158
+ z_q = self.first_stage_model.encode(surface, sample_posterior)
159
+ z_q = self.z_scale_factor * z_q
160
+
161
+ return z_q
162
+
163
+ @torch.no_grad()
164
+ def decode_first_stage(self, z_q: torch.FloatTensor, **kwargs):
165
+
166
+ z_q = 1. / self.z_scale_factor * z_q
167
+ latents = self.first_stage_model.decode(z_q, **kwargs)
168
+ return latents
169
+
170
+ @rank_zero_only
171
+ @torch.no_grad()
172
+ def on_train_batch_start(self, batch, batch_idx):
173
+ # only for very first batch
174
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 \
175
+ and batch_idx == 0 and self.ckpt_path is None:
176
+ # set rescale weight to 1./std of encodings
177
+ print("### USING STD-RESCALING ###")
178
+
179
+ z_q = self.encode_first_stage(batch[self.first_stage_key])
180
+ z = z_q.detach()
181
+
182
+ del self.z_scale_factor
183
+ self.register_buffer("z_scale_factor", 1. / z.flatten().std())
184
+ print(f"setting self.z_scale_factor to {self.z_scale_factor}")
185
+
186
+ print("### USING STD-RESCALING ###")
187
+
188
+ def compute_loss(self, model_outputs, split):
189
+ """
190
+
191
+ Args:
192
+ model_outputs (dict):
193
+ - x_0:
194
+ - noise:
195
+ - noise_prior:
196
+ - noise_pred:
197
+ - noise_pred_prior:
198
+
199
+ split (str):
200
+
201
+ Returns:
202
+
203
+ """
204
+
205
+ pred = model_outputs["pred"]
206
+
207
+ if self.noise_scheduler.prediction_type == "epsilon":
208
+ target = model_outputs["noise"]
209
+ elif self.noise_scheduler.prediction_type == "sample":
210
+ target = model_outputs["x_0"]
211
+ else:
212
+ raise NotImplementedError(f"Prediction Type: {self.noise_scheduler.prediction_type} not yet supported.")
213
+
214
+ if self.loss_cfg.loss_type == "l1":
215
+ simple = F.l1_loss(pred, target, reduction="mean")
216
+ elif self.loss_cfg.loss_type in ["mse", "l2"]:
217
+ simple = F.mse_loss(pred, target, reduction="mean")
218
+ else:
219
+ raise NotImplementedError(f"Loss Type: {self.loss_cfg.loss_type} not yet supported.")
220
+
221
+ total_loss = simple
222
+
223
+ loss_dict = {
224
+ f"{split}/total_loss": total_loss.clone().detach(),
225
+ f"{split}/simple": simple.detach(),
226
+ }
227
+
228
+ return total_loss, loss_dict
229
+
230
+ def forward(self, batch):
231
+ """
232
+
233
+ Args:
234
+ batch:
235
+
236
+ Returns:
237
+
238
+ """
239
+
240
+ latents = self.encode_first_stage(batch[self.first_stage_key])
241
+ conditions = self.cond_stage_model.encode(batch[self.cond_stage_key])
242
+
243
+ # Sample noise that we'll add to the latents
244
+ # [batch_size, n_token, latent_dim]
245
+ noise = torch.randn_like(latents)
246
+ bs = latents.shape[0]
247
+ # Sample a random timestep for each motion
248
+ timesteps = torch.randint(
249
+ 0,
250
+ self.noise_scheduler.config.num_train_timesteps,
251
+ (bs,),
252
+ device=latents.device,
253
+ )
254
+ timesteps = timesteps.long()
255
+ # Add noise to the latents according to the noise magnitude at each timestep
256
+ noisy_z = self.noise_scheduler.add_noise(latents, noise, timesteps)
257
+
258
+ # diffusion model forward
259
+ noise_pred = self.model(noisy_z, timesteps, conditions)
260
+
261
+ diffusion_outputs = {
262
+ "x_0": noisy_z,
263
+ "noise": noise,
264
+ "pred": noise_pred
265
+ }
266
+
267
+ return diffusion_outputs
268
+
269
+ def training_step(self, batch: Dict[str, Union[torch.FloatTensor, List[str]]],
270
+ batch_idx: int, optimizer_idx: int = 0) -> torch.FloatTensor:
271
+ """
272
+
273
+ Args:
274
+ batch (dict): the batch sample, and it contains:
275
+ - surface (torch.FloatTensor):
276
+ - image (torch.FloatTensor): if provided, [bs, 3, h, w], item range [0, 1]
277
+ - depth (torch.FloatTensor): if provided, [bs, 1, h, w], item range [-1, 1]
278
+ - normal (torch.FloatTensor): if provided, [bs, 3, h, w], item range [-1, 1]
279
+ - text (list of str):
280
+
281
+ batch_idx (int):
282
+
283
+ optimizer_idx (int):
284
+
285
+ Returns:
286
+ loss (torch.FloatTensor):
287
+
288
+ """
289
+
290
+ diffusion_outputs = self(batch)
291
+
292
+ loss, loss_dict = self.compute_loss(diffusion_outputs, "train")
293
+ self.log_dict(loss_dict, prog_bar=True, logger=True, sync_dist=False, rank_zero_only=True)
294
+
295
+ return loss
296
+
297
+ def validation_step(self, batch: Dict[str, torch.FloatTensor],
298
+ batch_idx: int, optimizer_idx: int = 0) -> torch.FloatTensor:
299
+ """
300
+
301
+ Args:
302
+ batch (dict): the batch sample, and it contains:
303
+ - surface_pc (torch.FloatTensor): [n_pts, 4]
304
+ - surface_feats (torch.FloatTensor): [n_pts, c]
305
+ - text (list of str):
306
+
307
+ batch_idx (int):
308
+
309
+ optimizer_idx (int):
310
+
311
+ Returns:
312
+ loss (torch.FloatTensor):
313
+
314
+ """
315
+
316
+ diffusion_outputs = self(batch)
317
+
318
+ loss, loss_dict = self.compute_loss(diffusion_outputs, "val")
319
+ self.log_dict(loss_dict, prog_bar=True, logger=True, sync_dist=False, rank_zero_only=True)
320
+
321
+ return loss
322
+
323
+ @torch.no_grad()
324
+ def sample(self,
325
+ batch: Dict[str, Union[torch.FloatTensor, List[str]]],
326
+ sample_times: int = 1,
327
+ steps: Optional[int] = None,
328
+ guidance_scale: Optional[float] = None,
329
+ eta: float = 0.0,
330
+ return_intermediates: bool = False, **kwargs):
331
+
332
+ if steps is None:
333
+ steps = self.scheduler_cfg.num_inference_steps
334
+
335
+ if guidance_scale is None:
336
+ guidance_scale = self.scheduler_cfg.guidance_scale
337
+ do_classifier_free_guidance = guidance_scale > 0
338
+
339
+ # conditional encode
340
+ xc = batch[self.cond_stage_key]
341
+
342
+ # print(self.first_stage_model.device, self.cond_stage_model.device, self.device)
343
+
344
+ cond = self.cond_stage_model(xc)
345
+
346
+ if do_classifier_free_guidance:
347
+ un_cond = self.cond_stage_model.unconditional_embedding(batch_size=len(xc))
348
+ cond = torch.cat([un_cond, cond], dim=0)
349
+
350
+ outputs = []
351
+ latents = None
352
+
353
+ if not return_intermediates:
354
+ for _ in range(sample_times):
355
+ sample_loop = ddim_sample(
356
+ self.denoise_scheduler,
357
+ self.model,
358
+ shape=self.first_stage_model.latent_shape,
359
+ cond=cond,
360
+ steps=steps,
361
+ guidance_scale=guidance_scale,
362
+ do_classifier_free_guidance=do_classifier_free_guidance,
363
+ device=self.device,
364
+ eta=eta,
365
+ disable_prog=not self.zero_rank
366
+ )
367
+ for sample, t in sample_loop:
368
+ latents = sample
369
+ outputs.append(self.decode_first_stage(latents, **kwargs))
370
+ else:
371
+
372
+ sample_loop = ddim_sample(
373
+ self.denoise_scheduler,
374
+ self.model,
375
+ shape=self.first_stage_model.latent_shape,
376
+ cond=cond,
377
+ steps=steps,
378
+ guidance_scale=guidance_scale,
379
+ do_classifier_free_guidance=do_classifier_free_guidance,
380
+ device=self.device,
381
+ eta=eta,
382
+ disable_prog=not self.zero_rank
383
+ )
384
+
385
+ iter_size = steps // sample_times
386
+ i = 0
387
+ for sample, t in sample_loop:
388
+ latents = sample
389
+ if i % iter_size == 0 or i == steps - 1:
390
+ outputs.append(self.decode_first_stage(latents, **kwargs))
391
+ i += 1
392
+
393
+ return outputs
third_party/Michelangelo/michelangelo/models/asl_diffusion/inference_utils.py ADDED
@@ -0,0 +1,80 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import torch
4
+ from tqdm import tqdm
5
+ from typing import Tuple, List, Union, Optional
6
+ from diffusers.schedulers import DDIMScheduler
7
+
8
+
9
+ __all__ = ["ddim_sample"]
10
+
11
+
12
+ def ddim_sample(ddim_scheduler: DDIMScheduler,
13
+ diffusion_model: torch.nn.Module,
14
+ shape: Union[List[int], Tuple[int]],
15
+ cond: torch.FloatTensor,
16
+ steps: int,
17
+ eta: float = 0.0,
18
+ guidance_scale: float = 3.0,
19
+ do_classifier_free_guidance: bool = True,
20
+ generator: Optional[torch.Generator] = None,
21
+ device: torch.device = "cuda:0",
22
+ disable_prog: bool = True):
23
+
24
+ assert steps > 0, f"steps must be > 0, got {steps}."
25
+
26
+ # init latents
27
+ bsz = cond.shape[0]
28
+ if do_classifier_free_guidance:
29
+ bsz = bsz // 2
30
+
31
+ latents = torch.randn(
32
+ (bsz, *shape),
33
+ generator=generator,
34
+ device=cond.device,
35
+ dtype=cond.dtype,
36
+ )
37
+ # scale the initial noise by the standard deviation required by the scheduler
38
+ latents = latents * ddim_scheduler.init_noise_sigma
39
+ # set timesteps
40
+ ddim_scheduler.set_timesteps(steps)
41
+ timesteps = ddim_scheduler.timesteps.to(device)
42
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
43
+ # eta (η) is only used with the DDIMScheduler, and between [0, 1]
44
+ extra_step_kwargs = {
45
+ "eta": eta,
46
+ "generator": generator
47
+ }
48
+
49
+ # reverse
50
+ for i, t in enumerate(tqdm(timesteps, disable=disable_prog, desc="DDIM Sampling:", leave=False)):
51
+ # expand the latents if we are doing classifier free guidance
52
+ latent_model_input = (
53
+ torch.cat([latents] * 2)
54
+ if do_classifier_free_guidance
55
+ else latents
56
+ )
57
+ # latent_model_input = scheduler.scale_model_input(latent_model_input, t)
58
+ # predict the noise residual
59
+ timestep_tensor = torch.tensor([t], dtype=torch.long, device=device)
60
+ timestep_tensor = timestep_tensor.expand(latent_model_input.shape[0])
61
+ noise_pred = diffusion_model.forward(latent_model_input, timestep_tensor, cond)
62
+
63
+ # perform guidance
64
+ if do_classifier_free_guidance:
65
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
66
+ noise_pred = noise_pred_uncond + guidance_scale * (
67
+ noise_pred_text - noise_pred_uncond
68
+ )
69
+ # text_embeddings_for_guidance = encoder_hidden_states.chunk(
70
+ # 2)[1] if do_classifier_free_guidance else encoder_hidden_states
71
+ # compute the previous noisy sample x_t -> x_t-1
72
+ latents = ddim_scheduler.step(
73
+ noise_pred, t, latents, **extra_step_kwargs
74
+ ).prev_sample
75
+
76
+ yield latents, t
77
+
78
+
79
+ def karra_sample():
80
+ pass
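For orientation, a minimal sketch of how `ddim_sample` is typically driven. The generator yields every intermediate latent, so callers drain it and keep the last one. The denoiser, conditioning shape, and latent shape below are stand-ins for illustration, not values from this repo:

```python
import torch
from diffusers.schedulers import DDIMScheduler


class DummyDenoiser(torch.nn.Module):
    """Stand-in noise predictor mapping (latents, timesteps, cond) -> noise."""
    def forward(self, latents, timesteps, cond):
        return torch.zeros_like(latents)


scheduler = DDIMScheduler(num_train_timesteps=1000)
cond = torch.randn(2, 77, 768)  # doubled batch: [uncond; cond] halves for CFG
latents = None
for sample, t in ddim_sample(scheduler, DummyDenoiser(), shape=[256, 64],
                             cond=cond, steps=50, guidance_scale=7.5,
                             device="cpu", disable_prog=True):
    latents = sample  # keep only the final denoised latent
```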
third_party/Michelangelo/michelangelo/models/conditional_encoders/__init__.py ADDED
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from .clip import CLIPEncoder
third_party/Michelangelo/michelangelo/models/conditional_encoders/clip.py ADDED
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import torch
+import numpy as np
+from PIL import Image
+from dataclasses import dataclass
+from torchvision.transforms import Normalize
+from transformers import CLIPModel, CLIPTokenizer
+from transformers.utils import ModelOutput
+from typing import Iterable, Optional, Union, List
+
+
+ImageType = Union[np.ndarray, torch.Tensor, Image.Image]
+
+
+@dataclass
+class CLIPEmbedOutput(ModelOutput):
+    last_hidden_state: torch.FloatTensor = None
+    pooler_output: torch.FloatTensor = None
+    embeds: torch.FloatTensor = None
+
+
+class CLIPEncoder(torch.nn.Module):
+
+    def __init__(self, model_path="openai/clip-vit-base-patch32"):
+        super().__init__()
+
+        # Load the CLIP model and tokenizer
+        self.model: CLIPModel = CLIPModel.from_pretrained(model_path)
+        self.tokenizer = CLIPTokenizer.from_pretrained(model_path)
+        self.image_preprocess = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+        # freeze the encoder; eval() propagates the flag to submodules,
+        # unlike the original `self.model.training = False`
+        self.model.eval()
+        for p in self.model.parameters():
+            p.requires_grad = False
+
+    @torch.no_grad()
+    def encode_image(self, images: Iterable[Optional[ImageType]]):
+        pixel_values = self.image_preprocess(images)
+
+        vision_outputs = self.model.vision_model(pixel_values=pixel_values)
+
+        pooler_output = vision_outputs[1]  # pooled_output
+        image_features = self.model.visual_projection(pooler_output)
+
+        visual_embeds = CLIPEmbedOutput(
+            last_hidden_state=vision_outputs.last_hidden_state,
+            pooler_output=pooler_output,
+            embeds=image_features
+        )
+
+        return visual_embeds
+
+    @torch.no_grad()
+    def encode_text(self, texts: List[str]):
+        text_inputs = self.tokenizer(texts, padding=True, return_tensors="pt")
+
+        # the tokenizer returns a BatchEncoding, so pass the token ids explicitly
+        # (the original passed the whole encoding as input_ids)
+        text_outputs = self.model.text_model(input_ids=text_inputs.input_ids,
+                                             attention_mask=text_inputs.attention_mask)
+
+        pooler_output = text_outputs[1]  # pooled_output
+        text_features = self.model.text_projection(pooler_output)
+
+        text_embeds = CLIPEmbedOutput(
+            last_hidden_state=text_outputs.last_hidden_state,
+            pooler_output=pooler_output,
+            embeds=text_features
+        )
+
+        return text_embeds
+
+    def forward(self,
+                images: Iterable[Optional[ImageType]],
+                texts: List[str]):
+
+        visual_embeds = self.encode_image(images)
+        text_embeds = self.encode_text(texts)
+
+        return visual_embeds, text_embeds
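A quick usage sketch for `CLIPEncoder` (the batch shape is an assumption; `encode_image` applies an ImageNet-style `Normalize`, so it expects an already-resized float tensor):

```python
import torch

encoder = CLIPEncoder()  # defaults to openai/clip-vit-base-patch32
images = torch.rand(2, 3, 224, 224)  # assumed: RGB batch already resized to 224x224
visual, text = encoder(images, ["a chair", "a horse"])
print(visual.embeds.shape)  # torch.Size([2, 512]) -- projected image embeddings
print(text.embeds.shape)    # torch.Size([2, 512]) -- projected text embeddings
```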
third_party/Michelangelo/michelangelo/models/conditional_encoders/encoder_factory.py ADDED
@@ -0,0 +1,562 @@
+# -*- coding: utf-8 -*-
+import os
+
+import torch
+import torch.nn as nn
+from torchvision import transforms
+from transformers import CLIPModel, CLIPTokenizer
+from collections import OrderedDict
+
+# OpenAI CLIP package (https://github.com/openai/CLIP), which provides clip.load();
+# MoECLIPImageEncoder below depends on it, but the import was missing in the original file.
+import clip
+
+from third_party.Michelangelo.michelangelo.data.transforms import RandomResize
+
+
+class AbstractEncoder(nn.Module):
+    embedding_dim: int
+
+    def __init__(self):
+        super().__init__()
+
+    def encode(self, *args, **kwargs):
+        raise NotImplementedError
+
+
+class ClassEmbedder(nn.Module):
+    def __init__(self, embed_dim, n_classes=1000, key="class"):
+        super().__init__()
+        self.key = key
+        self.embedding = nn.Embedding(n_classes, embed_dim)
+
+    def forward(self, batch, key=None):
+        if key is None:
+            key = self.key
+        # this is for use in crossattn
+        c = batch[key][:, None]
+        c = self.embedding(c)
+        return c
+
+
+class FrozenCLIPTextEmbedder(AbstractEncoder):
+    """Uses the CLIP transformer encoder for text (from Hugging Face)."""
+
+    def __init__(
+        self,
+        version="openai/clip-vit-large-patch14",
+        tokenizer_version=None,
+        device="cuda",
+        max_length=77,
+        zero_embedding_radio: float = 0.1,
+    ):
+        super().__init__()
+        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_version or version)
+
+        self.device = device
+        self.max_length = max_length
+        self.zero_embedding_radio = zero_embedding_radio
+
+        self.clip_dict = OrderedDict()
+        self.clip_name = os.path.split(version)[-1]
+
+        transformer = CLIPModel.from_pretrained(version).text_model
+
+        for param in transformer.parameters():
+            param.requires_grad = False
+        self.clip_dict[self.clip_name] = transformer
+
+        self._move_flag = False
+
+    @property
+    def clip(self):
+        return self.clip_dict[self.clip_name]
+
+    def move(self):
+        if self._move_flag:
+            return
+
+        self.clip_dict[self.clip_name] = self.clip_dict[self.clip_name].to(self.device)
+        self._move_flag = True
+
+    def unconditional_embedding(self, batch_size):
+        empty_text = [""] * batch_size
+        empty_z = self.forward(empty_text)
+        return empty_z
+
+    def forward(self, text):
+        self.move()
+
+        batch_encoding = self.tokenizer(
+            text,
+            truncation=True,
+            max_length=self.max_length,
+            return_length=True,
+            return_overflowing_tokens=False,
+            padding="max_length",
+            return_tensors="pt",
+        )
+
+        tokens = batch_encoding["input_ids"].to(self.device)
+        outputs = self.clip(input_ids=tokens)
+
+        z = outputs.last_hidden_state
+        return z
+
+    def encode(self, text):
+        # randomly drop whole captions to support classifier-free guidance training
+        batch_size = len(text)
+        batch_mask = torch.rand((batch_size,))
+        for i in range(batch_size):
+            if batch_mask[i] < self.zero_embedding_radio:
+                text[i] = ""
+
+        return self(text)
+
+
+class FrozenAlignedCLIPTextEmbedder(AbstractEncoder):
+    """Uses the CLIP transformer encoder for text (from Hugging Face).
+
+    NOTE: currently identical to FrozenCLIPTextEmbedder above.
+    """
+
+    def __init__(
+        self,
+        version="openai/clip-vit-large-patch14",
+        tokenizer_version=None,
+        device="cuda",
+        max_length=77,
+        zero_embedding_radio: float = 0.1,
+    ):
+        super().__init__()
+        self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_version or version)
+
+        self.device = device
+        self.max_length = max_length
+        self.zero_embedding_radio = zero_embedding_radio
+
+        self.clip_dict = OrderedDict()
+        self.clip_name = os.path.split(version)[-1]
+
+        transformer = CLIPModel.from_pretrained(version).text_model
+
+        for param in transformer.parameters():
+            param.requires_grad = False
+        self.clip_dict[self.clip_name] = transformer
+
+        self._move_flag = False
+
+    @property
+    def clip(self):
+        return self.clip_dict[self.clip_name]
+
+    def move(self):
+        if self._move_flag:
+            return
+
+        self.clip_dict[self.clip_name] = self.clip_dict[self.clip_name].to(self.device)
+        self._move_flag = True
+
+    def unconditional_embedding(self, batch_size):
+        empty_text = [""] * batch_size
+        empty_z = self.forward(empty_text)
+        return empty_z
+
+    def forward(self, text):
+        self.move()
+
+        batch_encoding = self.tokenizer(
+            text,
+            truncation=True,
+            max_length=self.max_length,
+            return_length=True,
+            return_overflowing_tokens=False,
+            padding="max_length",
+            return_tensors="pt",
+        )
+
+        tokens = batch_encoding["input_ids"].to(self.device)
+        outputs = self.clip(input_ids=tokens)
+
+        z = outputs.last_hidden_state
+        return z
+
+    def encode(self, text):
+        batch_size = len(text)
+        batch_mask = torch.rand((batch_size,))
+        for i in range(batch_size):
+            if batch_mask[i] < self.zero_embedding_radio:
+                text[i] = ""
+
+        return self(text)
+
+
+class FrozenCLIPImageEmbedder(AbstractEncoder):
+    """Uses the CLIP vision transformer encoder for images (from Hugging Face)."""
+
+    def __init__(
+        self,
+        version="openai/clip-vit-large-patch14",
+        device="cuda",
+        zero_embedding_radio=0.1,
+        normalize_embedding=True,
+        num_projection_vector=0,
+        linear_mapping_bias=True,
+        reverse_visual_projection=False,
+    ):
+        super().__init__()
+
+        self.device = device
+
+        self.clip_dict = OrderedDict()
+        self.clip_name = os.path.split(version)[-1]
+
+        clip_model = CLIPModel.from_pretrained(version)
+        clip_model.text_model = None
+        clip_model.text_projection = None
+        clip_model = clip_model.eval()
+        # freeze the CLIP weights (the original iterated over self.parameters(),
+        # which is empty at this point since clip_model is kept out of the module tree)
+        for param in clip_model.parameters():
+            param.requires_grad = False
+        self.clip_dict[self.clip_name] = clip_model
+
+        self.transform = transforms.Compose(
+            [
+                transforms.Resize(224, transforms.InterpolationMode.BICUBIC, antialias=True),
+                transforms.CenterCrop(224),  # crop a (224, 224) square
+                transforms.Normalize(
+                    mean=[0.48145466, 0.4578275, 0.40821073],
+                    std=[0.26862954, 0.26130258, 0.27577711],
+                ),
+            ]
+        )
+        self.zero_embedding_radio = zero_embedding_radio
+
+        self.num_projection_vector = num_projection_vector
+        self.reverse_visual_projection = reverse_visual_projection
+        self.normalize_embedding = normalize_embedding
+
+        embedding_dim = (
+            clip_model.visual_projection.in_features
+            if reverse_visual_projection
+            else clip_model.visual_projection.out_features
+        )
+        self.embedding_dim = embedding_dim
+        if self.num_projection_vector > 0:
+            self.projection = nn.Linear(
+                embedding_dim,
+                clip_model.visual_projection.out_features * num_projection_vector,
+                bias=linear_mapping_bias,
+            )
+            nn.init.normal_(self.projection.weight, std=embedding_dim ** -0.5)
+
+        self._move_flag = False
+
+    @property
+    def clip(self):
+        return self.clip_dict[self.clip_name]
+
+    def unconditional_embedding(self, batch_size):
+        zero = torch.zeros(
+            batch_size,
+            1,
+            self.embedding_dim,
+            device=self.device,
+            dtype=self.clip.visual_projection.weight.dtype,
+        )
+        if self.num_projection_vector > 0:
+            zero = self.projection(zero).view(batch_size, self.num_projection_vector, -1)
+        return zero
+
+    def forward(self, image, value_range=(-1, 1), zero_embedding_radio=0):
+        if value_range is not None:
+            low, high = value_range
+            image = (image - low) / (high - low)
+
+        image = image.to(self.device, dtype=self.clip.visual_projection.weight.dtype)
+
+        if self.reverse_visual_projection:
+            z = self.clip.vision_model(self.transform(image))[1]
+        else:
+            z = self.clip.get_image_features(self.transform(image))
+
+        if self.normalize_embedding:
+            z = z / z.norm(dim=-1, keepdim=True)
+        if z.ndim == 2:
+            z = z.unsqueeze(dim=-2)
+
+        if zero_embedding_radio > 0:
+            # keep each embedding with probability (1 - zero_embedding_radio); the
+            # original compared with `<`, which inverted the dropout (cf. the grid embedder below)
+            mask = torch.rand((len(image), 1, 1), device=z.device, dtype=z.dtype) >= zero_embedding_radio
+            z = z * mask.to(z)
+
+        if self.num_projection_vector > 0:
+            z = self.projection(z).view(len(image), self.num_projection_vector, -1)
+
+        return z
+
+    def move(self):
+        if self._move_flag:
+            return
+
+        self.clip_dict[self.clip_name] = self.clip_dict[self.clip_name].to(self.device)
+        self._move_flag = True
+
+    def encode(self, image):
+        self.move()
+        return self(image, zero_embedding_radio=self.zero_embedding_radio)
+
+
+class FrozenCLIPImageGridEmbedder(AbstractEncoder):
+
+    def __init__(
+        self,
+        version="openai/clip-vit-large-patch14",
+        device="cuda",
+        zero_embedding_radio=0.1,
+    ):
+        super().__init__()
+
+        self.device = device
+
+        self.clip_dict = OrderedDict()
+        self.clip_name = os.path.split(version)[-1]
+
+        clip_model: CLIPModel = CLIPModel.from_pretrained(version)
+        clip_model.text_model = None
+        clip_model.text_projection = None
+        clip_model = clip_model.eval()
+        # freeze the CLIP weights (self.parameters() is empty here; see the note above)
+        for param in clip_model.parameters():
+            param.requires_grad = False
+        self.clip_dict[self.clip_name] = clip_model
+
+        self.transform = transforms.Compose(
+            [
+                transforms.Resize(224, transforms.InterpolationMode.BILINEAR, antialias=True),
+                transforms.CenterCrop(224),  # crop a (224, 224) square
+                transforms.Normalize(
+                    mean=[0.48145466, 0.4578275, 0.40821073],
+                    std=[0.26862954, 0.26130258, 0.27577711],
+                ),
+            ]
+        )
+        self.zero_embedding_radio = zero_embedding_radio
+        self.embedding_dim = clip_model.vision_embed_dim
+
+        self._move_flag = False
+
+    @property
+    def clip(self):
+        return self.clip_dict[self.clip_name]
+
+    def move(self):
+        if self._move_flag:
+            return
+
+        self.clip_dict[self.clip_name] = self.clip_dict[self.clip_name].to(self.device)
+        self._move_flag = True
+
+    def unconditional_embedding(self, batch_size):
+        zero = torch.zeros(
+            batch_size,
+            self.clip.vision_model.embeddings.num_positions,
+            self.embedding_dim,
+            device=self.device,
+            dtype=self.clip.visual_projection.weight.dtype,
+        )
+        return zero
+
+    def forward(self, image, value_range=(-1, 1), zero_embedding_radio=0):
+        self.move()
+
+        if value_range is not None:
+            low, high = value_range
+            image = (image - low) / (high - low)
+
+        image = image.to(self.device, dtype=self.clip.visual_projection.weight.dtype)
+
+        z = self.clip.vision_model(self.transform(image)).last_hidden_state
+
+        if zero_embedding_radio > 0:
+            mask = torch.rand((len(image), 1, 1), device=z.device, dtype=z.dtype) >= zero_embedding_radio
+            z = z * mask.to(z)
+
+        return z
+
+    def encode(self, image):
+        return self(image, zero_embedding_radio=self.zero_embedding_radio)
+
+
+class MoECLIPImageEncoder(nn.Module):
+    def __init__(
+        self,
+        versions,
+        hidden_state_dim,
+        num_projection_vector=8,
+        zero_embedding_radio=0.1,
+        device="cuda",
+        precision="fp16",
+        normalize=False,
+        clip_max=0,
+        transform_type="base",
+        argument_p=0.2,
+    ):
+        super().__init__()
+
+        self.device = torch.device(device)
+        self.hidden_state_dim = hidden_state_dim
+        self.zero_embedding_radio = zero_embedding_radio
+        self.num_projection_vector = num_projection_vector
+        self.dtype = dict(fp16=torch.float16, fp32=torch.float32, bf16=torch.bfloat16)[precision]
+        self.normalize = normalize
+        self.clip_max = clip_max
+
+        if transform_type == "base":
+            self.transform = transforms.Compose(
+                [
+                    transforms.Resize(224, transforms.InterpolationMode.BICUBIC, antialias=True),
+                    transforms.CenterCrop(224),  # crop a (224, 224) square
+                    transforms.Normalize(
+                        mean=[0.48145466, 0.4578275, 0.40821073],
+                        std=[0.26862954, 0.26130258, 0.27577711],
+                    ),
+                ]
+            )
+        elif transform_type == "crop_blur_resize":
+            self.transform = transforms.Compose(
+                [
+                    transforms.Resize(224, transforms.InterpolationMode.BICUBIC, antialias=True),
+                    transforms.CenterCrop(224),  # crop a (224, 224) square
+                    transforms.RandomApply(
+                        transforms=[
+                            transforms.RandomResizedCrop(
+                                size=224,
+                                scale=(0.8, 1.0),
+                                ratio=(0.99, 1.01),
+                                interpolation=transforms.InterpolationMode.BICUBIC,
+                            ),
+                        ],
+                        p=argument_p,
+                    ),
+                    transforms.RandomApply(
+                        transforms=[
+                            transforms.GaussianBlur(kernel_size=9, sigma=(0.1, 5)),
+                        ],
+                        p=argument_p,
+                    ),
+                    transforms.RandomApply(
+                        transforms=[
+                            RandomResize(size=224, resize_radio=(0.2, 1)),
+                        ],
+                        p=argument_p,
+                    ),
+                    transforms.Normalize(
+                        mean=[0.48145466, 0.4578275, 0.40821073],
+                        std=[0.26862954, 0.26130258, 0.27577711],
+                    ),
+                ]
+            )
+        else:
+            raise ValueError(f"invalid {transform_type=}")
+
+        if isinstance(versions, str):
+            versions = (versions,)
+
+        # If the clips were registered as submodules of this class: 1) checkpoints would save
+        # several redundant copies of the weights, and 2) PyTorch Lightning would call .to() on
+        # them, converting the layer_norm weights to fp16 as well.
+        clips = OrderedDict()
+
+        for v in versions:
+            # Since the clips are not submodules, passing device="cuda" here would wrongly
+            # place every CLIP model's weights on cuda:0.
+            clips[v], _ = clip.load(name=v, device="cpu", jit=False, download_root=None)
+            delattr(clips[v], "transformer")
+            clips[v].eval()
+            clips[v].requires_grad_(False)
+
+        self.clips_hidden_dim = sum(clips[v].ln_final.weight.size(0) for v in clips)
+
+        if self.num_projection_vector == 0:
+            self.projection = nn.Identity()
+        else:
+            self.projection = nn.Linear(self.clips_hidden_dim, hidden_state_dim * self.num_projection_vector, bias=True)
+            self.projection.to(dtype=self.dtype)
+            nn.init.normal_(self.projection.weight, std=self.clips_hidden_dim ** -0.5)
+
+        self.clips = clips
+
+        self._move_flag = False
+
+    def move(self):
+        if self._move_flag:
+            return
+
+        def convert_weights(model: nn.Module):
+            """Convert applicable model parameters to fp16"""
+
+            def _convert_weights_to_fp16(l):
+                if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
+                    l.weight.data = l.weight.data.type(self.dtype)
+                    if l.bias is not None:
+                        l.bias.data = l.bias.data.type(self.dtype)
+
+                if isinstance(l, nn.MultiheadAttention):
+                    for attr in [
+                        *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
+                        "in_proj_bias",
+                        "bias_k",
+                        "bias_v",
+                    ]:
+                        tensor = getattr(l, attr)
+                        if tensor is not None:
+                            tensor.data = tensor.data.type(self.dtype)
+
+                for name in ["text_projection", "proj"]:
+                    if hasattr(l, name):
+                        attr = getattr(l, name)
+                        if attr is not None:
+                            attr.data = attr.data.type(self.dtype)
+
+            model.apply(_convert_weights_to_fp16)
+
+        for k in self.clips:
+            self.clips[k].to(self.device)
+            convert_weights(self.clips[k])  # fp32 -> self.dtype
+        self._move_flag = True
+
+    def unconditional_embedding(self, batch_size=None):
+        zero = torch.zeros(
+            batch_size,
+            self.clips_hidden_dim,
+            device=self.device,
+            dtype=self.dtype,
+        )
+        if self.num_projection_vector > 0:
+            zero = self.projection(zero).view(batch_size, self.num_projection_vector, -1)
+        return zero
+
+    def convert_embedding(self, z):
+        if self.num_projection_vector > 0:
+            z = self.projection(z.type(self.projection.weight.dtype)).view(len(z), self.num_projection_vector, -1)
+        return z
+
+    def forward(self, image, value_range=(-1, 1), zero_embedding_radio=0):
+        if value_range is not None:
+            low, high = value_range
+            image = (image - low) / (high - low)
+
+        image = self.transform(image)
+
+        with torch.no_grad():
+            embs = []
+            for v in self.clips:
+                x = self.clips[v].encode_image(image)
+                if self.normalize:
+                    x = x / x.norm(p=2, dim=-1, keepdim=True) * (x.size(-1) ** 0.5)
+                    # clip_max only works with normalization
+                    if self.clip_max > 0:
+                        x = x.clamp(-self.clip_max, self.clip_max)
+                embs.append(x)
+
+            z = torch.cat(embs, dim=-1)
+            if self.normalize:
+                z /= z.size(-1) ** 0.5
+
+        if zero_embedding_radio > 0:
+            mask = torch.rand((len(image), 1, 1), device=z.device, dtype=z.dtype) >= zero_embedding_radio
+            # zero out masked embeddings (the original used `+`, which shifted rather than dropped them)
+            z = z * mask.to(z)
+
+        if self.num_projection_vector > 0:
+            z = self.projection(z).view(len(image), self.num_projection_vector, -1)
+        return z
+
+    def encode(self, image):
+        self.move()
+        return self(image, zero_embedding_radio=self.zero_embedding_radio)
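Finally, a sketch of how one of these factories is typically used for conditioning (the device, batch shape, and value range are illustrative assumptions; inputs are expected in [-1, 1]):

```python
import torch

embedder = FrozenCLIPImageGridEmbedder(device="cpu")  # CPU just for illustration
images = torch.rand(2, 3, 256, 256) * 2 - 1           # fake RGB batch in [-1, 1]

cond = embedder.encode(images)  # (2, 257, 1024) patch-grid tokens for ViT-L/14;
                                # encode() also applies the zero_embedding_radio dropout
uncond = embedder.unconditional_embedding(batch_size=2)  # zeros of matching shape
```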