Spaces (Runtime error)

Upload folder using huggingface_hub
Files changed:
- .gitattributes +1 -0
- .github/workflows/ci.yaml +22 -0
- .gitignore +21 -0
- .python-version +1 -0
- LICENSE +201 -0
- README.md +161 -8
- app.py +370 -0
- cli.py +144 -0
- dia/__init__.py +6 -0
- dia/audio.py +185 -0
- dia/config.py +187 -0
- dia/layers.py +624 -0
- dia/model.py +455 -0
- dia/state.py +207 -0
- dia/static/images/banner.png +3 -0
- docker/Dockerfile.cpu +48 -0
- docker/Dockerfile.gpu +49 -0
- example/simple.py +10 -0
- example/voice_clone.py +24 -0
- example_prompt.mp3 +0 -0
- pyproject.toml +66 -0
- uv.lock +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+dia/static/images/banner.png filter=lfs diff=lfs merge=lfs -text
.github/workflows/ci.yaml ADDED
@@ -0,0 +1,22 @@

name: Continuous Integration

on:
  pull_request:
    branches:
      - main

jobs:
  lint_and_format:
    runs-on: ubuntu-latest
    name: Lint and Format
    steps:
      - uses: actions/checkout@v4
      - uses: astral-sh/ruff-action@v3
        with:
          version: latest

      - name: Check Lint using Ruff
        run: ruff check

      - name: Check Format using Ruff
        run: ruff format --check --diff
.gitignore ADDED
@@ -0,0 +1,21 @@

# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info

# Virtual environments
.venv

.gradio

**/*.pth
**/*.mp3
!example_prompt.mp3
**/*.txt

.ruff_cache
.ipynb_checkpoints
config.json
.python-version ADDED
@@ -0,0 +1 @@

3.10
LICENSE ADDED
@@ -0,0 +1,201 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2025 Nari Labs

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
README.md CHANGED
@@ -1,12 +1,165 @@
 ---
-title:
-emoji: 🌖
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 5.29.0
+title: dia
 app_file: app.py
-
+sdk: gradio
+sdk_version: 5.27.1
 ---

The rest of the README is new content:

<p align="center">
<a href="https://github.com/nari-labs/dia">
<img src="./dia/static/images/banner.png">
</a>
</p>
<p align="center">
<a href="https://tally.so/r/meokbo" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/Join-Waitlist-white?style=for-the-badge"></a>
<a href="https://discord.gg/yBrqQ9Dd" target="_blank"><img src="https://img.shields.io/badge/Discord-Join%20Chat-7289DA?logo=discord&style=for-the-badge"></a>
<a href="https://github.com/nari-labs/dia/blob/main/LICENSE" target="_blank"><img src="https://img.shields.io/badge/License-Apache_2.0-blue.svg?style=for-the-badge" alt="LICENSE"></a>
</p>
<p align="center">
<a href="https://huggingface.co/nari-labs/Dia-1.6B"><img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/model-on-hf-lg-dark.svg" alt="Dataset on HuggingFace" height=42 ></a>
<a href="https://huggingface.co/spaces/nari-labs/Dia-1.6B"><img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/open-in-hf-spaces-lg-dark.svg" alt="Space on HuggingFace" height=38></a>
</p>

Dia is a 1.6B parameter text to speech model created by Nari Labs.

Dia **directly generates highly realistic dialogue from a transcript**. You can condition the output on audio, enabling emotion and tone control. The model can also produce nonverbal communications like laughter, coughing, clearing throat, etc.

To accelerate research, we are providing access to pretrained model checkpoints and inference code. The model weights are hosted on [Hugging Face](https://huggingface.co/nari-labs/Dia-1.6B). The model only supports English generation at the moment.

We also provide a [demo page](https://yummy-fir-7a4.notion.site/dia) comparing our model to [ElevenLabs Studio](https://elevenlabs.io/studio) and [Sesame CSM-1B](https://github.com/SesameAILabs/csm).

- (Update) We have a ZeroGPU Space running! Try it now [here](https://huggingface.co/spaces/nari-labs/Dia-1.6B). Thanks to the HF team for the support :)
- Join our [discord server](https://discord.gg/yBrqQ9Dd) for community support and access to new features.
- Play with a larger version of Dia: generate fun conversations, remix content, and share with friends. 🔮 Join the [waitlist](https://tally.so/r/meokbo) for early access.

## Generation Guidelines

- Keep input text length moderate
    - Short input (corresponding to under 5s of audio) will sound unnatural.
    - Very long input (corresponding to over 20s of audio) will make the speech unnaturally fast.
- Use non-verbal tags sparingly, from the list in the README. Overusing or using unlisted non-verbals may cause weird artifacts.
- Always begin input text with `[S1]`, and always alternate between `[S1]` and `[S2]` (i.e. `[S1]`... `[S1]`... is not good)
- When using audio prompts (voice cloning), follow these instructions carefully:
    - Provide the transcript of the to-be cloned audio before the generation text.
    - Transcript must use `[S1]`, `[S2]` speaker tags correctly (i.e. single speaker: `[S1]`..., two speakers: `[S1]`... `[S2]`...)
    - Duration of the to-be cloned audio should be 5~10 seconds for the best results.
      (Keep in mind: 1 second ≈ 86 tokens; see the sketch below.)
    - Put `[S1]` or `[S2]` (the second-to-last speaker's tag) at the end of the audio to improve audio quality at the end.

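As a rough illustration of the token budget above, one can convert a target clip length into a `max_tokens` value from the ≈86 tokens/second rule of thumb. The helper below is a minimal sketch, not part of the Dia API; the clamp range simply mirrors the 860-3072 slider used by the Gradio app in this commit.

```python
def estimate_max_tokens(target_seconds: float, tokens_per_second: int = 86) -> int:
    """Rough max_tokens estimate for a clip of target_seconds (illustrative only)."""
    return max(860, min(3072, int(target_seconds * tokens_per_second)))


print(estimate_max_tokens(10.0))  # ~10 s of audio -> roughly 860 tokens
print(estimate_max_tokens(30.0))  # long requests are clamped to the 3072-token slider limit
```
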
### Install via pip

```bash
# Install directly from GitHub
pip install git+https://github.com/nari-labs/dia.git
```

### Run the Gradio UI

This will open a Gradio UI that you can work on.

```bash
git clone https://github.com/nari-labs/dia.git
cd dia && uv run app.py
```

or if you do not have `uv` pre-installed:

```bash
git clone https://github.com/nari-labs/dia.git
cd dia
python -m venv .venv
source .venv/bin/activate
pip install -e .
python app.py
```

Note that the model was not fine-tuned on a specific voice. Hence, you will get different voices every time you run the model.
You can keep speaker consistency by either adding an audio prompt (a guide coming VERY soon - try it with the second example on Gradio for now), or fixing the seed.

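A minimal sketch of fixing the seed, mirroring the `set_seed` helper added in `cli.py` later in this commit (call it once before generating):

```python
import random

import numpy as np
import torch


def set_seed(seed: int) -> None:
    # Seed every RNG the pipeline may touch so repeated runs pick the same voice.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


set_seed(42)  # then call model.generate(...) as usual
```
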
## Features

- Generate dialogue via `[S1]` and `[S2]` tags.
- Generate non-verbals like `(laughs)`, `(coughs)`, etc.
    - The non-verbal tags below will be recognized, but might result in unexpected output.
    - `(laughs), (clears throat), (sighs), (gasps), (coughs), (singing), (sings), (mumbles), (beep), (groans), (sniffs), (claps), (screams), (inhales), (exhales), (applause), (burps), (humming), (sneezes), (chuckle), (whistles)`
- Voice cloning. See [`example/voice_clone.py`](example/voice_clone.py) for more information.
    - In the Hugging Face space, you can upload the audio you want to clone and place its transcript before your script. Make sure the transcript follows the required format. The model will then output only the content of your script.

## ⚙️ Usage

### As a Python Library

```python
from dia.model import Dia


model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")

text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."

output = model.generate(text, use_torch_compile=True, verbose=True)

model.save_audio("simple.mp3", output)
```

A pypi package and a working CLI tool will be available soon.

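For voice cloning from Python, a minimal sketch is below. It assumes `audio_prompt` accepts a path to the reference clip, as it does in `app.py` and `cli.py` in this commit; the exact call in `example/voice_clone.py` may differ, and the transcript string here is only a placeholder.

```python
from dia.model import Dia

model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")

# Transcript of the reference clip comes first, then the text to generate.
clone_transcript = "[S1] Placeholder transcript of the reference audio."
script = "[S2] And this is the new line spoken in the cloned voice."

output = model.generate(
    clone_transcript + " " + script,
    audio_prompt="example_prompt.mp3",  # path to the 5-10 s reference clip
    verbose=True,
)
model.save_audio("cloned.mp3", output)
```
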
## 💻 Hardware and Inference Speed

Dia has only been tested on GPUs (PyTorch 2.0+, CUDA 12.6). CPU support will be added soon.
The initial run will take longer as the Descript Audio Codec also needs to be downloaded.

These are the speeds we benchmarked on an RTX 4090.

| precision | realtime factor w/ compile | realtime factor w/o compile | VRAM |
|:-:|:-:|:-:|:-:|
| `bfloat16` | x2.1 | x1.5 | ~10GB |
| `float16` | x2.2 | x1.3 | ~10GB |
| `float32` | x1 | x0.9 | ~13GB |

We will be adding a quantized version in the future.

If you don't have hardware available or if you want to play with bigger versions of our models, join the waitlist [here](https://tally.so/r/meokbo).

## 🪪 License

This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.

## ⚠️ Disclaimer

This project offers a high-fidelity speech generation model intended for research and educational use. The following uses are **strictly forbidden**:

- **Identity Misuse**: Do not produce audio resembling real individuals without permission.
- **Deceptive Content**: Do not use this model to generate misleading content (e.g. fake news).
- **Illegal or Malicious Use**: Do not use this model for activities that are illegal or intended to cause harm.

By using this model, you agree to uphold relevant legal standards and ethical responsibilities. We **are not responsible** for any misuse and firmly oppose any unethical usage of this technology.

## 🔭 TODO / Future Work

- Docker support for ARM architecture and MacOS.
- Optimize inference speed.
- Add quantization for memory efficiency.

## 🤝 Contributing

We are a tiny team of one full-time and one part-time research engineer. Contributions are extra welcome!
Join our [Discord Server](https://discord.gg/yBrqQ9Dd) for discussions.

## 🤗 Acknowledgements

- We thank the [Google TPU Research Cloud program](https://sites.research.google/trc/about/) for providing computation resources.
- Our work was heavily inspired by [SoundStorm](https://arxiv.org/abs/2305.09636), [Parakeet](https://jordandarefsky.com/blog/2024/parakeet/), and [Descript Audio Codec](https://github.com/descriptinc/descript-audio-codec).
- Hugging Face for providing the ZeroGPU Grant.
- "Nari" is a pure Korean word for lily.
- We thank Jason Y. for providing help with data filtering.

## ⭐ Star History

<a href="https://www.star-history.com/#nari-labs/dia&Date">
 <picture>
   <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=nari-labs/dia&type=Date&theme=dark" />
   <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=nari-labs/dia&type=Date" />
   <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=nari-labs/dia&type=Date" />
 </picture>
</a>

app.py ADDED
@@ -0,0 +1,370 @@

import argparse
import tempfile
import time
from pathlib import Path
from typing import Optional, Tuple

import gradio as gr
import numpy as np
import soundfile as sf
import torch

from dia.model import Dia


# --- Global Setup ---
parser = argparse.ArgumentParser(description="Gradio interface for Nari TTS")
parser.add_argument("--device", type=str, default=None, help="Force device (e.g., 'cuda', 'mps', 'cpu')")
parser.add_argument("--share", action="store_true", help="Enable Gradio sharing")

args = parser.parse_args()


# Determine device
if args.device:
    device = torch.device(args.device)
elif torch.cuda.is_available():
    device = torch.device("cuda")
# Simplified MPS check for broader compatibility
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
    # Basic check is usually sufficient, detailed check can be problematic
    device = torch.device("mps")
else:
    device = torch.device("cpu")

print(f"Using device: {device}")

# Load Nari model and config
print("Loading Nari model...")
try:
    # Use the function from inference.py
    model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16", device=device)
except Exception as e:
    print(f"Error loading Nari model: {e}")
    raise


def run_inference(
    text_input: str,
    audio_prompt_input: Optional[Tuple[int, np.ndarray]],
    max_new_tokens: int,
    cfg_scale: float,
    temperature: float,
    top_p: float,
    cfg_filter_top_k: int,
    speed_factor: float,
):
    """
    Runs Nari inference using the globally loaded model and provided inputs.
    Uses temporary files for text and audio prompt compatibility with inference.generate.
    """
    global model, device  # Access global model, config, device

    if not text_input or text_input.isspace():
        raise gr.Error("Text input cannot be empty.")

    temp_txt_file_path = None
    temp_audio_prompt_path = None
    output_audio = (44100, np.zeros(1, dtype=np.float32))

    try:
        prompt_path_for_generate = None
        if audio_prompt_input is not None:
            sr, audio_data = audio_prompt_input
            # Check if audio_data is valid
            if audio_data is None or audio_data.size == 0 or audio_data.max() == 0:  # Check for silence/empty
                gr.Warning("Audio prompt seems empty or silent, ignoring prompt.")
            else:
                # Save prompt audio to a temporary WAV file
                with tempfile.NamedTemporaryFile(mode="wb", suffix=".wav", delete=False) as f_audio:
                    temp_audio_prompt_path = f_audio.name  # Store path for cleanup

                    # Basic audio preprocessing for consistency
                    # Convert to float32 in [-1, 1] range if integer type
                    if np.issubdtype(audio_data.dtype, np.integer):
                        max_val = np.iinfo(audio_data.dtype).max
                        audio_data = audio_data.astype(np.float32) / max_val
                    elif not np.issubdtype(audio_data.dtype, np.floating):
                        gr.Warning(f"Unsupported audio prompt dtype {audio_data.dtype}, attempting conversion.")
                        # Attempt conversion, might fail for complex types
                        try:
                            audio_data = audio_data.astype(np.float32)
                        except Exception as conv_e:
                            raise gr.Error(f"Failed to convert audio prompt to float32: {conv_e}")

                    # Ensure mono (average channels if stereo)
                    if audio_data.ndim > 1:
                        if audio_data.shape[0] == 2:  # Assume (2, N)
                            audio_data = np.mean(audio_data, axis=0)
                        elif audio_data.shape[1] == 2:  # Assume (N, 2)
                            audio_data = np.mean(audio_data, axis=1)
                        else:
                            gr.Warning(
                                f"Audio prompt has unexpected shape {audio_data.shape}, taking first channel/axis."
                            )
                            audio_data = (
                                audio_data[0] if audio_data.shape[0] < audio_data.shape[1] else audio_data[:, 0]
                            )
                        audio_data = np.ascontiguousarray(audio_data)  # Ensure contiguous after slicing/mean

                    # Write using soundfile
                    try:
                        sf.write(
                            temp_audio_prompt_path, audio_data, sr, subtype="FLOAT"
                        )  # Explicitly use FLOAT subtype
                        prompt_path_for_generate = temp_audio_prompt_path
                        print(f"Created temporary audio prompt file: {temp_audio_prompt_path} (orig sr: {sr})")
                    except Exception as write_e:
                        print(f"Error writing temporary audio file: {write_e}")
                        raise gr.Error(f"Failed to save audio prompt: {write_e}")

        # 3. Run Generation

        start_time = time.time()

        # Use torch.inference_mode() context manager for the generation call
        with torch.inference_mode():
            output_audio_np = model.generate(
                text_input,
                max_tokens=max_new_tokens,
                cfg_scale=cfg_scale,
                temperature=temperature,
                top_p=top_p,
                cfg_filter_top_k=cfg_filter_top_k,  # Pass the value here
                use_torch_compile=False,  # Keep False for Gradio stability
                audio_prompt=prompt_path_for_generate,
            )

        end_time = time.time()
        print(f"Generation finished in {end_time - start_time:.2f} seconds.")

        # 4. Convert Codes to Audio
        if output_audio_np is not None:
            # Get sample rate from the loaded DAC model
            output_sr = 44100

            # --- Slow down audio ---
            original_len = len(output_audio_np)
            # Ensure speed_factor is positive and not excessively small/large to avoid issues
            speed_factor = max(0.1, min(speed_factor, 5.0))
            target_len = int(original_len / speed_factor)  # Target length based on speed_factor
            if target_len != original_len and target_len > 0:  # Only interpolate if length changes and is valid
                x_original = np.arange(original_len)
                x_resampled = np.linspace(0, original_len - 1, target_len)
                resampled_audio_np = np.interp(x_resampled, x_original, output_audio_np)
                output_audio = (
                    output_sr,
                    resampled_audio_np.astype(np.float32),
                )  # Use resampled audio
                print(f"Resampled audio from {original_len} to {target_len} samples for {speed_factor:.2f}x speed.")
            else:
                output_audio = (
                    output_sr,
                    output_audio_np,
                )  # Keep original if calculation fails or no change
                print(f"Skipping audio speed adjustment (factor: {speed_factor:.2f}).")
            # --- End slowdown ---

            print(f"Audio conversion successful. Final shape: {output_audio[1].shape}, Sample Rate: {output_sr}")

            # Explicitly convert to int16 to prevent Gradio warning
            if output_audio[1].dtype == np.float32 or output_audio[1].dtype == np.float64:
                audio_for_gradio = np.clip(output_audio[1], -1.0, 1.0)
                audio_for_gradio = (audio_for_gradio * 32767).astype(np.int16)
                output_audio = (output_sr, audio_for_gradio)
                print("Converted audio to int16 for Gradio output.")

        else:
            print("\nGeneration finished, but no valid tokens were produced.")
            # Return default silence
            gr.Warning("Generation produced no output.")

    except Exception as e:
        print(f"Error during inference: {e}")
        import traceback

        traceback.print_exc()
        # Re-raise as Gradio error to display nicely in the UI
        raise gr.Error(f"Inference failed: {e}")

    finally:
        # 5. Cleanup Temporary Files defensively
        if temp_txt_file_path and Path(temp_txt_file_path).exists():
            try:
                Path(temp_txt_file_path).unlink()
                print(f"Deleted temporary text file: {temp_txt_file_path}")
            except OSError as e:
                print(f"Warning: Error deleting temporary text file {temp_txt_file_path}: {e}")
        if temp_audio_prompt_path and Path(temp_audio_prompt_path).exists():
            try:
                Path(temp_audio_prompt_path).unlink()
                print(f"Deleted temporary audio prompt file: {temp_audio_prompt_path}")
            except OSError as e:
                print(f"Warning: Error deleting temporary audio prompt file {temp_audio_prompt_path}: {e}")

    return output_audio


# --- Create Gradio Interface ---
css = """
#col-container {max-width: 90%; margin-left: auto; margin-right: auto;}
"""
# Attempt to load default text from example.txt
default_text = "[S1] Dia is an open weights text to dialogue model. \n[S2] You get full control over scripts and voices. \n[S1] Wow. Amazing. (laughs) \n[S2] Try it now on Git hub or Hugging Face."
example_txt_path = Path("./example.txt")
if example_txt_path.exists():
    try:
        default_text = example_txt_path.read_text(encoding="utf-8").strip()
        if not default_text:  # Handle empty example file
            default_text = "Example text file was empty."
    except Exception as e:
        print(f"Warning: Could not read example.txt: {e}")


# Build Gradio UI
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Nari Text-to-Speech Synthesis")

    with gr.Row(equal_height=False):
        with gr.Column(scale=1):
            text_input = gr.Textbox(
                label="Input Text",
                placeholder="Enter text here...",
                value=default_text,
                lines=5,  # Increased lines
            )
            audio_prompt_input = gr.Audio(
                label="Audio Prompt (Optional)",
                show_label=True,
                sources=["upload", "microphone"],
                type="numpy",
            )
            with gr.Accordion("Generation Parameters", open=False):
                max_new_tokens = gr.Slider(
                    label="Max New Tokens (Audio Length)",
                    minimum=860,
                    maximum=3072,
                    value=model.config.data.audio_length,  # Use config default if available, else fallback
                    step=50,
                    info="Controls the maximum length of the generated audio (more tokens = longer audio).",
                )
                cfg_scale = gr.Slider(
                    label="CFG Scale (Guidance Strength)",
                    minimum=1.0,
                    maximum=5.0,
                    value=3.0,  # Default from inference.py
                    step=0.1,
                    info="Higher values increase adherence to the text prompt.",
                )
                temperature = gr.Slider(
                    label="Temperature (Randomness)",
                    minimum=1.0,
                    maximum=1.5,
                    value=1.3,  # Default from inference.py
                    step=0.05,
                    info="Lower values make the output more deterministic, higher values increase randomness.",
                )
                top_p = gr.Slider(
                    label="Top P (Nucleus Sampling)",
                    minimum=0.80,
                    maximum=1.0,
                    value=0.95,  # Default from inference.py
                    step=0.01,
                    info="Filters vocabulary to the most likely tokens cumulatively reaching probability P.",
                )
                cfg_filter_top_k = gr.Slider(
                    label="CFG Filter Top K",
                    minimum=15,
                    maximum=50,
                    value=30,
                    step=1,
                    info="Top k filter for CFG guidance.",
                )
                speed_factor_slider = gr.Slider(
                    label="Speed Factor",
                    minimum=0.8,
                    maximum=1.0,
                    value=0.94,
                    step=0.02,
                    info="Adjusts the speed of the generated audio (1.0 = original speed).",
                )

            run_button = gr.Button("Generate Audio", variant="primary")

        with gr.Column(scale=1):
            audio_output = gr.Audio(
                label="Generated Audio",
                type="numpy",
                autoplay=False,
            )

    # Link button click to function
    run_button.click(
        fn=run_inference,
        inputs=[
            text_input,
            audio_prompt_input,
            max_new_tokens,
            cfg_scale,
            temperature,
            top_p,
            cfg_filter_top_k,
            speed_factor_slider,
        ],
        outputs=[audio_output],  # Add status_output here if using it
        api_name="generate_audio",
    )

    # Add examples (ensure the prompt path is correct or remove it if example file doesn't exist)
    example_prompt_path = "./example_prompt.mp3"  # Adjust if needed
    examples_list = [
        [
            "[S1] Oh fire! Oh my goodness! What's the procedure? What to we do people? The smoke could be coming through an air duct! \n[S2] Oh my god! Okay.. it's happening. Everybody stay calm! \n[S1] What's the procedure... \n[S2] Everybody stay fucking calm!!!... Everybody fucking calm down!!!!! \n[S1] No! No! If you touch the handle, if its hot there might be a fire down the hallway! ",
            None,
            3072,
            3.0,
            1.3,
            0.95,
            35,
            0.94,
        ],
        [
            "[S1] Open weights text to dialogue model. \n[S2] You get full control over scripts and voices. \n[S1] I'm biased, but I think we clearly won. \n[S2] Hard to disagree. (laughs) \n[S1] Thanks for listening to this demo. \n[S2] Try it now on Git hub and Hugging Face. \n[S1] If you liked our model, please give us a star and share to your friends. \n[S2] This was Nari Labs.",
            example_prompt_path if Path(example_prompt_path).exists() else None,
            3072,
            3.0,
            1.3,
            0.95,
            35,
            0.94,
        ],
    ]

    if examples_list:
        gr.Examples(
            examples=examples_list,
            inputs=[
                text_input,
                audio_prompt_input,
                max_new_tokens,
                cfg_scale,
                temperature,
                top_p,
                cfg_filter_top_k,
                speed_factor_slider,
            ],
            outputs=[audio_output],
            fn=run_inference,
            cache_examples=False,
            label="Examples (Click to Run)",
        )
    else:
        gr.Markdown("_(No examples configured or example prompt file missing)_")

# --- Launch the App ---
if __name__ == "__main__":
    print("Launching Gradio interface...")

    # set `GRADIO_SERVER_NAME`, `GRADIO_SERVER_PORT` env vars to override default values
    # use `GRADIO_SERVER_NAME=0.0.0.0` for Docker
    demo.launch(server_name="0.0.0.0", server_port=7860, share=args.share)

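Because the click handler above registers `api_name="generate_audio"`, the endpoint can also be called programmatically. The snippet below is a hedged sketch using `gradio_client`: it assumes the app is running locally on the port passed to `demo.launch()`, the arguments follow the `inputs` list of `run_button.click`, and handling of the optional audio prompt and the return value may differ across `gradio_client` versions.

```python
from gradio_client import Client

client = Client("http://127.0.0.1:7860")
result = client.predict(
    "[S1] Hello from the API. [S2] Hi there.",  # text_input
    None,   # audio_prompt_input (no voice cloning)
    3072,   # max_new_tokens
    3.0,    # cfg_scale
    1.3,    # temperature
    0.95,   # top_p
    30,     # cfg_filter_top_k
    0.94,   # speed_factor
    api_name="/generate_audio",
)
print(result)
```
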
cli.py ADDED
@@ -0,0 +1,144 @@

import argparse
import os
import random

import numpy as np
import soundfile as sf
import torch

from dia.model import Dia


def set_seed(seed: int):
    """Sets the random seed for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Ensure deterministic behavior for cuDNN (if used)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def main():
    parser = argparse.ArgumentParser(description="Generate audio using the Dia model.")

    parser.add_argument("text", type=str, help="Input text for speech generation.")
    parser.add_argument(
        "--output", type=str, required=True, help="Path to save the generated audio file (e.g., output.wav)."
    )

    parser.add_argument(
        "--repo-id",
        type=str,
        default="nari-labs/Dia-1.6B",
        help="Hugging Face repository ID (e.g., nari-labs/Dia-1.6B).",
    )
    parser.add_argument(
        "--local-paths", action="store_true", help="Load model from local config and checkpoint files."
    )

    parser.add_argument(
        "--config", type=str, help="Path to local config.json file (required if --local-paths is set)."
    )
    parser.add_argument(
        "--checkpoint", type=str, help="Path to local model checkpoint .pth file (required if --local-paths is set)."
    )
    parser.add_argument(
        "--audio-prompt", type=str, default=None, help="Path to an optional audio prompt WAV file for voice cloning."
    )

    gen_group = parser.add_argument_group("Generation Parameters")
    gen_group.add_argument(
        "--max-tokens",
        type=int,
        default=None,
        help="Maximum number of audio tokens to generate (defaults to config value).",
    )
    gen_group.add_argument(
        "--cfg-scale", type=float, default=3.0, help="Classifier-Free Guidance scale (default: 3.0)."
    )
    gen_group.add_argument(
        "--temperature", type=float, default=1.3, help="Sampling temperature (higher is more random, default: 1.3)."
    )
    gen_group.add_argument("--top-p", type=float, default=0.95, help="Nucleus sampling probability (default: 0.95).")

    infra_group = parser.add_argument_group("Infrastructure")
    infra_group.add_argument("--seed", type=int, default=None, help="Random seed for reproducibility.")
    infra_group.add_argument(
        "--device",
        type=str,
        default="cuda" if torch.cuda.is_available() else "cpu",
        help="Device to run inference on (e.g., 'cuda', 'cpu', default: auto).",
    )

    args = parser.parse_args()

    # Validation for local paths
    if args.local_paths:
        if not args.config:
            parser.error("--config is required when --local-paths is set.")
        if not args.checkpoint:
            parser.error("--checkpoint is required when --local-paths is set.")
        if not os.path.exists(args.config):
            parser.error(f"Config file not found: {args.config}")
        if not os.path.exists(args.checkpoint):
            parser.error(f"Checkpoint file not found: {args.checkpoint}")

    # Set seed if provided
    if args.seed is not None:
        set_seed(args.seed)
        print(f"Using random seed: {args.seed}")

    # Determine device
    device = torch.device(args.device)
    print(f"Using device: {device}")

    # Load model
    print("Loading model...")
    if args.local_paths:
        print(f"Loading from local paths: config='{args.config}', checkpoint='{args.checkpoint}'")
        try:
            model = Dia.from_local(args.config, args.checkpoint, device=device)
        except Exception as e:
            print(f"Error loading local model: {e}")
            exit(1)
    else:
        print(f"Loading from Hugging Face Hub: repo_id='{args.repo_id}'")
        try:
            model = Dia.from_pretrained(args.repo_id, device=device)
        except Exception as e:
            print(f"Error loading model from Hub: {e}")
            exit(1)
    print("Model loaded.")

    # Generate audio
    print("Generating audio...")
    try:
        sample_rate = 44100  # Default assumption

        output_audio = model.generate(
            text=args.text,
            audio_prompt=args.audio_prompt,
            max_tokens=args.max_tokens,
            cfg_scale=args.cfg_scale,
            temperature=args.temperature,
            top_p=args.top_p,
        )
        print("Audio generation complete.")

        print(f"Saving audio to {args.output}...")
        os.makedirs(os.path.dirname(args.output) or ".", exist_ok=True)

        sf.write(args.output, output_audio, sample_rate)
        print(f"Audio successfully saved to {args.output}")

    except Exception as e:
        print(f"Error during audio generation or saving: {e}")
        exit(1)


if __name__ == "__main__":
    main()

dia/__init__.py ADDED
@@ -0,0 +1,6 @@

from .model import Dia


__all__ = [
    "Dia",
]
dia/audio.py
ADDED
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import typing as tp
|
2 |
+
|
3 |
+
import torch
|
4 |
+
|
5 |
+
|
6 |
+
def build_delay_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
|
7 |
+
"""
|
8 |
+
Precompute (t_idx_BxTxC, indices_BTCx3) so that out[t, c] = in[t - delay[c], c].
|
9 |
+
Negative t_idx => BOS; t_idx >= T => PAD.
|
10 |
+
"""
|
11 |
+
delay_arr = torch.tensor(delay_pattern, dtype=torch.int32)
|
12 |
+
|
13 |
+
t_idx_BxT = torch.broadcast_to(
|
14 |
+
torch.arange(T, dtype=torch.int32)[None, :],
|
15 |
+
[B, T],
|
16 |
+
)
|
17 |
+
t_idx_BxTx1 = t_idx_BxT[..., None]
|
18 |
+
t_idx_BxTxC = t_idx_BxTx1 - delay_arr.view(1, 1, C)
|
19 |
+
|
20 |
+
b_idx_BxTxC = torch.broadcast_to(
|
21 |
+
torch.arange(B, dtype=torch.int32).view(B, 1, 1),
|
22 |
+
[B, T, C],
|
23 |
+
)
|
24 |
+
c_idx_BxTxC = torch.broadcast_to(
|
25 |
+
torch.arange(C, dtype=torch.int32).view(1, 1, C),
|
26 |
+
[B, T, C],
|
27 |
+
)
|
28 |
+
|
29 |
+
# We must clamp time indices to [0..T-1] so gather_nd equivalent won't fail
|
30 |
+
t_clamped_BxTxC = torch.clamp(t_idx_BxTxC, 0, T - 1)
|
31 |
+
|
32 |
+
indices_BTCx3 = torch.stack(
|
33 |
+
[
|
34 |
+
b_idx_BxTxC.reshape(-1),
|
35 |
+
t_clamped_BxTxC.reshape(-1),
|
36 |
+
c_idx_BxTxC.reshape(-1),
|
37 |
+
],
|
38 |
+
dim=1,
|
39 |
+
).long() # Ensure indices are long type for indexing
|
40 |
+
|
41 |
+
return t_idx_BxTxC, indices_BTCx3
|
42 |
+
|
43 |
+
|
44 |
+
def apply_audio_delay(
|
45 |
+
audio_BxTxC: torch.Tensor,
|
46 |
+
pad_value: int,
|
47 |
+
bos_value: int,
|
48 |
+
precomp: tp.Tuple[torch.Tensor, torch.Tensor],
|
49 |
+
) -> torch.Tensor:
|
50 |
+
"""
|
51 |
+
Applies the delay pattern to batched audio tokens using precomputed indices,
|
52 |
+
inserting BOS where t_idx < 0 and PAD where t_idx >= T.
|
53 |
+
|
54 |
+
Args:
|
55 |
+
audio_BxTxC: [B, T, C] int16 audio tokens (or int32/float)
|
56 |
+
pad_value: the padding token
|
57 |
+
bos_value: the BOS token
|
58 |
+
precomp: (t_idx_BxTxC, indices_BTCx3) from build_delay_indices
|
59 |
+
|
60 |
+
Returns:
|
61 |
+
result_BxTxC: [B, T, C] delayed audio tokens
|
62 |
+
"""
|
63 |
+
device = audio_BxTxC.device # Get device from input tensor
|
64 |
+
t_idx_BxTxC, indices_BTCx3 = precomp
|
65 |
+
t_idx_BxTxC = t_idx_BxTxC.to(device) # Move precomputed indices to device
|
66 |
+
indices_BTCx3 = indices_BTCx3.to(device)
|
67 |
+
|
68 |
+
# Equivalent of tf.gather_nd using advanced indexing
|
69 |
+
# Ensure indices are long type if not already (build_delay_indices should handle this)
|
70 |
+
gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]
|
71 |
+
gathered_BxTxC = gathered_flat.view(audio_BxTxC.shape)
|
72 |
+
|
73 |
+
# Create masks on the correct device
|
74 |
+
mask_bos = t_idx_BxTxC < 0 # => place bos_value
|
75 |
+
mask_pad = t_idx_BxTxC >= audio_BxTxC.shape[1] # => place pad_value
|
76 |
+
|
77 |
+
# Create scalar tensors on the correct device
|
78 |
+
bos_tensor = torch.tensor(bos_value, dtype=audio_BxTxC.dtype, device=device)
|
79 |
+
pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)
|
80 |
+
|
81 |
+
# If mask_bos, BOS; else if mask_pad, PAD; else original gather
|
82 |
+
# All tensors should now be on the same device
|
83 |
+
result_BxTxC = torch.where(mask_bos, bos_tensor, torch.where(mask_pad, pad_tensor, gathered_BxTxC))
|
84 |
+
|
85 |
+
return result_BxTxC
|
86 |
+
|
87 |
+
|
88 |
+
def build_revert_indices(B: int, T: int, C: int, delay_pattern: tp.List[int]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
|
89 |
+
"""
|
90 |
+
Precompute indices for the revert operation using PyTorch.
|
91 |
+
|
92 |
+
Returns:
|
93 |
+
A tuple (t_idx_BxTxC, indices_BTCx3) where:
|
94 |
+
- t_idx_BxTxC is a tensor of shape [B, T, C] computed as time indices plus the delay.
|
95 |
+
- indices_BTCx3 is a tensor of shape [B*T*C, 3] used for gathering, computed from:
|
96 |
+
batch indices, clamped time indices, and channel indices.
|
97 |
+
"""
|
98 |
+
# Use default device unless specified otherwise; assumes inputs might define device later
|
99 |
+
device = None # Or determine dynamically if needed, e.g., from a model parameter
|
100 |
+
|
101 |
+
delay_arr = torch.tensor(delay_pattern, dtype=torch.int32, device=device)
|
102 |
+
|
103 |
+
t_idx_BT1 = torch.broadcast_to(torch.arange(T, device=device).unsqueeze(0), [B, T])
|
104 |
+
t_idx_BT1 = t_idx_BT1.unsqueeze(-1)
|
105 |
+
|
106 |
+
t_idx_BxTxC = torch.minimum(
|
107 |
+
t_idx_BT1 + delay_arr.view(1, 1, C),
|
108 |
+
torch.tensor(T - 1, device=device),
|
109 |
+
)
|
110 |
+
b_idx_BxTxC = torch.broadcast_to(torch.arange(B, device=device).view(B, 1, 1), [B, T, C])
|
111 |
+
c_idx_BxTxC = torch.broadcast_to(torch.arange(C, device=device).view(1, 1, C), [B, T, C])
|
112 |
+
|
113 |
+
indices_BTCx3 = torch.stack(
|
114 |
+
[
|
115 |
+
b_idx_BxTxC.reshape(-1),
|
116 |
+
t_idx_BxTxC.reshape(-1),
|
117 |
+
c_idx_BxTxC.reshape(-1),
|
118 |
+
],
|
119 |
+
axis=1,
|
120 |
+
).long() # Ensure indices are long type
|
121 |
+
|
122 |
+
return t_idx_BxTxC, indices_BTCx3
|
123 |
+
|
124 |
+
|
125 |
+
def revert_audio_delay(
|
126 |
+
audio_BxTxC: torch.Tensor,
|
127 |
+
pad_value: int,
|
128 |
+
precomp: tp.Tuple[torch.Tensor, torch.Tensor],
|
129 |
+
T: int,
|
130 |
+
) -> torch.Tensor:
|
131 |
+
"""
|
132 |
+
Reverts a delay pattern from batched audio tokens using precomputed indices (PyTorch version).
|
133 |
+
|
134 |
+
Args:
|
135 |
+
audio_BxTxC: Input delayed audio tensor
|
136 |
+
pad_value: Padding value for out-of-bounds indices
|
137 |
+
precomp: Precomputed revert indices tuple containing:
|
138 |
+
- t_idx_BxTxC: Time offset indices tensor
|
139 |
+
- indices_BTCx3: Gather indices tensor for original audio
|
140 |
+
T: Original sequence length before padding
|
141 |
+
|
142 |
+
Returns:
|
143 |
+
Reverted audio tensor with same shape as input
|
144 |
+
"""
|
145 |
+
t_idx_BxTxC, indices_BTCx3 = precomp
|
146 |
+
device = audio_BxTxC.device # Get device from input tensor
|
147 |
+
|
148 |
+
# Move precomputed indices to the same device as audio_BxTxC if they aren't already
|
149 |
+
t_idx_BxTxC = t_idx_BxTxC.to(device)
|
150 |
+
indices_BTCx3 = indices_BTCx3.to(device)
|
151 |
+
|
152 |
+
# Using PyTorch advanced indexing (equivalent to tf.gather_nd or np equivalent)
|
153 |
+
gathered_flat = audio_BxTxC[indices_BTCx3[:, 0], indices_BTCx3[:, 1], indices_BTCx3[:, 2]]
|
154 |
+
gathered_BxTxC = gathered_flat.view(audio_BxTxC.size()) # Use .size() for robust reshaping
|
155 |
+
|
156 |
+
# Create pad_tensor on the correct device
|
157 |
+
pad_tensor = torch.tensor(pad_value, dtype=audio_BxTxC.dtype, device=device)
|
158 |
+
# Create T tensor on the correct device for comparison
|
159 |
+
T_tensor = torch.tensor(T, device=device)
|
160 |
+
|
161 |
+
result_BxTxC = torch.where(t_idx_BxTxC >= T_tensor, pad_tensor, gathered_BxTxC) # Changed np.where to torch.where
|
162 |
+
|
163 |
+
return result_BxTxC
|
164 |
+
|
165 |
+
|
166 |
+
@torch.no_grad()
|
167 |
+
@torch.inference_mode()
|
168 |
+
def decode(
|
169 |
+
model,
|
170 |
+
audio_codes,
|
171 |
+
):
|
172 |
+
"""
|
173 |
+
Decodes the given frames into an output audio waveform
|
174 |
+
"""
|
175 |
+
if len(audio_codes) != 1:
|
176 |
+
raise ValueError(f"Expected one frame, got {len(audio_codes)}")
|
177 |
+
|
178 |
+
try:
|
179 |
+
audio_values = model.quantizer.from_codes(audio_codes)
|
180 |
+
audio_values = model.decode(audio_values[0])
|
181 |
+
|
182 |
+
return audio_values
|
183 |
+
except Exception as e:
|
184 |
+
print(f"Error in decode method: {str(e)}")
|
185 |
+
raise
|
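For orientation, here is a minimal sketch (not part of the diff) of reverting a delay pattern end to end. It assumes `build_revert_indices` from earlier in dia/audio.py, with the keyword signature used at its call site in dia/model.py; the shapes and pad value follow the defaults in dia/config.py.

import torch
from dia.audio import build_revert_indices, revert_audio_delay

B, T, C = 1, 32, 9
delay_pattern = [0, 8, 9, 10, 11, 12, 13, 14, 15]
delayed = torch.randint(0, 1024, (B, T, C))  # delayed audio codes
precomp = build_revert_indices(B=B, T=T, C=C, delay_pattern=delay_pattern)
undelayed = revert_audio_delay(delayed, pad_value=1025, precomp=precomp, T=T)
print(undelayed.shape)  # torch.Size([1, 32, 9])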
dia/config.py
ADDED
@@ -0,0 +1,187 @@
1 |
+
"""Configuration management module for the Dia model.
|
2 |
+
|
3 |
+
This module provides comprehensive configuration management for the Dia model,
|
4 |
+
utilizing Pydantic for validation. It defines configurations for data processing,
|
5 |
+
model architecture (encoder and decoder), and training settings.
|
6 |
+
|
7 |
+
Key components:
|
8 |
+
- DataConfig: Parameters for data loading and preprocessing.
|
9 |
+
- EncoderConfig: Architecture details for the encoder module.
|
10 |
+
- DecoderConfig: Architecture details for the decoder module.
|
11 |
+
- ModelConfig: Combined model architecture settings.
|
12 |
+
- TrainingConfig: Training hyperparameters and settings.
|
13 |
+
- DiaConfig: Master configuration combining all components.
|
14 |
+
"""
|
15 |
+
|
16 |
+
import os
|
17 |
+
from typing import Annotated
|
18 |
+
|
19 |
+
from pydantic import BaseModel, BeforeValidator, Field
|
20 |
+
|
21 |
+
|
22 |
+
class DataConfig(BaseModel, frozen=True):
|
23 |
+
"""Configuration for data loading and preprocessing.
|
24 |
+
|
25 |
+
Attributes:
|
26 |
+
text_length: Maximum length of text sequences (must be multiple of 128).
|
27 |
+
audio_length: Maximum length of audio sequences (must be multiple of 128).
|
28 |
+
channels: Number of audio channels.
|
29 |
+
text_pad_value: Value used for padding text sequences.
|
30 |
+
audio_eos_value: Value representing the end of audio sequences.
|
31 |
+
audio_bos_value: Value representing the beginning of audio sequences.
|
32 |
+
audio_pad_value: Value used for padding audio sequences.
|
33 |
+
delay_pattern: List of delay values for each audio channel.
|
34 |
+
"""
|
35 |
+
|
36 |
+
text_length: Annotated[int, BeforeValidator(lambda x: (x + 127) // 128 * 128)] = Field(gt=0, multiple_of=128)
|
37 |
+
audio_length: Annotated[int, BeforeValidator(lambda x: (x + 127) // 128 * 128)] = Field(gt=0, multiple_of=128)
|
38 |
+
channels: int = Field(default=9, gt=0, multiple_of=1)
|
39 |
+
text_pad_value: int = Field(default=0)
|
40 |
+
audio_eos_value: int = Field(default=1024)
|
41 |
+
audio_pad_value: int = Field(default=1025)
|
42 |
+
audio_bos_value: int = Field(default=1026)
|
43 |
+
delay_pattern: list[Annotated[int, Field(ge=0)]] = Field(default_factory=lambda: [0, 8, 9, 10, 11, 12, 13, 14, 15])
|
44 |
+
|
45 |
+
def __hash__(self) -> int:
|
46 |
+
"""Generate a hash based on all fields of the config."""
|
47 |
+
return hash(
|
48 |
+
(
|
49 |
+
self.text_length,
|
50 |
+
self.audio_length,
|
51 |
+
self.channels,
|
52 |
+
self.text_pad_value,
|
53 |
+
self.audio_pad_value,
|
54 |
+
self.audio_bos_value,
|
55 |
+
self.audio_eos_value,
|
56 |
+
tuple(self.delay_pattern),
|
57 |
+
)
|
58 |
+
)
|
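For illustration, a small sketch of how the `BeforeValidator` above rounds lengths up to the next multiple of 128 (the input values are arbitrary):

from dia.config import DataConfig

cfg = DataConfig(text_length=1000, audio_length=3000)
print(cfg.text_length, cfg.audio_length)  # 1024 3072
print(cfg.delay_pattern)                  # [0, 8, 9, 10, 11, 12, 13, 14, 15]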
59 |
+
|
60 |
+
|
61 |
+
class EncoderConfig(BaseModel, frozen=True):
|
62 |
+
"""Configuration for the encoder component of the Dia model.
|
63 |
+
|
64 |
+
Attributes:
|
65 |
+
n_layer: Number of transformer layers.
|
66 |
+
n_embd: Embedding dimension.
|
67 |
+
n_hidden: Hidden dimension size in the MLP layers.
|
68 |
+
n_head: Number of attention heads.
|
69 |
+
head_dim: Dimension per attention head.
|
70 |
+
"""
|
71 |
+
|
72 |
+
n_layer: int = Field(gt=0)
|
73 |
+
n_embd: int = Field(gt=0)
|
74 |
+
n_hidden: int = Field(gt=0)
|
75 |
+
n_head: int = Field(gt=0)
|
76 |
+
head_dim: int = Field(gt=0)
|
77 |
+
|
78 |
+
|
79 |
+
class DecoderConfig(BaseModel, frozen=True):
|
80 |
+
"""Configuration for the decoder component of the Dia model.
|
81 |
+
|
82 |
+
Attributes:
|
83 |
+
n_layer: Number of transformer layers.
|
84 |
+
n_embd: Embedding dimension.
|
85 |
+
n_hidden: Hidden dimension size in the MLP layers.
|
86 |
+
gqa_query_heads: Number of query heads for grouped-query self-attention.
|
87 |
+
kv_heads: Number of key/value heads for grouped-query self-attention.
|
88 |
+
gqa_head_dim: Dimension per query head for grouped-query self-attention.
|
89 |
+
cross_query_heads: Number of query heads for cross-attention.
|
90 |
+
cross_head_dim: Dimension per cross-attention head.
|
91 |
+
"""
|
92 |
+
|
93 |
+
n_layer: int = Field(gt=0)
|
94 |
+
n_embd: int = Field(gt=0)
|
95 |
+
n_hidden: int = Field(gt=0)
|
96 |
+
gqa_query_heads: int = Field(gt=0)
|
97 |
+
kv_heads: int = Field(gt=0)
|
98 |
+
gqa_head_dim: int = Field(gt=0)
|
99 |
+
cross_query_heads: int = Field(gt=0)
|
100 |
+
cross_head_dim: int = Field(gt=0)
|
101 |
+
|
102 |
+
|
103 |
+
class ModelConfig(BaseModel, frozen=True):
|
104 |
+
"""Main configuration container for the Dia model architecture.
|
105 |
+
|
106 |
+
Attributes:
|
107 |
+
encoder: Configuration for the encoder component.
|
108 |
+
decoder: Configuration for the decoder component.
|
109 |
+
src_vocab_size: Size of the source (text) vocabulary.
|
110 |
+
tgt_vocab_size: Size of the target (audio code) vocabulary.
|
111 |
+
dropout: Dropout probability applied within the model.
|
112 |
+
normalization_layer_epsilon: Epsilon value for normalization layers (e.g., LayerNorm).
|
113 |
+
weight_dtype: Data type for model weights (e.g., "float32", "bfloat16").
|
114 |
+
rope_min_timescale: Minimum timescale for Rotary Positional Embeddings (RoPE).
|
115 |
+
rope_max_timescale: Maximum timescale for Rotary Positional Embeddings (RoPE).
|
116 |
+
"""
|
117 |
+
|
118 |
+
encoder: EncoderConfig
|
119 |
+
decoder: DecoderConfig
|
120 |
+
src_vocab_size: int = Field(default=128, gt=0)
|
121 |
+
tgt_vocab_size: int = Field(default=1028, gt=0)
|
122 |
+
dropout: float = Field(default=0.0, ge=0.0, lt=1.0)
|
123 |
+
normalization_layer_epsilon: float = Field(default=1.0e-5, ge=0.0)
|
124 |
+
weight_dtype: str = Field(default="float32", description="Weight precision")
|
125 |
+
rope_min_timescale: int = Field(default=1, description="Timescale For global Attention")
|
126 |
+
rope_max_timescale: int = Field(default=10_000, description="Timescale For global Attention")
|
127 |
+
|
128 |
+
|
129 |
+
class TrainingConfig(BaseModel, frozen=True):
|
130 |
+
pass
|
131 |
+
|
132 |
+
|
133 |
+
class DiaConfig(BaseModel, frozen=True):
|
134 |
+
"""Master configuration for the Dia model.
|
135 |
+
|
136 |
+
Combines all sub-configurations into a single validated object.
|
137 |
+
|
138 |
+
Attributes:
|
139 |
+
version: Configuration version string.
|
140 |
+
model: Model architecture configuration.
|
141 |
+
training: Training process configuration (precision settings).
|
142 |
+
data: Data loading and processing configuration.
|
143 |
+
"""
|
144 |
+
|
145 |
+
version: str = Field(default="1.0")
|
146 |
+
model: ModelConfig
|
147 |
+
# TODO: remove training. this is just for backward compatibility
|
148 |
+
training: TrainingConfig | None = Field(default=None)
|
149 |
+
data: DataConfig
|
150 |
+
|
151 |
+
def save(self, path: str) -> None:
|
152 |
+
"""Save the current configuration instance to a JSON file.
|
153 |
+
|
154 |
+
Ensures the parent directory exists and the file has a .json extension.
|
155 |
+
|
156 |
+
Args:
|
157 |
+
path: The target file path to save the configuration.
|
158 |
+
|
159 |
+
Raises:
|
160 |
+
ValueError: If the path is not a file with a .json extension.
|
161 |
+
"""
|
162 |
+
os.makedirs(os.path.dirname(path), exist_ok=True)
|
163 |
+
config_json = self.model_dump_json(indent=2)
|
164 |
+
with open(path, "w") as f:
|
165 |
+
f.write(config_json)
|
166 |
+
|
167 |
+
@classmethod
|
168 |
+
def load(cls, path: str) -> "DiaConfig | None":
|
169 |
+
"""Load and validate a Dia configuration from a JSON file.
|
170 |
+
|
171 |
+
Args:
|
172 |
+
path: The path to the configuration file.
|
173 |
+
|
174 |
+
Returns:
|
175 |
+
A validated DiaConfig instance if the file exists and is valid,
|
176 |
+
otherwise None if the file is not found.
|
177 |
+
|
178 |
+
Raises:
|
179 |
+
ValueError: If the path does not point to an existing .json file.
|
180 |
+
pydantic.ValidationError: If the JSON content fails validation against the DiaConfig schema.
|
181 |
+
"""
|
182 |
+
try:
|
183 |
+
with open(path, "r") as f:
|
184 |
+
content = f.read()
|
185 |
+
return cls.model_validate_json(content)
|
186 |
+
except FileNotFoundError:
|
187 |
+
return None
|
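A sketch of a save/load round trip through the classes above; the layer sizes are illustrative placeholders, not the released checkpoint's configuration.

from dia.config import DataConfig, DecoderConfig, DiaConfig, EncoderConfig, ModelConfig

config = DiaConfig(
    model=ModelConfig(
        encoder=EncoderConfig(n_layer=2, n_embd=256, n_hidden=1024, n_head=4, head_dim=64),
        decoder=DecoderConfig(
            n_layer=2, n_embd=256, n_hidden=1024,
            gqa_query_heads=8, kv_heads=2, gqa_head_dim=32,
            cross_query_heads=4, cross_head_dim=64,
        ),
    ),
    data=DataConfig(text_length=1024, audio_length=3072),
)
config.save("out/config.json")  # creates out/ if it does not exist
assert DiaConfig.load("out/config.json") == config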
dia/layers.py
ADDED
@@ -0,0 +1,624 @@
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import torch.nn.functional as F
|
4 |
+
from huggingface_hub import PyTorchModelHubMixin
|
5 |
+
from torch import Tensor
|
6 |
+
from torch.nn import RMSNorm
|
7 |
+
|
8 |
+
from .config import DiaConfig
|
9 |
+
from .state import DecoderInferenceState, EncoderInferenceState, KVCache
|
10 |
+
|
11 |
+
|
12 |
+
def _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:
|
13 |
+
return tuple(ax if ax >= 0 else ndim + ax for ax in axes)
|
14 |
+
|
15 |
+
|
16 |
+
class DenseGeneral(nn.Module):
|
17 |
+
"""
|
18 |
+
PyTorch equivalent of flax.linen.DenseGeneral with shapes defined at init.
|
19 |
+
|
20 |
+
Stores weights (`kernel`) in the same layout as Jax and uses torch.tensordot
|
21 |
+
for the generalized matrix multiplication. The kernel shape is computed
|
22 |
+
and the parameter created during initialization from the given shapes;
|
23 |
+
weights are loaded through the regular state_dict mechanism.
|
24 |
+
|
25 |
+
Attributes:
|
26 |
+
axis (Tuple[int, ...]): Input axis or axes to contract.
|
27 |
+
in_shapes (Tuple[int, ...]): Sizes of the input dimensions specified by `axis`.
|
28 |
+
out_features (Tuple[int, ...]): Shape of the output features (non-contracted dims).
|
29 |
+
weight (nn.Parameter): The kernel parameter with shape
|
30 |
+
in_shapes + out_features (no bias term is used).
|
31 |
+
|
32 |
+
"""
|
33 |
+
|
34 |
+
def __init__(
|
35 |
+
self,
|
36 |
+
in_shapes: tuple[int, ...],
|
37 |
+
out_features: tuple[int, ...],
|
38 |
+
axis: tuple[int, ...] = (-1,),
|
39 |
+
weight_dtype: torch.dtype | None = None,
|
40 |
+
device: torch.device | None = None,
|
41 |
+
):
|
42 |
+
super().__init__()
|
43 |
+
self.in_shapes = in_shapes
|
44 |
+
self.out_features = out_features
|
45 |
+
self.axis = axis
|
46 |
+
self.kernel_shape = self.in_shapes + self.out_features
|
47 |
+
|
48 |
+
factory_kwargs = {"device": device, "dtype": weight_dtype}
|
49 |
+
self.weight = nn.Parameter(torch.empty(self.kernel_shape, **factory_kwargs))
|
50 |
+
|
51 |
+
def forward(self, inputs: Tensor) -> Tensor:
|
52 |
+
norm_axis = _normalize_axes(self.axis, inputs.ndim)
|
53 |
+
kernel_contract_axes = tuple(range(len(norm_axis)))
|
54 |
+
|
55 |
+
output = torch.tensordot(
|
56 |
+
inputs.to(self.weight.dtype),
|
57 |
+
self.weight,
|
58 |
+
dims=(norm_axis, kernel_contract_axes),
|
59 |
+
).to(inputs.dtype)
|
60 |
+
return output
|
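A quick sketch of what the tensordot in DenseGeneral computes for the common last-axis case; the einsum is only a reference check, and the weight is explicitly initialized because it is created with torch.empty.

import torch
from dia.layers import DenseGeneral

layer = DenseGeneral(in_shapes=(16,), out_features=(4, 8), axis=(-1,), weight_dtype=torch.float32)
torch.nn.init.normal_(layer.weight)          # parameter is created uninitialized
x = torch.randn(2, 5, 16)
out = layer(x)                               # contracts the last dim of x with the kernel
ref = torch.einsum("btd,dnh->btnh", x, layer.weight)
print(out.shape, torch.allclose(out, ref))   # torch.Size([2, 5, 4, 8]) True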
61 |
+
|
62 |
+
|
63 |
+
class MlpBlock(nn.Module):
|
64 |
+
"""MLP block using DenseGeneral."""
|
65 |
+
|
66 |
+
def __init__(self, embed_dim: int, intermediate_dim: int, compute_dtype: torch.dtype):
|
67 |
+
super().__init__()
|
68 |
+
self.dtype = compute_dtype
|
69 |
+
|
70 |
+
self.wi_fused = DenseGeneral(
|
71 |
+
in_shapes=(embed_dim,),
|
72 |
+
out_features=(2, intermediate_dim),
|
73 |
+
axis=(-1,),
|
74 |
+
weight_dtype=compute_dtype,
|
75 |
+
)
|
76 |
+
|
77 |
+
self.wo = DenseGeneral(
|
78 |
+
in_shapes=(intermediate_dim,),
|
79 |
+
out_features=(embed_dim,),
|
80 |
+
axis=(-1,),
|
81 |
+
weight_dtype=compute_dtype,
|
82 |
+
)
|
83 |
+
|
84 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
85 |
+
"""Forward pass."""
|
86 |
+
fused_x = self.wi_fused(x)
|
87 |
+
|
88 |
+
gate = fused_x[..., 0, :]
|
89 |
+
up = fused_x[..., 1, :]
|
90 |
+
|
91 |
+
hidden = torch.mul(F.silu(gate), up).to(self.dtype)
|
92 |
+
|
93 |
+
output = self.wo(hidden)
|
94 |
+
return output
|
95 |
+
|
96 |
+
|
97 |
+
class RotaryEmbedding(nn.Module):
|
98 |
+
"""Rotary Position Embedding (RoPE) implementation in PyTorch."""
|
99 |
+
|
100 |
+
def __init__(
|
101 |
+
self,
|
102 |
+
embedding_dims: int,
|
103 |
+
min_timescale: int = 1,
|
104 |
+
max_timescale: int = 10000,
|
105 |
+
dtype: torch.dtype = torch.float32,
|
106 |
+
):
|
107 |
+
super().__init__()
|
108 |
+
if embedding_dims % 2 != 0:
|
109 |
+
raise ValueError("Embedding dim must be even for RoPE.")
|
110 |
+
self.embedding_dims = embedding_dims
|
111 |
+
self.min_timescale = min_timescale
|
112 |
+
self.max_timescale = max_timescale
|
113 |
+
self.compute_dtype = dtype
|
114 |
+
|
115 |
+
half_embedding_dim = embedding_dims // 2
|
116 |
+
fraction = (2.0 * torch.arange(0, half_embedding_dim)) / embedding_dims
|
117 |
+
timescale = (self.min_timescale * (self.max_timescale / self.min_timescale) ** fraction).to(torch.float32)
|
118 |
+
self.register_buffer("timescale", timescale, persistent=False)
|
119 |
+
|
120 |
+
def forward(self, inputs: torch.Tensor, position: torch.Tensor):
|
121 |
+
"""Applies RoPE."""
|
122 |
+
position = position.unsqueeze(-1).unsqueeze(-1)
|
123 |
+
sinusoid_inp = position / self.timescale
|
124 |
+
sin = torch.sin(sinusoid_inp)
|
125 |
+
cos = torch.cos(sinusoid_inp)
|
126 |
+
first_half, second_half = torch.chunk(inputs.to(torch.float32), 2, dim=-1)
|
127 |
+
first_part = first_half * cos - second_half * sin
|
128 |
+
second_part = second_half * cos + first_half * sin
|
129 |
+
return torch.cat((first_part.to(self.compute_dtype), second_part.to(self.compute_dtype)), dim=-1)
|
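For reference, a minimal sketch of applying the rotary embedding above to a projected query tensor with explicit positions:

import torch
from dia.layers import RotaryEmbedding

rope = RotaryEmbedding(embedding_dims=64)
q = torch.randn(2, 10, 4, 64)                             # (B, T, heads, head_dim)
positions = torch.arange(10).unsqueeze(0).expand(2, -1)   # (B, T)
q_rot = rope(q, position=positions)
print(q_rot.shape)                                         # torch.Size([2, 10, 4, 64])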
130 |
+
|
131 |
+
|
132 |
+
class Attention(nn.Module):
|
133 |
+
"""Attention using DenseGeneral."""
|
134 |
+
|
135 |
+
def __init__(
|
136 |
+
self,
|
137 |
+
config: DiaConfig,
|
138 |
+
q_embed_dim: int,
|
139 |
+
kv_embed_dim: int,
|
140 |
+
num_query_heads: int,
|
141 |
+
num_kv_heads: int,
|
142 |
+
head_dim: int,
|
143 |
+
compute_dtype: torch.dtype,
|
144 |
+
is_cross_attn: bool = False,
|
145 |
+
out_embed_dim: int | None = None,
|
146 |
+
):
|
147 |
+
super().__init__()
|
148 |
+
self.num_query_heads = num_query_heads
|
149 |
+
self.num_kv_heads = num_kv_heads
|
150 |
+
self.head_dim = head_dim
|
151 |
+
self.is_cross_attn = is_cross_attn
|
152 |
+
self.output_dim = out_embed_dim if out_embed_dim is not None else q_embed_dim
|
153 |
+
self.projected_query_dim = num_query_heads * head_dim
|
154 |
+
if num_query_heads % num_kv_heads != 0:
|
155 |
+
raise ValueError(f"num_query_heads ({num_query_heads}) must be divisible by num_kv_heads ({num_kv_heads})")
|
156 |
+
self.num_gqa_groups = num_query_heads // num_kv_heads
|
157 |
+
|
158 |
+
# --- Projection Layers using DenseGeneral ---
|
159 |
+
self.q_proj = DenseGeneral(
|
160 |
+
in_shapes=(q_embed_dim,),
|
161 |
+
out_features=(num_query_heads, head_dim),
|
162 |
+
axis=(-1,),
|
163 |
+
weight_dtype=compute_dtype,
|
164 |
+
)
|
165 |
+
self.k_proj = DenseGeneral(
|
166 |
+
in_shapes=(kv_embed_dim,),
|
167 |
+
out_features=(num_kv_heads, head_dim),
|
168 |
+
axis=(-1,),
|
169 |
+
weight_dtype=compute_dtype,
|
170 |
+
)
|
171 |
+
self.v_proj = DenseGeneral(
|
172 |
+
in_shapes=(kv_embed_dim,),
|
173 |
+
out_features=(num_kv_heads, head_dim),
|
174 |
+
axis=(-1,),
|
175 |
+
weight_dtype=compute_dtype,
|
176 |
+
)
|
177 |
+
self.o_proj = DenseGeneral(
|
178 |
+
in_shapes=(num_query_heads, head_dim),
|
179 |
+
out_features=(self.output_dim,),
|
180 |
+
axis=(-2, -1),
|
181 |
+
weight_dtype=compute_dtype,
|
182 |
+
)
|
183 |
+
|
184 |
+
# --- Rotary Embedding ---
|
185 |
+
self.rotary_emb = RotaryEmbedding(
|
186 |
+
embedding_dims=self.head_dim,
|
187 |
+
min_timescale=config.model.rope_min_timescale,
|
188 |
+
max_timescale=config.model.rope_max_timescale,
|
189 |
+
dtype=compute_dtype,
|
190 |
+
)
|
191 |
+
|
192 |
+
def forward(
|
193 |
+
self,
|
194 |
+
Xq: torch.Tensor, # (B, T, D) T = 1 in AR generation
|
195 |
+
Xkv: torch.Tensor, # (B, S, E) S = 1 in AR generation
|
196 |
+
q_positions: torch.Tensor, # (B, T)
|
197 |
+
kv_positions: torch.Tensor | None = None, # (B, S)
|
198 |
+
attn_mask: torch.Tensor | None = None, # None in Decoder Self Attention, Valid mask in Others
|
199 |
+
cache: KVCache | None = None, # None in Encoder, KVCache in Decoder
|
200 |
+
prefill: bool = False,
|
201 |
+
is_causal: bool = False,
|
202 |
+
) -> torch.Tensor:
|
203 |
+
"""
|
204 |
+
Performs attention calculation with optional KV caching.
|
205 |
+
|
206 |
+
Args:
|
207 |
+
Xq: Query tensor (B, T, D). T=1 during single-step decoding.
|
208 |
+
Xkv: Key/Value source tensor (B, S, E). S=1 during single-step decoding for self-attn.
|
209 |
+
q_positions: Positions for queries (B, T).
|
210 |
+
kv_positions: Positions for keys/values (B, S). If None, uses q_positions.
|
211 |
+
attn_mask: Attention mask.
|
212 |
+
cache: KVCache.
|
213 |
+
prefill: If True, use prefill mode.
|
214 |
+
|
215 |
+
Returns:
|
216 |
+
output: The attention output tensor (B, T, output_dim), cast back to the input dtype.
|
217 |
+
Updated K/V states are written into the provided KVCache (when given)
|
218 |
+
in place; they are not returned by this method.
|
219 |
+
"""
|
220 |
+
if kv_positions is None:
|
221 |
+
kv_positions = q_positions
|
222 |
+
original_dtype = Xq.dtype
|
223 |
+
|
224 |
+
Xq_BxTxNxH = self.q_proj(Xq)
|
225 |
+
Xq_BxTxNxH = self.rotary_emb(Xq_BxTxNxH, position=q_positions)
|
226 |
+
Xq_BxNxTxH = Xq_BxTxNxH.transpose(1, 2)
|
227 |
+
|
228 |
+
attn_k: torch.Tensor | None = None
|
229 |
+
attn_v: torch.Tensor | None = None
|
230 |
+
|
231 |
+
if self.is_cross_attn:
|
232 |
+
attn_k, attn_v = cache.k, cache.v
|
233 |
+
else:
|
234 |
+
Xk_BxSxKxH = self.k_proj(Xkv) # (B, S, K, H)
|
235 |
+
Xv_BxSxKxH = self.v_proj(Xkv) # (B, S, K, H)
|
236 |
+
Xk_BxSxKxH = self.rotary_emb(Xk_BxSxKxH, position=kv_positions) # (B, S, K, H)
|
237 |
+
|
238 |
+
Xk_BxKxSxH = Xk_BxSxKxH.transpose(1, 2) # (B, K, S, H)
|
239 |
+
Xv_BxKxSxH = Xv_BxSxKxH.transpose(1, 2) # (B, K, S, H)
|
240 |
+
|
241 |
+
if cache is None:
|
242 |
+
attn_k = Xk_BxKxSxH
|
243 |
+
attn_v = Xv_BxKxSxH
|
244 |
+
else:
|
245 |
+
if prefill:
|
246 |
+
attn_k, attn_v = Xk_BxKxSxH, Xv_BxKxSxH
|
247 |
+
cache.prefill(attn_k, attn_v)
|
248 |
+
else:
|
249 |
+
attn_k, attn_v = cache.update(Xk_BxKxSxH, Xv_BxKxSxH)
|
250 |
+
|
251 |
+
attn_output = F.scaled_dot_product_attention(
|
252 |
+
Xq_BxNxTxH,
|
253 |
+
attn_k,
|
254 |
+
attn_v,
|
255 |
+
attn_mask=attn_mask,
|
256 |
+
scale=1.0,
|
257 |
+
enable_gqa=self.num_gqa_groups > 1,
|
258 |
+
is_causal=is_causal,
|
259 |
+
)
|
260 |
+
|
261 |
+
attn_output = attn_output.transpose(1, 2).contiguous() # (B, T, N, H)
|
262 |
+
output = self.o_proj(attn_output)
|
263 |
+
|
264 |
+
return output.to(original_dtype)
|
265 |
+
|
266 |
+
|
267 |
+
class EncoderLayer(nn.Module):
|
268 |
+
"""Transformer Encoder Layer using DenseGeneral."""
|
269 |
+
|
270 |
+
def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
|
271 |
+
super().__init__()
|
272 |
+
self.config = config
|
273 |
+
model_config = config.model
|
274 |
+
enc_config = config.model.encoder
|
275 |
+
embed_dim = enc_config.n_embd
|
276 |
+
self.compute_dtype = compute_dtype
|
277 |
+
|
278 |
+
self.pre_sa_norm = RMSNorm(
|
279 |
+
embed_dim,
|
280 |
+
eps=model_config.normalization_layer_epsilon,
|
281 |
+
dtype=torch.float32,
|
282 |
+
)
|
283 |
+
self.self_attention = Attention(
|
284 |
+
config,
|
285 |
+
q_embed_dim=embed_dim,
|
286 |
+
kv_embed_dim=embed_dim,
|
287 |
+
num_query_heads=enc_config.n_head,
|
288 |
+
num_kv_heads=enc_config.n_head,
|
289 |
+
head_dim=enc_config.head_dim,
|
290 |
+
compute_dtype=compute_dtype,
|
291 |
+
is_cross_attn=False,
|
292 |
+
out_embed_dim=embed_dim,
|
293 |
+
)
|
294 |
+
self.post_sa_norm = RMSNorm(
|
295 |
+
embed_dim,
|
296 |
+
eps=model_config.normalization_layer_epsilon,
|
297 |
+
dtype=torch.float32,
|
298 |
+
)
|
299 |
+
self.mlp = MlpBlock(embed_dim=embed_dim, intermediate_dim=enc_config.n_hidden, compute_dtype=compute_dtype)
|
300 |
+
|
301 |
+
def forward(
|
302 |
+
self,
|
303 |
+
x: torch.Tensor,
|
304 |
+
state: EncoderInferenceState,
|
305 |
+
) -> torch.Tensor:
|
306 |
+
residual = x
|
307 |
+
x_norm = self.pre_sa_norm(x).to(self.compute_dtype)
|
308 |
+
|
309 |
+
sa_out = self.self_attention(
|
310 |
+
Xq=x_norm,
|
311 |
+
Xkv=x_norm,
|
312 |
+
q_positions=state.positions,
|
313 |
+
kv_positions=state.positions,
|
314 |
+
attn_mask=state.attn_mask,
|
315 |
+
)
|
316 |
+
x = residual + sa_out
|
317 |
+
|
318 |
+
residual = x
|
319 |
+
x_norm = self.post_sa_norm(x).to(self.compute_dtype)
|
320 |
+
mlp_out = self.mlp(x_norm)
|
321 |
+
x = residual + mlp_out
|
322 |
+
|
323 |
+
return x
|
324 |
+
|
325 |
+
|
326 |
+
class Encoder(nn.Module):
|
327 |
+
"""Transformer Encoder Stack using DenseGeneral."""
|
328 |
+
|
329 |
+
def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
|
330 |
+
super().__init__()
|
331 |
+
self.config = config
|
332 |
+
model_config = config.model
|
333 |
+
enc_config = config.model.encoder
|
334 |
+
self.compute_dtype = compute_dtype
|
335 |
+
|
336 |
+
self.embedding = nn.Embedding(
|
337 |
+
model_config.src_vocab_size,
|
338 |
+
enc_config.n_embd,
|
339 |
+
dtype=compute_dtype,
|
340 |
+
)
|
341 |
+
self.layers = nn.ModuleList([EncoderLayer(config, compute_dtype) for _ in range(enc_config.n_layer)])
|
342 |
+
self.norm = RMSNorm(
|
343 |
+
enc_config.n_embd,
|
344 |
+
eps=model_config.normalization_layer_epsilon,
|
345 |
+
dtype=torch.float32,
|
346 |
+
)
|
347 |
+
|
348 |
+
def forward(
|
349 |
+
self,
|
350 |
+
x_ids: torch.Tensor,
|
351 |
+
state: EncoderInferenceState,
|
352 |
+
) -> torch.Tensor:
|
353 |
+
x = self.embedding(x_ids)
|
354 |
+
|
355 |
+
for layer in self.layers:
|
356 |
+
x = layer(x, state)
|
357 |
+
|
358 |
+
x = self.norm(x).to(self.compute_dtype)
|
359 |
+
return x
|
360 |
+
|
361 |
+
|
362 |
+
class DecoderLayer(nn.Module):
|
363 |
+
"""Transformer Decoder Layer using DenseGeneral."""
|
364 |
+
|
365 |
+
def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
|
366 |
+
super().__init__()
|
367 |
+
self.config = config
|
368 |
+
model_config = config.model
|
369 |
+
dec_config = config.model.decoder
|
370 |
+
enc_config = config.model.encoder
|
371 |
+
dec_embed_dim = dec_config.n_embd
|
372 |
+
enc_embed_dim = enc_config.n_embd
|
373 |
+
self.compute_dtype = compute_dtype
|
374 |
+
|
375 |
+
# Norms
|
376 |
+
self.pre_sa_norm = RMSNorm(
|
377 |
+
dec_embed_dim,
|
378 |
+
eps=model_config.normalization_layer_epsilon,
|
379 |
+
dtype=torch.float32,
|
380 |
+
)
|
381 |
+
self.pre_ca_norm = RMSNorm(
|
382 |
+
dec_embed_dim,
|
383 |
+
eps=model_config.normalization_layer_epsilon,
|
384 |
+
dtype=torch.float32,
|
385 |
+
)
|
386 |
+
self.pre_mlp_norm = RMSNorm(
|
387 |
+
dec_embed_dim,
|
388 |
+
eps=model_config.normalization_layer_epsilon,
|
389 |
+
dtype=torch.float32,
|
390 |
+
)
|
391 |
+
|
392 |
+
# Self-Attention (GQA) with Causal Masking
|
393 |
+
self.self_attention = Attention(
|
394 |
+
config,
|
395 |
+
q_embed_dim=dec_embed_dim,
|
396 |
+
kv_embed_dim=dec_embed_dim,
|
397 |
+
num_query_heads=dec_config.gqa_query_heads,
|
398 |
+
num_kv_heads=dec_config.kv_heads,
|
399 |
+
head_dim=dec_config.gqa_head_dim,
|
400 |
+
compute_dtype=compute_dtype,
|
401 |
+
is_cross_attn=False,
|
402 |
+
out_embed_dim=dec_embed_dim,
|
403 |
+
)
|
404 |
+
# Cross-Attention (MHA)
|
405 |
+
self.cross_attention = Attention(
|
406 |
+
config=config,
|
407 |
+
q_embed_dim=dec_embed_dim,
|
408 |
+
kv_embed_dim=enc_embed_dim, # Note kv_embed_dim
|
409 |
+
num_query_heads=dec_config.cross_query_heads,
|
410 |
+
num_kv_heads=dec_config.cross_query_heads,
|
411 |
+
head_dim=dec_config.cross_head_dim,
|
412 |
+
compute_dtype=compute_dtype,
|
413 |
+
is_cross_attn=True,
|
414 |
+
out_embed_dim=dec_embed_dim,
|
415 |
+
)
|
416 |
+
# MLP
|
417 |
+
self.mlp = MlpBlock(
|
418 |
+
embed_dim=dec_embed_dim,
|
419 |
+
intermediate_dim=dec_config.n_hidden,
|
420 |
+
compute_dtype=compute_dtype,
|
421 |
+
)
|
422 |
+
|
423 |
+
def forward(
|
424 |
+
self,
|
425 |
+
x: torch.Tensor,
|
426 |
+
state: DecoderInferenceState,
|
427 |
+
self_attn_cache: KVCache | None = None,
|
428 |
+
cross_attn_cache: KVCache | None = None,
|
429 |
+
prefill: bool = False,
|
430 |
+
) -> torch.Tensor:
|
431 |
+
residual = x
|
432 |
+
x_norm = self.pre_sa_norm(x).to(self.compute_dtype)
|
433 |
+
|
434 |
+
sa_out = self.self_attention(
|
435 |
+
Xq=x_norm, # (2, 1, D)
|
436 |
+
Xkv=x_norm, # (2, 1, D)
|
437 |
+
q_positions=state.dec_positions, # (2, 1)
|
438 |
+
kv_positions=state.dec_positions, # (2, 1)
|
439 |
+
attn_mask=None,
|
440 |
+
cache=self_attn_cache,
|
441 |
+
prefill=prefill,
|
442 |
+
is_causal=prefill,
|
443 |
+
)
|
444 |
+
|
445 |
+
x = residual + sa_out
|
446 |
+
|
447 |
+
residual = x
|
448 |
+
x_norm = self.pre_ca_norm(x).to(self.compute_dtype)
|
449 |
+
ca_out = self.cross_attention(
|
450 |
+
Xq=x_norm,
|
451 |
+
Xkv=state.enc_out,
|
452 |
+
q_positions=state.dec_positions,
|
453 |
+
kv_positions=state.enc_positions,
|
454 |
+
attn_mask=state.dec_cross_attn_mask,
|
455 |
+
cache=cross_attn_cache,
|
456 |
+
)
|
457 |
+
x = residual + ca_out
|
458 |
+
|
459 |
+
residual = x
|
460 |
+
x_norm = self.pre_mlp_norm(x).to(self.compute_dtype)
|
461 |
+
mlp_out = self.mlp(x_norm)
|
462 |
+
x = residual + mlp_out
|
463 |
+
|
464 |
+
return x
|
465 |
+
|
466 |
+
|
467 |
+
class Decoder(nn.Module):
|
468 |
+
"""Transformer Decoder Stack using DenseGeneral."""
|
469 |
+
|
470 |
+
def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
|
471 |
+
super().__init__()
|
472 |
+
self.config = config
|
473 |
+
model_config = config.model
|
474 |
+
dec_config = config.model.decoder
|
475 |
+
data_config = config.data
|
476 |
+
self.num_channels = data_config.channels
|
477 |
+
self.num_layers = dec_config.n_layer
|
478 |
+
|
479 |
+
self.embeddings = nn.ModuleList(
|
480 |
+
[
|
481 |
+
nn.Embedding(model_config.tgt_vocab_size, dec_config.n_embd, dtype=compute_dtype)
|
482 |
+
for _ in range(self.num_channels)
|
483 |
+
]
|
484 |
+
)
|
485 |
+
self.layers = nn.ModuleList(
|
486 |
+
[DecoderLayer(config=config, compute_dtype=compute_dtype) for _ in range(self.num_layers)]
|
487 |
+
)
|
488 |
+
|
489 |
+
self.norm = RMSNorm(
|
490 |
+
dec_config.n_embd,
|
491 |
+
eps=model_config.normalization_layer_epsilon,
|
492 |
+
dtype=torch.float32,
|
493 |
+
)
|
494 |
+
|
495 |
+
self.logits_dense = DenseGeneral(
|
496 |
+
in_shapes=(dec_config.n_embd,),
|
497 |
+
out_features=(self.num_channels, model_config.tgt_vocab_size),
|
498 |
+
axis=(-1,),
|
499 |
+
weight_dtype=compute_dtype,
|
500 |
+
)
|
501 |
+
|
502 |
+
def precompute_cross_attn_cache(
|
503 |
+
self,
|
504 |
+
enc_out: torch.Tensor, # (B, S, E)
|
505 |
+
enc_positions: torch.Tensor, # (B, S)
|
506 |
+
) -> list[KVCache]:
|
507 |
+
"""
|
508 |
+
Computes the Key and Value tensors for cross-attention for each layer from the encoder output.
|
509 |
+
"""
|
510 |
+
per_layer_kv_cache: list[KVCache] = []
|
511 |
+
|
512 |
+
for layer in self.layers:
|
513 |
+
cross_attn_module = layer.cross_attention
|
514 |
+
k_proj = cross_attn_module.k_proj(enc_out)
|
515 |
+
v_proj = cross_attn_module.v_proj(enc_out)
|
516 |
+
|
517 |
+
k_proj = cross_attn_module.rotary_emb(k_proj, position=enc_positions)
|
518 |
+
k = k_proj.transpose(1, 2)
|
519 |
+
v = v_proj.transpose(1, 2)
|
520 |
+
|
521 |
+
per_layer_kv_cache.append(KVCache.from_kv(k, v))
|
522 |
+
|
523 |
+
return per_layer_kv_cache
|
524 |
+
|
525 |
+
def decode_step(
|
526 |
+
self,
|
527 |
+
tgt_ids_Bx1xC: torch.Tensor, # [B, 1, C]
|
528 |
+
state: DecoderInferenceState,
|
529 |
+
) -> torch.Tensor:
|
530 |
+
"""
|
531 |
+
Performs a single decoding step, managing KV caches layer by layer.
|
532 |
+
|
533 |
+
Returns:
|
534 |
+
A tuple containing:
|
535 |
+
- logits_Bx1xCV: The final output logits for the current step (B, 1, C*V), cast to float32.
|
536 |
+
"""
|
537 |
+
|
538 |
+
x = None
|
539 |
+
for i in range(self.num_channels):
|
540 |
+
channel_tokens = tgt_ids_Bx1xC[..., i]
|
541 |
+
channel_embed = self.embeddings[i](channel_tokens)
|
542 |
+
x = channel_embed if x is None else x + channel_embed
|
543 |
+
|
544 |
+
for i, layer in enumerate(self.layers):
|
545 |
+
self_cache = state.self_attn_cache[i]
|
546 |
+
cross_cache = state.cross_attn_cache[i]
|
547 |
+
x = layer(
|
548 |
+
x, # (2, 1, D)
|
549 |
+
state,
|
550 |
+
self_attn_cache=self_cache,
|
551 |
+
cross_attn_cache=cross_cache,
|
552 |
+
)
|
553 |
+
|
554 |
+
x = self.norm(x)
|
555 |
+
logits_Bx1xCxV = self.logits_dense(x)
|
556 |
+
|
557 |
+
return logits_Bx1xCxV.to(torch.float32)
|
558 |
+
|
559 |
+
def forward(self, tgt_ids_BxTxC: torch.Tensor, state: DecoderInferenceState) -> torch.Tensor:
|
560 |
+
"""
|
561 |
+
Forward pass for the Decoder stack, managing KV caches.
|
562 |
+
|
563 |
+
Args:
|
564 |
+
tgt_ids_BxTxC: Target token IDs (B, T, C).
|
565 |
+
state: DecoderInferenceState carrying the encoder output, encoder and
|
566 |
+
decoder positions, the cross-attention mask, and the per-layer
|
567 |
+
self-attention and cross-attention KV caches.
|
568 |
+
|
569 |
+
This full-sequence forward pass runs the self-attention caches in
|
570 |
+
prefill mode; it is used to prime the decoder on an audio prompt
|
571 |
+
before single-step decoding continues via `decode_step`.
|
572 |
+
|
573 |
+
The cross-attention caches are precomputed once from the encoder
|
574 |
+
output via `precompute_cross_attn_cache` and are passed identically
|
575 |
+
to all layers.
|
576 |
+
|
577 |
+
Returns:
|
578 |
+
logits: The final output logits, shaped (B, T, C, V) and cast
|
579 |
+
to float32.
|
580 |
+
Self-attention KV caches are updated in place inside `state`;
|
581 |
+
they are not returned.
|
582 |
+
"""
|
583 |
+
_, _, num_channels_in = tgt_ids_BxTxC.shape
|
584 |
+
assert num_channels_in == self.num_channels, "Input channels mismatch"
|
585 |
+
|
586 |
+
# Embeddings
|
587 |
+
x = None
|
588 |
+
for i in range(self.num_channels):
|
589 |
+
channel_tokens = tgt_ids_BxTxC[..., i]
|
590 |
+
channel_embed = self.embeddings[i](channel_tokens)
|
591 |
+
x = channel_embed if x is None else x + channel_embed
|
592 |
+
|
593 |
+
for i, layer in enumerate(self.layers):
|
594 |
+
self_cache = state.self_attn_cache[i]
|
595 |
+
cross_cache = state.cross_attn_cache[i]
|
596 |
+
x = layer(x, state, self_attn_cache=self_cache, cross_attn_cache=cross_cache, prefill=True)
|
597 |
+
|
598 |
+
# Final Norm
|
599 |
+
x = self.norm(x)
|
600 |
+
logits_BxTxCxV = self.logits_dense(x)
|
601 |
+
|
602 |
+
return logits_BxTxCxV.to(torch.float32)
|
603 |
+
|
604 |
+
|
605 |
+
class DiaModel(
|
606 |
+
nn.Module,
|
607 |
+
PyTorchModelHubMixin,
|
608 |
+
repo_url="https://github.com/nari-labs/dia",
|
609 |
+
pipeline_tag="text-to-speech",
|
610 |
+
license="apache-2.0",
|
611 |
+
coders={
|
612 |
+
DiaConfig: (
|
613 |
+
lambda x: x.model_dump(),
|
614 |
+
lambda data: DiaConfig.model_validate(data),
|
615 |
+
),
|
616 |
+
},
|
617 |
+
):
|
618 |
+
"""PyTorch Dia Model using DenseGeneral."""
|
619 |
+
|
620 |
+
def __init__(self, config: DiaConfig, compute_dtype: torch.dtype):
|
621 |
+
super().__init__()
|
622 |
+
self.config = config
|
623 |
+
self.encoder = Encoder(config, compute_dtype)
|
624 |
+
self.decoder = Decoder(config, compute_dtype)
|
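The decoder's self-attention relies on PyTorch's grouped-query support (`enable_gqa=True` in `scaled_dot_product_attention`, available in recent PyTorch releases). A standalone sketch of that call, with 8 query heads sharing 2 key/value heads as in the GQA configuration above:

import torch
import torch.nn.functional as F

B, Tq, Tk, H = 2, 1, 16, 32
q = torch.randn(B, 8, Tq, H)   # 8 query heads
k = torch.randn(B, 2, Tk, H)   # 2 key/value heads, each serving 4 query heads
v = torch.randn(B, 2, Tk, H)
out = F.scaled_dot_product_attention(q, k, v, scale=1.0, enable_gqa=True)
print(out.shape)                # torch.Size([2, 8, 1, 32])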
dia/model.py
ADDED
@@ -0,0 +1,455 @@
1 |
+
import time
|
2 |
+
from enum import Enum
|
3 |
+
|
4 |
+
import dac
|
5 |
+
import numpy as np
|
6 |
+
import torch
|
7 |
+
import torchaudio
|
8 |
+
|
9 |
+
from .audio import apply_audio_delay, build_delay_indices, build_revert_indices, decode, revert_audio_delay
|
10 |
+
from .config import DiaConfig
|
11 |
+
from .layers import DiaModel
|
12 |
+
from .state import DecoderInferenceState, DecoderOutput, EncoderInferenceState
|
13 |
+
|
14 |
+
|
15 |
+
DEFAULT_SAMPLE_RATE = 44100
|
16 |
+
|
17 |
+
|
18 |
+
def _get_default_device():
|
19 |
+
if torch.cuda.is_available():
|
20 |
+
return torch.device("cuda")
|
21 |
+
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
|
22 |
+
return torch.device("mps")
|
23 |
+
return torch.device("cpu")
|
24 |
+
|
25 |
+
|
26 |
+
def _sample_next_token(
|
27 |
+
logits_BCxV: torch.Tensor,
|
28 |
+
temperature: float,
|
29 |
+
top_p: float,
|
30 |
+
cfg_filter_top_k: int | None = None,
|
31 |
+
) -> torch.Tensor:
|
32 |
+
if temperature == 0.0:
|
33 |
+
return torch.argmax(logits_BCxV, dim=-1)
|
34 |
+
|
35 |
+
logits_BCxV = logits_BCxV / temperature
|
36 |
+
if cfg_filter_top_k is not None:
|
37 |
+
_, top_k_indices_BCxV = torch.topk(logits_BCxV, k=cfg_filter_top_k, dim=-1)
|
38 |
+
mask = torch.ones_like(logits_BCxV, dtype=torch.bool)
|
39 |
+
mask.scatter_(dim=-1, index=top_k_indices_BCxV, value=False)
|
40 |
+
logits_BCxV = logits_BCxV.masked_fill(mask, -torch.inf)
|
41 |
+
|
42 |
+
if top_p < 1.0:
|
43 |
+
probs_BCxV = torch.softmax(logits_BCxV, dim=-1)
|
44 |
+
sorted_probs_BCxV, sorted_indices_BCxV = torch.sort(probs_BCxV, dim=-1, descending=True)
|
45 |
+
cumulative_probs_BCxV = torch.cumsum(sorted_probs_BCxV, dim=-1)
|
46 |
+
|
47 |
+
sorted_indices_to_remove_BCxV = cumulative_probs_BCxV > top_p
|
48 |
+
sorted_indices_to_remove_BCxV[..., 1:] = sorted_indices_to_remove_BCxV[..., :-1].clone()
|
49 |
+
sorted_indices_to_remove_BCxV[..., 0] = 0
|
50 |
+
|
51 |
+
indices_to_remove_BCxV = torch.zeros_like(sorted_indices_to_remove_BCxV)
|
52 |
+
indices_to_remove_BCxV.scatter_(dim=-1, index=sorted_indices_BCxV, src=sorted_indices_to_remove_BCxV)
|
53 |
+
logits_BCxV = logits_BCxV.masked_fill(indices_to_remove_BCxV, -torch.inf)
|
54 |
+
|
55 |
+
final_probs_BCxV = torch.softmax(logits_BCxV, dim=-1)
|
56 |
+
|
57 |
+
sampled_indices_BC = torch.multinomial(final_probs_BCxV, num_samples=1)
|
58 |
+
sampled_indices_C = sampled_indices_BC.squeeze(-1)
|
59 |
+
return sampled_indices_C
|
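A small sketch of `_sample_next_token` at its extremes: temperature 0.0 falls back to argmax, while top-k and top-p progressively shrink the candidate set. The function is module-private and imported here only for illustration.

import torch
from dia.model import _sample_next_token

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])  # one channel, vocabulary of 4
greedy = _sample_next_token(logits, temperature=0.0, top_p=0.95)
sampled = _sample_next_token(logits, temperature=1.3, top_p=0.95, cfg_filter_top_k=2)
print(greedy.item(), sampled.item())             # 0, then 0 or 1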
60 |
+
|
61 |
+
|
62 |
+
class ComputeDtype(str, Enum):
|
63 |
+
FLOAT32 = "float32"
|
64 |
+
FLOAT16 = "float16"
|
65 |
+
BFLOAT16 = "bfloat16"
|
66 |
+
|
67 |
+
def to_dtype(self) -> torch.dtype:
|
68 |
+
if self == ComputeDtype.FLOAT32:
|
69 |
+
return torch.float32
|
70 |
+
elif self == ComputeDtype.FLOAT16:
|
71 |
+
return torch.float16
|
72 |
+
elif self == ComputeDtype.BFLOAT16:
|
73 |
+
return torch.bfloat16
|
74 |
+
else:
|
75 |
+
raise ValueError(f"Unsupported compute dtype: {self}")
|
76 |
+
|
77 |
+
|
78 |
+
class Dia:
|
79 |
+
def __init__(
|
80 |
+
self,
|
81 |
+
config: DiaConfig,
|
82 |
+
compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,
|
83 |
+
device: torch.device | None = None,
|
84 |
+
):
|
85 |
+
"""Initializes the Dia model.
|
86 |
+
|
87 |
+
Args:
|
88 |
+
config: The configuration object for the model.
|
89 |
+
device: The device to load the model onto. If None, will automatically select the best available device.
|
90 |
+
|
91 |
+
Raises:
|
92 |
+
RuntimeError: If there is an error loading the DAC model.
|
93 |
+
"""
|
94 |
+
super().__init__()
|
95 |
+
self.config = config
|
96 |
+
self.device = device if device is not None else _get_default_device()
|
97 |
+
if isinstance(compute_dtype, str):
|
98 |
+
compute_dtype = ComputeDtype(compute_dtype)
|
99 |
+
self.compute_dtype = compute_dtype.to_dtype()
|
100 |
+
self.model = DiaModel(config, self.compute_dtype)
|
101 |
+
self.dac_model = None
|
102 |
+
|
103 |
+
@classmethod
|
104 |
+
def from_local(
|
105 |
+
cls,
|
106 |
+
config_path: str,
|
107 |
+
checkpoint_path: str,
|
108 |
+
compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,
|
109 |
+
device: torch.device | None = None,
|
110 |
+
) -> "Dia":
|
111 |
+
"""Loads the Dia model from local configuration and checkpoint files.
|
112 |
+
|
113 |
+
Args:
|
114 |
+
config_path: Path to the configuration JSON file.
|
115 |
+
checkpoint_path: Path to the model checkpoint (.pth) file.
|
116 |
+
device: The device to load the model onto. If None, will automatically select the best available device.
|
117 |
+
|
118 |
+
Returns:
|
119 |
+
An instance of the Dia model loaded with weights and set to eval mode.
|
120 |
+
|
121 |
+
Raises:
|
122 |
+
FileNotFoundError: If the config or checkpoint file is not found.
|
123 |
+
RuntimeError: If there is an error loading the checkpoint.
|
124 |
+
"""
|
125 |
+
config = DiaConfig.load(config_path)
|
126 |
+
if config is None:
|
127 |
+
raise FileNotFoundError(f"Config file not found at {config_path}")
|
128 |
+
|
129 |
+
dia = cls(config, compute_dtype, device)
|
130 |
+
|
131 |
+
try:
|
132 |
+
state_dict = torch.load(checkpoint_path, map_location=dia.device)
|
133 |
+
dia.model.load_state_dict(state_dict)
|
134 |
+
except FileNotFoundError:
|
135 |
+
raise FileNotFoundError(f"Checkpoint file not found at {checkpoint_path}")
|
136 |
+
except Exception as e:
|
137 |
+
raise RuntimeError(f"Error loading checkpoint from {checkpoint_path}") from e
|
138 |
+
|
139 |
+
dia.model.to(dia.device)
|
140 |
+
dia.model.eval()
|
141 |
+
dia._load_dac_model()
|
142 |
+
return dia
|
143 |
+
|
144 |
+
@classmethod
|
145 |
+
def from_pretrained(
|
146 |
+
cls,
|
147 |
+
model_name: str = "nari-labs/Dia-1.6B",
|
148 |
+
compute_dtype: str | ComputeDtype = ComputeDtype.FLOAT32,
|
149 |
+
device: torch.device | None = None,
|
150 |
+
) -> "Dia":
|
151 |
+
"""Loads the Dia model from a Hugging Face Hub repository.
|
152 |
+
|
153 |
+
Downloads the configuration and checkpoint files from the specified
|
154 |
+
repository ID and then loads the model.
|
155 |
+
|
156 |
+
Args:
|
157 |
+
model_name: The Hugging Face Hub repository ID (e.g., "nari-labs/Dia-1.6B").
|
158 |
+
compute_dtype: The computation dtype to use.
|
159 |
+
device: The device to load the model onto. If None, will automatically select the best available device.
|
160 |
+
|
161 |
+
Returns:
|
162 |
+
An instance of the Dia model loaded with weights and set to eval mode.
|
163 |
+
|
164 |
+
Raises:
|
165 |
+
FileNotFoundError: If config or checkpoint download/loading fails.
|
166 |
+
RuntimeError: If there is an error loading the checkpoint.
|
167 |
+
"""
|
168 |
+
if isinstance(compute_dtype, str):
|
169 |
+
compute_dtype = ComputeDtype(compute_dtype)
|
170 |
+
loaded_model = DiaModel.from_pretrained(model_name, compute_dtype=compute_dtype.to_dtype())
|
171 |
+
config = loaded_model.config
|
172 |
+
dia = cls(config, compute_dtype, device)
|
173 |
+
|
174 |
+
dia.model = loaded_model
|
175 |
+
dia.model.to(dia.device)
|
176 |
+
dia.model.eval()
|
177 |
+
dia._load_dac_model()
|
178 |
+
return dia
|
179 |
+
|
180 |
+
def _load_dac_model(self):
|
181 |
+
try:
|
182 |
+
dac_model_path = dac.utils.download()
|
183 |
+
dac_model = dac.DAC.load(dac_model_path).to(self.device)
|
184 |
+
except Exception as e:
|
185 |
+
raise RuntimeError("Failed to load DAC model") from e
|
186 |
+
self.dac_model = dac_model
|
187 |
+
|
188 |
+
def _prepare_text_input(self, text: str) -> torch.Tensor:
|
189 |
+
"""Encodes text prompt, pads, and creates attention mask and positions."""
|
190 |
+
text_pad_value = self.config.data.text_pad_value
|
191 |
+
max_len = self.config.data.text_length
|
192 |
+
|
193 |
+
byte_text = text.encode("utf-8")
|
194 |
+
replaced_bytes = byte_text.replace(b"[S1]", b"\x01").replace(b"[S2]", b"\x02")
|
195 |
+
text_tokens = list(replaced_bytes)
|
196 |
+
|
197 |
+
current_len = len(text_tokens)
|
198 |
+
padding_needed = max_len - current_len
|
199 |
+
if padding_needed <= 0:
|
200 |
+
text_tokens = text_tokens[:max_len]
|
201 |
+
padded_text_np = np.array(text_tokens, dtype=np.uint8)
|
202 |
+
else:
|
203 |
+
padded_text_np = np.pad(
|
204 |
+
text_tokens,
|
205 |
+
(0, padding_needed),
|
206 |
+
mode="constant",
|
207 |
+
constant_values=text_pad_value,
|
208 |
+
).astype(np.uint8)
|
209 |
+
|
210 |
+
src_tokens = torch.from_numpy(padded_text_np).to(torch.long).to(self.device).unsqueeze(0) # [1, S]
|
211 |
+
return src_tokens
|
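The text front end is byte level: speaker tags collapse to single bytes and everything else stays as UTF-8 bytes, as this small sketch of the replacement above shows.

text = "[S1] Hello. [S2] Hi!"
replaced = text.encode("utf-8").replace(b"[S1]", b"\x01").replace(b"[S2]", b"\x02")
tokens = list(replaced)
print(tokens[:6])  # [1, 32, 72, 101, 108, 108]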
212 |
+
|
213 |
+
def _prepare_audio_prompt(self, audio_prompt: torch.Tensor | None) -> tuple[torch.Tensor, int]:
|
214 |
+
num_channels = self.config.data.channels
|
215 |
+
audio_bos_value = self.config.data.audio_bos_value
|
216 |
+
audio_pad_value = self.config.data.audio_pad_value
|
217 |
+
delay_pattern = self.config.data.delay_pattern
|
218 |
+
max_delay_pattern = max(delay_pattern)
|
219 |
+
|
220 |
+
prefill = torch.full(
|
221 |
+
(1, num_channels),
|
222 |
+
fill_value=audio_bos_value,
|
223 |
+
dtype=torch.int,
|
224 |
+
device=self.device,
|
225 |
+
)
|
226 |
+
|
227 |
+
prefill_step = 1
|
228 |
+
|
229 |
+
if audio_prompt is not None:
|
230 |
+
prefill_step += audio_prompt.shape[0]
|
231 |
+
prefill = torch.cat([prefill, audio_prompt], dim=0)
|
232 |
+
|
233 |
+
delay_pad_tensor = torch.full(
|
234 |
+
(max_delay_pattern, num_channels), fill_value=-1, dtype=torch.int, device=self.device
|
235 |
+
)
|
236 |
+
prefill = torch.cat([prefill, delay_pad_tensor], dim=0)
|
237 |
+
|
238 |
+
delay_precomp = build_delay_indices(
|
239 |
+
B=1,
|
240 |
+
T=prefill.shape[0],
|
241 |
+
C=num_channels,
|
242 |
+
delay_pattern=delay_pattern,
|
243 |
+
)
|
244 |
+
|
245 |
+
prefill = apply_audio_delay(
|
246 |
+
audio_BxTxC=prefill.unsqueeze(0),
|
247 |
+
pad_value=audio_pad_value,
|
248 |
+
bos_value=audio_bos_value,
|
249 |
+
precomp=delay_precomp,
|
250 |
+
).squeeze(0)
|
251 |
+
|
252 |
+
return prefill, prefill_step
|
253 |
+
|
254 |
+
def _prepare_generation(self, text: str, audio_prompt: str | torch.Tensor | None, verbose: bool):
|
255 |
+
enc_input_cond = self._prepare_text_input(text)
|
256 |
+
enc_input_uncond = torch.zeros_like(enc_input_cond)
|
257 |
+
enc_input = torch.cat([enc_input_uncond, enc_input_cond], dim=0)
|
258 |
+
|
259 |
+
if isinstance(audio_prompt, str):
|
260 |
+
audio_prompt = self.load_audio(audio_prompt)
|
261 |
+
prefill, prefill_step = self._prepare_audio_prompt(audio_prompt)
|
262 |
+
|
263 |
+
if verbose:
|
264 |
+
print("generate: data loaded")
|
265 |
+
|
266 |
+
enc_state = EncoderInferenceState.new(self.config, enc_input_cond)
|
267 |
+
encoder_out = self.model.encoder(enc_input, enc_state)
|
268 |
+
|
269 |
+
dec_cross_attn_cache = self.model.decoder.precompute_cross_attn_cache(encoder_out, enc_state.positions)
|
270 |
+
dec_state = DecoderInferenceState.new(
|
271 |
+
self.config, enc_state, encoder_out, dec_cross_attn_cache, self.compute_dtype
|
272 |
+
)
|
273 |
+
dec_output = DecoderOutput.new(self.config, self.device)
|
274 |
+
dec_output.prefill(prefill, prefill_step)
|
275 |
+
|
276 |
+
dec_step = prefill_step - 1
|
277 |
+
if dec_step > 0:
|
278 |
+
dec_state.prepare_step(0, dec_step)
|
279 |
+
tokens_BxTxC = dec_output.get_tokens_at(0, dec_step).unsqueeze(0).expand(2, -1, -1)
|
280 |
+
self.model.decoder.forward(tokens_BxTxC, dec_state)
|
281 |
+
|
282 |
+
return dec_state, dec_output
|
283 |
+
|
284 |
+
def _decoder_step(
|
285 |
+
self,
|
286 |
+
tokens_Bx1xC: torch.Tensor,
|
287 |
+
dec_state: DecoderInferenceState,
|
288 |
+
cfg_scale: float,
|
289 |
+
temperature: float,
|
290 |
+
top_p: float,
|
291 |
+
cfg_filter_top_k: int,
|
292 |
+
) -> torch.Tensor:
|
293 |
+
audio_eos_value = self.config.data.audio_eos_value
|
294 |
+
logits_Bx1xCxV = self.model.decoder.decode_step(tokens_Bx1xC, dec_state)
|
295 |
+
|
296 |
+
logits_last_BxCxV = logits_Bx1xCxV[:, -1, :, :]
|
297 |
+
uncond_logits_CxV = logits_last_BxCxV[0, :, :]
|
298 |
+
cond_logits_CxV = logits_last_BxCxV[1, :, :]
|
299 |
+
|
300 |
+
logits_CxV = cond_logits_CxV + cfg_scale * (cond_logits_CxV - uncond_logits_CxV)
|
301 |
+
logits_CxV[:, audio_eos_value + 1 :] = -torch.inf
|
302 |
+
logits_CxV[1:, audio_eos_value:] = -torch.inf
|
303 |
+
|
304 |
+
pred_C = _sample_next_token(
|
305 |
+
logits_CxV.float(),
|
306 |
+
temperature=temperature,
|
307 |
+
top_p=top_p,
|
308 |
+
cfg_filter_top_k=cfg_filter_top_k,
|
309 |
+
)
|
310 |
+
return pred_C
|
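The guidance line in `_decoder_step` is ordinary classifier-free guidance: the conditional logits are pushed away from the unconditional ones by `cfg_scale`. A worked toy example with illustrative numbers:

import torch

cfg_scale = 3.0
uncond = torch.tensor([0.2, 0.1, 0.7])
cond = torch.tensor([0.1, 0.8, 0.1])
guided = cond + cfg_scale * (cond - uncond)
print(guided)  # tensor([-0.2000,  2.9000, -1.7000])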
311 |
+
|
312 |
+
def _generate_output(self, generated_codes: torch.Tensor) -> np.ndarray:
|
313 |
+
num_channels = self.config.data.channels
|
314 |
+
seq_length = generated_codes.shape[0]
|
315 |
+
delay_pattern = self.config.data.delay_pattern
|
316 |
+
audio_pad_value = self.config.data.audio_pad_value
|
317 |
+
max_delay_pattern = max(delay_pattern)
|
318 |
+
|
319 |
+
revert_precomp = build_revert_indices(
|
320 |
+
B=1,
|
321 |
+
T=seq_length,
|
322 |
+
C=num_channels,
|
323 |
+
delay_pattern=delay_pattern,
|
324 |
+
)
|
325 |
+
|
326 |
+
codebook = revert_audio_delay(
|
327 |
+
audio_BxTxC=generated_codes.unsqueeze(0),
|
328 |
+
pad_value=audio_pad_value,
|
329 |
+
precomp=revert_precomp,
|
330 |
+
T=seq_length,
|
331 |
+
)[:, :-max_delay_pattern, :]
|
332 |
+
|
333 |
+
min_valid_index = 0
|
334 |
+
max_valid_index = 1023
|
335 |
+
invalid_mask = (codebook < min_valid_index) | (codebook > max_valid_index)
|
336 |
+
codebook[invalid_mask] = 0
|
337 |
+
|
338 |
+
audio = decode(self.dac_model, codebook.transpose(1, 2))
|
339 |
+
|
340 |
+
return audio.squeeze().cpu().numpy()
|
341 |
+
|
342 |
+
def load_audio(self, audio_path: str) -> torch.Tensor:
|
343 |
+
audio, sr = torchaudio.load(audio_path, channels_first=True) # C, T
|
344 |
+
if sr != DEFAULT_SAMPLE_RATE:
|
345 |
+
audio = torchaudio.functional.resample(audio, sr, DEFAULT_SAMPLE_RATE)
|
346 |
+
audio = audio.to(self.device).unsqueeze(0) # 1, C, T
|
347 |
+
audio_data = self.dac_model.preprocess(audio, DEFAULT_SAMPLE_RATE)
|
348 |
+
_, encoded_frame, _, _, _ = self.dac_model.encode(audio_data) # 1, C, T
|
349 |
+
return encoded_frame.squeeze(0).transpose(0, 1)
|
350 |
+
|
351 |
+
def save_audio(self, path: str, audio: np.ndarray):
|
352 |
+
import soundfile as sf
|
353 |
+
|
354 |
+
sf.write(path, audio, DEFAULT_SAMPLE_RATE)
|
355 |
+
|
356 |
+
@torch.inference_mode()
|
357 |
+
def generate(
|
358 |
+
self,
|
359 |
+
text: str,
|
360 |
+
max_tokens: int | None = None,
|
361 |
+
cfg_scale: float = 3.0,
|
362 |
+
temperature: float = 1.3,
|
363 |
+
top_p: float = 0.95,
|
364 |
+
use_torch_compile: bool = False,
|
365 |
+
cfg_filter_top_k: int = 35,
|
366 |
+
audio_prompt: str | torch.Tensor | None = None,
|
367 |
+
audio_prompt_path: str | None = None,
|
368 |
+
use_cfg_filter: bool | None = None,
|
369 |
+
verbose: bool = False,
|
370 |
+
) -> np.ndarray:
|
371 |
+
audio_eos_value = self.config.data.audio_eos_value
|
372 |
+
audio_pad_value = self.config.data.audio_pad_value
|
373 |
+
delay_pattern = self.config.data.delay_pattern
|
374 |
+
max_tokens = self.config.data.audio_length if max_tokens is None else max_tokens
|
375 |
+
max_delay_pattern = max(delay_pattern)
|
376 |
+
self.model.eval()
|
377 |
+
|
378 |
+
if audio_prompt_path:
|
379 |
+
print("Warning: audio_prompt_path is deprecated. Use audio_prompt instead.")
|
380 |
+
audio_prompt = audio_prompt_path
|
381 |
+
if use_cfg_filter is not None:
|
382 |
+
print("Warning: use_cfg_filter is deprecated.")
|
383 |
+
|
384 |
+
if verbose:
|
385 |
+
total_start_time = time.time()
|
386 |
+
|
387 |
+
dec_state, dec_output = self._prepare_generation(text, audio_prompt, verbose)
|
388 |
+
dec_step = dec_output.prefill_step - 1
|
389 |
+
|
390 |
+
bos_countdown = max_delay_pattern
|
391 |
+
eos_detected = False
|
392 |
+
eos_countdown = -1
|
393 |
+
|
394 |
+
if use_torch_compile:
|
395 |
+
step_fn = torch.compile(self._decoder_step, mode="default")
|
396 |
+
else:
|
397 |
+
step_fn = self._decoder_step
|
398 |
+
|
399 |
+
if verbose:
|
400 |
+
print("generate: starting generation loop")
|
401 |
+
if use_torch_compile:
|
402 |
+
print("generate: by using use_torch_compile=True, the first step would take long")
|
403 |
+
start_time = time.time()
|
404 |
+
|
405 |
+
while dec_step < max_tokens:
|
406 |
+
dec_state.prepare_step(dec_step)
|
407 |
+
tokens_Bx1xC = dec_output.get_tokens_at(dec_step).unsqueeze(0).expand(2, -1, -1)
|
408 |
+
pred_C = step_fn(
|
409 |
+
tokens_Bx1xC,
|
410 |
+
dec_state,
|
411 |
+
cfg_scale,
|
412 |
+
temperature,
|
413 |
+
top_p,
|
414 |
+
cfg_filter_top_k,
|
415 |
+
)
|
416 |
+
|
417 |
+
if (not eos_detected and pred_C[0] == audio_eos_value) or dec_step == max_tokens - max_delay_pattern - 1:
|
418 |
+
eos_detected = True
|
419 |
+
eos_countdown = max_delay_pattern
|
420 |
+
|
421 |
+
if eos_countdown > 0:
|
422 |
+
step_after_eos = max_delay_pattern - eos_countdown
|
423 |
+
for i, d in enumerate(delay_pattern):
|
424 |
+
if step_after_eos == d:
|
425 |
+
pred_C[i] = audio_eos_value
|
426 |
+
elif step_after_eos > d:
|
427 |
+
pred_C[i] = audio_pad_value
|
428 |
+
eos_countdown -= 1
|
429 |
+
|
430 |
+
bos_countdown = max(0, bos_countdown - 1)
|
431 |
+
dec_output.update_one(pred_C, dec_step + 1, bos_countdown > 0)
|
432 |
+
|
433 |
+
if eos_countdown == 0:
|
434 |
+
break
|
435 |
+
|
436 |
+
dec_step += 1
|
437 |
+
if verbose and dec_step % 86 == 0:
|
438 |
+
duration = time.time() - start_time
|
439 |
+
print(
|
440 |
+
f"generate step {dec_step}: speed={86 / duration:.3f} tokens/s, realtime factor={1 / duration:.3f}x"
|
441 |
+
)
|
442 |
+
start_time = time.time()
|
443 |
+
|
444 |
+
if dec_output.prefill_step >= dec_step + 1:
|
445 |
+
print("Warning: Nothing generated")
|
446 |
+
return None
|
447 |
+
|
448 |
+
generated_codes = dec_output.generated_tokens[dec_output.prefill_step : dec_step + 1, :]
|
449 |
+
|
450 |
+
if verbose:
|
451 |
+
total_step = dec_step + 1 - dec_output.prefill_step
|
452 |
+
total_duration = time.time() - total_start_time
|
453 |
+
print(f"generate: total step={total_step}, total duration={total_duration:.3f}s")
|
454 |
+
|
455 |
+
return self._generate_output(generated_codes)
|
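A minimal usage sketch of the Dia class defined above; the prompt text and output filename are illustrative.

from dia.model import Dia

model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
audio = model.generate(
    "[S1] Dia is a text to dialogue model. [S2] Nice to meet you!",
    verbose=True,
)
model.save_audio("output.wav", audio)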
dia/state.py
ADDED
@@ -0,0 +1,207 @@
1 |
+
from dataclasses import dataclass
|
2 |
+
|
3 |
+
import torch
|
4 |
+
|
5 |
+
from .config import DiaConfig
|
6 |
+
|
7 |
+
|
8 |
+
def create_attn_mask(
|
9 |
+
q_padding_mask_1d: torch.Tensor,
|
10 |
+
k_padding_mask_1d: torch.Tensor,
|
11 |
+
device: torch.device,
|
12 |
+
is_causal: bool = False,
|
13 |
+
) -> torch.Tensor:
|
14 |
+
"""
|
15 |
+
Creates the attention mask (self or cross) mimicking JAX segment ID logic.
|
16 |
+
"""
|
17 |
+
B1, Tq = q_padding_mask_1d.shape
|
18 |
+
B2, Tk = k_padding_mask_1d.shape
|
19 |
+
assert B1 == B2, "Query and key batch dimensions must match"
|
20 |
+
|
21 |
+
p_mask_q = q_padding_mask_1d.unsqueeze(2) # Shape [B, Tq, 1]
|
22 |
+
p_mask_k = k_padding_mask_1d.unsqueeze(1) # Shape [B, 1, Tk]
|
23 |
+
|
24 |
+
# Condition A: Non-padding query attends to non-padding key
|
25 |
+
non_pad_attends_non_pad = p_mask_q & p_mask_k # Shape [B, Tq, Tk]
|
26 |
+
|
27 |
+
# Condition B: Padding query attends to padding key
|
28 |
+
pad_attends_pad = (~p_mask_q) & (~p_mask_k) # Shape [B, Tq, Tk]
|
29 |
+
|
30 |
+
# Combine: True if padding status is compatible (both non-pad OR both pad)
|
31 |
+
mask = non_pad_attends_non_pad | pad_attends_pad # Shape [B, Tq, Tk]
|
32 |
+
|
33 |
+
if is_causal:
|
34 |
+
assert Tq == Tk, "Causal mask requires query and key sequence lengths to be equal"
|
35 |
+
causal_mask_2d = torch.tril(torch.ones((Tq, Tk), dtype=torch.bool, device=device)) # Shape [Tq, Tk]
|
36 |
+
causal_mask = mask & causal_mask_2d # Shape [B, Tq, Tk]
|
37 |
+
return causal_mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]
|
38 |
+
else:
|
39 |
+
return mask.unsqueeze(1) # Shape [B, 1, Tq, Tk]
|
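A small sketch of the padding-compatibility logic in `create_attn_mask`: non-padding attends to non-padding, padding attends to padding, and nothing crosses between the two groups.

import torch
from dia.state import create_attn_mask

q_pad = torch.tensor([[True, True, False]])   # last query position is padding
k_pad = torch.tensor([[True, False, False]])  # last two key positions are padding
mask = create_attn_mask(q_pad, k_pad, device=torch.device("cpu"))
print(mask.shape)  # torch.Size([1, 1, 3, 3])
print(mask[0, 0])
# tensor([[ True, False, False],
#         [ True, False, False],
#         [False,  True,  True]])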
40 |
+
|
41 |
+
|
42 |
+
@dataclass
|
43 |
+
class EncoderInferenceState:
|
44 |
+
"""Parameters specifically for encoder inference."""
|
45 |
+
|
46 |
+
max_seq_len: int
|
47 |
+
device: torch.device
|
48 |
+
positions: torch.Tensor
|
49 |
+
padding_mask: torch.Tensor
|
50 |
+
attn_mask: torch.Tensor
|
51 |
+
|
52 |
+
@classmethod
|
53 |
+
def new(cls, config: DiaConfig, cond_src: torch.Tensor) -> "EncoderInferenceState":
|
54 |
+
"""Creates EtorchrInferenceParams from DiaConfig and a device."""
|
55 |
+
device = cond_src.device
|
56 |
+
|
57 |
+
positions = (
|
58 |
+
torch.arange(config.data.text_length, dtype=torch.float32, device=device).unsqueeze(0).expand(2, -1)
|
59 |
+
)
|
60 |
+
padding_mask = (cond_src != config.data.text_pad_value).to(device).expand(2, -1)
|
61 |
+
attn_mask = create_attn_mask(padding_mask, padding_mask, device, is_causal=False)
|
62 |
+
|
63 |
+
return cls(
|
64 |
+
max_seq_len=config.data.text_length,
|
65 |
+
device=device,
|
66 |
+
positions=positions,
|
67 |
+
padding_mask=padding_mask,
|
68 |
+
attn_mask=attn_mask,
|
69 |
+
)
|
70 |
+
|
71 |
+
|
72 |
+
class KVCache:
|
73 |
+
def __init__(
|
74 |
+
self,
|
75 |
+
num_heads: int,
|
76 |
+
max_len: int,
|
77 |
+
head_dim: int,
|
78 |
+
dtype: torch.dtype,
|
79 |
+
device: torch.device,
|
80 |
+
k: torch.Tensor | None = None,
|
81 |
+
v: torch.Tensor | None = None,
|
82 |
+
):
|
83 |
+
self.k = torch.zeros((2, num_heads, max_len, head_dim), dtype=dtype, device=device) if k is None else k
|
84 |
+
self.v = torch.zeros((2, num_heads, max_len, head_dim), dtype=dtype, device=device) if v is None else v
|
85 |
+
self.current_idx = torch.tensor(0)
|
86 |
+
|
87 |
+
@classmethod
|
88 |
+
def from_kv(cls, k: torch.Tensor, v: torch.Tensor) -> "KVCache":
|
89 |
+
return cls(
|
90 |
+
num_heads=k.shape[1],
|
91 |
+
max_len=k.shape[2],
|
92 |
+
head_dim=k.shape[3],
|
93 |
+
dtype=k.dtype,
|
94 |
+
device=k.device,
|
95 |
+
k=k,
|
96 |
+
v=v,
|
97 |
+
)
|
98 |
+
|
99 |
+
def update(self, k: torch.Tensor, v: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
|
100 |
+
self.k[:, :, self.current_idx : self.current_idx + 1, :] = k
|
101 |
+
self.v[:, :, self.current_idx : self.current_idx + 1, :] = v
|
102 |
+
self.current_idx += 1
|
103 |
+
return self.k[:, :, : self.current_idx, :], self.v[:, :, : self.current_idx, :]
|
104 |
+
|
105 |
+
def prefill(self, k: torch.Tensor, v: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
|
106 |
+
prefill_len = k.shape[2]
|
107 |
+
self.k[:, :, :prefill_len, :] = k
|
108 |
+
self.v[:, :, :prefill_len, :] = v
|
109 |
+
self.current_idx = prefill_len - 1
|
110 |
+
|
111 |
+
|
112 |
+
@dataclass
|
113 |
+
class DecoderInferenceState:
|
114 |
+
"""Parameters specifically for decoder inference."""
|
115 |
+
|
116 |
+
device: torch.device
|
117 |
+
dtype: torch.dtype
|
118 |
+
enc_out: torch.Tensor
|
119 |
+
enc_positions: torch.Tensor
|
120 |
+
dec_positions: torch.Tensor
|
121 |
+
dec_cross_attn_mask: torch.Tensor
|
122 |
+
self_attn_cache: list[KVCache]
|
123 |
+
cross_attn_cache: list[KVCache]
|
124 |
+
|
125 |
+
@classmethod
|
126 |
+
def new(
|
127 |
+
cls,
|
128 |
+
config: DiaConfig,
|
129 |
+
enc_state: EncoderInferenceState,
|
130 |
+
enc_out: torch.Tensor,
|
131 |
+
dec_cross_attn_cache: list[KVCache],
|
132 |
+
compute_dtype: torch.dtype,
|
133 |
+
) -> "DecoderInferenceState":
|
134 |
+
"""Creates DecoderInferenceParams from DiaConfig and a device."""
|
135 |
+
device = enc_out.device
|
136 |
+
max_audio_len = config.data.audio_length
|
137 |
+
|
138 |
+
dec_positions = torch.full((2, 1), fill_value=0, dtype=torch.long, device=device)
|
139 |
+
tgt_padding_mask = torch.ones((2, 1), dtype=torch.bool, device=device)
|
140 |
+
dec_cross_attn_mask = create_attn_mask(tgt_padding_mask, enc_state.padding_mask, device, is_causal=False)
|
141 |
+
|
142 |
+
self_attn_cache = [
|
143 |
+
KVCache(
|
144 |
+
config.model.decoder.kv_heads,
|
145 |
+
max_audio_len,
|
146 |
+
config.model.decoder.gqa_head_dim,
|
147 |
+
compute_dtype,
|
148 |
+
device,
|
149 |
+
)
|
150 |
+
for _ in range(config.model.decoder.n_layer)
|
151 |
+
]
|
152 |
+
|
153 |
+
return cls(
|
154 |
+
device=device,
|
155 |
+
dtype=compute_dtype,
|
156 |
+
enc_out=enc_out,
|
157 |
+
enc_positions=enc_state.positions,
|
158 |
+
dec_positions=dec_positions,
|
159 |
+
dec_cross_attn_mask=dec_cross_attn_mask,
|
160 |
+
self_attn_cache=self_attn_cache,
|
161 |
+
cross_attn_cache=dec_cross_attn_cache,
|
162 |
+
)
|
163 |
+
|
164 |
+
def prepare_step(self, step_from: int, step_to: int | None = None) -> None:
|
165 |
+
if step_to is None:
|
166 |
+
step_to = step_from + 1
|
167 |
+
self.dec_positions = (
|
168 |
+
torch.arange(step_from, step_to, dtype=torch.float32, device=self.device).unsqueeze(0).expand(2, -1)
|
169 |
+
)
|
170 |
+
|
171 |
+
|
172 |
+
@dataclass
|
173 |
+
class DecoderOutput:
|
174 |
+
generated_tokens: torch.Tensor
|
175 |
+
prefill_step: int
|
176 |
+
|
177 |
+
@classmethod
|
178 |
+
def new(cls, config: DiaConfig, device: torch.device) -> "DecoderOutput":
|
179 |
+
max_audio_len = config.data.audio_length
|
180 |
+
return cls(
|
181 |
+
generated_tokens=torch.full(
|
182 |
+
(max_audio_len, config.data.channels),
|
183 |
+
fill_value=-1,
|
184 |
+
dtype=torch.int,
|
185 |
+
device=device,
|
186 |
+
),
|
187 |
+
prefill_step=0,
|
188 |
+
)
|
189 |
+
|
190 |
+
def get_tokens_at(self, step_from: int, step_to: int | None = None) -> torch.Tensor:
|
191 |
+
if step_to is None:
|
192 |
+
step_to = step_from + 1
|
193 |
+
return self.generated_tokens[step_from:step_to, :]
|
194 |
+
|
195 |
+
def update_one(self, dec_out: torch.Tensor, step: int, apply_mask: bool = False):
|
196 |
+
if apply_mask:
|
197 |
+
mask = self.generated_tokens[step : step + 1, :] == -1
|
198 |
+
self.generated_tokens[step : step + 1, :] = torch.where(
|
199 |
+
mask, dec_out, self.generated_tokens[step : step + 1, :]
|
200 |
+
)
|
201 |
+
else:
|
202 |
+
self.generated_tokens[step : step + 1, :] = dec_out
|
203 |
+
|
204 |
+
def prefill(self, dec_out: torch.Tensor, prefill_step: int):
|
205 |
+
length = dec_out.shape[0]
|
206 |
+
self.generated_tokens[0:length, :] = dec_out
|
207 |
+
self.prefill_step = prefill_step
|
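As a quick illustration of how the helpers above are driven, here is a hedged sketch (tiny dimensions and random tensors chosen only for the example) that builds a padding-aware attention mask and appends one decode step to a KVCache; the leading batch dimension of 2 matches the size hard-coded in the constructors above.

# Illustrative sketch only: arbitrary small shapes, random placeholder tensors.
import torch

from dia.state import KVCache, create_attn_mask

# Cross-attention-style mask: 3 query positions, 4 key positions, last key padded out.
q_mask = torch.ones(2, 3, dtype=torch.bool)
k_mask = torch.tensor([[True, True, True, False], [True, True, True, False]])
mask = create_attn_mask(q_mask, k_mask, device=torch.device("cpu"))
print(mask.shape)  # torch.Size([2, 1, 3, 4])

# KV cache sized for 16 positions of 4 heads x 8 dims; each update writes one new position.
cache = KVCache(num_heads=4, max_len=16, head_dim=8, dtype=torch.float32, device=torch.device("cpu"))
k_step = torch.randn(2, 4, 1, 8)
v_step = torch.randn(2, 4, 1, 8)
k_all, v_all = cache.update(k_step, v_step)
print(k_all.shape)  # torch.Size([2, 4, 1, 8]) -- the visible cache grows by one position per update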
dia/static/images/banner.png
ADDED
Binary image file (banner), tracked with Git LFS.
docker/Dockerfile.cpu
ADDED
@@ -0,0 +1,48 @@
# Dockerfile.cpu - CPU-only deployment for DIA
# --------------------------------------------------
# Build: docker build . -f docker/Dockerfile.cpu -t dia-cpu
# Run:   docker run --rm -p 7860:7860 dia-cpu

FROM python:3.10-slim

# Set non-interactive frontend
ENV DEBIAN_FRONTEND=noninteractive

# Install venv and system dependencies
RUN apt-get update && apt-get install -y \
    python3-venv \
    libsndfile1 \
    ffmpeg \
    curl \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Create non-root user and set up directories
RUN useradd -m -u 1001 appuser && \
    mkdir -p /app/outputs /app && \
    chown -R appuser:appuser /app

USER appuser
WORKDIR /app

# Copy all code (including pyproject.toml)
COPY --chown=appuser:appuser . .

# Create and activate virtual environment
RUN python3 -m venv /app/venv
ENV PATH="/app/venv/bin:$PATH"

# Install all project dependencies (CPU-only PyTorch)
RUN pip install --upgrade pip && \
    pip install torch torchaudio --index-url https://download.pytorch.org/whl/cpu && \
    pip install --no-cache-dir -e .[dev]

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONPATH=/app

# Expose Gradio default port
ENV GRADIO_SERVER_NAME="0.0.0.0"
EXPOSE 7860

# Entrypoint
CMD ["python3", "app.py"]
docker/Dockerfile.gpu
ADDED
@@ -0,0 +1,49 @@
|
# Dockerfile.gpu - GPU deployment for DIA
# --------------------------------------------------
# Build: docker build . -f docker/Dockerfile.gpu -t dia-gpu
# Run:   docker run --rm --gpus all -p 7860:7860 dia-gpu
# Requires NVIDIA Container Toolkit on host.

FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime

# Set non-interactive frontend
ENV DEBIAN_FRONTEND=noninteractive

# Install venv and system dependencies
RUN apt-get update && apt-get install -y \
    python3-venv \
    libsndfile1 \
    ffmpeg \
    curl \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Create non-root user and set up directories
RUN useradd -m -u 1001 appuser && \
    mkdir -p /app/outputs /app && \
    chown -R appuser:appuser /app

USER appuser
WORKDIR /app

# Copy all code (including pyproject.toml)
COPY --chown=appuser:appuser . .

# Create and activate virtual environment
RUN python3 -m venv /app/venv
ENV PATH="/app/venv/bin:$PATH"

# Install all project dependencies
RUN pip install --upgrade pip && pip install --no-cache-dir .

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONPATH=/app \
    USE_GPU=true \
    LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda-12.1/lib64:${LD_LIBRARY_PATH}

# Expose Gradio default port
ENV GRADIO_SERVER_NAME="0.0.0.0"
EXPOSE 7860

# Entrypoint
CMD ["python3", "app.py"]
example/simple.py
ADDED
@@ -0,0 +1,10 @@
from dia.model import Dia


model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")

text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."

output = model.generate(text, use_torch_compile=True, verbose=True)

model.save_audio("simple.mp3", output)
example/voice_clone.py
ADDED
@@ -0,0 +1,24 @@
from dia.model import Dia


model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")

# You should put the transcript of the voice you want to clone.
# We will use the audio created by running simple.py as an example.
# Note that you will be REQUIRED TO RUN simple.py for the script to work as-is.
clone_from_text = "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Git hub or Hugging Face."
clone_from_audio = "simple.mp3"

# For your custom needs, replace above with below and add your audio file to this directory:
# clone_from_text = "[S1] ... [S2] ... [S1] ... corresponding to your_audio_name.mp3"
# clone_from_audio = "your_audio_name.mp3"

# Text to generate
text_to_generate = "[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. [S2] Nice to meet you too."

# It will only return the audio from the text_to_generate
output = model.generate(
    clone_from_text + text_to_generate, audio_prompt=clone_from_audio, use_torch_compile=True, verbose=True
)

model.save_audio("voice_clone.mp3", output)
example_prompt.mp3
ADDED
Binary audio file (45.8 kB).
pyproject.toml
ADDED
@@ -0,0 +1,66 @@
[project]
name = "nari-tts"
version = "0.1.0"
description = "Dia - A text-to-speech model for dialogue generation"
readme = "README.md"
requires-python = ">=3.10"
license = {file = "LICENSE"}
authors = [
    {name = "Nari Labs", email = "[email protected]"}
]
dependencies = [
    "descript-audio-codec>=1.0.0",
    "gradio>=5.25.2",
    "huggingface-hub>=0.30.2",
    "numpy>=2.2.4",
    "pydantic>=2.11.3",
    "safetensors>=0.5.3",
    "soundfile>=0.13.1",
    "torch==2.6.0",
    "torchaudio==2.6.0",
    "triton==3.2.0 ; sys_platform == 'linux'",
    "triton-windows==3.2.0.post18 ; sys_platform == 'win32'",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project.urls]
"Homepage" = "https://github.com/nari-labs/dia"
"Bug Tracker" = "https://github.com/nari-labs/dia/issues"

[tool.hatch.build.targets.wheel]
packages = ["dia"]

[tool.ruff]
# Never enforce `E501` (line length violations).
lint.ignore = ["C901", "E501", "E741", "W605"]
lint.select = ["C", "E", "F", "I", "W"]
line-length = 119

# Ignore import violations in all `__init__.py` files.
[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["E402", "F401", "F403", "F811"]

[tool.ruff.lint.isort]
lines-after-imports = 2

[tool.uv.sources]
torch = [
    { index = "pytorch-cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]
torchaudio = [
    { index = "pytorch-cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]

[[tool.uv.index]]
name = "pytorch-cu126"
url = "https://download.pytorch.org/whl/cu126"
explicit = true

[dependency-groups]
dev = [
    "ninja>=1.11.1.4",
    "packaging>=25.0",
]
uv.lock
ADDED
Lockfile contents omitted (too large to render).