Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- .gitattributes +5 -0
- .github/ISSUE_TEMPLATE/bug_report.yml +50 -0
- .github/ISSUE_TEMPLATE/config.yml +1 -0
- .github/ISSUE_TEMPLATE/feature_request.yml +62 -0
- .github/ISSUE_TEMPLATE/help_wanted.yml +54 -0
- .github/ISSUE_TEMPLATE/question.yml +26 -0
- .github/workflows/pre-commit.yaml +14 -0
- .github/workflows/publish-docker-image.yaml +60 -0
- .github/workflows/publish-pypi.yaml +66 -0
- .github/workflows/sync-hf.yaml +17 -0
- .gitignore +171 -0
- .gitmodules +3 -0
- .gradio/certificate.pem +31 -0
- .pre-commit-config.yaml +17 -0
- Dockerfile +30 -0
- LICENSE +21 -0
- README.md +261 -6
- pyproject.toml +64 -0
- ruff.toml +10 -0
- src/f5_tts/api.py +164 -0
- src/f5_tts/configs/E2TTS_Base.yaml +49 -0
- src/f5_tts/configs/E2TTS_Small.yaml +49 -0
- src/f5_tts/configs/F5TTS_Base.yaml +54 -0
- src/f5_tts/configs/F5TTS_Small.yaml +54 -0
- src/f5_tts/configs/F5TTS_v1_Base.yaml +55 -0
- src/f5_tts/eval/README.md +52 -0
- src/f5_tts/eval/ecapa_tdnn.py +331 -0
- src/f5_tts/eval/eval_infer_batch.py +210 -0
- src/f5_tts/eval/eval_infer_batch.sh +18 -0
- src/f5_tts/eval/eval_librispeech_test_clean.py +89 -0
- src/f5_tts/eval/eval_seedtts_testset.py +88 -0
- src/f5_tts/eval/eval_utmos.py +42 -0
- src/f5_tts/eval/utils_eval.py +419 -0
- src/f5_tts/infer/README.md +177 -0
- src/f5_tts/infer/SHARED.md +193 -0
- src/f5_tts/infer/examples/basic/basic.toml +11 -0
- src/f5_tts/infer/examples/basic/basic_ref_en.wav +3 -0
- src/f5_tts/infer/examples/basic/basic_ref_zh.wav +3 -0
- src/f5_tts/infer/examples/multi/country.flac +3 -0
- src/f5_tts/infer/examples/multi/main.flac +3 -0
- src/f5_tts/infer/examples/multi/story.toml +20 -0
- src/f5_tts/infer/examples/multi/story.txt +1 -0
- src/f5_tts/infer/examples/multi/town.flac +3 -0
- src/f5_tts/infer/examples/vocab.txt +2545 -0
- src/f5_tts/infer/infer_cli.py +383 -0
- src/f5_tts/infer/infer_gradio.py +1121 -0
- src/f5_tts/infer/speech_edit.py +205 -0
- src/f5_tts/infer/utils_infer.py +605 -0
- src/f5_tts/model/__init__.py +8 -0
- src/f5_tts/model/backbones/README.md +20 -0
.gitattributes
CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+src/f5_tts/infer/examples/basic/basic_ref_en.wav filter=lfs diff=lfs merge=lfs -text
+src/f5_tts/infer/examples/basic/basic_ref_zh.wav filter=lfs diff=lfs merge=lfs -text
+src/f5_tts/infer/examples/multi/country.flac filter=lfs diff=lfs merge=lfs -text
+src/f5_tts/infer/examples/multi/main.flac filter=lfs diff=lfs merge=lfs -text
+src/f5_tts/infer/examples/multi/town.flac filter=lfs diff=lfs merge=lfs -text
.github/ISSUE_TEMPLATE/bug_report.yml
ADDED
@@ -0,0 +1,50 @@
name: "Bug Report"
description: |
  Please provide as many details as possible to help address the issue efficiently, including input, output, logs and screenshots.
labels:
  - bug
body:
  - type: checkboxes
    attributes:
      label: Checks
      description: "To ensure timely help, please confirm the following:"
      options:
        - label: This template is only for bug reports; usage problems go with 'Help Wanted'.
          required: true
        - label: I have thoroughly reviewed the project documentation but couldn't find information to solve my problem.
          required: true
        - label: I have searched for existing issues, including closed ones, and couldn't find a solution.
          required: true
        - label: I am using English to submit this issue to facilitate community communication.
          required: true
  - type: textarea
    attributes:
      label: Environment Details
      description: "Provide details including OS, GPU info, Python version, any relevant software or dependencies, and trainer settings."
      placeholder: e.g., CentOS Linux 7, 4 * RTX 3090, Python 3.10, torch==2.3.0+cu118, cuda 11.8, config yaml is ...
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps to Reproduce
      description: |
        Include detailed steps, screenshots, and logs. Use the correct markdown syntax for code blocks.
      placeholder: |
        1. Create a new conda environment.
        2. Clone the repository, install as local editable and properly set up.
        3. Run the command: `accelerate launch src/f5_tts/train/train.py`.
        4. See the following error message... (attach logs).
    validations:
      required: true
  - type: textarea
    attributes:
      label: ✔️ Expected Behavior
      placeholder: Describe in detail what you expected to happen.
    validations:
      required: false
  - type: textarea
    attributes:
      label: ❌ Actual Behavior
      placeholder: Describe in detail what actually happened.
    validations:
      required: false
.github/ISSUE_TEMPLATE/config.yml
ADDED
@@ -0,0 +1 @@
blank_issues_enabled: false
.github/ISSUE_TEMPLATE/feature_request.yml
ADDED
@@ -0,0 +1,62 @@
name: "Feature Request"
description: |
  Constructive suggestions and new ideas regarding the current repo.
labels:
  - enhancement
body:
  - type: checkboxes
    attributes:
      label: Checks
      description: "To help us grasp the request quickly, please confirm the following:"
      options:
        - label: This template is only for feature requests.
          required: true
        - label: I have thoroughly reviewed the project documentation but couldn't find any relevant information that meets my needs.
          required: true
        - label: I have searched for existing issues, including closed ones, and found no existing discussion.
          required: true
        - label: I am using English to submit this issue to facilitate community communication.
          required: true
  - type: textarea
    attributes:
      label: 1. Is this request related to a challenge you're experiencing? Tell us your story.
      description: |
        Describe the specific problem or scenario you're facing in detail. For example:
        *"I was trying to use [feature] for [specific task], but encountered [issue]. This was frustrating because..."*
      placeholder: Please describe the situation in as much detail as possible.
    validations:
      required: true

  - type: textarea
    attributes:
      label: 2. What is your suggested solution?
      description: |
        Provide a clear description of the feature or enhancement you'd like to propose.
        How would this feature solve your issue or improve the project?
      placeholder: Describe your idea or proposed solution here.
    validations:
      required: true

  - type: textarea
    attributes:
      label: 3. Additional context or comments
      description: |
        Any other relevant information, links, documents, or screenshots that provide clarity.
        Use this section for anything not covered above.
      placeholder: Add any extra details here.
    validations:
      required: false

  - type: checkboxes
    attributes:
      label: 4. Can you help us with this feature?
      description: |
        Let us know if you're interested in contributing. This is not a commitment but a way to express interest in collaboration.
      options:
        - label: I am interested in contributing to this feature.
          required: false

  - type: markdown
    attributes:
      value: |
        **Note:** Please submit only one request per issue to keep discussions focused and manageable.
.github/ISSUE_TEMPLATE/help_wanted.yml
ADDED
@@ -0,0 +1,54 @@
name: "Help Wanted"
description: |
  Please provide as many details as possible to help address the issue efficiently, including input, output, logs and screenshots.
labels:
  - help wanted
body:
  - type: checkboxes
    attributes:
      label: Checks
      description: "To ensure timely help, please confirm the following:"
      options:
        - label: This template is only for usage issues encountered.
          required: true
        - label: I have thoroughly reviewed the project documentation but couldn't find information to solve my problem.
          required: true
        - label: I have searched for existing issues, including closed ones, and couldn't find a solution.
          required: true
        - label: I am using English to submit this issue to facilitate community communication.
          required: true
  - type: textarea
    attributes:
      label: Environment Details
      description: "Provide details such as OS, Python version, and any relevant software or dependencies."
      placeholder: |
        e.g., macOS 13.5, Python 3.10, torch==2.3.0, Gradio 4.44.1
        If training or finetuning related, provide detailed configuration including GPU info and training setup.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps to Reproduce
      description: |
        Include detailed steps, screenshots, and logs. Provide the prompt wav and text used. Use the correct markdown syntax for code blocks.
      placeholder: |
        1. Create a new conda environment.
        2. Clone the repository and install as a pip package.
        3. Run the command: `f5-tts_infer-gradio` with no ref_text provided.
        4. Stuck there with the following message... (attach logs and also error msg e.g. after ctrl-c).
        5. Prompt & generated wavs are [change suffix to .mp4 to enable direct upload or pack all to .zip].
        6. Reference audio's transcription or provided ref_text is `xxx`, and text to generate is `xxx`.
    validations:
      required: true
  - type: textarea
    attributes:
      label: ✔️ Expected Behavior
      placeholder: Describe what you expected to happen in detail, e.g. output a generated audio.
    validations:
      required: false
  - type: textarea
    attributes:
      label: ❌ Actual Behavior
      placeholder: Describe what actually happened in detail, failure messages, etc.
    validations:
      required: false
.github/ISSUE_TEMPLATE/question.yml
ADDED
@@ -0,0 +1,26 @@
name: "Question"
description: |
  Research questions or pure inquiries about the project; usage issues go with "Help Wanted".
labels:
  - question
body:
  - type: checkboxes
    attributes:
      label: Checks
      description: "To help us grasp the question quickly, please confirm the following:"
      options:
        - label: This template is only for research questions, not usage problems, feature requests or bug reports.
          required: true
        - label: I have thoroughly reviewed the project documentation and read the related paper(s).
          required: true
        - label: I have searched for existing issues, including closed ones, and found no similar questions.
          required: true
        - label: I am using English to submit this issue to facilitate community communication.
          required: true
  - type: textarea
    attributes:
      label: Question details
      description: |
        Question details, clearly stated using proper markdown syntax.
    validations:
      required: true
.github/workflows/pre-commit.yaml
ADDED
@@ -0,0 +1,14 @@
name: pre-commit

on:
  pull_request:
  push:
    branches: [main]

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
      - uses: pre-commit/action@v3.0.1
.github/workflows/publish-docker-image.yaml
ADDED
@@ -0,0 +1,60 @@
name: Create and publish a Docker image

# Configures this workflow to run every time a change is pushed to the branch called `main`.
on:
  push:
    branches: ['main']

# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Free Up GitHub Actions Ubuntu Runner Disk Space 🔧
        uses: jlumbroso/free-disk-space@main
        with:
          # If set to "true", this might remove tools that are actually needed, but it frees about 6 GB
          tool-cache: false

          # All of these default to true, but feel free to set to "false" if necessary for your workflow
          android: true
          dotnet: true
          haskell: true
          large-packages: false
          swap-storage: false
          docker-images: false
      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
      - name: Build and push Docker image
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/publish-pypi.yaml
ADDED
@@ -0,0 +1,66 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# GitHub recommends pinning actions to a commit SHA.
# To get a newer version, you will need to update the SHA.
# You can also reference a tag or branch, but the action may change without warning.

name: Upload Python Package

on:
  release:
    types: [published]

permissions:
  contents: read

jobs:
  release-build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.x"

      - name: Build release distributions
        run: |
          # NOTE: put your own distribution build steps here.
          python -m pip install build
          python -m build

      - name: Upload distributions
        uses: actions/upload-artifact@v4
        with:
          name: release-dists
          path: dist/

  pypi-publish:
    runs-on: ubuntu-latest

    needs:
      - release-build

    permissions:
      # IMPORTANT: this permission is mandatory for trusted publishing
      id-token: write

    # Dedicated environments with protections for publishing are strongly recommended.
    environment:
      name: pypi
      # OPTIONAL: uncomment and update to include your PyPI project URL in the deployment status:
      # url: https://pypi.org/p/YOURPROJECT

    steps:
      - name: Retrieve release distributions
        uses: actions/download-artifact@v4
        with:
          name: release-dists
          path: dist/

      - name: Publish release distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
.github/workflows/sync-hf.yaml
ADDED
@@ -0,0 +1,17 @@
name: Sync to HF Space

on:
  release:
    types: [published]

jobs:
  trigger_curl:
    runs-on: ubuntu-latest

    steps:
      - name: Send cURL POST request
        run: |
          curl -X POST https://mrfakename-sync-f5.hf.space/gradio_api/call/refresh \
            -s \
            -H "Content-Type: application/json" \
            -d "{\"data\": [\"${{ secrets.REFRESH_PASSWORD }}\"]}"
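Note: a minimal Python equivalent of the cURL trigger above can be handy for testing the sync endpoint locally; this is a sketch, not part of the workflow. `requests` is an assumed dependency, and the secret is shown as a placeholder, never a real value.

```python
# Sketch: the same POST the workflow sends, for local testing (assumes `requests` is installed).
import requests

resp = requests.post(
    "https://mrfakename-sync-f5.hf.space/gradio_api/call/refresh",
    json={"data": ["<REFRESH_PASSWORD>"]},  # placeholder for the repository secret
    timeout=30,
)
print(resp.status_code)
```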
.gitignore
ADDED
@@ -0,0 +1,171 @@
# Custom
.vscode/
tests/
runs/
data/
ckpts/
wandb/
results/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.gitmodules
ADDED
@@ -0,0 +1,3 @@
[submodule "src/third_party/BigVGAN"]
    path = src/third_party/BigVGAN
    url = https://github.com/NVIDIA/BigVGAN.git
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,17 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: v0.11.2
    hooks:
      - id: ruff
        name: ruff linter
        args: [--fix]
      - id: ruff-format
        name: ruff formatter
      - id: ruff
        name: ruff sorter
        args: [--select, I, --fix]
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-yaml
Dockerfile
ADDED
@@ -0,0 +1,30 @@
FROM pytorch/pytorch:2.4.0-cuda12.4-cudnn9-devel

USER root

ARG DEBIAN_FRONTEND=noninteractive

LABEL github_repo="https://github.com/SWivid/F5-TTS"

RUN set -x \
    && apt-get update \
    && apt-get -y install wget curl man git less openssl libssl-dev unzip unar build-essential aria2 tmux vim \
    && apt-get install -y openssh-server sox libsox-fmt-all libsox-fmt-mp3 libsndfile1-dev ffmpeg \
    && apt-get install -y librdmacm1 libibumad3 librdmacm-dev libibverbs1 libibverbs-dev ibverbs-utils ibverbs-providers \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean

WORKDIR /workspace

RUN git clone https://github.com/SWivid/F5-TTS.git \
    && cd F5-TTS \
    && git submodule update --init --recursive \
    && pip install -e . --no-cache-dir

ENV SHELL=/bin/bash

VOLUME /root/.cache/huggingface/hub/

EXPOSE 7860

WORKDIR /workspace/F5-TTS
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Yushen CHEN

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,12 +1,267 @@
 ---
 title: TTS
-
-colorFrom: indigo
-colorTo: red
+app_file: src/f5_tts/infer/infer_gradio.py
 sdk: gradio
 sdk_version: 5.37.0
-app_file: app.py
-pinned: false
 ---
+# F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching
 
-
+[](https://github.com/SWivid/F5-TTS)
+[](https://arxiv.org/abs/2410.06885)
+[](https://swivid.github.io/F5-TTS/)
+[](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
+[](https://modelscope.cn/studios/modelscope/E2-F5-TTS)
+[](https://x-lance.sjtu.edu.cn/)
+[](https://www.pcl.ac.cn)
+<!-- <img src="https://github.com/user-attachments/assets/12d7749c-071a-427c-81bf-b87b91def670" alt="Watermark" style="width: 40px; height: auto"> -->
+
+**F5-TTS**: Diffusion Transformer with ConvNeXt V2, faster training and inference.
+
+**E2 TTS**: Flat-UNet Transformer, closest reproduction from [paper](https://arxiv.org/abs/2406.18009).
+
+**Sway Sampling**: Inference-time flow step sampling strategy, greatly improves performance.
+
+### Thanks to all the contributors!
+
+## News
+- **2025/03/12**: 🔥 F5-TTS v1 base model with better training and inference performance. [A few demos](https://swivid.github.io/F5-TTS_updates).
+- **2024/10/08**: F5-TTS & E2 TTS base models on [🤗 Hugging Face](https://huggingface.co/SWivid/F5-TTS), [🤖 Model Scope](https://www.modelscope.cn/models/SWivid/F5-TTS_Emilia-ZH-EN), [🟣 Wisemodel](https://wisemodel.cn/models/SJTU_X-LANCE/F5-TTS_Emilia-ZH-EN).
+
+## Installation
+
+### Create a separate environment if needed
+
+```bash
+# Create a python 3.10 conda env (you could also use virtualenv)
+conda create -n f5-tts python=3.10
+conda activate f5-tts
+```
+
+### Install PyTorch with matched device
+
+<details>
+<summary>NVIDIA GPU</summary>
+
+> ```bash
+> # Install pytorch with your CUDA version, e.g.
+> pip install torch==2.4.0+cu124 torchaudio==2.4.0+cu124 --extra-index-url https://download.pytorch.org/whl/cu124
+> ```
+
+</details>
+
+<details>
+<summary>AMD GPU</summary>
+
+> ```bash
+> # Install pytorch with your ROCm version (Linux only), e.g.
+> pip install torch==2.5.1+rocm6.2 torchaudio==2.5.1+rocm6.2 --extra-index-url https://download.pytorch.org/whl/rocm6.2
+> ```
+
+</details>
+
+<details>
+<summary>Intel GPU</summary>
+
+> ```bash
+> # Install pytorch with your XPU version, e.g.
+> # Intel® Deep Learning Essentials or Intel® oneAPI Base Toolkit must be installed
+> pip install torch torchaudio --index-url https://download.pytorch.org/whl/test/xpu
+>
+> # Intel GPU support is also available through IPEX (Intel® Extension for PyTorch)
+> # IPEX does not require the Intel® Deep Learning Essentials or Intel® oneAPI Base Toolkit
+> # See: https://pytorch-extension.intel.com/installation?request=platform
+> ```
+
+</details>
+
+<details>
+<summary>Apple Silicon</summary>
+
+> ```bash
+> # Install the stable pytorch, e.g.
+> pip install torch torchaudio
+> ```
+
+</details>
+
+### Then you can choose one from below:
+
+> ### 1. As a pip package (if just for inference)
+>
+> ```bash
+> pip install f5-tts
+> ```
+>
+> ### 2. Local editable (if also doing training or finetuning)
+>
+> ```bash
+> git clone https://github.com/SWivid/F5-TTS.git
+> cd F5-TTS
+> # git submodule update --init --recursive  # (optional, if using bigvgan as vocoder)
+> pip install -e .
+> ```
+
+### Docker usage also available
+```bash
+# Build from Dockerfile
+docker build -t f5tts:v1 .
+
+# Run from GitHub Container Registry
+docker container run --rm -it --gpus=all --mount 'type=volume,source=f5-tts,target=/root/.cache/huggingface/hub/' -p 7860:7860 ghcr.io/swivid/f5-tts:main
+
+# Quickstart if you want to just run the web interface (not CLI)
+docker container run --rm -it --gpus=all --mount 'type=volume,source=f5-tts,target=/root/.cache/huggingface/hub/' -p 7860:7860 ghcr.io/swivid/f5-tts:main f5-tts_infer-gradio --host 0.0.0.0
+```
+
+### Runtime
+
+Deployment solution with Triton and TensorRT-LLM.
+
+#### Benchmark Results
+Decoding on a single L20 GPU, using 26 different prompt_audio & target_text pairs, 16 NFE.
+
+| Model               | Concurrency    | Avg Latency | RTF    | Mode            |
+|---------------------|----------------|-------------|--------|-----------------|
+| F5-TTS Base (Vocos) | 2              | 253 ms      | 0.0394 | Client-Server   |
+| F5-TTS Base (Vocos) | 1 (Batch_size) | -           | 0.0402 | Offline TRT-LLM |
+| F5-TTS Base (Vocos) | 1 (Batch_size) | -           | 0.1467 | Offline Pytorch |
+
+See [detailed instructions](src/f5_tts/runtime/triton_trtllm/README.md) for more information.
+
+
+## Inference
+
+- To achieve the desired performance, take a moment to read the [detailed guidance](src/f5_tts/infer).
+- By properly searching keywords of the problem encountered, [issues](https://github.com/SWivid/F5-TTS/issues?q=is%3Aissue) are very helpful.
+
+### 1. Gradio App
+
+Currently supported features:
+
+- Basic TTS with Chunk Inference
+- Multi-Style / Multi-Speaker Generation
+- Voice Chat powered by Qwen2.5-3B-Instruct
+- [Custom inference with more language support](src/f5_tts/infer/SHARED.md)
+
+```bash
+# Launch a Gradio app (web interface)
+f5-tts_infer-gradio
+
+# Specify the port/host
+f5-tts_infer-gradio --port 7860 --host 0.0.0.0
+
+# Launch a share link
+f5-tts_infer-gradio --share
+```
+
+<details>
+<summary>NVIDIA device docker compose file example</summary>
+
+```yaml
+services:
+  f5-tts:
+    image: ghcr.io/swivid/f5-tts:main
+    ports:
+      - "7860:7860"
+    environment:
+      GRADIO_SERVER_PORT: 7860
+    entrypoint: ["f5-tts_infer-gradio", "--port", "7860", "--host", "0.0.0.0"]
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+
+volumes:
+  f5-tts:
+    driver: local
+```
+
+</details>
+
+### 2. CLI Inference
+
+```bash
+# Run with flags
+# Leaving --ref_text "" will have the ASR model transcribe (extra GPU memory usage)
+f5-tts_infer-cli --model F5TTS_v1_Base \
+--ref_audio "provide_prompt_wav_path_here.wav" \
+--ref_text "The content, subtitle or transcription of reference audio." \
+--gen_text "Some text you want TTS model generate for you."
+
+# Run with default setting. src/f5_tts/infer/examples/basic/basic.toml
+f5-tts_infer-cli
+# Or with your own .toml file
+f5-tts_infer-cli -c custom.toml
+
+# Multi voice. See src/f5_tts/infer/README.md
+f5-tts_infer-cli -c src/f5_tts/infer/examples/multi/story.toml
+```
+
+
+## Training
+
+### 1. With Hugging Face Accelerate
+
+Refer to [training & finetuning guidance](src/f5_tts/train) for best practices.
+
+### 2. With Gradio App
+
+```bash
+# Quick start with Gradio web interface
+f5-tts_finetune-gradio
+```
+
+Read [training & finetuning guidance](src/f5_tts/train) for more instructions.
+
+
+## [Evaluation](src/f5_tts/eval)
+
+
+## Development
+
+Use pre-commit to ensure code quality (will run linters and formatters automatically):
+
+```bash
+pip install pre-commit
+pre-commit install
+```
+
+When making a pull request, before each commit, run:
+
+```bash
+pre-commit run --all-files
+```
+
+Note: Some model components have linting exceptions for E722 to accommodate tensor notation.
+
+
+## Acknowledgements
+
+- [E2-TTS](https://arxiv.org/abs/2406.18009) brilliant work, simple and effective
+- [Emilia](https://arxiv.org/abs/2407.05361), [WenetSpeech4TTS](https://arxiv.org/abs/2406.05763), [LibriTTS](https://arxiv.org/abs/1904.02882), [LJSpeech](https://keithito.com/LJ-Speech-Dataset/) valuable datasets
+- [lucidrains](https://github.com/lucidrains) initial CFM structure, with also [bfs18](https://github.com/bfs18) for discussion
+- [SD3](https://arxiv.org/abs/2403.03206) & [Hugging Face diffusers](https://github.com/huggingface/diffusers) DiT and MMDiT code structure
+- [torchdiffeq](https://github.com/rtqichen/torchdiffeq) as ODE solver, [Vocos](https://huggingface.co/charactr/vocos-mel-24khz) and [BigVGAN](https://github.com/NVIDIA/BigVGAN) as vocoders
+- [FunASR](https://github.com/modelscope/FunASR), [faster-whisper](https://github.com/SYSTRAN/faster-whisper), [UniSpeech](https://github.com/microsoft/UniSpeech), [SpeechMOS](https://github.com/tarepan/SpeechMOS) for evaluation tools
+- [ctc-forced-aligner](https://github.com/MahmoudAshraf97/ctc-forced-aligner) for speech edit test
+- [mrfakename](https://x.com/realmrfakename) huggingface space demo ~
+- [f5-tts-mlx](https://github.com/lucasnewman/f5-tts-mlx/tree/main) implementation with the MLX framework by [Lucas Newman](https://github.com/lucasnewman)
+- [F5-TTS-ONNX](https://github.com/DakeQQ/F5-TTS-ONNX) ONNX Runtime version by [DakeQQ](https://github.com/DakeQQ)
+- [Yuekai Zhang](https://github.com/yuekaizhang) Triton and TensorRT-LLM support ~
+
+## Citation
+If our work and codebase are useful for you, please cite as:
+```
+@article{chen-etal-2024-f5tts,
+  title={F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching},
+  author={Yushen Chen and Zhikang Niu and Ziyang Ma and Keqi Deng and Chunhui Wang and Jian Zhao and Kai Yu and Xie Chen},
+  journal={arXiv preprint arXiv:2410.06885},
+  year={2024},
+}
+```
+## License
+
+Our code is released under the MIT License. The pre-trained models are licensed under the CC-BY-NC license due to the training data Emilia, which is an in-the-wild dataset. Sorry for any inconvenience this may cause.
pyproject.toml
ADDED
@@ -0,0 +1,64 @@
[build-system]
requires = ["setuptools >= 61.0", "setuptools-scm>=8.0"]
build-backend = "setuptools.build_meta"

[project]
name = "f5-tts"
version = "1.1.7"
description = "F5-TTS: A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching"
readme = "README.md"
license = {text = "MIT License"}
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
]
dependencies = [
    "accelerate>=0.33.0",
    "bitsandbytes>0.37.0; platform_machine != 'arm64' and platform_system != 'Darwin'",
    "cached_path",
    "click",
    "datasets",
    "ema_pytorch>=0.5.2",
    "gradio>=3.45.2",
    "hydra-core>=1.3.0",
    "jieba",
    "librosa",
    "matplotlib",
    "numpy<=1.26.4",
    "pydantic<=2.10.6",
    "pydub",
    "pypinyin",
    "safetensors",
    "soundfile",
    "tomli",
    "torch>=2.0.0",
    "torchaudio>=2.0.0",
    "torchdiffeq",
    "tqdm>=4.65.0",
    "transformers",
    "transformers_stream_generator",
    "unidecode",
    "vocos",
    "wandb",
    "x_transformers>=1.31.14",
]

[project.optional-dependencies]
eval = [
    "faster_whisper==0.10.1",
    "funasr",
    "jiwer",
    "modelscope",
    "zhconv",
    "zhon",
]

[project.urls]
Homepage = "https://github.com/SWivid/F5-TTS"

[project.scripts]
"f5-tts_infer-cli" = "f5_tts.infer.infer_cli:main"
"f5-tts_infer-gradio" = "f5_tts.infer.infer_gradio:main"
"f5-tts_finetune-cli" = "f5_tts.train.finetune_cli:main"
"f5-tts_finetune-gradio" = "f5_tts.train.finetune_gradio:main"
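Note: the `[project.scripts]` entries above are thin console wrappers around module-level `main` functions; a minimal sketch of what `f5-tts_infer-cli` resolves to (module path taken from the table above, everything else standard Python entry-point behavior):

```python
# Sketch: invoking the same entry point programmatically instead of via the console script.
from f5_tts.infer.infer_cli import main

if __name__ == "__main__":
    main()  # parses CLI flags exactly like the installed `f5-tts_infer-cli` command
```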
ruff.toml
ADDED
@@ -0,0 +1,10 @@
line-length = 120
target-version = "py310"

[lint]
# Only ignore variables with names starting with "_".
dummy-variable-rgx = "^_.*$"

[lint.isort]
force-single-line = false
lines-after-imports = 2
src/f5_tts/api.py
ADDED
@@ -0,0 +1,164 @@
import random
import sys
from importlib.resources import files

import soundfile as sf
import tqdm
from cached_path import cached_path
from hydra.utils import get_class
from omegaconf import OmegaConf

from f5_tts.infer.utils_infer import (
    infer_process,
    load_model,
    load_vocoder,
    preprocess_ref_audio_text,
    remove_silence_for_generated_wav,
    save_spectrogram,
    transcribe,
)
from f5_tts.model.utils import seed_everything


class F5TTS:
    def __init__(
        self,
        model="F5TTS_v1_Base",
        ckpt_file="",
        vocab_file="",
        ode_method="euler",
        use_ema=True,
        vocoder_local_path=None,
        device=None,
        hf_cache_dir=None,
    ):
        model_cfg = OmegaConf.load(str(files("f5_tts").joinpath(f"configs/{model}.yaml")))
        model_cls = get_class(f"f5_tts.model.{model_cfg.model.backbone}")
        model_arc = model_cfg.model.arch

        self.mel_spec_type = model_cfg.model.mel_spec.mel_spec_type
        self.target_sample_rate = model_cfg.model.mel_spec.target_sample_rate

        self.ode_method = ode_method
        self.use_ema = use_ema

        if device is not None:
            self.device = device
        else:
            import torch

            self.device = (
                "cuda"
                if torch.cuda.is_available()
                else "xpu"
                if torch.xpu.is_available()
                else "mps"
                if torch.backends.mps.is_available()
                else "cpu"
            )

        # Load models
        self.vocoder = load_vocoder(
            self.mel_spec_type, vocoder_local_path is not None, vocoder_local_path, self.device, hf_cache_dir
        )

        repo_name, ckpt_step, ckpt_type = "F5-TTS", 1250000, "safetensors"

        # override for previous models
        if model == "F5TTS_Base":
            if self.mel_spec_type == "vocos":
                ckpt_step = 1200000
            elif self.mel_spec_type == "bigvgan":
                model = "F5TTS_Base_bigvgan"
                ckpt_type = "pt"
        elif model == "E2TTS_Base":
            repo_name = "E2-TTS"
            ckpt_step = 1200000

        if not ckpt_file:
            ckpt_file = str(
                cached_path(f"hf://SWivid/{repo_name}/{model}/model_{ckpt_step}.{ckpt_type}", cache_dir=hf_cache_dir)
            )
        self.ema_model = load_model(
            model_cls, model_arc, ckpt_file, self.mel_spec_type, vocab_file, self.ode_method, self.use_ema, self.device
        )

    def transcribe(self, ref_audio, language=None):
        return transcribe(ref_audio, language)

    def export_wav(self, wav, file_wave, remove_silence=False):
        sf.write(file_wave, wav, self.target_sample_rate)

        if remove_silence:
            remove_silence_for_generated_wav(file_wave)

    def export_spectrogram(self, spec, file_spec):
        save_spectrogram(spec, file_spec)

    def infer(
        self,
        ref_file,
        ref_text,
        gen_text,
        show_info=print,
        progress=tqdm,
        target_rms=0.1,
        cross_fade_duration=0.15,
        sway_sampling_coef=-1,
        cfg_strength=2,
        nfe_step=32,
        speed=1.0,
        fix_duration=None,
        remove_silence=False,
        file_wave=None,
        file_spec=None,
        seed=None,
    ):
        if seed is None:
            seed = random.randint(0, sys.maxsize)
        seed_everything(seed)
        self.seed = seed

        ref_file, ref_text = preprocess_ref_audio_text(ref_file, ref_text)

        wav, sr, spec = infer_process(
            ref_file,
            ref_text,
            gen_text,
            self.ema_model,
            self.vocoder,
            self.mel_spec_type,
            show_info=show_info,
            progress=progress,
            target_rms=target_rms,
            cross_fade_duration=cross_fade_duration,
            nfe_step=nfe_step,
            cfg_strength=cfg_strength,
            sway_sampling_coef=sway_sampling_coef,
            speed=speed,
            fix_duration=fix_duration,
            device=self.device,
        )

        if file_wave is not None:
            self.export_wav(wav, file_wave, remove_silence)

        if file_spec is not None:
            self.export_spectrogram(spec, file_spec)

        return wav, sr, spec


if __name__ == "__main__":
    f5tts = F5TTS()

    wav, sr, spec = f5tts.infer(
        ref_file=str(files("f5_tts").joinpath("infer/examples/basic/basic_ref_en.wav")),
        ref_text="some call me nature, others call me mother nature.",
        gen_text="""I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring. Respect me and I'll nurture you; ignore me and you shall face the consequences.""",
        file_wave=str(files("f5_tts").joinpath("../../tests/api_out.wav")),
        file_spec=str(files("f5_tts").joinpath("../../tests/api_out.png")),
        seed=None,
    )

    print("seed :", f5tts.seed)
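Note: a minimal usage sketch of the `F5TTS` class defined above, complementing the bundled `__main__` example; file paths here are hypothetical placeholders, and all arguments mirror the signatures shown in the file.

```python
# Sketch: programmatic TTS with a user-supplied reference clip.
from f5_tts.api import F5TTS

tts = F5TTS(model="F5TTS_v1_Base")       # checkpoint fetched via cached_path on first use
ref_text = tts.transcribe("my_ref.wav")  # let the built-in ASR helper fill in ref_text
wav, sr, spec = tts.infer(
    ref_file="my_ref.wav",               # hypothetical reference audio path
    ref_text=ref_text,
    gen_text="Text you want the model to speak.",
    file_wave="out.wav",                 # optional: also write the waveform to disk
)
print("sample rate:", sr, "| seed used:", tts.seed)
```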
src/f5_tts/configs/E2TTS_Base.yaml
ADDED
@@ -0,0 +1,49 @@
hydra:
  run:
    dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}

datasets:
  name: Emilia_ZH_EN  # dataset name
  batch_size_per_gpu: 38400  # 8 GPUs, 8 * 38400 = 307200
  batch_size_type: frame  # frame | sample
  max_samples: 64  # max sequences per batch if use frame-wise batch_size. we set 32 for small models, 64 for base models
  num_workers: 16

optim:
  epochs: 11
  learning_rate: 7.5e-5
  num_warmup_updates: 20000  # warmup updates
  grad_accumulation_steps: 1  # note: updates = steps / grad_accumulation_steps
  max_grad_norm: 1.0  # gradient clipping
  bnb_optimizer: False  # use bnb 8bit AdamW optimizer or not

model:
  name: E2TTS_Base
  tokenizer: pinyin
  tokenizer_path: null  # if 'custom' tokenizer, define the path want to use (should be vocab.txt)
  backbone: UNetT
  arch:
    dim: 1024
    depth: 24
    heads: 16
    ff_mult: 4
    text_mask_padding: False
    pe_attn_head: 1
  mel_spec:
    target_sample_rate: 24000
    n_mel_channels: 100
    hop_length: 256
    win_length: 1024
    n_fft: 1024
    mel_spec_type: vocos  # vocos | bigvgan
  vocoder:
    is_local: False  # use local offline ckpt or not
    local_path: null  # local vocoder path

ckpts:
  logger: wandb  # wandb | tensorboard | null
  log_samples: True  # infer random sample per save checkpoint. wip, normal to fail with extra long samples
  save_per_updates: 50000  # save checkpoint per updates
  keep_last_n_checkpoints: -1  # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints
  last_per_updates: 5000  # save last checkpoint per updates
  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
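Note: a minimal sketch of how a model config like the one above is consumed, mirroring the `OmegaConf`/`get_class` pattern in `src/f5_tts/api.py` earlier in this diff.

```python
# Sketch: loading a packaged model config and resolving its backbone class.
from importlib.resources import files

from hydra.utils import get_class
from omegaconf import OmegaConf

cfg = OmegaConf.load(str(files("f5_tts").joinpath("configs/E2TTS_Base.yaml")))
model_cls = get_class(f"f5_tts.model.{cfg.model.backbone}")  # -> f5_tts.model.UNetT
print(cfg.model.arch.dim, cfg.model.mel_spec.target_sample_rate)  # 1024 24000
```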
src/f5_tts/configs/E2TTS_Small.yaml
ADDED
@@ -0,0 +1,49 @@
hydra:
  run:
    dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}

datasets:
  name: Emilia_ZH_EN
  batch_size_per_gpu: 38400  # 8 GPUs, 8 * 38400 = 307200
  batch_size_type: frame  # frame | sample
  max_samples: 64  # max sequences per batch if use frame-wise batch_size. we set 32 for small models, 64 for base models
  num_workers: 16

optim:
  epochs: 11
  learning_rate: 7.5e-5
  num_warmup_updates: 20000  # warmup updates
  grad_accumulation_steps: 1  # note: updates = steps / grad_accumulation_steps
  max_grad_norm: 1.0
  bnb_optimizer: False

model:
  name: E2TTS_Small
  tokenizer: pinyin
  tokenizer_path: null  # if 'custom' tokenizer, define the path want to use (should be vocab.txt)
  backbone: UNetT
  arch:
    dim: 768
    depth: 20
    heads: 12
    ff_mult: 4
    text_mask_padding: False
    pe_attn_head: 1
  mel_spec:
    target_sample_rate: 24000
    n_mel_channels: 100
    hop_length: 256
    win_length: 1024
    n_fft: 1024
    mel_spec_type: vocos  # vocos | bigvgan
  vocoder:
    is_local: False  # use local offline ckpt or not
    local_path: null  # local vocoder path

ckpts:
  logger: wandb  # wandb | tensorboard | null
  log_samples: True  # infer random sample per save checkpoint. wip, normal to fail with extra long samples
  save_per_updates: 50000  # save checkpoint per updates
  keep_last_n_checkpoints: -1  # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints
  last_per_updates: 5000  # save last checkpoint per updates
  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
src/f5_tts/configs/F5TTS_Base.yaml
ADDED
@@ -0,0 +1,54 @@
+hydra:
+  run:
+    dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+
+datasets:
+  name: Emilia_ZH_EN  # dataset name
+  batch_size_per_gpu: 38400  # 8 GPUs, 8 * 38400 = 307200
+  batch_size_type: frame  # frame | sample
+  max_samples: 64  # max sequences per batch if using frame-wise batch_size. we set 32 for small models, 64 for base models
+  num_workers: 16
+
+optim:
+  epochs: 11
+  learning_rate: 7.5e-5
+  num_warmup_updates: 20000  # warmup updates
+  grad_accumulation_steps: 1  # note: updates = steps / grad_accumulation_steps
+  max_grad_norm: 1.0  # gradient clipping
+  bnb_optimizer: False  # use bnb 8bit AdamW optimizer or not
+
+model:
+  name: F5TTS_Base  # model name
+  tokenizer: pinyin  # tokenizer type
+  tokenizer_path: null  # if 'custom' tokenizer, define the path to use (should be vocab.txt)
+  backbone: DiT
+  arch:
+    dim: 1024
+    depth: 22
+    heads: 16
+    ff_mult: 2
+    text_dim: 512
+    text_mask_padding: False
+    conv_layers: 4
+    pe_attn_head: 1
+    attn_backend: torch  # torch | flash_attn
+    attn_mask_enabled: False
+    checkpoint_activations: False  # recompute activations and save memory for extra compute
+  mel_spec:
+    target_sample_rate: 24000
+    n_mel_channels: 100
+    hop_length: 256
+    win_length: 1024
+    n_fft: 1024
+    mel_spec_type: vocos  # vocos | bigvgan
+  vocoder:
+    is_local: False  # use local offline ckpt or not
+    local_path: null  # local vocoder path
+
+ckpts:
+  logger: wandb  # wandb | tensorboard | null
+  log_samples: True  # infer random sample per save checkpoint. wip, normal to fail with extra long samples
+  save_per_updates: 50000  # save checkpoint per updates
+  keep_last_n_checkpoints: -1  # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints
+  last_per_updates: 5000  # save last checkpoint per updates
+  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
src/f5_tts/configs/F5TTS_Small.yaml
ADDED
@@ -0,0 +1,54 @@
+hydra:
+  run:
+    dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+
+datasets:
+  name: Emilia_ZH_EN
+  batch_size_per_gpu: 38400  # 8 GPUs, 8 * 38400 = 307200
+  batch_size_type: frame  # frame | sample
+  max_samples: 64  # max sequences per batch if using frame-wise batch_size. we set 32 for small models, 64 for base models
+  num_workers: 16
+
+optim:
+  epochs: 11  # only suitable for Emilia; to train on LibriTTS, set epochs to 686
+  learning_rate: 7.5e-5
+  num_warmup_updates: 20000  # warmup updates
+  grad_accumulation_steps: 1  # note: updates = steps / grad_accumulation_steps
+  max_grad_norm: 1.0  # gradient clipping
+  bnb_optimizer: False  # use bnb 8bit AdamW optimizer or not
+
+model:
+  name: F5TTS_Small
+  tokenizer: pinyin
+  tokenizer_path: null  # if 'custom' tokenizer, define the path to use (should be vocab.txt)
+  backbone: DiT
+  arch:
+    dim: 768
+    depth: 18
+    heads: 12
+    ff_mult: 2
+    text_dim: 512
+    text_mask_padding: False
+    conv_layers: 4
+    pe_attn_head: 1
+    attn_backend: torch  # torch | flash_attn
+    attn_mask_enabled: False
+    checkpoint_activations: False  # recompute activations and save memory for extra compute
+  mel_spec:
+    target_sample_rate: 24000
+    n_mel_channels: 100
+    hop_length: 256
+    win_length: 1024
+    n_fft: 1024
+    mel_spec_type: vocos  # vocos | bigvgan
+  vocoder:
+    is_local: False  # use local offline ckpt or not
+    local_path: null  # local vocoder path
+
+ckpts:
+  logger: wandb  # wandb | tensorboard | null
+  log_samples: True  # infer random sample per save checkpoint. wip, normal to fail with extra long samples
+  save_per_updates: 50000  # save checkpoint per updates
+  keep_last_n_checkpoints: -1  # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints
+  last_per_updates: 5000  # save last checkpoint per updates
+  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
src/f5_tts/configs/F5TTS_v1_Base.yaml
ADDED
@@ -0,0 +1,55 @@
+hydra:
+  run:
+    dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
+
+datasets:
+  name: Emilia_ZH_EN  # dataset name
+  batch_size_per_gpu: 38400  # 8 GPUs, 8 * 38400 = 307200
+  batch_size_type: frame  # frame | sample
+  max_samples: 64  # max sequences per batch if using frame-wise batch_size. we set 32 for small models, 64 for base models
+  num_workers: 16
+
+optim:
+  epochs: 11
+  learning_rate: 7.5e-5
+  num_warmup_updates: 20000  # warmup updates
+  grad_accumulation_steps: 1  # note: updates = steps / grad_accumulation_steps
+  max_grad_norm: 1.0  # gradient clipping
+  bnb_optimizer: False  # use bnb 8bit AdamW optimizer or not
+
+model:
+  name: F5TTS_v1_Base  # model name
+  tokenizer: pinyin  # tokenizer type
+  tokenizer_path: null  # if 'custom' tokenizer, define the path to use (should be vocab.txt)
+  backbone: DiT
+  arch:
+    dim: 1024
+    depth: 22
+    heads: 16
+    ff_mult: 2
+    text_dim: 512
+    text_mask_padding: True
+    qk_norm: null  # null | rms_norm
+    conv_layers: 4
+    pe_attn_head: null
+    attn_backend: torch  # torch | flash_attn
+    attn_mask_enabled: False
+    checkpoint_activations: False  # recompute activations and save memory for extra compute
+  mel_spec:
+    target_sample_rate: 24000
+    n_mel_channels: 100
+    hop_length: 256
+    win_length: 1024
+    n_fft: 1024
+    mel_spec_type: vocos  # vocos | bigvgan
+  vocoder:
+    is_local: False  # use local offline ckpt or not
+    local_path: null  # local vocoder path
+
+ckpts:
+  logger: wandb  # wandb | tensorboard | null
+  log_samples: True  # infer random sample per save checkpoint. wip, normal to fail with extra long samples
+  save_per_updates: 50000  # save checkpoint per updates
+  keep_last_n_checkpoints: -1  # -1 to keep all, 0 to not save intermediate, > 0 to keep last N checkpoints
+  last_per_updates: 5000  # save last checkpoint per updates
+  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
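All five configs share the same layout and the same `${...}` interpolation syntax, so keys like `save_dir` are assembled from other keys at load time. A minimal sketch of reading one with OmegaConf (the file path is the one from this diff; note that `${now:...}` inside `hydra.run.dir` is a Hydra-only resolver and is left alone here):

```python
from omegaconf import OmegaConf

# Load the config exactly as eval_infer_batch.py does further down in this diff.
cfg = OmegaConf.load("src/f5_tts/configs/F5TTS_v1_Base.yaml")
print(cfg.model.arch.dim)  # 1024

# Resolving the ckpts subtree expands ${model.name} etc. against the root config:
ckpts = OmegaConf.to_container(cfg.ckpts, resolve=True)
print(ckpts["save_dir"])  # ckpts/F5TTS_v1_Base_vocos_pinyin_Emilia_ZH_EN
```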
src/f5_tts/eval/README.md
ADDED
@@ -0,0 +1,52 @@
+
+# Evaluation
+
+Install packages for evaluation:
+
+```bash
+pip install -e .[eval]
+```
+
+## Generating Samples for Evaluation
+
+### Prepare Test Datasets
+
+1. *Seed-TTS testset*: Download from [seed-tts-eval](https://github.com/BytedanceSpeech/seed-tts-eval).
+2. *LibriSpeech test-clean*: Download from [OpenSLR](http://www.openslr.org/12/).
+3. Unzip the downloaded datasets and place them in the `data/` directory.
+4. Update the path for the *LibriSpeech test-clean* data in `src/f5_tts/eval/eval_infer_batch.py`.
+5. Our filtered LibriSpeech-PC 4-10s subset: `data/librispeech_pc_test_clean_cross_sentence.lst`
+
+### Batch Inference for Test Set
+
+To run batch inference for evaluations, execute the following commands:
+
+```bash
+# batch inference for evaluations
+accelerate config  # if not set before
+bash src/f5_tts/eval/eval_infer_batch.sh
+```
+
+## Objective Evaluation on Generated Results
+
+### Download Evaluation Model Checkpoints
+
+1. Chinese ASR Model: [Paraformer-zh](https://huggingface.co/funasr/paraformer-zh)
+2. English ASR Model: [Faster-Whisper](https://huggingface.co/Systran/faster-whisper-large-v3)
+3. WavLM Model: Download from [Google Drive](https://drive.google.com/file/d/1-aE1NfzpRCLxA4GUxX9ITI3F9LlbtEGP/view).
+
+Then update the following scripts with the paths where you placed the evaluation model checkpoints.
+
+### Objective Evaluation
+
+Point the paths at your batch-inference results, then carry out WER / SIM / UTMOS evaluations:
+```bash
+# Evaluation [WER] for Seed-TTS test [ZH] set
+python src/f5_tts/eval/eval_seedtts_testset.py --eval_task wer --lang zh --gen_wav_dir <GEN_WAV_DIR> --gpu_nums 8
+
+# Evaluation [SIM] for LibriSpeech-PC test-clean (cross-sentence)
+python src/f5_tts/eval/eval_librispeech_test_clean.py --eval_task sim --gen_wav_dir <GEN_WAV_DIR> --librispeech_test_clean_path <TEST_CLEAN_PATH>
+
+# Evaluation [UTMOS]. --ext: Audio extension
+python src/f5_tts/eval/eval_utmos.py --audio_dir <WAV_DIR> --ext wav
+```
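For orientation, the WER scripts normalize punctuation and casing before scoring with jiwer; a minimal standalone sketch of that metric (the normalization loosely mirrors the English branch of `run_asr_wer` in `utils_eval.py` further down in this diff; `jiwer.compute_measures` is the call the project uses, and the example strings are made up):

```python
import string

from jiwer import compute_measures  # installed via the .[eval] extras


def simple_wer(truth: str, hypo: str) -> float:
    # Strip punctuation and lowercase, roughly as run_asr_wer does for English.
    for ch in string.punctuation:
        truth = truth.replace(ch, "")
        hypo = hypo.replace(ch, "")
    return compute_measures(truth.lower(), hypo.lower())["wer"]


print(simple_wer("Hello there, world!", "hello their world"))  # 1 substitution / 3 words = 0.333...
```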
src/f5_tts/eval/ecapa_tdnn.py
ADDED
@@ -0,0 +1,331 @@
+# just for speaker similarity evaluation, third-party code
+
+# From https://github.com/microsoft/UniSpeech/blob/main/downstreams/speaker_verification/models/
+# part of the code is borrowed from https://github.com/lawlict/ECAPA-TDNN
+
+import os
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+""" Res2Conv1d + BatchNorm1d + ReLU
+"""
+
+
+class Res2Conv1dReluBn(nn.Module):
+    """
+    in_channels == out_channels == channels
+    """
+
+    def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True, scale=4):
+        super().__init__()
+        assert channels % scale == 0, "{} % {} != 0".format(channels, scale)
+        self.scale = scale
+        self.width = channels // scale
+        self.nums = scale if scale == 1 else scale - 1
+
+        self.convs = []
+        self.bns = []
+        for i in range(self.nums):
+            self.convs.append(nn.Conv1d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias))
+            self.bns.append(nn.BatchNorm1d(self.width))
+        self.convs = nn.ModuleList(self.convs)
+        self.bns = nn.ModuleList(self.bns)
+
+    def forward(self, x):
+        out = []
+        spx = torch.split(x, self.width, 1)
+        for i in range(self.nums):
+            if i == 0:
+                sp = spx[i]
+            else:
+                sp = sp + spx[i]
+            # Order: conv -> relu -> bn
+            sp = self.convs[i](sp)
+            sp = self.bns[i](F.relu(sp))
+            out.append(sp)
+        if self.scale != 1:
+            out.append(spx[self.nums])
+        out = torch.cat(out, dim=1)
+
+        return out
+
+
+""" Conv1d + BatchNorm1d + ReLU
+"""
+
+
+class Conv1dReluBn(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True):
+        super().__init__()
+        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)
+        self.bn = nn.BatchNorm1d(out_channels)
+
+    def forward(self, x):
+        return self.bn(F.relu(self.conv(x)))
+
+
+""" The SE connection of 1D case.
+"""
+
+
+class SE_Connect(nn.Module):
+    def __init__(self, channels, se_bottleneck_dim=128):
+        super().__init__()
+        self.linear1 = nn.Linear(channels, se_bottleneck_dim)
+        self.linear2 = nn.Linear(se_bottleneck_dim, channels)
+
+    def forward(self, x):
+        out = x.mean(dim=2)
+        out = F.relu(self.linear1(out))
+        out = torch.sigmoid(self.linear2(out))
+        out = x * out.unsqueeze(2)
+
+        return out
+
+
+""" SE-Res2Block of the ECAPA-TDNN architecture.
+"""
+
+# def SE_Res2Block(channels, kernel_size, stride, padding, dilation, scale):
+#     return nn.Sequential(
+#         Conv1dReluBn(channels, 512, kernel_size=1, stride=1, padding=0),
+#         Res2Conv1dReluBn(512, kernel_size, stride, padding, dilation, scale=scale),
+#         Conv1dReluBn(512, channels, kernel_size=1, stride=1, padding=0),
+#         SE_Connect(channels)
+#     )
+
+
+class SE_Res2Block(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, scale, se_bottleneck_dim):
+        super().__init__()
+        self.Conv1dReluBn1 = Conv1dReluBn(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+        self.Res2Conv1dReluBn = Res2Conv1dReluBn(out_channels, kernel_size, stride, padding, dilation, scale=scale)
+        self.Conv1dReluBn2 = Conv1dReluBn(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
+        self.SE_Connect = SE_Connect(out_channels, se_bottleneck_dim)
+
+        self.shortcut = None
+        if in_channels != out_channels:
+            self.shortcut = nn.Conv1d(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=1,
+            )
+
+    def forward(self, x):
+        residual = x
+        if self.shortcut:
+            residual = self.shortcut(x)
+
+        x = self.Conv1dReluBn1(x)
+        x = self.Res2Conv1dReluBn(x)
+        x = self.Conv1dReluBn2(x)
+        x = self.SE_Connect(x)
+
+        return x + residual
+
+
+""" Attentive weighted mean and standard deviation pooling.
+"""
+
+
+class AttentiveStatsPool(nn.Module):
+    def __init__(self, in_dim, attention_channels=128, global_context_att=False):
+        super().__init__()
+        self.global_context_att = global_context_att
+
+        # Use Conv1d with stride == 1 rather than Linear, then we don't need to transpose inputs.
+        if global_context_att:
+            self.linear1 = nn.Conv1d(in_dim * 3, attention_channels, kernel_size=1)  # equals W and b in the paper
+        else:
+            self.linear1 = nn.Conv1d(in_dim, attention_channels, kernel_size=1)  # equals W and b in the paper
+        self.linear2 = nn.Conv1d(attention_channels, in_dim, kernel_size=1)  # equals V and k in the paper
+
+    def forward(self, x):
+        if self.global_context_att:
+            context_mean = torch.mean(x, dim=-1, keepdim=True).expand_as(x)
+            context_std = torch.sqrt(torch.var(x, dim=-1, keepdim=True) + 1e-10).expand_as(x)
+            x_in = torch.cat((x, context_mean, context_std), dim=1)
+        else:
+            x_in = x
+
+        # DON'T use ReLU here! In experiments, I find ReLU hard to converge.
+        alpha = torch.tanh(self.linear1(x_in))
+        # alpha = F.relu(self.linear1(x_in))
+        alpha = torch.softmax(self.linear2(alpha), dim=2)
+        mean = torch.sum(alpha * x, dim=2)
+        residuals = torch.sum(alpha * (x**2), dim=2) - mean**2
+        std = torch.sqrt(residuals.clamp(min=1e-9))
+        return torch.cat([mean, std], dim=1)
+
+
+class ECAPA_TDNN(nn.Module):
+    def __init__(
+        self,
+        feat_dim=80,
+        channels=512,
+        emb_dim=192,
+        global_context_att=False,
+        feat_type="wavlm_large",
+        sr=16000,
+        feature_selection="hidden_states",
+        update_extract=False,
+        config_path=None,
+    ):
+        super().__init__()
+
+        self.feat_type = feat_type
+        self.feature_selection = feature_selection
+        self.update_extract = update_extract
+        self.sr = sr
+
+        torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
+        try:
+            local_s3prl_path = os.path.expanduser("~/.cache/torch/hub/s3prl_s3prl_main")
+            self.feature_extract = torch.hub.load(local_s3prl_path, feat_type, source="local", config_path=config_path)
+        except:  # noqa: E722
+            self.feature_extract = torch.hub.load("s3prl/s3prl", feat_type)
+
+        if len(self.feature_extract.model.encoder.layers) == 24 and hasattr(
+            self.feature_extract.model.encoder.layers[23].self_attn, "fp32_attention"
+        ):
+            self.feature_extract.model.encoder.layers[23].self_attn.fp32_attention = False
+        if len(self.feature_extract.model.encoder.layers) == 24 and hasattr(
+            self.feature_extract.model.encoder.layers[11].self_attn, "fp32_attention"
+        ):
+            self.feature_extract.model.encoder.layers[11].self_attn.fp32_attention = False
+
+        self.feat_num = self.get_feat_num()
+        self.feature_weight = nn.Parameter(torch.zeros(self.feat_num))
+
+        if feat_type != "fbank" and feat_type != "mfcc":
+            freeze_list = ["final_proj", "label_embs_concat", "mask_emb", "project_q", "quantizer"]
+            for name, param in self.feature_extract.named_parameters():
+                for freeze_val in freeze_list:
+                    if freeze_val in name:
+                        param.requires_grad = False
+                        break
+
+        if not self.update_extract:
+            for param in self.feature_extract.parameters():
+                param.requires_grad = False
+
+        self.instance_norm = nn.InstanceNorm1d(feat_dim)
+        # self.channels = [channels] * 4 + [channels * 3]
+        self.channels = [channels] * 4 + [1536]
+
+        self.layer1 = Conv1dReluBn(feat_dim, self.channels[0], kernel_size=5, padding=2)
+        self.layer2 = SE_Res2Block(
+            self.channels[0],
+            self.channels[1],
+            kernel_size=3,
+            stride=1,
+            padding=2,
+            dilation=2,
+            scale=8,
+            se_bottleneck_dim=128,
+        )
+        self.layer3 = SE_Res2Block(
+            self.channels[1],
+            self.channels[2],
+            kernel_size=3,
+            stride=1,
+            padding=3,
+            dilation=3,
+            scale=8,
+            se_bottleneck_dim=128,
+        )
+        self.layer4 = SE_Res2Block(
+            self.channels[2],
+            self.channels[3],
+            kernel_size=3,
+            stride=1,
+            padding=4,
+            dilation=4,
+            scale=8,
+            se_bottleneck_dim=128,
+        )
+
+        # self.conv = nn.Conv1d(self.channels[-1], self.channels[-1], kernel_size=1)
+        cat_channels = channels * 3
+        self.conv = nn.Conv1d(cat_channels, self.channels[-1], kernel_size=1)
+        self.pooling = AttentiveStatsPool(
+            self.channels[-1], attention_channels=128, global_context_att=global_context_att
+        )
+        self.bn = nn.BatchNorm1d(self.channels[-1] * 2)
+        self.linear = nn.Linear(self.channels[-1] * 2, emb_dim)
+
+    def get_feat_num(self):
+        self.feature_extract.eval()
+        wav = [torch.randn(self.sr).to(next(self.feature_extract.parameters()).device)]
+        with torch.no_grad():
+            features = self.feature_extract(wav)
+        select_feature = features[self.feature_selection]
+        if isinstance(select_feature, (list, tuple)):
+            return len(select_feature)
+        else:
+            return 1
+
+    def get_feat(self, x):
+        if self.update_extract:
+            x = self.feature_extract([sample for sample in x])
+        else:
+            with torch.no_grad():
+                if self.feat_type == "fbank" or self.feat_type == "mfcc":
+                    x = self.feature_extract(x) + 1e-6  # B x feat_dim x time_len
+                else:
+                    x = self.feature_extract([sample for sample in x])
+
+        if self.feat_type == "fbank":
+            x = x.log()
+
+        if self.feat_type != "fbank" and self.feat_type != "mfcc":
+            x = x[self.feature_selection]
+            if isinstance(x, (list, tuple)):
+                x = torch.stack(x, dim=0)
+            else:
+                x = x.unsqueeze(0)
+            norm_weights = F.softmax(self.feature_weight, dim=-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
+            x = (norm_weights * x).sum(dim=0)
+            x = torch.transpose(x, 1, 2) + 1e-6
+
+        x = self.instance_norm(x)
+        return x
+
+    def forward(self, x):
+        x = self.get_feat(x)
+
+        out1 = self.layer1(x)
+        out2 = self.layer2(out1)
+        out3 = self.layer3(out2)
+        out4 = self.layer4(out3)
+
+        out = torch.cat([out2, out3, out4], dim=1)
+        out = F.relu(self.conv(out))
+        out = self.bn(self.pooling(out))
+        out = self.linear(out)
+
+        return out
+
+
+def ECAPA_TDNN_SMALL(
+    feat_dim,
+    emb_dim=256,
+    feat_type="wavlm_large",
+    sr=16000,
+    feature_selection="hidden_states",
+    update_extract=False,
+    config_path=None,
+):
+    return ECAPA_TDNN(
+        feat_dim=feat_dim,
+        channels=512,
+        emb_dim=emb_dim,
+        feat_type=feat_type,
+        sr=sr,
+        feature_selection=feature_selection,
+        update_extract=update_extract,
+        config_path=config_path,
+    )
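For orientation, the SIM evaluation uses this model roughly as sketched below (a minimal sketch, assuming the WavLM-large finetuned checkpoint from the README's Google Drive link; the wav file names are placeholders, and the exact logic lives in `run_sim` in `utils_eval.py`):

```python
import torch
import torch.nn.functional as F
import torchaudio

from f5_tts.eval.ecapa_tdnn import ECAPA_TDNN_SMALL

# wavlm_large consumes 16 kHz mono waveforms; feat_dim=1024 matches its hidden size.
model = ECAPA_TDNN_SMALL(feat_dim=1024, emb_dim=256, feat_type="wavlm_large")
state = torch.load("wavlm_large_finetune.pth", map_location="cpu")
model.load_state_dict(state["model"], strict=False)
model.eval()

wav1, sr1 = torchaudio.load("gen.wav")     # generated sample (placeholder name)
wav2, sr2 = torchaudio.load("prompt.wav")  # reference prompt (placeholder name)
wav1 = torchaudio.functional.resample(wav1, sr1, 16000)
wav2 = torchaudio.functional.resample(wav2, sr2, 16000)

with torch.no_grad():
    emb1 = model(wav1)  # (1, 256) speaker embedding
    emb2 = model(wav2)

print(F.cosine_similarity(emb1, emb2).item())  # SIM score in [-1, 1]
```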
src/f5_tts/eval/eval_infer_batch.py
ADDED
@@ -0,0 +1,210 @@
+import os
+import sys
+
+
+sys.path.append(os.getcwd())
+
+import argparse
+import time
+from importlib.resources import files
+
+import torch
+import torchaudio
+from accelerate import Accelerator
+from hydra.utils import get_class
+from omegaconf import OmegaConf
+from tqdm import tqdm
+
+from f5_tts.eval.utils_eval import (
+    get_inference_prompt,
+    get_librispeech_test_clean_metainfo,
+    get_seedtts_testset_metainfo,
+)
+from f5_tts.infer.utils_infer import load_checkpoint, load_vocoder
+from f5_tts.model import CFM
+from f5_tts.model.utils import get_tokenizer
+
+
+accelerator = Accelerator()
+device = f"cuda:{accelerator.process_index}"
+
+
+use_ema = True
+target_rms = 0.1
+
+
+rel_path = str(files("f5_tts").joinpath("../../"))
+
+
+def main():
+    parser = argparse.ArgumentParser(description="batch inference")
+
+    parser.add_argument("-s", "--seed", default=None, type=int)
+    parser.add_argument("-n", "--expname", required=True)
+    parser.add_argument("-c", "--ckptstep", default=1250000, type=int)
+
+    parser.add_argument("-nfe", "--nfestep", default=32, type=int)
+    parser.add_argument("-o", "--odemethod", default="euler")
+    parser.add_argument("-ss", "--swaysampling", default=-1, type=float)
+
+    parser.add_argument("-t", "--testset", required=True)
+
+    args = parser.parse_args()
+
+    seed = args.seed
+    exp_name = args.expname
+    ckpt_step = args.ckptstep
+
+    nfe_step = args.nfestep
+    ode_method = args.odemethod
+    sway_sampling_coef = args.swaysampling
+
+    testset = args.testset
+
+    infer_batch_size = 1  # max frames. 1 for ddp single inference (recommended)
+    cfg_strength = 2.0
+    speed = 1.0
+    use_truth_duration = False
+    no_ref_audio = False
+
+    model_cfg = OmegaConf.load(str(files("f5_tts").joinpath(f"configs/{exp_name}.yaml")))
+    model_cls = get_class(f"f5_tts.model.{model_cfg.model.backbone}")
+    model_arc = model_cfg.model.arch
+
+    dataset_name = model_cfg.datasets.name
+    tokenizer = model_cfg.model.tokenizer
+
+    mel_spec_type = model_cfg.model.mel_spec.mel_spec_type
+    target_sample_rate = model_cfg.model.mel_spec.target_sample_rate
+    n_mel_channels = model_cfg.model.mel_spec.n_mel_channels
+    hop_length = model_cfg.model.mel_spec.hop_length
+    win_length = model_cfg.model.mel_spec.win_length
+    n_fft = model_cfg.model.mel_spec.n_fft
+
+    if testset == "ls_pc_test_clean":
+        metalst = rel_path + "/data/librispeech_pc_test_clean_cross_sentence.lst"
+        librispeech_test_clean_path = "<SOME_PATH>/LibriSpeech/test-clean"  # test-clean path
+        metainfo = get_librispeech_test_clean_metainfo(metalst, librispeech_test_clean_path)
+
+    elif testset == "seedtts_test_zh":
+        metalst = rel_path + "/data/seedtts_testset/zh/meta.lst"
+        metainfo = get_seedtts_testset_metainfo(metalst)
+
+    elif testset == "seedtts_test_en":
+        metalst = rel_path + "/data/seedtts_testset/en/meta.lst"
+        metainfo = get_seedtts_testset_metainfo(metalst)
+
+    # path to save generated wavs
+    output_dir = (
+        f"{rel_path}/"
+        f"results/{exp_name}_{ckpt_step}/{testset}/"
+        f"seed{seed}_{ode_method}_nfe{nfe_step}_{mel_spec_type}"
+        f"{f'_ss{sway_sampling_coef}' if sway_sampling_coef else ''}"
+        f"_cfg{cfg_strength}_speed{speed}"
+        f"{'_gt-dur' if use_truth_duration else ''}"
+        f"{'_no-ref-audio' if no_ref_audio else ''}"
+    )
+
+    # -------------------------------------------------#
+
+    prompts_all = get_inference_prompt(
+        metainfo,
+        speed=speed,
+        tokenizer=tokenizer,
+        target_sample_rate=target_sample_rate,
+        n_mel_channels=n_mel_channels,
+        hop_length=hop_length,
+        mel_spec_type=mel_spec_type,
+        target_rms=target_rms,
+        use_truth_duration=use_truth_duration,
+        infer_batch_size=infer_batch_size,
+    )
+
+    # Vocoder model
+    local = False
+    if mel_spec_type == "vocos":
+        vocoder_local_path = "../checkpoints/charactr/vocos-mel-24khz"
+    elif mel_spec_type == "bigvgan":
+        vocoder_local_path = "../checkpoints/bigvgan_v2_24khz_100band_256x"
+    vocoder = load_vocoder(vocoder_name=mel_spec_type, is_local=local, local_path=vocoder_local_path)
+
+    # Tokenizer
+    vocab_char_map, vocab_size = get_tokenizer(dataset_name, tokenizer)
+
+    # Model
+    model = CFM(
+        transformer=model_cls(**model_arc, text_num_embeds=vocab_size, mel_dim=n_mel_channels),
+        mel_spec_kwargs=dict(
+            n_fft=n_fft,
+            hop_length=hop_length,
+            win_length=win_length,
+            n_mel_channels=n_mel_channels,
+            target_sample_rate=target_sample_rate,
+            mel_spec_type=mel_spec_type,
+        ),
+        odeint_kwargs=dict(
+            method=ode_method,
+        ),
+        vocab_char_map=vocab_char_map,
+    ).to(device)
+
+    ckpt_prefix = rel_path + f"/ckpts/{exp_name}/model_{ckpt_step}"
+    if os.path.exists(ckpt_prefix + ".pt"):
+        ckpt_path = ckpt_prefix + ".pt"
+    elif os.path.exists(ckpt_prefix + ".safetensors"):
+        ckpt_path = ckpt_prefix + ".safetensors"
+    else:
+        print("Loading from self-organized training checkpoints rather than released pretrained.")
+        ckpt_path = rel_path + f"/{model_cfg.ckpts.save_dir}/model_{ckpt_step}.pt"
+
+    dtype = torch.float32 if mel_spec_type == "bigvgan" else None
+    model = load_checkpoint(model, ckpt_path, device, dtype=dtype, use_ema=use_ema)
+
+    if not os.path.exists(output_dir) and accelerator.is_main_process:
+        os.makedirs(output_dir)
+
+    # start batch inference
+    accelerator.wait_for_everyone()
+    start = time.time()
+
+    with accelerator.split_between_processes(prompts_all) as prompts:
+        for prompt in tqdm(prompts, disable=not accelerator.is_local_main_process):
+            utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, final_text_list = prompt
+            ref_mels = ref_mels.to(device)
+            ref_mel_lens = torch.tensor(ref_mel_lens, dtype=torch.long).to(device)
+            total_mel_lens = torch.tensor(total_mel_lens, dtype=torch.long).to(device)
+
+            # Inference
+            with torch.inference_mode():
+                generated, _ = model.sample(
+                    cond=ref_mels,
+                    text=final_text_list,
+                    duration=total_mel_lens,
+                    lens=ref_mel_lens,
+                    steps=nfe_step,
+                    cfg_strength=cfg_strength,
+                    sway_sampling_coef=sway_sampling_coef,
+                    no_ref_audio=no_ref_audio,
+                    seed=seed,
+                )
+                # Final result
+                for i, gen in enumerate(generated):
+                    gen = gen[ref_mel_lens[i] : total_mel_lens[i], :].unsqueeze(0)
+                    gen_mel_spec = gen.permute(0, 2, 1).to(torch.float32)
+                    if mel_spec_type == "vocos":
+                        generated_wave = vocoder.decode(gen_mel_spec).cpu()
+                    elif mel_spec_type == "bigvgan":
+                        generated_wave = vocoder(gen_mel_spec).squeeze(0).cpu()
+
+                    if ref_rms_list[i] < target_rms:
+                        generated_wave = generated_wave * ref_rms_list[i] / target_rms
+                    torchaudio.save(f"{output_dir}/{utts[i]}.wav", generated_wave, target_sample_rate)
+
+    accelerator.wait_for_everyone()
+    if accelerator.is_main_process:
+        timediff = time.time() - start
+        print(f"Done batch inference in {timediff / 60:.2f} minutes.")
+
+
+if __name__ == "__main__":
+    main()
src/f5_tts/eval/eval_infer_batch.sh
ADDED
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# e.g. F5-TTS, 16 NFE
+accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_v1_Base" -t "seedtts_test_zh" -nfe 16
+accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_v1_Base" -t "seedtts_test_en" -nfe 16
+accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "F5TTS_v1_Base" -t "ls_pc_test_clean" -nfe 16
+
+# e.g. Vanilla E2 TTS, 32 NFE
+accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -c 1200000 -t "seedtts_test_zh" -o "midpoint" -ss 0
+accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -c 1200000 -t "seedtts_test_en" -o "midpoint" -ss 0
+accelerate launch src/f5_tts/eval/eval_infer_batch.py -s 0 -n "E2TTS_Base" -c 1200000 -t "ls_pc_test_clean" -o "midpoint" -ss 0
+
+# e.g. evaluate F5-TTS 16 NFE result on Seed-TTS test-zh
+python src/f5_tts/eval/eval_seedtts_testset.py -e wer -l zh --gen_wav_dir results/F5TTS_v1_Base_1250000/seedtts_test_zh/seed0_euler_nfe32_vocos_ss-1_cfg2.0_speed1.0 --gpu_nums 8
+python src/f5_tts/eval/eval_seedtts_testset.py -e sim -l zh --gen_wav_dir results/F5TTS_v1_Base_1250000/seedtts_test_zh/seed0_euler_nfe32_vocos_ss-1_cfg2.0_speed1.0 --gpu_nums 8
+python src/f5_tts/eval/eval_utmos.py --audio_dir results/F5TTS_v1_Base_1250000/seedtts_test_zh/seed0_euler_nfe32_vocos_ss-1_cfg2.0_speed1.0
+
+# etc.
src/f5_tts/eval/eval_librispeech_test_clean.py
ADDED
@@ -0,0 +1,89 @@
+# Evaluate with Librispeech test-clean, ~3s prompt to generate 4-10s audio (the way of valle/voicebox evaluation)
+
+import argparse
+import json
+import os
+import sys
+
+
+sys.path.append(os.getcwd())
+
+import multiprocessing as mp
+from importlib.resources import files
+
+import numpy as np
+
+from f5_tts.eval.utils_eval import get_librispeech_test, run_asr_wer, run_sim
+
+
+rel_path = str(files("f5_tts").joinpath("../../"))
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-e", "--eval_task", type=str, default="wer", choices=["sim", "wer"])
+    parser.add_argument("-l", "--lang", type=str, default="en")
+    parser.add_argument("-g", "--gen_wav_dir", type=str, required=True)
+    parser.add_argument("-p", "--librispeech_test_clean_path", type=str, required=True)
+    parser.add_argument("-n", "--gpu_nums", type=int, default=8, help="Number of GPUs to use")
+    parser.add_argument("--local", action="store_true", help="Use local custom checkpoint directory")
+    return parser.parse_args()
+
+
+def main():
+    args = get_args()
+    eval_task = args.eval_task
+    lang = args.lang
+    librispeech_test_clean_path = args.librispeech_test_clean_path  # test-clean path
+    gen_wav_dir = args.gen_wav_dir
+    metalst = rel_path + "/data/librispeech_pc_test_clean_cross_sentence.lst"
+
+    gpus = list(range(args.gpu_nums))
+    test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path)
+
+    ## In LibriSpeech, some speakers utilized varying voice characteristics for different characters in the book,
+    ## leading to a low similarity for the ground truth in some cases.
+    # test_set = get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth = True)  # eval ground truth
+
+    local = args.local
+    if local:  # use local custom checkpoint dir
+        asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3"
+    else:
+        asr_ckpt_dir = ""  # auto download to cache dir
+    wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth"
+
+    # --------------------------------------------------------------------------
+
+    full_results = []
+    metrics = []
+
+    if eval_task == "wer":
+        with mp.Pool(processes=len(gpus)) as pool:
+            args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set]
+            results = pool.map(run_asr_wer, args)
+            for r in results:
+                full_results.extend(r)
+    elif eval_task == "sim":
+        with mp.Pool(processes=len(gpus)) as pool:
+            args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set]
+            results = pool.map(run_sim, args)
+            for r in results:
+                full_results.extend(r)
+    else:
+        raise ValueError(f"Unknown metric type: {eval_task}")
+
+    result_path = f"{gen_wav_dir}/_{eval_task}_results.jsonl"
+    with open(result_path, "w") as f:
+        for line in full_results:
+            metrics.append(line[eval_task])
+            f.write(json.dumps(line, ensure_ascii=False) + "\n")
+        metric = round(np.mean(metrics), 5)
+        f.write(f"\n{eval_task.upper()}: {metric}\n")
+
+    print(f"\nTotal {len(metrics)} samples")
+    print(f"{eval_task.upper()}: {metric}")
+    print(f"{eval_task.upper()} results saved to {result_path}")
+
+
+if __name__ == "__main__":
+    main()
src/f5_tts/eval/eval_seedtts_testset.py
ADDED
@@ -0,0 +1,88 @@
+# Evaluate with Seed-TTS testset
+
+import argparse
+import json
+import os
+import sys
+
+
+sys.path.append(os.getcwd())
+
+import multiprocessing as mp
+from importlib.resources import files
+
+import numpy as np
+
+from f5_tts.eval.utils_eval import get_seed_tts_test, run_asr_wer, run_sim
+
+
+rel_path = str(files("f5_tts").joinpath("../../"))
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-e", "--eval_task", type=str, default="wer", choices=["sim", "wer"])
+    parser.add_argument("-l", "--lang", type=str, default="en", choices=["zh", "en"])
+    parser.add_argument("-g", "--gen_wav_dir", type=str, required=True)
+    parser.add_argument("-n", "--gpu_nums", type=int, default=8, help="Number of GPUs to use")
+    parser.add_argument("--local", action="store_true", help="Use local custom checkpoint directory")
+    return parser.parse_args()
+
+
+def main():
+    args = get_args()
+    eval_task = args.eval_task
+    lang = args.lang
+    gen_wav_dir = args.gen_wav_dir
+    metalst = rel_path + f"/data/seedtts_testset/{lang}/meta.lst"  # seed-tts testset
+
+    # NOTE: the paraformer-zh result will differ slightly with the number of GPUs, because the batch size differs
+    # zh 1.254 seems to be a result of 4 workers for wer_seed_tts
+    gpus = list(range(args.gpu_nums))
+    test_set = get_seed_tts_test(metalst, gen_wav_dir, gpus)
+
+    local = args.local
+    if local:  # use local custom checkpoint dir
+        if lang == "zh":
+            asr_ckpt_dir = "../checkpoints/funasr"  # paraformer-zh dir under funasr
+        elif lang == "en":
+            asr_ckpt_dir = "../checkpoints/Systran/faster-whisper-large-v3"
+    else:
+        asr_ckpt_dir = ""  # auto download to cache dir
+    wavlm_ckpt_dir = "../checkpoints/UniSpeech/wavlm_large_finetune.pth"
+
+    # --------------------------------------------------------------------------
+
+    full_results = []
+    metrics = []
+
+    if eval_task == "wer":
+        with mp.Pool(processes=len(gpus)) as pool:
+            args = [(rank, lang, sub_test_set, asr_ckpt_dir) for (rank, sub_test_set) in test_set]
+            results = pool.map(run_asr_wer, args)
+            for r in results:
+                full_results.extend(r)
+    elif eval_task == "sim":
+        with mp.Pool(processes=len(gpus)) as pool:
+            args = [(rank, sub_test_set, wavlm_ckpt_dir) for (rank, sub_test_set) in test_set]
+            results = pool.map(run_sim, args)
+            for r in results:
+                full_results.extend(r)
+    else:
+        raise ValueError(f"Unknown metric type: {eval_task}")
+
+    result_path = f"{gen_wav_dir}/_{eval_task}_results.jsonl"
+    with open(result_path, "w") as f:
+        for line in full_results:
+            metrics.append(line[eval_task])
+            f.write(json.dumps(line, ensure_ascii=False) + "\n")
+        metric = round(np.mean(metrics), 5)
+        f.write(f"\n{eval_task.upper()}: {metric}\n")
+
+    print(f"\nTotal {len(metrics)} samples")
+    print(f"{eval_task.upper()}: {metric}")
+    print(f"{eval_task.upper()} results saved to {result_path}")
+
+
+if __name__ == "__main__":
+    main()
src/f5_tts/eval/eval_utmos.py
ADDED
@@ -0,0 +1,42 @@
+import argparse
+import json
+from pathlib import Path
+
+import librosa
+import torch
+from tqdm import tqdm
+
+
+def main():
+    parser = argparse.ArgumentParser(description="UTMOS Evaluation")
+    parser.add_argument("--audio_dir", type=str, required=True, help="Audio file path.")
+    parser.add_argument("--ext", type=str, default="wav", help="Audio extension.")
+    args = parser.parse_args()
+
+    device = "cuda" if torch.cuda.is_available() else "xpu" if torch.xpu.is_available() else "cpu"
+
+    predictor = torch.hub.load("tarepan/SpeechMOS:v1.2.0", "utmos22_strong", trust_repo=True)
+    predictor = predictor.to(device)
+
+    audio_paths = list(Path(args.audio_dir).rglob(f"*.{args.ext}"))
+    utmos_score = 0
+
+    utmos_result_path = Path(args.audio_dir) / "_utmos_results.jsonl"
+    with open(utmos_result_path, "w", encoding="utf-8") as f:
+        for audio_path in tqdm(audio_paths, desc="Processing"):
+            wav, sr = librosa.load(audio_path, sr=None, mono=True)
+            wav_tensor = torch.from_numpy(wav).to(device).unsqueeze(0)
+            score = predictor(wav_tensor, sr)
+            line = {}
+            line["wav"], line["utmos"] = str(audio_path.stem), score.item()
+            utmos_score += score.item()
+            f.write(json.dumps(line, ensure_ascii=False) + "\n")
+        avg_score = utmos_score / len(audio_paths) if len(audio_paths) > 0 else 0
+        f.write(f"\nUTMOS: {avg_score:.4f}\n")
+
+    print(f"UTMOS: {avg_score:.4f}")
+    print(f"UTMOS results saved to {utmos_result_path}")
+
+
+if __name__ == "__main__":
+    main()
src/f5_tts/eval/utils_eval.py
ADDED
@@ -0,0 +1,419 @@
+import math
+import os
+import random
+import string
+from pathlib import Path
+
+import torch
+import torch.nn.functional as F
+import torchaudio
+from tqdm import tqdm
+
+from f5_tts.eval.ecapa_tdnn import ECAPA_TDNN_SMALL
+from f5_tts.model.modules import MelSpec
+from f5_tts.model.utils import convert_char_to_pinyin
+
+
+# seedtts testset metainfo: utt, prompt_text, prompt_wav, gt_text, gt_wav
+def get_seedtts_testset_metainfo(metalst):
+    f = open(metalst)
+    lines = f.readlines()
+    f.close()
+    metainfo = []
+    for line in lines:
+        if len(line.strip().split("|")) == 5:
+            utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split("|")
+        elif len(line.strip().split("|")) == 4:
+            utt, prompt_text, prompt_wav, gt_text = line.strip().split("|")
+            gt_wav = os.path.join(os.path.dirname(metalst), "wavs", utt + ".wav")
+        if not os.path.isabs(prompt_wav):
+            prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav)
+        metainfo.append((utt, prompt_text, prompt_wav, gt_text, gt_wav))
+    return metainfo
+
+
+# librispeech test-clean metainfo: gen_utt, ref_txt, ref_wav, gen_txt, gen_wav
+def get_librispeech_test_clean_metainfo(metalst, librispeech_test_clean_path):
+    f = open(metalst)
+    lines = f.readlines()
+    f.close()
+    metainfo = []
+    for line in lines:
+        ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split("\t")
+
+        # ref_txt = ref_txt[0] + ref_txt[1:].lower() + '.'  # if use librispeech test-clean (no-pc)
+        ref_spk_id, ref_chaptr_id, _ = ref_utt.split("-")
+        ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + ".flac")
+
+        # gen_txt = gen_txt[0] + gen_txt[1:].lower() + '.'  # if use librispeech test-clean (no-pc)
+        gen_spk_id, gen_chaptr_id, _ = gen_utt.split("-")
+        gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + ".flac")
+
+        metainfo.append((gen_utt, ref_txt, ref_wav, " " + gen_txt, gen_wav))
+
+    return metainfo
+
+
+# padded to max length mel batch
+def padded_mel_batch(ref_mels):
+    max_mel_length = torch.LongTensor([mel.shape[-1] for mel in ref_mels]).amax()
+    padded_ref_mels = []
+    for mel in ref_mels:
+        padded_ref_mel = F.pad(mel, (0, max_mel_length - mel.shape[-1]), value=0)
+        padded_ref_mels.append(padded_ref_mel)
+    padded_ref_mels = torch.stack(padded_ref_mels)
+    padded_ref_mels = padded_ref_mels.permute(0, 2, 1)
+    return padded_ref_mels
+
+
+# get prompts from metainfo containing: utt, prompt_text, prompt_wav, gt_text, gt_wav
+
+
+def get_inference_prompt(
+    metainfo,
+    speed=1.0,
+    tokenizer="pinyin",
+    polyphone=True,
+    target_sample_rate=24000,
+    n_fft=1024,
+    win_length=1024,
+    n_mel_channels=100,
+    hop_length=256,
+    mel_spec_type="vocos",
+    target_rms=0.1,
+    use_truth_duration=False,
+    infer_batch_size=1,
+    num_buckets=200,
+    min_secs=3,
+    max_secs=40,
+):
+    prompts_all = []
+
+    min_tokens = min_secs * target_sample_rate // hop_length
+    max_tokens = max_secs * target_sample_rate // hop_length
+
+    batch_accum = [0] * num_buckets
+    utts, ref_rms_list, ref_mels, ref_mel_lens, total_mel_lens, final_text_list = (
+        [[] for _ in range(num_buckets)] for _ in range(6)
+    )
+
+    mel_spectrogram = MelSpec(
+        n_fft=n_fft,
+        hop_length=hop_length,
+        win_length=win_length,
+        n_mel_channels=n_mel_channels,
+        target_sample_rate=target_sample_rate,
+        mel_spec_type=mel_spec_type,
+    )
+
+    for utt, prompt_text, prompt_wav, gt_text, gt_wav in tqdm(metainfo, desc="Processing prompts..."):
+        # Audio
+        ref_audio, ref_sr = torchaudio.load(prompt_wav)
+        ref_rms = torch.sqrt(torch.mean(torch.square(ref_audio)))
+        if ref_rms < target_rms:
+            ref_audio = ref_audio * target_rms / ref_rms
+        assert ref_audio.shape[-1] > 5000, f"Empty prompt wav: {prompt_wav}, or torchaudio backend issue."
+        if ref_sr != target_sample_rate:
+            resampler = torchaudio.transforms.Resample(ref_sr, target_sample_rate)
+            ref_audio = resampler(ref_audio)
+
+        # Text
+        if len(prompt_text[-1].encode("utf-8")) == 1:
+            prompt_text = prompt_text + " "
+        text = [prompt_text + gt_text]
+        if tokenizer == "pinyin":
+            text_list = convert_char_to_pinyin(text, polyphone=polyphone)
+        else:
+            text_list = text
+
+        # to mel spectrogram
+        ref_mel = mel_spectrogram(ref_audio)
+        ref_mel = ref_mel.squeeze(0)
+
+        # Duration, mel frame length
+        ref_mel_len = ref_mel.shape[-1]
+
+        if use_truth_duration:
+            gt_audio, gt_sr = torchaudio.load(gt_wav)
+            if gt_sr != target_sample_rate:
+                resampler = torchaudio.transforms.Resample(gt_sr, target_sample_rate)
+                gt_audio = resampler(gt_audio)
+            total_mel_len = ref_mel_len + int(gt_audio.shape[-1] / hop_length / speed)
+
+            # # test vocoder resynthesis
+            # ref_audio = gt_audio
+        else:
+            ref_text_len = len(prompt_text.encode("utf-8"))
+            gen_text_len = len(gt_text.encode("utf-8"))
+            total_mel_len = ref_mel_len + int(ref_mel_len / ref_text_len * gen_text_len / speed)
+
+        # deal with batch
+        assert infer_batch_size > 0, "infer_batch_size should be greater than 0."
+        assert min_tokens <= total_mel_len <= max_tokens, (
+            f"Audio {utt} has duration {total_mel_len * hop_length // target_sample_rate}s out of range [{min_secs}, {max_secs}]."
+        )
+        bucket_i = math.floor((total_mel_len - min_tokens) / (max_tokens - min_tokens + 1) * num_buckets)
+
+        utts[bucket_i].append(utt)
+        ref_rms_list[bucket_i].append(ref_rms)
+        ref_mels[bucket_i].append(ref_mel)
+        ref_mel_lens[bucket_i].append(ref_mel_len)
+        total_mel_lens[bucket_i].append(total_mel_len)
+        final_text_list[bucket_i].extend(text_list)
+
+        batch_accum[bucket_i] += total_mel_len
+
+        if batch_accum[bucket_i] >= infer_batch_size:
+            # print(f"\n{len(ref_mels[bucket_i][0][0])}\n{ref_mel_lens[bucket_i]}\n{total_mel_lens[bucket_i]}")
+            prompts_all.append(
+                (
+                    utts[bucket_i],
+                    ref_rms_list[bucket_i],
+                    padded_mel_batch(ref_mels[bucket_i]),
+                    ref_mel_lens[bucket_i],
+                    total_mel_lens[bucket_i],
+                    final_text_list[bucket_i],
+                )
+            )
+            batch_accum[bucket_i] = 0
+            (
+                utts[bucket_i],
+                ref_rms_list[bucket_i],
+                ref_mels[bucket_i],
+                ref_mel_lens[bucket_i],
+                total_mel_lens[bucket_i],
+                final_text_list[bucket_i],
+            ) = [], [], [], [], [], []
+
+    # add residual
+    for bucket_i, bucket_frames in enumerate(batch_accum):
+        if bucket_frames > 0:
+            prompts_all.append(
+                (
+                    utts[bucket_i],
+                    ref_rms_list[bucket_i],
+                    padded_mel_batch(ref_mels[bucket_i]),
+                    ref_mel_lens[bucket_i],
+                    total_mel_lens[bucket_i],
+                    final_text_list[bucket_i],
+                )
+            )
+    # shuffle, so as not to leave only the easy (short) work for the last workers
+    random.seed(666)
+    random.shuffle(prompts_all)
+
+    return prompts_all
+
+
+# get wav_res_ref_text of seed-tts test metalst
+# https://github.com/BytedanceSpeech/seed-tts-eval
+
+
+def get_seed_tts_test(metalst, gen_wav_dir, gpus):
+    f = open(metalst)
+    lines = f.readlines()
+    f.close()
+
+    test_set_ = []
+    for line in tqdm(lines):
+        if len(line.strip().split("|")) == 5:
+            utt, prompt_text, prompt_wav, gt_text, gt_wav = line.strip().split("|")
+        elif len(line.strip().split("|")) == 4:
+            utt, prompt_text, prompt_wav, gt_text = line.strip().split("|")
+
+        if not os.path.exists(os.path.join(gen_wav_dir, utt + ".wav")):
+            continue
+        gen_wav = os.path.join(gen_wav_dir, utt + ".wav")
+        if not os.path.isabs(prompt_wav):
+            prompt_wav = os.path.join(os.path.dirname(metalst), prompt_wav)
+
+        test_set_.append((gen_wav, prompt_wav, gt_text))
+
+    num_jobs = len(gpus)
+    if num_jobs == 1:
+        return [(gpus[0], test_set_)]
+
+    wav_per_job = len(test_set_) // num_jobs + 1
+    test_set = []
+    for i in range(num_jobs):
+        test_set.append((gpus[i], test_set_[i * wav_per_job : (i + 1) * wav_per_job]))
+
+    return test_set
+
+
+# get librispeech test-clean cross sentence test
+
+
+def get_librispeech_test(metalst, gen_wav_dir, gpus, librispeech_test_clean_path, eval_ground_truth=False):
+    f = open(metalst)
+    lines = f.readlines()
+    f.close()
+
+    test_set_ = []
+    for line in tqdm(lines):
+        ref_utt, ref_dur, ref_txt, gen_utt, gen_dur, gen_txt = line.strip().split("\t")
+
+        if eval_ground_truth:
+            gen_spk_id, gen_chaptr_id, _ = gen_utt.split("-")
+            gen_wav = os.path.join(librispeech_test_clean_path, gen_spk_id, gen_chaptr_id, gen_utt + ".flac")
+        else:
+            if not os.path.exists(os.path.join(gen_wav_dir, gen_utt + ".wav")):
+                raise FileNotFoundError(f"Generated wav not found: {gen_utt}")
+            gen_wav = os.path.join(gen_wav_dir, gen_utt + ".wav")
+
+        ref_spk_id, ref_chaptr_id, _ = ref_utt.split("-")
+        ref_wav = os.path.join(librispeech_test_clean_path, ref_spk_id, ref_chaptr_id, ref_utt + ".flac")
+
+        test_set_.append((gen_wav, ref_wav, gen_txt))
+
+    num_jobs = len(gpus)
+    if num_jobs == 1:
+        return [(gpus[0], test_set_)]
+
+    wav_per_job = len(test_set_) // num_jobs + 1
+    test_set = []
+    for i in range(num_jobs):
+        test_set.append((gpus[i], test_set_[i * wav_per_job : (i + 1) * wav_per_job]))
+
+    return test_set
+
+
+# load asr model
+
+
+def load_asr_model(lang, ckpt_dir=""):
+    if lang == "zh":
+        from funasr import AutoModel
+
+        model = AutoModel(
+            model=os.path.join(ckpt_dir, "paraformer-zh"),
+            # vad_model = os.path.join(ckpt_dir, "fsmn-vad"),
+            # punc_model = os.path.join(ckpt_dir, "ct-punc"),
+            # spk_model = os.path.join(ckpt_dir, "cam++"),
+            disable_update=True,
+        )  # following seed-tts setting
+    elif lang == "en":
+        from faster_whisper import WhisperModel
+
+        model_size = "large-v3" if ckpt_dir == "" else ckpt_dir
+        model = WhisperModel(model_size, device="cuda", compute_type="float16")
+    return model
+
+
+# WER Evaluation, the way Seed-TTS does
+
+
+def run_asr_wer(args):
+    rank, lang, test_set, ckpt_dir = args
+
+    if lang == "zh":
+        import zhconv
+
+        torch.cuda.set_device(rank)
+    elif lang == "en":
+        os.environ["CUDA_VISIBLE_DEVICES"] = str(rank)
+    else:
+        raise NotImplementedError(
+            "lang support only 'zh' (funasr paraformer-zh), 'en' (faster-whisper-large-v3), for now."
+        )
+
+    asr_model = load_asr_model(lang, ckpt_dir=ckpt_dir)
+
+    from zhon.hanzi import punctuation
+
+    punctuation_all = punctuation + string.punctuation
+    wer_results = []
+
+    from jiwer import compute_measures
+
+    for gen_wav, prompt_wav, truth in tqdm(test_set):
+        if lang == "zh":
+            res = asr_model.generate(input=gen_wav, batch_size_s=300, disable_pbar=True)
+            hypo = res[0]["text"]
+            hypo = zhconv.convert(hypo, "zh-cn")
+        elif lang == "en":
+            segments, _ = asr_model.transcribe(gen_wav, beam_size=5, language="en")
+            hypo = ""
+            for segment in segments:
+                hypo = hypo + " " + segment.text
+
+        raw_truth = truth
+        raw_hypo = hypo
+
+        for x in punctuation_all:
+            truth = truth.replace(x, "")
+            hypo = hypo.replace(x, "")
+
+        truth = truth.replace("  ", " ")
+        hypo = hypo.replace("  ", " ")
+
+        if lang == "zh":
+            truth = " ".join([x for x in truth])
+            hypo = " ".join([x for x in hypo])
+        elif lang == "en":
+            truth = truth.lower()
+            hypo = hypo.lower()
+
+        measures = compute_measures(truth, hypo)
+        wer = measures["wer"]
+
+        # ref_list = truth.split(" ")
# subs = measures["substitutions"] / len(ref_list)
|
362 |
+
# dele = measures["deletions"] / len(ref_list)
|
363 |
+
# inse = measures["insertions"] / len(ref_list)
|
364 |
+
|
365 |
+
wer_results.append(
|
366 |
+
{
|
367 |
+
"wav": Path(gen_wav).stem,
|
368 |
+
"truth": raw_truth,
|
369 |
+
"hypo": raw_hypo,
|
370 |
+
"wer": wer,
|
371 |
+
}
|
372 |
+
)
|
373 |
+
|
374 |
+
return wer_results
|
375 |
+
|
376 |
+
|
377 |
+
# SIM Evaluation
|
378 |
+
|
379 |
+
|
380 |
+
def run_sim(args):
|
381 |
+
rank, test_set, ckpt_dir = args
|
382 |
+
device = f"cuda:{rank}"
|
383 |
+
|
384 |
+
model = ECAPA_TDNN_SMALL(feat_dim=1024, feat_type="wavlm_large", config_path=None)
|
385 |
+
state_dict = torch.load(ckpt_dir, weights_only=True, map_location=lambda storage, loc: storage)
|
386 |
+
model.load_state_dict(state_dict["model"], strict=False)
|
387 |
+
|
388 |
+
use_gpu = True if torch.cuda.is_available() else False
|
389 |
+
if use_gpu:
|
390 |
+
model = model.cuda(device)
|
391 |
+
model.eval()
|
392 |
+
|
393 |
+
sim_results = []
|
394 |
+
for gen_wav, prompt_wav, truth in tqdm(test_set):
|
395 |
+
wav1, sr1 = torchaudio.load(gen_wav)
|
396 |
+
wav2, sr2 = torchaudio.load(prompt_wav)
|
397 |
+
|
398 |
+
resample1 = torchaudio.transforms.Resample(orig_freq=sr1, new_freq=16000)
|
399 |
+
resample2 = torchaudio.transforms.Resample(orig_freq=sr2, new_freq=16000)
|
400 |
+
wav1 = resample1(wav1)
|
401 |
+
wav2 = resample2(wav2)
|
402 |
+
|
403 |
+
if use_gpu:
|
404 |
+
wav1 = wav1.cuda(device)
|
405 |
+
wav2 = wav2.cuda(device)
|
406 |
+
with torch.no_grad():
|
407 |
+
emb1 = model(wav1)
|
408 |
+
emb2 = model(wav2)
|
409 |
+
|
410 |
+
sim = F.cosine_similarity(emb1, emb2)[0].item()
|
411 |
+
# print(f"VSim score between two audios: {sim:.4f} (-1.0, 1.0).")
|
412 |
+
sim_results.append(
|
413 |
+
{
|
414 |
+
"wav": Path(gen_wav).stem,
|
415 |
+
"sim": sim,
|
416 |
+
}
|
417 |
+
)
|
418 |
+
|
419 |
+
return sim_results
|
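For orientation, a minimal driver sketch for the helpers above: `get_seed_tts_test` and `run_asr_wer` are the real functions from this file, while the `main` wrapper, its argument values, and the multiprocessing layout are illustrative assumptions (the repo's eval scripts are the authoritative entry points).

```python
import multiprocessing as mp

def main(metalst, gen_wav_dir, asr_ckpt_dir, lang="en", gpus=(0, 1)):
    # (gpu_rank, sub_test_set) pairs, one chunk of utterances per GPU
    jobs = get_seed_tts_test(metalst, gen_wav_dir, list(gpus))
    args = [(rank, lang, sub_set, asr_ckpt_dir) for rank, sub_set in jobs]
    with mp.Pool(processes=len(args)) as pool:
        results = pool.map(run_asr_wer, args)  # each worker returns a list of dicts
    wers = [r["wer"] for chunk in results for r in chunk]
    print(f"Average WER over {len(wers)} utterances: {sum(wers) / len(wers):.4f}")

if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)  # required when workers use CUDA
    main("seedtts_testset/en/meta.lst", "results/gen_wavs", asr_ckpt_dir="")
```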
src/f5_tts/infer/README.md
ADDED
@@ -0,0 +1,177 @@
# Inference

The pretrained model checkpoints can be reached at [🤗 Hugging Face](https://huggingface.co/SWivid/F5-TTS) and [🤖 Model Scope](https://www.modelscope.cn/models/SWivid/F5-TTS_Emilia-ZH-EN), or will be automatically downloaded when running inference scripts.

**More checkpoints contributed by the community can be found in [SHARED.md](SHARED.md), supporting more languages.**

A single generation currently supports **up to 30s**, which is the **total length** of prompt plus output audio (the same logic applies with `fix_duration`). For longer text, `infer_cli` and `infer_gradio` automatically perform chunked generation. A long reference audio will be **clipped to ~12s**.

To avoid possible inference failures, make sure you have read through the following instructions.

- Use a reference audio shorter than 12s and leave some silence (e.g. 1s) at the end. Otherwise it risks being truncated in the middle of a word, leading to suboptimal generation.
- <ins>Uppercase letters</ins> (best written like K.F.C.) are uttered letter by letter, while lowercase letters are read as common words.
- Add spaces (blank: " ") or punctuation (e.g. "," ".") <ins>to explicitly introduce pauses</ins>.
- If an English punctuation mark ends a sentence, make sure a space " " follows it; otherwise it is not treated as a sentence boundary when chunking (see the normalization sketch after this list).
- <ins>Preprocess numbers</ins> into Chinese characters if you want them read in Chinese, otherwise they are read in English.
- If the generated output is blank (pure silence), <ins>check your FFmpeg installation</ins>.
- Try <ins>turning off `use_ema` when using an early-stage</ins> finetuned checkpoint (one trained for only a few updates).

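As a minimal illustration of the spacing and pause advice above, a rough normalization sketch (`normalize_gen_text` is a hypothetical helper, not part of the package):

```python
import re

def normalize_gen_text(text: str) -> str:
    # Ensure a space after sentence-ending punctuation so chunking sees the
    # boundary; skip initialisms like "K.F.C." and decimals like "3.14".
    text = re.sub(r"(?<![A-Z])([.!?])(?=[^\s\d])", r"\1 ", text)
    return re.sub(r" {2,}", " ", text).strip()

print(normalize_gen_text("First sentence.Second one!Third?Done."))
# -> "First sentence. Second one! Third? Done."
```
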
## Gradio App

Currently supported features:

- Basic TTS with Chunk Inference
- Multi-Style / Multi-Speaker Generation
- Voice Chat powered by Qwen2.5-3B-Instruct
- [Custom inference with more language support](SHARED.md)

The CLI command `f5-tts_infer-gradio` is equivalent to `python src/f5_tts/infer/infer_gradio.py`, which launches a Gradio app (web interface) for inference.

The script will load model checkpoints from Hugging Face. You can also manually download the files and update the path passed to `load_model()` in `infer_gradio.py`. Only the TTS model is loaded at first; the ASR model is loaded if `ref_text` is not provided (to transcribe the reference audio), and the LLM is loaded if Voice Chat is used.

More flag options:

```bash
# Automatically launch the interface in the default web browser
f5-tts_infer-gradio --inbrowser

# Set the root path of the application, if it's not served from the root ("/") of the domain
# For example, if the application is served at "https://example.com/myapp"
f5-tts_infer-gradio --root_path "/myapp"
```

It can also be used as a component of a larger application:
```python
import gradio as gr
from f5_tts.infer.infer_gradio import app

with gr.Blocks() as main_app:
    gr.Markdown("# This is an example of using F5-TTS within a bigger Gradio app")

    # ... other Gradio components

    app.render()

main_app.launch()
```


## CLI Inference

The CLI command `f5-tts_infer-cli` is equivalent to `python src/f5_tts/infer/infer_cli.py`, a command-line tool for inference.

The script will load model checkpoints from Hugging Face. You can also manually download the files and use `--ckpt_file` to specify the model you want to load, or update the path directly in `infer_cli.py`.

To use a different vocabulary, pass your own `vocab.txt` file with `--vocab_file`.

Basically, you can run inference with flags:
```bash
# Leaving --ref_text "" will have the ASR model transcribe the reference audio (extra GPU memory usage)
f5-tts_infer-cli \
--model F5TTS_v1_Base \
--ref_audio "ref_audio.wav" \
--ref_text "The content, subtitle or transcription of reference audio." \
--gen_text "Some text you want TTS model generate for you."

# Use BigVGAN as the vocoder. Currently only supports F5TTS_Base.
f5-tts_infer-cli --model F5TTS_Base --vocoder_name bigvgan --load_vocoder_from_local

# Use a custom checkpoint path, e.g.
f5-tts_infer-cli --ckpt_file ckpts/F5TTS_v1_Base/model_1250000.safetensors

# More instructions
f5-tts_infer-cli --help
```

A `.toml` file allows for more flexible usage:

```bash
f5-tts_infer-cli -c custom.toml
```

For example, you can use a `.toml` file to pass in variables; refer to `src/f5_tts/infer/examples/basic/basic.toml`:

```toml
# F5TTS_v1_Base | E2TTS_Base
model = "F5TTS_v1_Base"
ref_audio = "infer/examples/basic/basic_ref_en.wav"
# If an empty "", transcribes the reference audio automatically.
ref_text = "Some call me nature, others call me mother nature."
gen_text = "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring."
# File with text to generate. Ignores the text above.
gen_file = ""
remove_silence = false
output_dir = "tests"
```

You can also leverage a `.toml` file to do multi-style generation; refer to `src/f5_tts/infer/examples/multi/story.toml`.

```toml
# F5TTS_v1_Base | E2TTS_Base
model = "F5TTS_v1_Base"
ref_audio = "infer/examples/multi/main.flac"
# If an empty "", transcribes the reference audio automatically.
ref_text = ""
gen_text = ""
# File with text to generate. Ignores the text above.
gen_file = "infer/examples/multi/story.txt"
remove_silence = true
output_dir = "tests"

[voices.town]
ref_audio = "infer/examples/multi/town.flac"
ref_text = ""

[voices.country]
ref_audio = "infer/examples/multi/country.flac"
ref_text = ""
```
Mark the text with the voice tags `[main]` `[town]` `[country]` wherever you want to switch voice; refer to `src/f5_tts/infer/examples/multi/story.txt`. (A small tag-checking sketch follows.)

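For instance, a quick sanity check that every voice tag used in a story file is actually defined in the accompanying `.toml` (this `check_voice_tags` helper is hypothetical, for illustration only):

```python
import re
import tomllib  # Python 3.11+; use the tomli package on older versions

def check_voice_tags(story_path: str, config_path: str) -> None:
    # Voices are defined under [voices.*]; the reference voice is the implicit "main".
    with open(config_path, "rb") as f:
        config = tomllib.load(f)
    known = {"main", *config.get("voices", {})}
    with open(story_path, encoding="utf-8") as f:
        used = set(re.findall(r"\[(\w+)\]", f.read()))
    unknown = used - known
    if unknown:
        raise ValueError(f"Story uses undefined voice tags: {sorted(unknown)}")

check_voice_tags("infer/examples/multi/story.txt", "infer/examples/multi/story.toml")
```
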
## API Usage

```python
from importlib.resources import files
from f5_tts.api import F5TTS

f5tts = F5TTS()
wav, sr, spec = f5tts.infer(
    ref_file=str(files("f5_tts").joinpath("infer/examples/basic/basic_ref_en.wav")),
    ref_text="some call me nature, others call me mother nature.",
    gen_text="""I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring. Respect me and I'll nurture you; ignore me and you shall face the consequences.""",
    file_wave=str(files("f5_tts").joinpath("../../tests/api_out.wav")),
    file_spec=str(files("f5_tts").joinpath("../../tests/api_out.png")),
    seed=None,
)
```
Check [api.py](../api.py) for more details.

## TensorRT-LLM Deployment

See the [detailed instructions](../runtime/triton_trtllm/README.md) for more information.

## Socket Real-time Service

Real-time voice output with chunked streaming:

```bash
# Start the socket server
python src/f5_tts/socket_server.py

# If PyAudio is not installed
sudo apt-get install portaudio19-dev
pip install pyaudio

# Communicate via the socket client
python src/f5_tts/socket_client.py
```

## Speech Editing

To test speech editing capabilities, use the following command:

```bash
python src/f5_tts/infer/speech_edit.py
```
src/f5_tts/infer/SHARED.md
ADDED
@@ -0,0 +1,193 @@
<!-- omit in toc -->
# Shared Model Cards

<!-- omit in toc -->
### **Prerequisites of using**
- This document serves as a quick lookup table for community training/finetuning results, covering various languages.
- The models in this repository are open source and are based on voluntary contributions from their creators.
- Use of these models is conditioned on respect for their respective creators; the convenience they bring comes from the creators' efforts.

<!-- omit in toc -->
### **Welcome to share here**
- Have a pretrained/finetuned result: a model checkpoint (best pruned to facilitate inference, i.e. keeping only `ema_model_state_dict`; a pruning sketch follows this list) and the corresponding vocab file (for tokenization).
- Host a public [huggingface model repository](https://huggingface.co/new) and upload the model-related files.
- Make a pull request adding a model card to the current page, i.e. `src\f5_tts\infer\SHARED.md`.

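A minimal pruning sketch, assuming the usual training checkpoint layout with an `ema_model_state_dict` entry (the key names and safetensors round-trip here are illustrative, not a fixed repo API):

```python
import torch
from safetensors.torch import save_file

ckpt = torch.load("model_last.pt", map_location="cpu", weights_only=True)
ema_state = ckpt["ema_model_state_dict"]  # assumed key, as saved during training
# Drop EMA bookkeeping entries if present; keep only the model weights.
ema_state = {k: v for k, v in ema_state.items() if k not in ("initted", "step")}
save_file(ema_state, "model_pruned.safetensors")
```
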
<!-- omit in toc -->
### Supported Languages
- [Multilingual](#multilingual)
  - [F5-TTS v1 v0 Base @ zh \& en @ F5-TTS](#f5-tts-v1-v0-base--zh--en--f5-tts)
- [English](#english)
- [Finnish](#finnish)
  - [F5-TTS Base @ fi @ AsmoKoskinen](#f5-tts-base--fi--asmokoskinen)
- [French](#french)
  - [F5-TTS Base @ fr @ RASPIAUDIO](#f5-tts-base--fr--raspiaudio)
- [German](#german)
  - [F5-TTS Base @ de @ hvoss-techfak](#f5-tts-base--de--hvoss-techfak)
- [Hindi](#hindi)
  - [F5-TTS Small @ hi @ SPRINGLab](#f5-tts-small--hi--springlab)
- [Italian](#italian)
  - [F5-TTS Base @ it @ alien79](#f5-tts-base--it--alien79)
- [Japanese](#japanese)
  - [F5-TTS Base @ ja @ Jmica](#f5-tts-base--ja--jmica)
- [Mandarin](#mandarin)
- [Russian](#russian)
  - [F5-TTS Base @ ru @ HotDro4illa](#f5-tts-base--ru--hotdro4illa)
- [Spanish](#spanish)
  - [F5-TTS Base @ es @ jpgallegoar](#f5-tts-base--es--jpgallegoar)


## Multilingual

#### F5-TTS v1 v0 Base @ zh & en @ F5-TTS
|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS v1 Base|[ckpt & vocab](https://huggingface.co/SWivid/F5-TTS/tree/main/F5TTS_v1_Base)|[Emilia 95K zh&en](https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07)|cc-by-nc-4.0|

```bash
Model: hf://SWivid/F5-TTS/F5TTS_v1_Base/model_1250000.safetensors
# A Variant Model: hf://SWivid/F5-TTS/F5TTS_v1_Base_no_zero_init/model_1250000.safetensors
Vocab: hf://SWivid/F5-TTS/F5TTS_v1_Base/vocab.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "conv_layers": 4}
```

|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/SWivid/F5-TTS/tree/main/F5TTS_Base)|[Emilia 95K zh&en](https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07)|cc-by-nc-4.0|

```bash
Model: hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.safetensors
Vocab: hf://SWivid/F5-TTS/F5TTS_Base/vocab.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```

*Other infos, e.g. Author info, Github repo, Link to some sampled results, Usage instruction, Tutorial (Blog, Video, etc.) ...*

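To use one of the `hf://` entries above without relying on automatic download, the files can be fetched locally first; a minimal sketch using standard `huggingface_hub` calls (paths taken from the model card above):

```python
from huggingface_hub import hf_hub_download

# hf://SWivid/F5-TTS/F5TTS_v1_Base/... maps to repo_id plus filename:
ckpt_path = hf_hub_download(repo_id="SWivid/F5-TTS", filename="F5TTS_v1_Base/model_1250000.safetensors")
vocab_path = hf_hub_download(repo_id="SWivid/F5-TTS", filename="F5TTS_v1_Base/vocab.txt")
print(ckpt_path, vocab_path)
# The local paths can then be passed to the CLI:
#   f5-tts_infer-cli --ckpt_file <ckpt_path> --vocab_file <vocab_path> ...
```
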

## English


## Finnish

#### F5-TTS Base @ fi @ AsmoKoskinen
|Model|🤗Hugging Face|Data|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/AsmoKoskinen/F5-TTS_Finnish_Model)|[Common Voice](https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0), [Vox Populi](https://huggingface.co/datasets/facebook/voxpopuli)|cc-by-nc-4.0|

```bash
Model: hf://AsmoKoskinen/F5-TTS_Finnish_Model/model_common_voice_fi_vox_populi_fi_20241206.safetensors
Vocab: hf://AsmoKoskinen/F5-TTS_Finnish_Model/vocab.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```


## French

#### F5-TTS Base @ fr @ RASPIAUDIO
|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/RASPIAUDIO/F5-French-MixedSpeakers-reduced)|[LibriVox](https://librivox.org/)|cc-by-nc-4.0|

```bash
Model: hf://RASPIAUDIO/F5-French-MixedSpeakers-reduced/model_last_reduced.pt
Vocab: hf://RASPIAUDIO/F5-French-MixedSpeakers-reduced/vocab.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```

- [Online inference with Hugging Face Space](https://huggingface.co/spaces/RASPIAUDIO/f5-tts_french).
- [Tutorial video on training a new language model](https://www.youtube.com/watch?v=UO4usaOojys).
- [Discussion about this training can be found here](https://github.com/SWivid/F5-TTS/issues/434).


## German

#### F5-TTS Base @ de @ hvoss-techfak
|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/hvoss-techfak/F5-TTS-German)|[Mozilla Common Voice 19.0](https://commonvoice.mozilla.org/en/datasets) & 800 hours crowdsourced|cc-by-nc-4.0|

```bash
Model: hf://hvoss-techfak/F5-TTS-German/model_f5tts_german.pt
Vocab: hf://hvoss-techfak/F5-TTS-German/vocab.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```

- Finetuned by [@hvoss-techfak](https://github.com/hvoss-techfak)


## Hindi

#### F5-TTS Small @ hi @ SPRINGLab
|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Small|[ckpt & vocab](https://huggingface.co/SPRINGLab/F5-Hindi-24KHz)|[IndicTTS Hi](https://huggingface.co/datasets/SPRINGLab/IndicTTS-Hindi) & [IndicVoices-R Hi](https://huggingface.co/datasets/SPRINGLab/IndicVoices-R_Hindi)|cc-by-4.0|

```bash
Model: hf://SPRINGLab/F5-Hindi-24KHz/model_2500000.safetensors
Vocab: hf://SPRINGLab/F5-Hindi-24KHz/vocab.txt
Config: {"dim": 768, "depth": 18, "heads": 12, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```

- Authors: SPRING Lab, Indian Institute of Technology, Madras
- Website: https://asr.iitm.ac.in/

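The Hindi card uses a smaller backbone (`"dim": 768, "depth": 18`) than the Base cards above. As a sketch of how such a `Config` line maps onto model construction, assuming the `DiT` backbone exported by `f5_tts.model` accepts these keys as keyword arguments; the vocab size (2545 symbols, matching `infer/examples/vocab.txt`) and mel channel count below are illustrative:

```python
from f5_tts.model import DiT

config = {"dim": 768, "depth": 18, "heads": 12, "ff_mult": 2, "text_dim": 512,
          "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
# text_num_embeds should match the size of the accompanying vocab.txt;
# mel_dim is the number of mel channels the model was trained with.
model = DiT(**config, text_num_embeds=2545, mel_dim=100)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")
```
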
## Italian

#### F5-TTS Base @ it @ alien79
|Model|🤗Hugging Face|Data|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/alien79/F5-TTS-italian)|[ylacombe/cml-tts](https://huggingface.co/datasets/ylacombe/cml-tts)|cc-by-nc-4.0|

```bash
Model: hf://alien79/F5-TTS-italian/model_159600.safetensors
Vocab: hf://alien79/F5-TTS-italian/vocab.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```

- Trained by [Mithril Man](https://github.com/MithrilMan)
- Model details on the [HF project home](https://huggingface.co/alien79/F5-TTS-italian)
- Open to collaborations to further improve the model


## Japanese

#### F5-TTS Base @ ja @ Jmica
|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/Jmica/F5TTS/tree/main/JA_21999120)|[Emilia 1.7k JA](https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07) & [Galgame Dataset 5.4k](https://huggingface.co/datasets/OOPPEENN/Galgame_Dataset)|cc-by-nc-4.0|

```bash
Model: hf://Jmica/F5TTS/JA_21999120/model_21999120.pt
Vocab: hf://Jmica/F5TTS/JA_21999120/vocab_japanese.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```


## Mandarin


## Russian

#### F5-TTS Base @ ru @ HotDro4illa
|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/hotstone228/F5-TTS-Russian)|[Common Voice](https://huggingface.co/datasets/mozilla-foundation/common_voice_17_0)|cc-by-nc-4.0|

```bash
Model: hf://hotstone228/F5-TTS-Russian/model_last.safetensors
Vocab: hf://hotstone228/F5-TTS-Russian/vocab.txt
Config: {"dim": 1024, "depth": 22, "heads": 16, "ff_mult": 2, "text_dim": 512, "text_mask_padding": False, "conv_layers": 4, "pe_attn_head": 1}
```
- Finetuned by [HotDro4illa](https://github.com/HotDro4illa)
- Any improvements are welcome


## Spanish

#### F5-TTS Base @ es @ jpgallegoar
|Model|🤗Hugging Face|Data (Hours)|Model License|
|:---:|:------------:|:-----------:|:-------------:|
|F5-TTS Base|[ckpt & vocab](https://huggingface.co/jpgallegoar/F5-Spanish)|[Voxpopuli](https://huggingface.co/datasets/facebook/voxpopuli) & crowdsourced & TEDx, 218 hours|cc0-1.0|

- @jpgallegoar's [GitHub repo](https://github.com/jpgallegoar/Spanish-F5), with Jupyter Notebook and Gradio usage for the Spanish model.
src/f5_tts/infer/examples/basic/basic.toml
ADDED
@@ -0,0 +1,11 @@
# F5TTS_v1_Base | E2TTS_Base
model = "F5TTS_v1_Base"
ref_audio = "infer/examples/basic/basic_ref_en.wav"
# If an empty "", transcribes the reference audio automatically.
ref_text = "Some call me nature, others call me mother nature."
gen_text = "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring."
# File with text to generate. Ignores the text above.
gen_file = ""
remove_silence = false
output_dir = "tests"
output_file = "infer_cli_basic.wav"
src/f5_tts/infer/examples/basic/basic_ref_en.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0e22048e72414fcc1e6b6342e47a774d748a195ed34e4a5b3fcf416707f2b71
size 256018
src/f5_tts/infer/examples/basic/basic_ref_zh.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96724a113240d1f82c6ded1334122f0176b96c9226ccd3c919e625bcfd2a3ede
size 324558
src/f5_tts/infer/examples/multi/country.flac
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb15708b4b3875e37beec46591a5d89e1a9a63fdad3b8fe4a5c8738f4f554400
size 180321
src/f5_tts/infer/examples/multi/main.flac
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4abb1107771ce7e14926fde879b959dde6db6e572476b98684f04e45e978ab19
size 279219
src/f5_tts/infer/examples/multi/story.toml
ADDED
@@ -0,0 +1,20 @@
# F5TTS_v1_Base | E2TTS_Base
model = "F5TTS_v1_Base"
ref_audio = "infer/examples/multi/main.flac"
# If an empty "", transcribes the reference audio automatically.
ref_text = ""
gen_text = ""
# File with text to generate. Ignores the text above.
gen_file = "infer/examples/multi/story.txt"
remove_silence = true
output_dir = "tests"
output_file = "infer_cli_story.wav"

[voices.town]
ref_audio = "infer/examples/multi/town.flac"
ref_text = ""
speed = 0.8  # overrides the global speed for this voice

[voices.country]
ref_audio = "infer/examples/multi/country.flac"
ref_text = ""
src/f5_tts/infer/examples/multi/story.txt
ADDED
@@ -0,0 +1 @@
A Town Mouse and a Country Mouse were acquaintances, and the Country Mouse one day invited his friend to come and see him at his home in the fields. The Town Mouse came, and they sat down to a dinner of barleycorns and roots, the latter of which had a distinctly earthy flavour. The fare was not much to the taste of the guest, and presently he broke out with [town] "My poor dear friend, you live here no better than the ants! Now, you should just see how I fare! My larder is a regular horn of plenty. You must come and stay with me, and I promise you you shall live on the fat of the land." [main] So when he returned to town he took the Country Mouse with him, and showed him into a larder containing flour and oatmeal and figs and honey and dates. The Country Mouse had never seen anything like it, and sat down to enjoy the luxuries his friend provided: but before they had well begun, the door of the larder opened and someone came in. The two Mice scampered off and hid themselves in a narrow and exceedingly uncomfortable hole. Presently, when all was quiet, they ventured out again; but someone else came in, and off they scuttled again. This was too much for the visitor. [country] "Goodbye," [main] said he, [country] "I'm off. You live in the lap of luxury, I can see, but you are surrounded by dangers; whereas at home I can enjoy my simple dinner of roots and corn in peace."
src/f5_tts/infer/examples/multi/town.flac
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e7d069b8ebd5180c3b30fde5d378f0a1ddac96722d62cf43537efc3c3f3a3ce8
size 229383
src/f5_tts/infer/examples/vocab.txt
ADDED
@@ -0,0 +1,2545 @@
 
!
"
#
$
%
&
'
(
)
*
+
,
-
.
/
0
1
2
3
4
5
6
7
8
9
:
;
=
>
?
@
A
B
C
D
E
F
G
H
I
J
K
L
M
N
O
P
Q
R
S
T
U
V
W
X
Y
Z
[
\
]
_
a
a1
ai1
ai2
ai3
ai4
an1
an3
an4
ang1
ang2
ang4
ao1
ao2
ao3
ao4
b
ba
ba1
ba2
ba3
ba4
bai1
bai2
bai3
bai4
ban1
ban2
ban3
ban4
bang1
bang2
bang3
bang4
bao1
bao2
bao3
bao4
bei
bei1
bei2
bei3
bei4
ben1
ben2
ben3
ben4
beng
beng1
beng2
beng3
beng4
bi1
bi2
bi3
bi4
bian1
bian2
bian3
bian4
biao1
biao2
biao3
bie1
bie2
bie3
bie4
bin1
bin4
bing1
bing2
bing3
bing4
bo
bo1
bo2
bo3
bo4
bu2
bu3
bu4
c
ca1
cai1
cai2
cai3
cai4
can1
can2
can3
can4
cang1
cang2
cao1
cao2
cao3
ce4
cen1
cen2
ceng1
ceng2
ceng4
cha1
cha2
cha3
cha4
chai1
chai2
chan1
chan2
chan3
chan4
chang1
chang2
chang3
chang4
chao1
chao2
chao3
che1
che2
che3
che4
chen1
chen2
chen3
chen4
cheng1
cheng2
cheng3
cheng4
chi1
chi2
chi3
chi4
chong1
chong2
chong3
chong4
chou1
chou2
chou3
chou4
chu1
chu2
chu3
chu4
chua1
chuai1
chuai2
chuai3
chuai4
chuan1
chuan2
chuan3
chuan4
chuang1
chuang2
chuang3
chuang4
chui1
chui2
chun1
chun2
chun3
chuo1
chuo4
ci1
ci2
ci3
ci4
cong1
cong2
cou4
cu1
cu4
cuan1
cuan2
cuan4
cui1
cui3
cui4
cun1
cun2
cun4
cuo1
cuo2
cuo4
d
da
da1
da2
da3
da4
dai1
dai2
dai3
dai4
dan1
dan2
dan3
dan4
dang1
dang2
dang3
dang4
dao1
dao2
dao3
dao4
de
de1
de2
dei3
den4
deng1
deng2
deng3
deng4
di1
di2
di3
di4
dia3
dian1
dian2
dian3
dian4
diao1
diao3
diao4
die1
die2
die4
ding1
ding2
ding3
ding4
diu1
dong1
dong3
dong4
dou1
dou2
dou3
dou4
du1
du2
du3
du4
duan1
duan2
duan3
duan4
dui1
dui4
dun1
dun3
dun4
duo1
duo2
duo3
duo4
e
e1
e2
e3
e4
ei2
en1
en4
er
er2
er3
er4
f
fa1
fa2
fa3
fa4
fan1
fan2
fan3
fan4
fang1
fang2
fang3
fang4
fei1
fei2
fei3
fei4
fen1
fen2
fen3
fen4
feng1
feng2
feng3
feng4
fo2
fou2
fou3
fu1
fu2
fu3
fu4
g
ga1
ga2
ga3
ga4
gai1
gai2
gai3
gai4
gan1
gan2
gan3
gan4
gang1
gang2
gang3
gang4
gao1
gao2
gao3
gao4
ge1
ge2
ge3
ge4
gei2
gei3
gen1
gen2
gen3
gen4
geng1
geng3
geng4
gong1
gong3
gong4
gou1
gou2
gou3
gou4
gu
gu1
gu2
gu3
gu4
gua1
gua2
gua3
gua4
guai1
guai2
guai3
guai4
guan1
guan2
guan3
guan4
guang1
guang2
guang3
guang4
gui1
gui2
gui3
gui4
gun3
gun4
guo1
guo2
guo3
guo4
h
ha1
ha2
ha3
hai1
hai2
hai3
hai4
han1
han2
han3
han4
hang1
hang2
hang4
hao1
hao2
hao3
hao4
he1
he2
he4
hei1
hen2
hen3
hen4
heng1
heng2
heng4
hong1
hong2
hong3
hong4
hou1
hou2
hou3
hou4
hu1
hu2
hu3
hu4
hua1
hua2
hua4
huai2
huai4
huan1
huan2
huan3
huan4
huang1
huang2
huang3
huang4
hui1
hui2
hui3
hui4
hun1
hun2
hun4
huo
huo1
huo2
huo3
huo4
i
j
ji1
ji2
ji3
ji4
jia
jia1
jia2
jia3
jia4
jian1
jian2
jian3
jian4
jiang1
jiang2
jiang3
jiang4
jiao1
jiao2
jiao3
jiao4
jie1
jie2
jie3
jie4
jin1
jin2
jin3
jin4
jing1
jing2
jing3
jing4
jiong3
jiu1
jiu2
jiu3
jiu4
ju1
ju2
ju3
ju4
juan1
juan2
juan3
juan4
jue1
jue2
jue4
jun1
jun4
k
ka1
ka2
ka3
kai1
kai2
kai3
kai4
kan1
kan2
kan3
kan4
kang1
kang2
kang4
kao1
kao2
kao3
kao4
ke1
ke2
ke3
ke4
ken3
keng1
kong1
kong3
kong4
kou1
kou2
kou3
kou4
ku1
ku2
ku3
ku4
kua1
kua3
kua4
kuai3
kuai4
kuan1
kuan2
kuan3
kuang1
kuang2
kuang4
kui1
kui2
kui3
kui4
kun1
kun3
kun4
kuo4
l
la
la1
la2
la3
la4
lai2
lai4
lan2
lan3
lan4
lang1
lang2
lang3
lang4
lao1
lao2
lao3
lao4
le
le1
le4
lei
lei1
lei2
lei3
lei4
leng1
leng2
leng3
leng4
li
li1
li2
li3
li4
lia3
lian2
lian3
lian4
liang2
liang3
liang4
liao1
liao2
liao3
liao4
lie1
lie2
lie3
lie4
lin1
lin2
lin3
lin4
ling2
ling3
ling4
liu1
liu2
liu3
liu4
long1
long2
long3
long4
lou1
lou2
lou3
lou4
lu1
lu2
lu3
lu4
luan2
luan3
luan4
lun1
lun2
lun4
luo1
luo2
luo3
luo4
lv2
lv3
lv4
lve3
lve4
m
ma
ma1
ma2
ma3
ma4
mai2
mai3
mai4
man1
man2
man3
man4
mang2
mang3
mao1
mao2
mao3
mao4
me
mei2
mei3
mei4
men
men1
men2
men4
meng
meng1
meng2
meng3
meng4
mi1
mi2
mi3
mi4
mian2
mian3
mian4
miao1
miao2
miao3
miao4
mie1
mie4
min2
min3
ming2
ming3
ming4
miu4
mo1
mo2
mo3
mo4
mou1
mou2
mou3
mu2
mu3
mu4
n
n2
na1
na2
na3
na4
nai2
nai3
nai4
nan1
nan2
nan3
nan4
nang1
nang2
nang3
nao1
nao2
nao3
nao4
ne
ne2
ne4
nei3
nei4
nen4
neng2
ni1
ni2
ni3
ni4
nian1
nian2
nian3
nian4
niang2
niang4
niao2
niao3
niao4
nie1
nie4
nin2
ning2
ning3
ning4
niu1
niu2
niu3
niu4
nong2
nong4
nou4
nu2
nu3
nu4
nuan3
nuo2
nuo4
nv2
nv3
nve4
o
o1
o2
ou1
ou2
ou3
ou4
p
pa1
pa2
pa4
pai1
pai2
pai3
pai4
pan1
pan2
pan4
pang1
pang2
pang4
pao1
pao2
pao3
pao4
pei1
pei2
pei4
pen1
pen2
pen4
peng1
peng2
peng3
peng4
pi1
pi2
pi3
pi4
pian1
pian2
pian4
piao1
piao2
piao3
piao4
pie1
pie2
pie3
pin1
pin2
pin3
pin4
ping1
ping2
po1
po2
po3
po4
pou1
pu1
pu2
pu3
pu4
q
qi1
qi2
qi3
qi4
qia1
qia3
qia4
qian1
qian2
qian3
qian4
qiang1
qiang2
qiang3
qiang4
qiao1
qiao2
qiao3
qiao4
qie1
qie2
qie3
qie4
qin1
qin2
qin3
qin4
qing1
qing2
qing3
qing4
qiong1
qiong2
qiu1
qiu2
qiu3
qu1
qu2
qu3
qu4
quan1
quan2
quan3
quan4
que1
que2
que4
qun2
r
ran2
ran3
rang1
rang2
rang3
rang4
rao2
rao3
rao4
re2
re3
re4
ren2
ren3
ren4
reng1
reng2
ri4
rong1
rong2
rong3
rou2
rou4
ru2
ru3
ru4
ruan2
ruan3
rui3
rui4
run4
ruo4
s
sa1
sa2
sa3
sa4
sai1
sai4
san1
san2
san3
san4
sang1
sang3
sang4
sao1
sao2
sao3
sao4
se4
sen1
seng1
sha1
sha2
sha3
sha4
shai1
shai2
shai3
shai4
shan1
shan3
shan4
shang
shang1
shang3
shang4
shao1
shao2
shao3
shao4
she1
she2
she3
she4
shei2
shen1
shen2
shen3
shen4
sheng1
sheng2
sheng3
sheng4
shi
shi1
shi2
shi3
shi4
shou1
shou2
shou3
shou4
shu1
shu2
shu3
shu4
shua1
shua2
shua3
shua4
shuai1
shuai3
shuai4
shuan1
shuan4
shuang1
shuang3
shui2
shui3
shui4
shun3
shun4
shuo1
shuo4
si1
si2
si3
si4
song1
song3
song4
sou1
sou3
sou4
su1
su2
su4
suan1
suan4
sui1
sui2
sui3
sui4
sun1
sun3
suo
suo1
suo2
suo3
t
ta1
ta2
ta3
ta4
tai1
tai2
tai4
tan1
tan2
tan3
tan4
tang1
tang2
tang3
tang4
tao1
tao2
tao3
tao4
te4
teng2
ti1
ti2
ti3
ti4
tian1
tian2
tian3
tiao1
tiao2
tiao3
tiao4
tie1
tie2
tie3
tie4
ting1
ting2
ting3
tong1
tong2
tong3
tong4
tou
tou1
tou2
tou4
tu1
tu2
tu3
tu4
tuan1
tuan2
tui1
tui2
tui3
tui4
tun1
tun2
tun4
tuo1
tuo2
tuo3
tuo4
u
v
w
wa
wa1
wa2
wa3
wa4
wai1
wai3
wai4
wan1
wan2
wan3
wan4
wang1
wang2
wang3
wang4
wei1
wei2
wei3
wei4
wen1
wen2
wen3
wen4
weng1
weng4
wo1
wo2
wo3
wo4
wu1
wu2
wu3
wu4
x
xi1
xi2
xi3
xi4
xia1
xia2
xia4
xian1
xian2
xian3
xian4
xiang1
xiang2
xiang3
xiang4
xiao1
xiao2
xiao3
xiao4
xie1
xie2
xie3
xie4
xin1
xin2
xin4
xing1
xing2
xing3
xing4
xiong1
xiong2
xiu1
xiu3
xiu4
xu
xu1
xu2
xu3
xu4
xuan1
xuan2
xuan3
xuan4
xue1
xue2
xue3
xue4
xun1
xun2
xun4
y
ya
ya1
ya2
ya3
ya4
yan1
yan2
yan3
yan4
yang1
yang2
yang3
yang4
yao1
yao2
yao3
yao4
ye1
ye2
ye3
ye4
yi
yi1
yi2
yi3
yi4
yin1
yin2
yin3
yin4
ying1
ying2
ying3
ying4
yo1
yong1
yong2
yong3
yong4
you1
you2
you3
you4
yu1
yu2
yu3
yu4
yuan1
yuan2
yuan3
yuan4
yue1
yue4
yun1
yun2
yun3
yun4
z
za1
za2
za3
zai1
zai3
zai4
zan1
zan2
zan3
zan4
zang1
zang4
1308 |
+
zao1
|
1309 |
+
zao2
|
1310 |
+
zao3
|
1311 |
+
zao4
|
1312 |
+
ze2
|
1313 |
+
ze4
|
1314 |
+
zei2
|
1315 |
+
zen3
|
1316 |
+
zeng1
|
1317 |
+
zeng4
|
1318 |
+
zha1
|
1319 |
+
zha2
|
1320 |
+
zha3
|
1321 |
+
zha4
|
1322 |
+
zhai1
|
1323 |
+
zhai2
|
1324 |
+
zhai3
|
1325 |
+
zhai4
|
1326 |
+
zhan1
|
1327 |
+
zhan2
|
1328 |
+
zhan3
|
1329 |
+
zhan4
|
1330 |
+
zhang1
|
1331 |
+
zhang2
|
1332 |
+
zhang3
|
1333 |
+
zhang4
|
1334 |
+
zhao1
|
1335 |
+
zhao2
|
1336 |
+
zhao3
|
1337 |
+
zhao4
|
1338 |
+
zhe
|
1339 |
+
zhe1
|
1340 |
+
zhe2
|
1341 |
+
zhe3
|
1342 |
+
zhe4
|
1343 |
+
zhen1
|
1344 |
+
zhen2
|
1345 |
+
zhen3
|
1346 |
+
zhen4
|
1347 |
+
zheng1
|
1348 |
+
zheng2
|
1349 |
+
zheng3
|
1350 |
+
zheng4
|
1351 |
+
zhi1
|
1352 |
+
zhi2
|
1353 |
+
zhi3
|
1354 |
+
zhi4
|
1355 |
+
zhong1
|
1356 |
+
zhong2
|
1357 |
+
zhong3
|
1358 |
+
zhong4
|
1359 |
+
zhou1
|
1360 |
+
zhou2
|
1361 |
+
zhou3
|
1362 |
+
zhou4
|
1363 |
+
zhu1
|
1364 |
+
zhu2
|
1365 |
+
zhu3
|
1366 |
+
zhu4
|
1367 |
+
zhua1
|
1368 |
+
zhua2
|
1369 |
+
zhua3
|
1370 |
+
zhuai1
|
1371 |
+
zhuai3
|
1372 |
+
zhuai4
|
1373 |
+
zhuan1
|
1374 |
+
zhuan2
|
1375 |
+
zhuan3
|
1376 |
+
zhuan4
|
1377 |
+
zhuang1
|
1378 |
+
zhuang4
|
1379 |
+
zhui1
|
1380 |
+
zhui4
|
1381 |
+
zhun1
|
1382 |
+
zhun2
|
1383 |
+
zhun3
|
1384 |
+
zhuo1
|
1385 |
+
zhuo2
|
1386 |
+
zi
|
1387 |
+
zi1
|
1388 |
+
zi2
|
1389 |
+
zi3
|
1390 |
+
zi4
|
1391 |
+
zong1
|
1392 |
+
zong2
|
1393 |
+
zong3
|
1394 |
+
zong4
|
1395 |
+
zou1
|
1396 |
+
zou2
|
1397 |
+
zou3
|
1398 |
+
zou4
|
1399 |
+
zu1
|
1400 |
+
zu2
|
1401 |
+
zu3
|
1402 |
+
zuan1
|
1403 |
+
zuan3
|
1404 |
+
zuan4
|
1405 |
+
zui2
|
1406 |
+
zui3
|
1407 |
+
zui4
|
1408 |
+
zun1
|
1409 |
+
zuo
|
1410 |
+
zuo1
|
1411 |
+
zuo2
|
1412 |
+
zuo3
|
1413 |
+
zuo4
|
1414 |
+
{
|
1415 |
+
~
|
1416 |
+
¡
|
1417 |
+
¢
|
1418 |
+
£
|
1419 |
+
¥
|
1420 |
+
§
|
1421 |
+
¨
|
1422 |
+
©
|
1423 |
+
«
|
1424 |
+
®
|
1425 |
+
¯
|
1426 |
+
°
|
1427 |
+
±
|
1428 |
+
²
|
1429 |
+
³
|
1430 |
+
´
|
1431 |
+
µ
|
1432 |
+
·
|
1433 |
+
¹
|
1434 |
+
º
|
1435 |
+
»
|
1436 |
+
¼
|
1437 |
+
½
|
1438 |
+
¾
|
1439 |
+
¿
|
1440 |
+
À
|
1441 |
+
Á
|
1442 |
+
Â
|
1443 |
+
Ã
|
1444 |
+
Ä
|
1445 |
+
Å
|
1446 |
+
Æ
|
1447 |
+
Ç
|
1448 |
+
È
|
1449 |
+
É
|
1450 |
+
Ê
|
1451 |
+
Í
|
1452 |
+
Î
|
1453 |
+
Ñ
|
1454 |
+
Ó
|
1455 |
+
Ö
|
1456 |
+
×
|
1457 |
+
Ø
|
1458 |
+
Ú
|
1459 |
+
Ü
|
1460 |
+
Ý
|
1461 |
+
Þ
|
1462 |
+
ß
|
1463 |
+
à
|
1464 |
+
á
|
1465 |
+
â
|
1466 |
+
ã
|
1467 |
+
ä
|
1468 |
+
å
|
1469 |
+
æ
|
1470 |
+
ç
|
1471 |
+
è
|
1472 |
+
é
|
1473 |
+
ê
|
1474 |
+
ë
|
1475 |
+
ì
|
1476 |
+
í
|
1477 |
+
î
|
1478 |
+
ï
|
1479 |
+
ð
|
1480 |
+
ñ
|
1481 |
+
ò
|
1482 |
+
ó
|
1483 |
+
ô
|
1484 |
+
õ
|
1485 |
+
ö
|
1486 |
+
ø
|
1487 |
+
ù
|
1488 |
+
ú
|
1489 |
+
û
|
1490 |
+
ü
|
1491 |
+
ý
|
1492 |
+
Ā
|
1493 |
+
ā
|
1494 |
+
ă
|
1495 |
+
ą
|
1496 |
+
ć
|
1497 |
+
Č
|
1498 |
+
č
|
1499 |
+
Đ
|
1500 |
+
đ
|
1501 |
+
ē
|
1502 |
+
ė
|
1503 |
+
ę
|
1504 |
+
ě
|
1505 |
+
ĝ
|
1506 |
+
ğ
|
1507 |
+
ħ
|
1508 |
+
ī
|
1509 |
+
į
|
1510 |
+
İ
|
1511 |
+
ı
|
1512 |
+
Ł
|
1513 |
+
ł
|
1514 |
+
ń
|
1515 |
+
ņ
|
1516 |
+
ň
|
1517 |
+
ŋ
|
1518 |
+
Ō
|
1519 |
+
ō
|
1520 |
+
ő
|
1521 |
+
œ
|
1522 |
+
ř
|
1523 |
+
Ś
|
1524 |
+
ś
|
1525 |
+
Ş
|
1526 |
+
ş
|
1527 |
+
Š
|
1528 |
+
š
|
1529 |
+
Ť
|
1530 |
+
ť
|
1531 |
+
ũ
|
1532 |
+
ū
|
1533 |
+
ź
|
1534 |
+
Ż
|
1535 |
+
ż
|
1536 |
+
Ž
|
1537 |
+
ž
|
1538 |
+
ơ
|
1539 |
+
ư
|
1540 |
+
ǎ
|
1541 |
+
ǐ
|
1542 |
+
ǒ
|
1543 |
+
ǔ
|
1544 |
+
ǚ
|
1545 |
+
ș
|
1546 |
+
ț
|
1547 |
+
ɑ
|
1548 |
+
ɔ
|
1549 |
+
ɕ
|
1550 |
+
ə
|
1551 |
+
ɛ
|
1552 |
+
ɜ
|
1553 |
+
ɡ
|
1554 |
+
ɣ
|
1555 |
+
ɪ
|
1556 |
+
ɫ
|
1557 |
+
ɴ
|
1558 |
+
ɹ
|
1559 |
+
ɾ
|
1560 |
+
ʃ
|
1561 |
+
ʊ
|
1562 |
+
ʌ
|
1563 |
+
ʒ
|
1564 |
+
ʔ
|
1565 |
+
ʰ
|
1566 |
+
ʷ
|
1567 |
+
ʻ
|
1568 |
+
ʾ
|
1569 |
+
ʿ
|
1570 |
+
ˈ
|
1571 |
+
ː
|
1572 |
+
˙
|
1573 |
+
˜
|
1574 |
+
ˢ
|
1575 |
+
́
|
1576 |
+
̅
|
1577 |
+
Α
|
1578 |
+
Β
|
1579 |
+
Δ
|
1580 |
+
Ε
|
1581 |
+
Θ
|
1582 |
+
Κ
|
1583 |
+
Λ
|
1584 |
+
Μ
|
1585 |
+
Ξ
|
1586 |
+
Π
|
1587 |
+
Σ
|
1588 |
+
Τ
|
1589 |
+
Φ
|
1590 |
+
Χ
|
1591 |
+
Ψ
|
1592 |
+
Ω
|
1593 |
+
ά
|
1594 |
+
έ
|
1595 |
+
ή
|
1596 |
+
ί
|
1597 |
+
α
|
1598 |
+
β
|
1599 |
+
γ
|
1600 |
+
δ
|
1601 |
+
ε
|
1602 |
+
ζ
|
1603 |
+
η
|
1604 |
+
θ
|
1605 |
+
ι
|
1606 |
+
κ
|
1607 |
+
λ
|
1608 |
+
μ
|
1609 |
+
ν
|
1610 |
+
ξ
|
1611 |
+
ο
|
1612 |
+
π
|
1613 |
+
ρ
|
1614 |
+
ς
|
1615 |
+
σ
|
1616 |
+
τ
|
1617 |
+
υ
|
1618 |
+
φ
|
1619 |
+
χ
|
1620 |
+
ψ
|
1621 |
+
ω
|
1622 |
+
ϊ
|
1623 |
+
ό
|
1624 |
+
ύ
|
1625 |
+
ώ
|
1626 |
+
ϕ
|
1627 |
+
ϵ
|
1628 |
+
Ё
|
1629 |
+
А
|
1630 |
+
Б
|
1631 |
+
В
|
1632 |
+
Г
|
1633 |
+
Д
|
1634 |
+
Е
|
1635 |
+
Ж
|
1636 |
+
З
|
1637 |
+
И
|
1638 |
+
Й
|
1639 |
+
К
|
1640 |
+
Л
|
1641 |
+
М
|
1642 |
+
Н
|
1643 |
+
О
|
1644 |
+
П
|
1645 |
+
Р
|
1646 |
+
С
|
1647 |
+
Т
|
1648 |
+
У
|
1649 |
+
Ф
|
1650 |
+
Х
|
1651 |
+
Ц
|
1652 |
+
Ч
|
1653 |
+
Ш
|
1654 |
+
Щ
|
1655 |
+
Ы
|
1656 |
+
Ь
|
1657 |
+
Э
|
1658 |
+
Ю
|
1659 |
+
Я
|
1660 |
+
а
|
1661 |
+
б
|
1662 |
+
в
|
1663 |
+
г
|
1664 |
+
д
|
1665 |
+
е
|
1666 |
+
ж
|
1667 |
+
з
|
1668 |
+
и
|
1669 |
+
й
|
1670 |
+
к
|
1671 |
+
л
|
1672 |
+
м
|
1673 |
+
н
|
1674 |
+
о
|
1675 |
+
п
|
1676 |
+
р
|
1677 |
+
с
|
1678 |
+
т
|
1679 |
+
у
|
1680 |
+
ф
|
1681 |
+
х
|
1682 |
+
ц
|
1683 |
+
ч
|
1684 |
+
ш
|
1685 |
+
щ
|
1686 |
+
ъ
|
1687 |
+
ы
|
1688 |
+
ь
|
1689 |
+
э
|
1690 |
+
ю
|
1691 |
+
я
|
1692 |
+
ё
|
1693 |
+
і
|
1694 |
+
ְ
|
1695 |
+
ִ
|
1696 |
+
ֵ
|
1697 |
+
ֶ
|
1698 |
+
ַ
|
1699 |
+
ָ
|
1700 |
+
ֹ
|
1701 |
+
ּ
|
1702 |
+
־
|
1703 |
+
ׁ
|
1704 |
+
א
|
1705 |
+
ב
|
1706 |
+
ג
|
1707 |
+
ד
|
1708 |
+
ה
|
1709 |
+
ו
|
1710 |
+
ז
|
1711 |
+
ח
|
1712 |
+
ט
|
1713 |
+
י
|
1714 |
+
כ
|
1715 |
+
ל
|
1716 |
+
ם
|
1717 |
+
מ
|
1718 |
+
ן
|
1719 |
+
נ
|
1720 |
+
ס
|
1721 |
+
ע
|
1722 |
+
פ
|
1723 |
+
ק
|
1724 |
+
ר
|
1725 |
+
ש
|
1726 |
+
ת
|
1727 |
+
أ
|
1728 |
+
ب
|
1729 |
+
ة
|
1730 |
+
ت
|
1731 |
+
ج
|
1732 |
+
ح
|
1733 |
+
د
|
1734 |
+
ر
|
1735 |
+
ز
|
1736 |
+
س
|
1737 |
+
ص
|
1738 |
+
ط
|
1739 |
+
ع
|
1740 |
+
ق
|
1741 |
+
ك
|
1742 |
+
ل
|
1743 |
+
م
|
1744 |
+
ن
|
1745 |
+
ه
|
1746 |
+
و
|
1747 |
+
ي
|
1748 |
+
َ
|
1749 |
+
ُ
|
1750 |
+
ِ
|
1751 |
+
ْ
|
1752 |
+
ก
|
1753 |
+
ข
|
1754 |
+
ง
|
1755 |
+
จ
|
1756 |
+
ต
|
1757 |
+
ท
|
1758 |
+
น
|
1759 |
+
ป
|
1760 |
+
ย
|
1761 |
+
ร
|
1762 |
+
ว
|
1763 |
+
ส
|
1764 |
+
ห
|
1765 |
+
อ
|
1766 |
+
ฮ
|
1767 |
+
ั
|
1768 |
+
า
|
1769 |
+
ี
|
1770 |
+
ึ
|
1771 |
+
โ
|
1772 |
+
ใ
|
1773 |
+
ไ
|
1774 |
+
่
|
1775 |
+
้
|
1776 |
+
์
|
1777 |
+
ḍ
|
1778 |
+
Ḥ
|
1779 |
+
ḥ
|
1780 |
+
ṁ
|
1781 |
+
ṃ
|
1782 |
+
ṅ
|
1783 |
+
ṇ
|
1784 |
+
Ṛ
|
1785 |
+
ṛ
|
1786 |
+
Ṣ
|
1787 |
+
ṣ
|
1788 |
+
Ṭ
|
1789 |
+
ṭ
|
1790 |
+
ạ
|
1791 |
+
ả
|
1792 |
+
Ấ
|
1793 |
+
ấ
|
1794 |
+
ầ
|
1795 |
+
ậ
|
1796 |
+
ắ
|
1797 |
+
ằ
|
1798 |
+
ẻ
|
1799 |
+
ẽ
|
1800 |
+
ế
|
1801 |
+
ề
|
1802 |
+
ể
|
1803 |
+
ễ
|
1804 |
+
ệ
|
1805 |
+
ị
|
1806 |
+
ọ
|
1807 |
+
ỏ
|
1808 |
+
ố
|
1809 |
+
ồ
|
1810 |
+
ộ
|
1811 |
+
ớ
|
1812 |
+
ờ
|
1813 |
+
ở
|
1814 |
+
ụ
|
1815 |
+
ủ
|
1816 |
+
ứ
|
1817 |
+
ữ
|
1818 |
+
ἀ
|
1819 |
+
ἁ
|
1820 |
+
Ἀ
|
1821 |
+
ἐ
|
1822 |
+
ἔ
|
1823 |
+
ἰ
|
1824 |
+
ἱ
|
1825 |
+
ὀ
|
1826 |
+
ὁ
|
1827 |
+
ὐ
|
1828 |
+
ὲ
|
1829 |
+
ὸ
|
1830 |
+
���
|
1831 |
+
᾽
|
1832 |
+
ῆ
|
1833 |
+
ῇ
|
1834 |
+
ῶ
|
1835 |
+
|
1836 |
+
‑
|
1837 |
+
‒
|
1838 |
+
–
|
1839 |
+
—
|
1840 |
+
―
|
1841 |
+
‖
|
1842 |
+
†
|
1843 |
+
‡
|
1844 |
+
•
|
1845 |
+
…
|
1846 |
+
‧
|
1847 |
+
|
1848 |
+
′
|
1849 |
+
″
|
1850 |
+
⁄
|
1851 |
+
|
1852 |
+
⁰
|
1853 |
+
⁴
|
1854 |
+
⁵
|
1855 |
+
⁶
|
1856 |
+
⁷
|
1857 |
+
⁸
|
1858 |
+
⁹
|
1859 |
+
₁
|
1860 |
+
₂
|
1861 |
+
₃
|
1862 |
+
€
|
1863 |
+
₱
|
1864 |
+
₹
|
1865 |
+
₽
|
1866 |
+
℃
|
1867 |
+
ℏ
|
1868 |
+
ℓ
|
1869 |
+
№
|
1870 |
+
ℝ
|
1871 |
+
™
|
1872 |
+
⅓
|
1873 |
+
⅔
|
1874 |
+
⅛
|
1875 |
+
→
|
1876 |
+
∂
|
1877 |
+
∈
|
1878 |
+
∑
|
1879 |
+
−
|
1880 |
+
∗
|
1881 |
+
√
|
1882 |
+
∞
|
1883 |
+
∫
|
1884 |
+
≈
|
1885 |
+
≠
|
1886 |
+
≡
|
1887 |
+
≤
|
1888 |
+
≥
|
1889 |
+
⋅
|
1890 |
+
⋯
|
1891 |
+
█
|
1892 |
+
♪
|
1893 |
+
⟨
|
1894 |
+
⟩
|
1895 |
+
、
|
1896 |
+
。
|
1897 |
+
《
|
1898 |
+
》
|
1899 |
+
「
|
1900 |
+
」
|
1901 |
+
【
|
1902 |
+
】
|
1903 |
+
あ
|
1904 |
+
う
|
1905 |
+
え
|
1906 |
+
お
|
1907 |
+
か
|
1908 |
+
が
|
1909 |
+
き
|
1910 |
+
ぎ
|
1911 |
+
く
|
1912 |
+
ぐ
|
1913 |
+
け
|
1914 |
+
げ
|
1915 |
+
こ
|
1916 |
+
ご
|
1917 |
+
さ
|
1918 |
+
し
|
1919 |
+
じ
|
1920 |
+
す
|
1921 |
+
ず
|
1922 |
+
せ
|
1923 |
+
ぜ
|
1924 |
+
そ
|
1925 |
+
ぞ
|
1926 |
+
た
|
1927 |
+
だ
|
1928 |
+
ち
|
1929 |
+
っ
|
1930 |
+
つ
|
1931 |
+
で
|
1932 |
+
と
|
1933 |
+
ど
|
1934 |
+
な
|
1935 |
+
に
|
1936 |
+
ね
|
1937 |
+
の
|
1938 |
+
は
|
1939 |
+
ば
|
1940 |
+
ひ
|
1941 |
+
ぶ
|
1942 |
+
へ
|
1943 |
+
べ
|
1944 |
+
ま
|
1945 |
+
み
|
1946 |
+
む
|
1947 |
+
め
|
1948 |
+
も
|
1949 |
+
ゃ
|
1950 |
+
や
|
1951 |
+
ゆ
|
1952 |
+
ょ
|
1953 |
+
よ
|
1954 |
+
ら
|
1955 |
+
り
|
1956 |
+
る
|
1957 |
+
れ
|
1958 |
+
ろ
|
1959 |
+
わ
|
1960 |
+
を
|
1961 |
+
ん
|
1962 |
+
ァ
|
1963 |
+
ア
|
1964 |
+
ィ
|
1965 |
+
イ
|
1966 |
+
ウ
|
1967 |
+
ェ
|
1968 |
+
エ
|
1969 |
+
オ
|
1970 |
+
カ
|
1971 |
+
ガ
|
1972 |
+
キ
|
1973 |
+
ク
|
1974 |
+
ケ
|
1975 |
+
ゲ
|
1976 |
+
コ
|
1977 |
+
ゴ
|
1978 |
+
サ
|
1979 |
+
ザ
|
1980 |
+
シ
|
1981 |
+
ジ
|
1982 |
+
ス
|
1983 |
+
ズ
|
1984 |
+
セ
|
1985 |
+
ゾ
|
1986 |
+
タ
|
1987 |
+
ダ
|
1988 |
+
チ
|
1989 |
+
ッ
|
1990 |
+
ツ
|
1991 |
+
テ
|
1992 |
+
デ
|
1993 |
+
ト
|
1994 |
+
ド
|
1995 |
+
ナ
|
1996 |
+
ニ
|
1997 |
+
ネ
|
1998 |
+
ノ
|
1999 |
+
バ
|
2000 |
+
パ
|
2001 |
+
ビ
|
2002 |
+
ピ
|
2003 |
+
フ
|
2004 |
+
プ
|
2005 |
+
ヘ
|
2006 |
+
ベ
|
2007 |
+
ペ
|
2008 |
+
ホ
|
2009 |
+
ボ
|
2010 |
+
ポ
|
2011 |
+
マ
|
2012 |
+
ミ
|
2013 |
+
ム
|
2014 |
+
メ
|
2015 |
+
モ
|
2016 |
+
ャ
|
2017 |
+
ヤ
|
2018 |
+
ュ
|
2019 |
+
ユ
|
2020 |
+
ョ
|
2021 |
+
ヨ
|
2022 |
+
ラ
|
2023 |
+
リ
|
2024 |
+
ル
|
2025 |
+
レ
|
2026 |
+
ロ
|
2027 |
+
ワ
|
2028 |
+
ン
|
2029 |
+
・
|
2030 |
+
ー
|
2031 |
+
ㄋ
|
2032 |
+
ㄍ
|
2033 |
+
ㄎ
|
2034 |
+
ㄏ
|
2035 |
+
ㄓ
|
2036 |
+
ㄕ
|
2037 |
+
ㄚ
|
2038 |
+
ㄜ
|
2039 |
+
ㄟ
|
2040 |
+
ㄤ
|
2041 |
+
ㄥ
|
2042 |
+
ㄧ
|
2043 |
+
ㄱ
|
2044 |
+
ㄴ
|
2045 |
+
ㄷ
|
2046 |
+
ㄹ
|
2047 |
+
ㅁ
|
2048 |
+
ㅂ
|
2049 |
+
ㅅ
|
2050 |
+
ㅈ
|
2051 |
+
ㅍ
|
2052 |
+
ㅎ
|
2053 |
+
ㅏ
|
2054 |
+
ㅓ
|
2055 |
+
ㅗ
|
2056 |
+
ㅜ
|
2057 |
+
ㅡ
|
2058 |
+
ㅣ
|
2059 |
+
㗎
|
2060 |
+
가
|
2061 |
+
각
|
2062 |
+
간
|
2063 |
+
갈
|
2064 |
+
감
|
2065 |
+
갑
|
2066 |
+
갓
|
2067 |
+
갔
|
2068 |
+
강
|
2069 |
+
같
|
2070 |
+
개
|
2071 |
+
거
|
2072 |
+
건
|
2073 |
+
걸
|
2074 |
+
겁
|
2075 |
+
것
|
2076 |
+
겉
|
2077 |
+
게
|
2078 |
+
겠
|
2079 |
+
겨
|
2080 |
+
결
|
2081 |
+
겼
|
2082 |
+
경
|
2083 |
+
계
|
2084 |
+
고
|
2085 |
+
곤
|
2086 |
+
골
|
2087 |
+
곱
|
2088 |
+
공
|
2089 |
+
과
|
2090 |
+
관
|
2091 |
+
광
|
2092 |
+
교
|
2093 |
+
구
|
2094 |
+
국
|
2095 |
+
굴
|
2096 |
+
귀
|
2097 |
+
귄
|
2098 |
+
그
|
2099 |
+
근
|
2100 |
+
글
|
2101 |
+
금
|
2102 |
+
기
|
2103 |
+
긴
|
2104 |
+
길
|
2105 |
+
까
|
2106 |
+
깍
|
2107 |
+
깔
|
2108 |
+
깜
|
2109 |
+
깨
|
2110 |
+
께
|
2111 |
+
꼬
|
2112 |
+
꼭
|
2113 |
+
꽃
|
2114 |
+
꾸
|
2115 |
+
꿔
|
2116 |
+
끔
|
2117 |
+
끗
|
2118 |
+
끝
|
2119 |
+
끼
|
2120 |
+
나
|
2121 |
+
난
|
2122 |
+
날
|
2123 |
+
남
|
2124 |
+
납
|
2125 |
+
내
|
2126 |
+
냐
|
2127 |
+
냥
|
2128 |
+
너
|
2129 |
+
넘
|
2130 |
+
넣
|
2131 |
+
네
|
2132 |
+
녁
|
2133 |
+
년
|
2134 |
+
녕
|
2135 |
+
노
|
2136 |
+
녹
|
2137 |
+
놀
|
2138 |
+
누
|
2139 |
+
눈
|
2140 |
+
느
|
2141 |
+
는
|
2142 |
+
늘
|
2143 |
+
니
|
2144 |
+
님
|
2145 |
+
닙
|
2146 |
+
다
|
2147 |
+
닥
|
2148 |
+
단
|
2149 |
+
달
|
2150 |
+
닭
|
2151 |
+
당
|
2152 |
+
대
|
2153 |
+
더
|
2154 |
+
덕
|
2155 |
+
던
|
2156 |
+
덥
|
2157 |
+
데
|
2158 |
+
도
|
2159 |
+
독
|
2160 |
+
동
|
2161 |
+
돼
|
2162 |
+
됐
|
2163 |
+
되
|
2164 |
+
된
|
2165 |
+
될
|
2166 |
+
두
|
2167 |
+
둑
|
2168 |
+
둥
|
2169 |
+
드
|
2170 |
+
들
|
2171 |
+
등
|
2172 |
+
디
|
2173 |
+
따
|
2174 |
+
딱
|
2175 |
+
딸
|
2176 |
+
땅
|
2177 |
+
때
|
2178 |
+
떤
|
2179 |
+
떨
|
2180 |
+
떻
|
2181 |
+
또
|
2182 |
+
똑
|
2183 |
+
뚱
|
2184 |
+
뛰
|
2185 |
+
뜻
|
2186 |
+
띠
|
2187 |
+
라
|
2188 |
+
락
|
2189 |
+
란
|
2190 |
+
람
|
2191 |
+
랍
|
2192 |
+
랑
|
2193 |
+
래
|
2194 |
+
랜
|
2195 |
+
러
|
2196 |
+
런
|
2197 |
+
럼
|
2198 |
+
렇
|
2199 |
+
레
|
2200 |
+
려
|
2201 |
+
력
|
2202 |
+
렵
|
2203 |
+
렸
|
2204 |
+
로
|
2205 |
+
록
|
2206 |
+
롬
|
2207 |
+
루
|
2208 |
+
르
|
2209 |
+
른
|
2210 |
+
를
|
2211 |
+
름
|
2212 |
+
릉
|
2213 |
+
리
|
2214 |
+
릴
|
2215 |
+
림
|
2216 |
+
마
|
2217 |
+
막
|
2218 |
+
만
|
2219 |
+
많
|
2220 |
+
말
|
2221 |
+
맑
|
2222 |
+
맙
|
2223 |
+
맛
|
2224 |
+
매
|
2225 |
+
머
|
2226 |
+
먹
|
2227 |
+
멍
|
2228 |
+
메
|
2229 |
+
면
|
2230 |
+
명
|
2231 |
+
몇
|
2232 |
+
모
|
2233 |
+
목
|
2234 |
+
몸
|
2235 |
+
못
|
2236 |
+
무
|
2237 |
+
문
|
2238 |
+
물
|
2239 |
+
뭐
|
2240 |
+
뭘
|
2241 |
+
미
|
2242 |
+
민
|
2243 |
+
밌
|
2244 |
+
밑
|
2245 |
+
바
|
2246 |
+
박
|
2247 |
+
밖
|
2248 |
+
반
|
2249 |
+
받
|
2250 |
+
발
|
2251 |
+
밤
|
2252 |
+
밥
|
2253 |
+
방
|
2254 |
+
배
|
2255 |
+
백
|
2256 |
+
밸
|
2257 |
+
뱀
|
2258 |
+
버
|
2259 |
+
번
|
2260 |
+
벌
|
2261 |
+
벚
|
2262 |
+
베
|
2263 |
+
벼
|
2264 |
+
벽
|
2265 |
+
별
|
2266 |
+
병
|
2267 |
+
보
|
2268 |
+
복
|
2269 |
+
본
|
2270 |
+
볼
|
2271 |
+
봐
|
2272 |
+
봤
|
2273 |
+
부
|
2274 |
+
분
|
2275 |
+
불
|
2276 |
+
비
|
2277 |
+
빔
|
2278 |
+
빛
|
2279 |
+
빠
|
2280 |
+
빨
|
2281 |
+
뼈
|
2282 |
+
뽀
|
2283 |
+
뿅
|
2284 |
+
쁘
|
2285 |
+
사
|
2286 |
+
산
|
2287 |
+
살
|
2288 |
+
삼
|
2289 |
+
샀
|
2290 |
+
상
|
2291 |
+
새
|
2292 |
+
색
|
2293 |
+
생
|
2294 |
+
서
|
2295 |
+
선
|
2296 |
+
설
|
2297 |
+
섭
|
2298 |
+
섰
|
2299 |
+
성
|
2300 |
+
세
|
2301 |
+
셔
|
2302 |
+
션
|
2303 |
+
셨
|
2304 |
+
소
|
2305 |
+
속
|
2306 |
+
손
|
2307 |
+
송
|
2308 |
+
수
|
2309 |
+
숙
|
2310 |
+
순
|
2311 |
+
술
|
2312 |
+
숫
|
2313 |
+
숭
|
2314 |
+
숲
|
2315 |
+
쉬
|
2316 |
+
쉽
|
2317 |
+
스
|
2318 |
+
슨
|
2319 |
+
습
|
2320 |
+
슷
|
2321 |
+
시
|
2322 |
+
식
|
2323 |
+
신
|
2324 |
+
실
|
2325 |
+
싫
|
2326 |
+
심
|
2327 |
+
십
|
2328 |
+
싶
|
2329 |
+
싸
|
2330 |
+
써
|
2331 |
+
쓰
|
2332 |
+
쓴
|
2333 |
+
씌
|
2334 |
+
씨
|
2335 |
+
씩
|
2336 |
+
씬
|
2337 |
+
아
|
2338 |
+
악
|
2339 |
+
안
|
2340 |
+
않
|
2341 |
+
알
|
2342 |
+
야
|
2343 |
+
약
|
2344 |
+
얀
|
2345 |
+
양
|
2346 |
+
얘
|
2347 |
+
어
|
2348 |
+
언
|
2349 |
+
얼
|
2350 |
+
엄
|
2351 |
+
업
|
2352 |
+
없
|
2353 |
+
었
|
2354 |
+
엉
|
2355 |
+
에
|
2356 |
+
여
|
2357 |
+
역
|
2358 |
+
연
|
2359 |
+
염
|
2360 |
+
엽
|
2361 |
+
영
|
2362 |
+
옆
|
2363 |
+
예
|
2364 |
+
옛
|
2365 |
+
오
|
2366 |
+
온
|
2367 |
+
올
|
2368 |
+
옷
|
2369 |
+
옹
|
2370 |
+
와
|
2371 |
+
왔
|
2372 |
+
왜
|
2373 |
+
요
|
2374 |
+
욕
|
2375 |
+
용
|
2376 |
+
우
|
2377 |
+
운
|
2378 |
+
울
|
2379 |
+
웃
|
2380 |
+
워
|
2381 |
+
원
|
2382 |
+
월
|
2383 |
+
웠
|
2384 |
+
위
|
2385 |
+
윙
|
2386 |
+
유
|
2387 |
+
육
|
2388 |
+
윤
|
2389 |
+
으
|
2390 |
+
은
|
2391 |
+
을
|
2392 |
+
음
|
2393 |
+
응
|
2394 |
+
의
|
2395 |
+
이
|
2396 |
+
익
|
2397 |
+
인
|
2398 |
+
일
|
2399 |
+
읽
|
2400 |
+
임
|
2401 |
+
입
|
2402 |
+
있
|
2403 |
+
자
|
2404 |
+
작
|
2405 |
+
잔
|
2406 |
+
잖
|
2407 |
+
잘
|
2408 |
+
잡
|
2409 |
+
잤
|
2410 |
+
장
|
2411 |
+
재
|
2412 |
+
저
|
2413 |
+
전
|
2414 |
+
점
|
2415 |
+
정
|
2416 |
+
제
|
2417 |
+
져
|
2418 |
+
졌
|
2419 |
+
조
|
2420 |
+
족
|
2421 |
+
좀
|
2422 |
+
종
|
2423 |
+
좋
|
2424 |
+
죠
|
2425 |
+
주
|
2426 |
+
준
|
2427 |
+
줄
|
2428 |
+
중
|
2429 |
+
줘
|
2430 |
+
즈
|
2431 |
+
즐
|
2432 |
+
즘
|
2433 |
+
지
|
2434 |
+
진
|
2435 |
+
집
|
2436 |
+
짜
|
2437 |
+
짝
|
2438 |
+
쩌
|
2439 |
+
쪼
|
2440 |
+
쪽
|
2441 |
+
쫌
|
2442 |
+
쭈
|
2443 |
+
쯔
|
2444 |
+
찌
|
2445 |
+
찍
|
2446 |
+
차
|
2447 |
+
착
|
2448 |
+
찾
|
2449 |
+
책
|
2450 |
+
처
|
2451 |
+
천
|
2452 |
+
철
|
2453 |
+
체
|
2454 |
+
쳐
|
2455 |
+
쳤
|
2456 |
+
초
|
2457 |
+
촌
|
2458 |
+
추
|
2459 |
+
출
|
2460 |
+
춤
|
2461 |
+
춥
|
2462 |
+
춰
|
2463 |
+
치
|
2464 |
+
친
|
2465 |
+
칠
|
2466 |
+
침
|
2467 |
+
칩
|
2468 |
+
칼
|
2469 |
+
커
|
2470 |
+
켓
|
2471 |
+
코
|
2472 |
+
콩
|
2473 |
+
쿠
|
2474 |
+
퀴
|
2475 |
+
크
|
2476 |
+
큰
|
2477 |
+
큽
|
2478 |
+
키
|
2479 |
+
킨
|
2480 |
+
타
|
2481 |
+
태
|
2482 |
+
터
|
2483 |
+
턴
|
2484 |
+
털
|
2485 |
+
테
|
2486 |
+
토
|
2487 |
+
통
|
2488 |
+
투
|
2489 |
+
트
|
2490 |
+
특
|
2491 |
+
튼
|
2492 |
+
틀
|
2493 |
+
티
|
2494 |
+
팀
|
2495 |
+
파
|
2496 |
+
팔
|
2497 |
+
패
|
2498 |
+
페
|
2499 |
+
펜
|
2500 |
+
펭
|
2501 |
+
평
|
2502 |
+
포
|
2503 |
+
폭
|
2504 |
+
표
|
2505 |
+
품
|
2506 |
+
풍
|
2507 |
+
프
|
2508 |
+
플
|
2509 |
+
피
|
2510 |
+
필
|
2511 |
+
하
|
2512 |
+
학
|
2513 |
+
한
|
2514 |
+
할
|
2515 |
+
함
|
2516 |
+
합
|
2517 |
+
항
|
2518 |
+
해
|
2519 |
+
햇
|
2520 |
+
했
|
2521 |
+
행
|
2522 |
+
허
|
2523 |
+
험
|
2524 |
+
형
|
2525 |
+
혜
|
2526 |
+
호
|
2527 |
+
혼
|
2528 |
+
홀
|
2529 |
+
화
|
2530 |
+
회
|
2531 |
+
획
|
2532 |
+
후
|
2533 |
+
휴
|
2534 |
+
흐
|
2535 |
+
흔
|
2536 |
+
희
|
2537 |
+
히
|
2538 |
+
힘
|
2539 |
+
ﷺ
|
2540 |
+
ﷻ
|
2541 |
+
!
|
2542 |
+
,
|
2543 |
+
?
|
2544 |
+
�
|
2545 |
+
𠮶
|
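The listing above is the tail of vocab.txt: a plain token vocabulary with one entry per line (tone-numbered pinyin syllables, then Latin, Greek, Cyrillic, Hebrew, Arabic, Thai, kana, bopomofo, jamo, and Hangul characters, sorted by codepoint; a few entries are whitespace-only characters that render as blank lines). As a rough illustration of how such a file is consumed, the snippet below loads it into a token-to-id map whose line order defines the ids. This is a sketch only, not the repository's own loader; the load_vocab name is invented for illustration.

# Illustrative sketch (hypothetical helper, not the repo's actual loader):
# read a one-token-per-line vocab file into a token -> id map.
def load_vocab(path="vocab.txt"):
    with open(path, "r", encoding="utf-8") as f:
        # splitlines() strips only the newline, so tokens that are themselves
        # whitespace or combining characters survive intact
        tokens = f.read().splitlines()
    return {token: idx for idx, token in enumerate(tokens)}

vocab = load_vocab()
print(vocab["zuo4"])  # id of the tone-4 pinyin syllable "zuo"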
src/f5_tts/infer/infer_cli.py
ADDED
@@ -0,0 +1,383 @@
import argparse
import codecs
import os
import re
from datetime import datetime
from importlib.resources import files
from pathlib import Path

import numpy as np
import soundfile as sf
import tomli
from cached_path import cached_path
from hydra.utils import get_class
from omegaconf import OmegaConf
from unidecode import unidecode

from f5_tts.infer.utils_infer import (
    cfg_strength,
    cross_fade_duration,
    device,
    fix_duration,
    infer_process,
    load_model,
    load_vocoder,
    mel_spec_type,
    nfe_step,
    preprocess_ref_audio_text,
    remove_silence_for_generated_wav,
    speed,
    sway_sampling_coef,
    target_rms,
)


parser = argparse.ArgumentParser(
    prog="python3 infer-cli.py",
    description="Commandline interface for E2/F5 TTS with Advanced Batch Processing.",
    epilog="Specify options above to override one or more settings from config.",
)
parser.add_argument(
    "-c",
    "--config",
    type=str,
    default=os.path.join(files("f5_tts").joinpath("infer/examples/basic"), "basic.toml"),
    help="The configuration file, default see infer/examples/basic/basic.toml",
)


# Note: no default values below, so that defaults are read from the config file

parser.add_argument(
    "-m",
    "--model",
    type=str,
    help="The model name: F5TTS_v1_Base | F5TTS_Base | E2TTS_Base | etc.",
)
parser.add_argument(
    "-mc",
    "--model_cfg",
    type=str,
    help="The path to F5-TTS model config file .yaml",
)
parser.add_argument(
    "-p",
    "--ckpt_file",
    type=str,
    help="The path to model checkpoint .pt, leave blank to use default",
)
parser.add_argument(
    "-v",
    "--vocab_file",
    type=str,
    help="The path to vocab file .txt, leave blank to use default",
)
parser.add_argument(
    "-r",
    "--ref_audio",
    type=str,
    help="The reference audio file.",
)
parser.add_argument(
    "-s",
    "--ref_text",
    type=str,
    help="The transcript/subtitle for the reference audio",
)
parser.add_argument(
    "-t",
    "--gen_text",
    type=str,
    help="The text to make model synthesize a speech",
)
parser.add_argument(
    "-f",
    "--gen_file",
    type=str,
    help="The file with text to generate, will ignore --gen_text",
)
parser.add_argument(
    "-o",
    "--output_dir",
    type=str,
    help="The path to output folder",
)
parser.add_argument(
    "-w",
    "--output_file",
    type=str,
    help="The name of output file",
)
parser.add_argument(
    "--save_chunk",
    action="store_true",
    help="To save each audio chunk during inference",
)
parser.add_argument(
    "--no_legacy_text",
    action="store_false",
    help="Not to use lossy ASCII transliterations of unicode text in saved file names.",
)
parser.add_argument(
    "--remove_silence",
    action="store_true",
    help="To remove long silence found in output",
)
parser.add_argument(
    "--load_vocoder_from_local",
    action="store_true",
    help="To load vocoder from local dir, default to ../checkpoints/vocos-mel-24khz",
)
parser.add_argument(
    "--vocoder_name",
    type=str,
    choices=["vocos", "bigvgan"],
    help=f"Used vocoder name: vocos | bigvgan, default {mel_spec_type}",
)
parser.add_argument(
    "--target_rms",
    type=float,
    help=f"Target output speech loudness normalization value, default {target_rms}",
)
parser.add_argument(
    "--cross_fade_duration",
    type=float,
    help=f"Duration of cross-fade between audio segments in seconds, default {cross_fade_duration}",
)
parser.add_argument(
    "--nfe_step",
    type=int,
    help=f"The number of function evaluation (denoising steps), default {nfe_step}",
)
parser.add_argument(
    "--cfg_strength",
    type=float,
    help=f"Classifier-free guidance strength, default {cfg_strength}",
)
parser.add_argument(
    "--sway_sampling_coef",
    type=float,
    help=f"Sway Sampling coefficient, default {sway_sampling_coef}",
)
parser.add_argument(
    "--speed",
    type=float,
    help=f"The speed of the generated audio, default {speed}",
)
parser.add_argument(
    "--fix_duration",
    type=float,
    help=f"Fix the total duration (ref and gen audios) in seconds, default {fix_duration}",
)
parser.add_argument(
    "--device",
    type=str,
    help="Specify the device to run on",
)
args = parser.parse_args()


# config file

config = tomli.load(open(args.config, "rb"))


# command-line interface parameters

model = args.model or config.get("model", "F5TTS_v1_Base")
ckpt_file = args.ckpt_file or config.get("ckpt_file", "")
vocab_file = args.vocab_file or config.get("vocab_file", "")

ref_audio = args.ref_audio or config.get("ref_audio", "infer/examples/basic/basic_ref_en.wav")
ref_text = (
    args.ref_text
    if args.ref_text is not None
    else config.get("ref_text", "Some call me nature, others call me mother nature.")
)
gen_text = args.gen_text or config.get("gen_text", "Here we generate something just for test.")
gen_file = args.gen_file or config.get("gen_file", "")

output_dir = args.output_dir or config.get("output_dir", "tests")
output_file = args.output_file or config.get(
    "output_file", f"infer_cli_{datetime.now().strftime(r'%Y%m%d_%H%M%S')}.wav"
)

save_chunk = args.save_chunk or config.get("save_chunk", False)
use_legacy_text = args.no_legacy_text or config.get("no_legacy_text", False)  # no_legacy_text is a store_false arg
if save_chunk and use_legacy_text:
    print(
        "\nWarning to --save_chunk: lossy ASCII transliterations of unicode text for legacy (.wav) file names, --no_legacy_text to disable.\n"
    )

remove_silence = args.remove_silence or config.get("remove_silence", False)
load_vocoder_from_local = args.load_vocoder_from_local or config.get("load_vocoder_from_local", False)

vocoder_name = args.vocoder_name or config.get("vocoder_name", mel_spec_type)
target_rms = args.target_rms or config.get("target_rms", target_rms)
cross_fade_duration = args.cross_fade_duration or config.get("cross_fade_duration", cross_fade_duration)
nfe_step = args.nfe_step or config.get("nfe_step", nfe_step)
cfg_strength = args.cfg_strength or config.get("cfg_strength", cfg_strength)
sway_sampling_coef = args.sway_sampling_coef or config.get("sway_sampling_coef", sway_sampling_coef)
speed = args.speed or config.get("speed", speed)
fix_duration = args.fix_duration or config.get("fix_duration", fix_duration)
device = args.device or config.get("device", device)


# patches for pip pkg user
if "infer/examples/" in ref_audio:
    ref_audio = str(files("f5_tts").joinpath(f"{ref_audio}"))
if "infer/examples/" in gen_file:
    gen_file = str(files("f5_tts").joinpath(f"{gen_file}"))
if "voices" in config:
    for voice in config["voices"]:
        voice_ref_audio = config["voices"][voice]["ref_audio"]
        if "infer/examples/" in voice_ref_audio:
            config["voices"][voice]["ref_audio"] = str(files("f5_tts").joinpath(f"{voice_ref_audio}"))


# ignore gen_text if gen_file provided

if gen_file:
    gen_text = codecs.open(gen_file, "r", "utf-8").read()


# output path

wave_path = Path(output_dir) / output_file
# spectrogram_path = Path(output_dir) / "infer_cli_out.png"
if save_chunk:
    output_chunk_dir = os.path.join(output_dir, f"{Path(output_file).stem}_chunks")
    if not os.path.exists(output_chunk_dir):
        os.makedirs(output_chunk_dir)


# load vocoder

if vocoder_name == "vocos":
    vocoder_local_path = "../checkpoints/vocos-mel-24khz"
elif vocoder_name == "bigvgan":
    vocoder_local_path = "../checkpoints/bigvgan_v2_24khz_100band_256x"

vocoder = load_vocoder(
    vocoder_name=vocoder_name, is_local=load_vocoder_from_local, local_path=vocoder_local_path, device=device
)


# load TTS model

model_cfg = OmegaConf.load(
    args.model_cfg or config.get("model_cfg", str(files("f5_tts").joinpath(f"configs/{model}.yaml")))
)
model_cls = get_class(f"f5_tts.model.{model_cfg.model.backbone}")
model_arc = model_cfg.model.arch

repo_name, ckpt_step, ckpt_type = "F5-TTS", 1250000, "safetensors"

if model != "F5TTS_Base":
    assert vocoder_name == model_cfg.model.mel_spec.mel_spec_type

# override for previous models
if model == "F5TTS_Base":
    if vocoder_name == "vocos":
        ckpt_step = 1200000
    elif vocoder_name == "bigvgan":
        model = "F5TTS_Base_bigvgan"
        ckpt_type = "pt"
elif model == "E2TTS_Base":
    repo_name = "E2-TTS"
    ckpt_step = 1200000

if not ckpt_file:
    ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{model}/model_{ckpt_step}.{ckpt_type}"))

print(f"Using {model}...")
ema_model = load_model(
    model_cls, model_arc, ckpt_file, mel_spec_type=vocoder_name, vocab_file=vocab_file, device=device
)


# inference process


def main():
    main_voice = {"ref_audio": ref_audio, "ref_text": ref_text}
    if "voices" not in config:
        voices = {"main": main_voice}
    else:
        voices = config["voices"]
        voices["main"] = main_voice
    for voice in voices:
        print("Voice:", voice)
        print("ref_audio ", voices[voice]["ref_audio"])
        voices[voice]["ref_audio"], voices[voice]["ref_text"] = preprocess_ref_audio_text(
            voices[voice]["ref_audio"], voices[voice]["ref_text"]
        )
        print("ref_audio_", voices[voice]["ref_audio"], "\n\n")

    generated_audio_segments = []
    reg1 = r"(?=\[\w+\])"
    chunks = re.split(reg1, gen_text)
    reg2 = r"\[(\w+)\]"
    for text in chunks:
        if not text.strip():
            continue
        match = re.match(reg2, text)
        if match:
            voice = match[1]
        else:
            print("No voice tag found, using main.")
            voice = "main"
        if voice not in voices:
            print(f"Voice {voice} not found, using main.")
            voice = "main"
        text = re.sub(reg2, "", text)
        ref_audio_ = voices[voice]["ref_audio"]
        ref_text_ = voices[voice]["ref_text"]
        local_speed = voices[voice].get("speed", speed)
        gen_text_ = text.strip()
        print(f"Voice: {voice}")
        audio_segment, final_sample_rate, spectrogram = infer_process(
            ref_audio_,
            ref_text_,
            gen_text_,
            ema_model,
            vocoder,
            mel_spec_type=vocoder_name,
            target_rms=target_rms,
            cross_fade_duration=cross_fade_duration,
            nfe_step=nfe_step,
            cfg_strength=cfg_strength,
            sway_sampling_coef=sway_sampling_coef,
            speed=local_speed,
            fix_duration=fix_duration,
            device=device,
        )
        generated_audio_segments.append(audio_segment)

        if save_chunk:
            if len(gen_text_) > 200:
                gen_text_ = gen_text_[:200] + " ... "
            if use_legacy_text:
                gen_text_ = unidecode(gen_text_)
            sf.write(
                os.path.join(output_chunk_dir, f"{len(generated_audio_segments) - 1}_{gen_text_}.wav"),
                audio_segment,
                final_sample_rate,
            )

    if generated_audio_segments:
        final_wave = np.concatenate(generated_audio_segments)

        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with open(wave_path, "wb") as f:
            sf.write(f.name, final_wave, final_sample_rate)
            # Remove silence
            if remove_silence:
                remove_silence_for_generated_wav(f.name)
            print(f.name)


if __name__ == "__main__":
    main()
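Worth noting in main() above: multi-voice scripts are chunked with reg1, a zero-width lookahead that splits the text immediately before each [voice] tag, and reg2, which then reads the voice name off the front of each chunk. Below is a standalone sketch of that behavior; the sample script is invented, and the main/town names simply mirror the bundled infer/examples/multi demo.

# Standalone illustration of the voice-tag splitting used in main() above.
import re

reg1 = r"(?=\[\w+\])"  # lookahead split: keeps each [voice] tag with its chunk
reg2 = r"\[(\w+)\]"    # captures the voice name inside the tag

gen_text = "[main] Hello there. [town] A reply in another voice. [main] Back again."
for chunk in re.split(reg1, gen_text):
    if not chunk.strip():
        continue
    match = re.match(reg2, chunk)
    voice = match[1] if match else "main"  # fall back to main, as in main()
    text = re.sub(reg2, "", chunk).strip()
    print(voice, "->", text)
# main -> Hello there.
# town -> A reply in another voice.
# main -> Back again.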
src/f5_tts/infer/infer_gradio.py
ADDED
@@ -0,0 +1,1121 @@
1 |
+
# ruff: noqa: E402
|
2 |
+
# Above allows ruff to ignore E402: module level import not at top of file
|
3 |
+
|
4 |
+
import gc
|
5 |
+
import json
|
6 |
+
import os
|
7 |
+
import re
|
8 |
+
import tempfile
|
9 |
+
from collections import OrderedDict
|
10 |
+
from functools import lru_cache
|
11 |
+
from importlib.resources import files
|
12 |
+
|
13 |
+
import click
|
14 |
+
import gradio as gr
|
15 |
+
import numpy as np
|
16 |
+
import soundfile as sf
|
17 |
+
import torch
|
18 |
+
import torchaudio
|
19 |
+
from cached_path import cached_path
|
20 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
21 |
+
|
22 |
+
|
23 |
+
try:
|
24 |
+
import spaces
|
25 |
+
|
26 |
+
USING_SPACES = True
|
27 |
+
except ImportError:
|
28 |
+
USING_SPACES = False
|
29 |
+
|
30 |
+
|
31 |
+
def gpu_decorator(func):
|
32 |
+
if USING_SPACES:
|
33 |
+
return spaces.GPU(func)
|
34 |
+
else:
|
35 |
+
return func
|
36 |
+
|
37 |
+
|
38 |
+
from f5_tts.infer.utils_infer import (
|
39 |
+
infer_process,
|
40 |
+
load_model,
|
41 |
+
load_vocoder,
|
42 |
+
preprocess_ref_audio_text,
|
43 |
+
remove_silence_for_generated_wav,
|
44 |
+
save_spectrogram,
|
45 |
+
tempfile_kwargs,
|
46 |
+
)
|
47 |
+
from f5_tts.model import DiT, UNetT
|
48 |
+
|
49 |
+
|
50 |
+
DEFAULT_TTS_MODEL = "F5-TTS_v1"
|
51 |
+
tts_model_choice = DEFAULT_TTS_MODEL
|
52 |
+
|
53 |
+
DEFAULT_TTS_MODEL_CFG = [
|
54 |
+
"hf://SWivid/F5-TTS/F5TTS_v1_Base/model_1250000.safetensors",
|
55 |
+
"hf://SWivid/F5-TTS/F5TTS_v1_Base/vocab.txt",
|
56 |
+
json.dumps(dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)),
|
57 |
+
]
|
58 |
+
|
59 |
+
|
60 |
+
# load models
|
61 |
+
|
62 |
+
vocoder = load_vocoder()
|
63 |
+
|
64 |
+
|
65 |
+
def load_f5tts():
|
66 |
+
ckpt_path = str(cached_path(DEFAULT_TTS_MODEL_CFG[0]))
|
67 |
+
F5TTS_model_cfg = json.loads(DEFAULT_TTS_MODEL_CFG[2])
|
68 |
+
return load_model(DiT, F5TTS_model_cfg, ckpt_path)
|
69 |
+
|
70 |
+
|
71 |
+
def load_e2tts():
|
72 |
+
ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.safetensors"))
|
73 |
+
E2TTS_model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4, text_mask_padding=False, pe_attn_head=1)
|
74 |
+
return load_model(UNetT, E2TTS_model_cfg, ckpt_path)
|
75 |
+
|
76 |
+
|
77 |
+
def load_custom(ckpt_path: str, vocab_path="", model_cfg=None):
|
78 |
+
ckpt_path, vocab_path = ckpt_path.strip(), vocab_path.strip()
|
79 |
+
if ckpt_path.startswith("hf://"):
|
80 |
+
ckpt_path = str(cached_path(ckpt_path))
|
81 |
+
if vocab_path.startswith("hf://"):
|
82 |
+
vocab_path = str(cached_path(vocab_path))
|
83 |
+
if model_cfg is None:
|
84 |
+
model_cfg = json.loads(DEFAULT_TTS_MODEL_CFG[2])
|
85 |
+
elif isinstance(model_cfg, str):
|
86 |
+
model_cfg = json.loads(model_cfg)
|
87 |
+
return load_model(DiT, model_cfg, ckpt_path, vocab_file=vocab_path)
|
88 |
+
|
89 |
+
|
90 |
+
F5TTS_ema_model = load_f5tts()
|
91 |
+
E2TTS_ema_model = load_e2tts() if USING_SPACES else None
|
92 |
+
custom_ema_model, pre_custom_path = None, ""
|
93 |
+
|
94 |
+
chat_model_state = None
|
95 |
+
chat_tokenizer_state = None
|
96 |
+
|
97 |
+
|
98 |
+
@gpu_decorator
|
99 |
+
def chat_model_inference(messages, model, tokenizer):
|
100 |
+
"""Generate response using Qwen"""
|
101 |
+
text = tokenizer.apply_chat_template(
|
102 |
+
messages,
|
103 |
+
tokenize=False,
|
104 |
+
add_generation_prompt=True,
|
105 |
+
)
|
106 |
+
|
107 |
+
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
|
108 |
+
generated_ids = model.generate(
|
109 |
+
**model_inputs,
|
110 |
+
max_new_tokens=512,
|
111 |
+
temperature=0.7,
|
112 |
+
top_p=0.95,
|
113 |
+
)
|
114 |
+
|
115 |
+
generated_ids = [
|
116 |
+
output_ids[len(input_ids) :] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
|
117 |
+
]
|
118 |
+
return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
119 |
+
|
120 |
+
|
121 |
+
@gpu_decorator
|
122 |
+
def load_text_from_file(file):
|
123 |
+
if file:
|
124 |
+
with open(file, "r", encoding="utf-8") as f:
|
125 |
+
text = f.read().strip()
|
126 |
+
else:
|
127 |
+
text = ""
|
128 |
+
return gr.update(value=text)
|
129 |
+
|
130 |
+
|
131 |
+
@lru_cache(maxsize=1000) # NOTE. need to ensure params of infer() hashable
|
132 |
+
@gpu_decorator
|
133 |
+
def infer(
|
134 |
+
ref_audio_orig,
|
135 |
+
ref_text,
|
136 |
+
gen_text,
|
137 |
+
model,
|
138 |
+
remove_silence,
|
139 |
+
seed,
|
140 |
+
cross_fade_duration=0.15,
|
141 |
+
nfe_step=32,
|
142 |
+
speed=1,
|
143 |
+
show_info=gr.Info,
|
144 |
+
):
|
145 |
+
if not ref_audio_orig:
|
146 |
+
gr.Warning("Please provide reference audio.")
|
147 |
+
return gr.update(), gr.update(), ref_text
|
148 |
+
|
149 |
+
# Set inference seed
|
150 |
+
if seed < 0 or seed > 2**31 - 1:
|
151 |
+
gr.Warning("Seed must in range 0 ~ 2147483647. Using random seed instead.")
|
152 |
+
seed = np.random.randint(0, 2**31 - 1)
|
153 |
+
torch.manual_seed(seed)
|
154 |
+
used_seed = seed
|
155 |
+
|
156 |
+
if not gen_text.strip():
|
157 |
+
gr.Warning("Please enter text to generate or upload a text file.")
|
158 |
+
return gr.update(), gr.update(), ref_text
|
159 |
+
|
160 |
+
ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_orig, ref_text, show_info=show_info)
|
161 |
+
|
162 |
+
if model == DEFAULT_TTS_MODEL:
|
163 |
+
ema_model = F5TTS_ema_model
|
164 |
+
elif model == "E2-TTS":
|
165 |
+
global E2TTS_ema_model
|
166 |
+
if E2TTS_ema_model is None:
|
167 |
+
show_info("Loading E2-TTS model...")
|
168 |
+
E2TTS_ema_model = load_e2tts()
|
169 |
+
ema_model = E2TTS_ema_model
|
170 |
+
elif isinstance(model, tuple) and model[0] == "Custom":
|
171 |
+
assert not USING_SPACES, "Only official checkpoints allowed in Spaces."
|
172 |
+
global custom_ema_model, pre_custom_path
|
173 |
+
if pre_custom_path != model[1]:
|
174 |
+
show_info("Loading Custom TTS model...")
|
175 |
+
custom_ema_model = load_custom(model[1], vocab_path=model[2], model_cfg=model[3])
|
176 |
+
pre_custom_path = model[1]
|
177 |
+
ema_model = custom_ema_model
|
178 |
+
|
179 |
+
final_wave, final_sample_rate, combined_spectrogram = infer_process(
|
180 |
+
ref_audio,
|
181 |
+
ref_text,
|
182 |
+
gen_text,
|
183 |
+
ema_model,
|
184 |
+
vocoder,
|
185 |
+
cross_fade_duration=cross_fade_duration,
|
186 |
+
nfe_step=nfe_step,
|
187 |
+
speed=speed,
|
188 |
+
show_info=show_info,
|
189 |
+
progress=gr.Progress(),
|
190 |
+
)
|
191 |
+
|
192 |
+
# Remove silence
|
193 |
+
if remove_silence:
|
194 |
+
with tempfile.NamedTemporaryFile(suffix=".wav", **tempfile_kwargs) as f:
|
195 |
+
temp_path = f.name
|
196 |
+
try:
|
197 |
+
sf.write(temp_path, final_wave, final_sample_rate)
|
198 |
+
remove_silence_for_generated_wav(f.name)
|
199 |
+
final_wave, _ = torchaudio.load(f.name)
|
200 |
+
finally:
|
201 |
+
os.unlink(temp_path)
|
202 |
+
final_wave = final_wave.squeeze().cpu().numpy()
|
203 |
+
|
204 |
+
# Save the spectrogram
|
205 |
+
with tempfile.NamedTemporaryFile(suffix=".png", **tempfile_kwargs) as tmp_spectrogram:
|
206 |
+
spectrogram_path = tmp_spectrogram.name
|
207 |
+
save_spectrogram(combined_spectrogram, spectrogram_path)
|
208 |
+
|
209 |
+
return (final_sample_rate, final_wave), spectrogram_path, ref_text, used_seed
|
210 |
+
|
211 |
+
|
212 |
+
with gr.Blocks() as app_tts:
|
213 |
+
gr.Markdown("# Batched TTS")
|
214 |
+
ref_audio_input = gr.Audio(label="Reference Audio", type="filepath")
|
215 |
+
with gr.Row():
|
216 |
+
gen_text_input = gr.Textbox(
|
217 |
+
label="Text to Generate",
|
218 |
+
lines=10,
|
219 |
+
max_lines=40,
|
220 |
+
scale=4,
|
221 |
+
)
|
222 |
+
gen_text_file = gr.File(label="Load Text to Generate from File (.txt)", file_types=[".txt"], scale=1)
|
223 |
+
generate_btn = gr.Button("Synthesize", variant="primary")
|
224 |
+
with gr.Accordion("Advanced Settings", open=False):
|
225 |
+
with gr.Row():
|
226 |
+
ref_text_input = gr.Textbox(
|
227 |
+
label="Reference Text",
|
228 |
+
info="Leave blank to automatically transcribe the reference audio. If you enter text or upload a file, it will override automatic transcription.",
|
229 |
+
lines=2,
|
230 |
+
scale=4,
|
231 |
+
)
|
232 |
+
ref_text_file = gr.File(label="Load Reference Text from File (.txt)", file_types=[".txt"], scale=1)
|
233 |
+
with gr.Row():
|
234 |
+
randomize_seed = gr.Checkbox(
|
235 |
+
label="Randomize Seed",
|
236 |
+
info="Check to use a random seed for each generation. Uncheck to use the seed specified.",
|
237 |
+
value=True,
|
238 |
+
scale=3,
|
239 |
+
)
|
240 |
+
seed_input = gr.Number(show_label=False, value=0, precision=0, scale=1)
|
241 |
+
with gr.Column(scale=4):
|
242 |
+
remove_silence = gr.Checkbox(
|
243 |
+
label="Remove Silences",
|
244 |
+
info="If undesired long silence(s) produced, turn on to automatically detect and crop.",
|
245 |
+
value=False,
|
246 |
+
)
|
247 |
+
speed_slider = gr.Slider(
|
248 |
+
label="Speed",
|
249 |
+
minimum=0.3,
|
250 |
+
maximum=2.0,
|
251 |
+
value=1.0,
|
252 |
+
step=0.1,
|
253 |
+
info="Adjust the speed of the audio.",
|
254 |
+
)
|
255 |
+
nfe_slider = gr.Slider(
|
256 |
+
label="NFE Steps",
|
257 |
+
minimum=4,
|
258 |
+
maximum=64,
|
259 |
+
value=32,
|
260 |
+
step=2,
|
261 |
+
info="Set the number of denoising steps.",
|
262 |
+
)
|
263 |
+
cross_fade_duration_slider = gr.Slider(
|
264 |
+
label="Cross-Fade Duration (s)",
|
265 |
+
minimum=0.0,
|
266 |
+
maximum=1.0,
|
267 |
+
value=0.15,
|
268 |
+
step=0.01,
|
269 |
+
info="Set the duration of the cross-fade between audio clips.",
|
270 |
+
)
|
271 |
+
|
272 |
+
audio_output = gr.Audio(label="Synthesized Audio")
|
273 |
+
spectrogram_output = gr.Image(label="Spectrogram")
|
274 |
+
|
275 |
+
@gpu_decorator
|
276 |
+
def basic_tts(
|
277 |
+
ref_audio_input,
|
278 |
+
ref_text_input,
|
279 |
+
gen_text_input,
|
280 |
+
remove_silence,
|
281 |
+
randomize_seed,
|
282 |
+
seed_input,
|
283 |
+
cross_fade_duration_slider,
|
284 |
+
nfe_slider,
|
285 |
+
speed_slider,
|
286 |
+
):
|
287 |
+
if randomize_seed:
|
288 |
+
seed_input = np.random.randint(0, 2**31 - 1)
|
289 |
+
|
290 |
+
audio_out, spectrogram_path, ref_text_out, used_seed = infer(
|
291 |
+
ref_audio_input,
|
292 |
+
ref_text_input,
|
293 |
+
gen_text_input,
|
294 |
+
tts_model_choice,
|
295 |
+
remove_silence,
|
296 |
+
seed=seed_input,
|
297 |
+
cross_fade_duration=cross_fade_duration_slider,
|
298 |
+
nfe_step=nfe_slider,
|
299 |
+
speed=speed_slider,
|
300 |
+
)
|
301 |
+
return audio_out, spectrogram_path, ref_text_out, used_seed
|
302 |
+
|
303 |
+
gen_text_file.upload(
|
304 |
+
load_text_from_file,
|
305 |
+
inputs=[gen_text_file],
|
306 |
+
outputs=[gen_text_input],
|
307 |
+
)
|
308 |
+
|
309 |
+
ref_text_file.upload(
|
310 |
+
load_text_from_file,
|
311 |
+
inputs=[ref_text_file],
|
312 |
+
outputs=[ref_text_input],
|
313 |
+
)
|
314 |
+
|
315 |
+
ref_audio_input.clear(
|
316 |
+
lambda: [None, None],
|
317 |
+
None,
|
318 |
+
[ref_text_input, ref_text_file],
|
319 |
+
)
|
320 |
+
|
321 |
+
generate_btn.click(
|
322 |
+
basic_tts,
|
323 |
+
inputs=[
|
324 |
+
ref_audio_input,
|
325 |
+
ref_text_input,
|
326 |
+
gen_text_input,
|
327 |
+
remove_silence,
|
328 |
+
randomize_seed,
|
329 |
+
seed_input,
|
330 |
+
cross_fade_duration_slider,
|
331 |
+
nfe_slider,
|
332 |
+
speed_slider,
|
333 |
+
],
|
334 |
+
outputs=[audio_output, spectrogram_output, ref_text_input, seed_input],
|
335 |
+
)
|
336 |
+
|
337 |
+
|
338 |
+
def parse_speechtypes_text(gen_text):
|
339 |
+
# Pattern to find {str} or {"name": str, "seed": int, "speed": float}
|
340 |
+
pattern = r"(\{.*?\})"
|
341 |
+
|
342 |
+
# Split the text by the pattern
|
343 |
+
tokens = re.split(pattern, gen_text)
|
344 |
+
|
345 |
+
segments = []
|
346 |
+
|
347 |
+
current_type_dict = {
|
348 |
+
"name": "Regular",
|
349 |
+
"seed": -1,
|
350 |
+
"speed": 1.0,
|
351 |
+
}
|
352 |
+
|
353 |
+
for i in range(len(tokens)):
|
354 |
+
if i % 2 == 0:
|
355 |
+
# This is text
|
356 |
+
text = tokens[i].strip()
|
357 |
+
if text:
|
358 |
+
current_type_dict["text"] = text
|
359 |
+
segments.append(current_type_dict)
|
360 |
+
else:
|
361 |
+
# This is type
|
362 |
+
type_str = tokens[i].strip()
|
363 |
+
try: # if type dict
|
364 |
+
current_type_dict = json.loads(type_str)
|
365 |
+
except json.decoder.JSONDecodeError:
|
366 |
+
type_str = type_str[1:-1] # remove brace {}
|
367 |
+
current_type_dict = {"name": type_str, "seed": -1, "speed": 1.0}
|
368 |
+
|
369 |
+
return segments
|
370 |
+
|
371 |
+
|
372 |
+
with gr.Blocks() as app_multistyle:
|
373 |
+
# New section for multistyle generation
|
374 |
+
gr.Markdown(
|
375 |
+
"""
|
376 |
+
# Multiple Speech-Type Generation
|
377 |
+
|
378 |
+
This section allows you to generate multiple speech types or multiple people's voices. Enter your text in the format shown below, or upload a .txt file with the same format. The system will generate speech using the appropriate type. If unspecified, the model will use the regular speech type. The current speech type will be used until the next speech type is specified.
|
379 |
+
"""
|
380 |
+
)
|
381 |
+
|
382 |
+
with gr.Row():
|
383 |
+
gr.Markdown(
|
384 |
+
"""
|
385 |
+
**Example Input:** <br>
|
386 |
+
{Regular} Hello, I'd like to order a sandwich please. <br>
|
387 |
+
{Surprised} What do you mean you're out of bread? <br>
|
388 |
+
{Sad} I really wanted a sandwich though... <br>
|
389 |
+
{Angry} You know what, darn you and your little shop! <br>
|
390 |
+
{Whisper} I'll just go back home and cry now. <br>
|
391 |
+
{Shouting} Why me?!
|
392 |
+
"""
|
393 |
+
)
|
394 |
+
|
395 |
+
gr.Markdown(
|
396 |
+
"""
|
397 |
+
**Example Input 2:** <br>
|
398 |
+
{"name": "Speaker1_Happy", "seed": -1, "speed": 1} Hello, I'd like to order a sandwich please. <br>
|
399 |
+
{"name": "Speaker2_Regular", "seed": -1, "speed": 1} Sorry, we're out of bread. <br>
|
400 |
+
{"name": "Speaker1_Sad", "seed": -1, "speed": 1} I really wanted a sandwich though... <br>
|
401 |
+
{"name": "Speaker2_Whisper", "seed": -1, "speed": 1} I'll give you the last one I was hiding.
|
402 |
+
"""
|
403 |
+
)
|
404 |
+
|
405 |
+
gr.Markdown(
|
406 |
+
'Upload different audio clips for each speech type. The first speech type is mandatory. You can add additional speech types by clicking the "Add Speech Type" button.'
|
407 |
+
)
|
408 |
+
|
409 |
+
# Regular speech type (mandatory)
|
410 |
+
with gr.Row(variant="compact") as regular_row:
|
411 |
+
with gr.Column(scale=1, min_width=160):
|
412 |
+
regular_name = gr.Textbox(value="Regular", label="Speech Type Name")
|
413 |
+
regular_insert = gr.Button("Insert Label", variant="secondary")
|
414 |
+
with gr.Column(scale=3):
|
415 |
+
regular_audio = gr.Audio(label="Regular Reference Audio", type="filepath")
|
416 |
+
with gr.Column(scale=3):
|
417 |
+
regular_ref_text = gr.Textbox(label="Reference Text (Regular)", lines=4)
|
418 |
+
with gr.Row():
|
419 |
+
regular_seed_slider = gr.Slider(
|
420 |
+
show_label=False, minimum=-1, maximum=999, value=-1, step=1, info="Seed, -1 for random"
|
421 |
+
)
|
422 |
+
regular_speed_slider = gr.Slider(
|
423 |
+
show_label=False, minimum=0.3, maximum=2.0, value=1.0, step=0.1, info="Adjust the speed"
|
424 |
+
)
|
425 |
+
with gr.Column(scale=1, min_width=160):
|
426 |
+
regular_ref_text_file = gr.File(label="Load Reference Text from File (.txt)", file_types=[".txt"])
|
427 |
+
|
428 |
+
# Regular speech type (max 100)
|
429 |
+
max_speech_types = 100
|
430 |
+
speech_type_rows = [regular_row]
|
431 |
+
speech_type_names = [regular_name]
|
432 |
+
speech_type_audios = [regular_audio]
|
433 |
+
speech_type_ref_texts = [regular_ref_text]
|
434 |
+
speech_type_ref_text_files = [regular_ref_text_file]
|
435 |
+
speech_type_seeds = [regular_seed_slider]
|
436 |
+
speech_type_speeds = [regular_speed_slider]
|
437 |
+
speech_type_delete_btns = [None]
|
438 |
+
speech_type_insert_btns = [regular_insert]
|
439 |
+
|
440 |
+
# Additional speech types (99 more)
|
441 |
+
for i in range(max_speech_types - 1):
|
442 |
+
with gr.Row(variant="compact", visible=False) as row:
|
443 |
+
with gr.Column(scale=1, min_width=160):
|
444 |
+
name_input = gr.Textbox(label="Speech Type Name")
|
445 |
+
insert_btn = gr.Button("Insert Label", variant="secondary")
|
446 |
+
delete_btn = gr.Button("Delete Type", variant="stop")
|
447 |
+
with gr.Column(scale=3):
|
448 |
+
audio_input = gr.Audio(label="Reference Audio", type="filepath")
|
449 |
+
with gr.Column(scale=3):
|
450 |
+
ref_text_input = gr.Textbox(label="Reference Text", lines=4)
|
451 |
+
with gr.Row():
|
452 |
+
seed_input = gr.Slider(
|
453 |
+
show_label=False, minimum=-1, maximum=999, value=-1, step=1, info="Seed. -1 for random"
|
454 |
+
)
|
455 |
+
speed_input = gr.Slider(
|
456 |
+
show_label=False, minimum=0.3, maximum=2.0, value=1.0, step=0.1, info="Adjust the speed"
|
457 |
+
)
|
458 |
+
with gr.Column(scale=1, min_width=160):
|
459 |
+
ref_text_file_input = gr.File(label="Load Reference Text from File (.txt)", file_types=[".txt"])
|
460 |
+
speech_type_rows.append(row)
|
461 |
+
speech_type_names.append(name_input)
|
462 |
+
speech_type_audios.append(audio_input)
|
463 |
+
speech_type_ref_texts.append(ref_text_input)
|
464 |
+
speech_type_ref_text_files.append(ref_text_file_input)
|
465 |
+
speech_type_seeds.append(seed_input)
|
466 |
+
speech_type_speeds.append(speed_input)
|
467 |
+
speech_type_delete_btns.append(delete_btn)
|
468 |
+
speech_type_insert_btns.append(insert_btn)
|
469 |
+
|
470 |
+
# Global logic for all speech types
|
471 |
+
for i in range(max_speech_types):
|
472 |
+
speech_type_audios[i].clear(
|
473 |
+
lambda: [None, None],
|
474 |
+
None,
|
475 |
+
[speech_type_ref_texts[i], speech_type_ref_text_files[i]],
|
476 |
+
)
|
477 |
+
speech_type_ref_text_files[i].upload(
|
478 |
+
load_text_from_file,
|
479 |
+
inputs=[speech_type_ref_text_files[i]],
|
480 |
+
outputs=[speech_type_ref_texts[i]],
|
481 |
+
)
|
482 |
+
|
483 |
+
# Button to add speech type
|
484 |
+
add_speech_type_btn = gr.Button("Add Speech Type")
|
485 |
+
|
486 |
+
# Auto-incrementing count of visible speech types (never rolled back on delete)
|
487 |
+
speech_type_count = 1
|
488 |
+
|
489 |
+
# Function to add a speech type
|
490 |
+
def add_speech_type_fn():
|
491 |
+
row_updates = [gr.update() for _ in range(max_speech_types)]
|
492 |
+
global speech_type_count
|
493 |
+
if speech_type_count < max_speech_types:
|
494 |
+
row_updates[speech_type_count] = gr.update(visible=True)
|
495 |
+
speech_type_count += 1
|
496 |
+
else:
|
497 |
+
gr.Warning("Exhausted maximum number of speech types. Consider restart the app.")
|
498 |
+
return row_updates
|
499 |
+
|
500 |
+
add_speech_type_btn.click(add_speech_type_fn, outputs=speech_type_rows)
|
501 |
+
|
502 |
+
# Function to delete a speech type
|
503 |
+
def delete_speech_type_fn():
|
504 |
+
return gr.update(visible=False), None, None, None, None
|
505 |
+
|
506 |
+
# Update delete button clicks and ref text file changes
|
507 |
+
for i in range(1, len(speech_type_delete_btns)):
|
508 |
+
speech_type_delete_btns[i].click(
|
509 |
+
delete_speech_type_fn,
|
510 |
+
outputs=[
|
511 |
+
speech_type_rows[i],
|
512 |
+
speech_type_names[i],
|
513 |
+
speech_type_audios[i],
|
514 |
+
speech_type_ref_texts[i],
|
515 |
+
speech_type_ref_text_files[i],
|
516 |
+
],
|
517 |
+
)
|
518 |
+
|
519 |
+
# Text input for the prompt
|
520 |
+
with gr.Row():
|
521 |
+
gen_text_input_multistyle = gr.Textbox(
|
522 |
+
label="Text to Generate",
|
523 |
+
lines=10,
|
524 |
+
max_lines=40,
|
525 |
+
scale=4,
|
526 |
+
placeholder="Enter the script with speaker names (or emotion types) at the start of each block, e.g.:\n\n{Regular} Hello, I'd like to order a sandwich please.\n{Surprised} What do you mean you're out of bread?\n{Sad} I really wanted a sandwich though...\n{Angry} You know what, darn you and your little shop!\n{Whisper} I'll just go back home and cry now.\n{Shouting} Why me?!",
|
527 |
+
)
|
528 |
+
gen_text_file_multistyle = gr.File(label="Load Text to Generate from File (.txt)", file_types=[".txt"], scale=1)
|
529 |
+
|
530 |
+
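# Use a factory so each Insert button captures its own index; a bare lambda in
# the loop below would late-bind the loop variable.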
def make_insert_speech_type_fn(index):
|
531 |
+
def insert_speech_type_fn(current_text, speech_type_name, speech_type_seed, speech_type_speed):
|
532 |
+
current_text = current_text or ""
|
533 |
+
if not speech_type_name:
|
534 |
+
gr.Warning("Please enter speech type name before insert.")
|
535 |
+
return current_text
|
536 |
+
speech_type_dict = {
|
537 |
+
"name": speech_type_name,
|
538 |
+
"seed": speech_type_seed,
|
539 |
+
"speed": speech_type_speed,
|
540 |
+
}
|
541 |
+
updated_text = current_text + json.dumps(speech_type_dict) + " "
|
542 |
+
return updated_text
|
543 |
+
|
544 |
+
return insert_speech_type_fn
|
545 |
+
|
546 |
+
for i, insert_btn in enumerate(speech_type_insert_btns):
|
547 |
+
insert_fn = make_insert_speech_type_fn(i)
|
548 |
+
insert_btn.click(
|
549 |
+
insert_fn,
|
550 |
+
inputs=[gen_text_input_multistyle, speech_type_names[i], speech_type_seeds[i], speech_type_speeds[i]],
|
551 |
+
outputs=gen_text_input_multistyle,
|
552 |
+
)
|
553 |
+
|
554 |
+
with gr.Accordion("Advanced Settings", open=True):
|
555 |
+
with gr.Row():
|
556 |
+
with gr.Column():
|
557 |
+
show_cherrypick_multistyle = gr.Checkbox(
|
558 |
+
label="Show Cherry-pick Interface",
|
559 |
+
info="Turn on to show interface, picking seeds from previous generations.",
|
560 |
+
value=False,
|
561 |
+
)
|
562 |
+
with gr.Column():
|
563 |
+
remove_silence_multistyle = gr.Checkbox(
|
564 |
+
label="Remove Silences",
|
565 |
+
info="Turn on to automatically detect and crop long silences.",
|
566 |
+
value=True,
|
567 |
+
)
|
568 |
+
|
569 |
+
# Generate button
|
570 |
+
generate_multistyle_btn = gr.Button("Generate Multi-Style Speech", variant="primary")
|
571 |
+
|
572 |
+
# Output audio
|
573 |
+
audio_output_multistyle = gr.Audio(label="Synthesized Audio")
|
574 |
+
|
575 |
+
# Textbox listing seeds used by previous generations (for cherry-picking)
|
576 |
+
cherrypick_interface_multistyle = gr.Textbox(
|
577 |
+
label="Cherry-pick Interface",
|
578 |
+
lines=10,
|
579 |
+
max_lines=40,
|
580 |
+
show_copy_button=True,
|
581 |
+
interactive=False,
|
582 |
+
visible=False,
|
583 |
+
)
|
584 |
+
|
585 |
+
# Logic control to show/hide the cherrypick interface
|
586 |
+
show_cherrypick_multistyle.change(
|
587 |
+
lambda is_visible: gr.update(visible=is_visible),
|
588 |
+
show_cherrypick_multistyle,
|
589 |
+
cherrypick_interface_multistyle,
|
590 |
+
)
|
591 |
+
|
592 |
+
# Function to load text to generate from file
|
593 |
+
gen_text_file_multistyle.upload(
|
594 |
+
load_text_from_file,
|
595 |
+
inputs=[gen_text_file_multistyle],
|
596 |
+
outputs=[gen_text_input_multistyle],
|
597 |
+
)
|
598 |
+
|
599 |
+
@gpu_decorator
|
600 |
+
def generate_multistyle_speech(
|
601 |
+
gen_text,
|
602 |
+
*args,
|
603 |
+
):
|
604 |
+
speech_type_names_list = args[:max_speech_types]
|
605 |
+
speech_type_audios_list = args[max_speech_types : 2 * max_speech_types]
|
606 |
+
speech_type_ref_texts_list = args[2 * max_speech_types : 3 * max_speech_types]
|
607 |
+
remove_silence = args[3 * max_speech_types]
|
608 |
+
# Collect the speech types and their audios into a dict
|
609 |
+
speech_types = OrderedDict()
|
610 |
+
|
611 |
+
ref_text_idx = 0
|
612 |
+
for name_input, audio_input, ref_text_input in zip(
|
613 |
+
speech_type_names_list, speech_type_audios_list, speech_type_ref_texts_list
|
614 |
+
):
|
615 |
+
if name_input and audio_input:
|
616 |
+
speech_types[name_input] = {"audio": audio_input, "ref_text": ref_text_input}
|
617 |
+
else:
|
618 |
+
speech_types[f"@{ref_text_idx}@"] = {"audio": "", "ref_text": ""}
|
619 |
+
ref_text_idx += 1
|
620 |
+
|
621 |
+
# Parse the gen_text into segments
|
622 |
+
segments = parse_speechtypes_text(gen_text)
|
623 |
+
|
624 |
+
# For each segment, generate speech
|
625 |
+
generated_audio_segments = []
|
626 |
+
current_type_name = "Regular"
|
627 |
+
inference_meta_data = ""
|
628 |
+
|
629 |
+
for segment in segments:
|
630 |
+
name = segment["name"]
|
631 |
+
seed_input = segment["seed"]
|
632 |
+
speed = segment["speed"]
|
633 |
+
text = segment["text"]
|
634 |
+
|
635 |
+
if name in speech_types:
|
636 |
+
current_type_name = name
|
637 |
+
else:
|
638 |
+
gr.Warning(f"Type {name} is not available, will use Regular as default.")
|
639 |
+
current_type_name = "Regular"
|
640 |
+
|
641 |
+
try:
|
642 |
+
ref_audio = speech_types[current_type_name]["audio"]
|
643 |
+
except KeyError:
|
644 |
+
gr.Warning(f"Please provide reference audio for type {current_type_name}.")
|
645 |
+
return [None] + [speech_types[name]["ref_text"] for name in speech_types] + [None]
|
646 |
+
ref_text = speech_types[current_type_name].get("ref_text", "")
|
647 |
+
|
648 |
+
if seed_input == -1:
|
649 |
+
seed_input = np.random.randint(0, 2**31 - 1)
|
650 |
+
|
651 |
+
# Generate or retrieve speech for this segment
|
652 |
+
audio_out, _, ref_text_out, used_seed = infer(
|
653 |
+
ref_audio,
|
654 |
+
ref_text,
|
655 |
+
text,
|
656 |
+
tts_model_choice,
|
657 |
+
remove_silence,
|
658 |
+
seed=seed_input,
|
659 |
+
cross_fade_duration=0,
|
660 |
+
speed=speed,
|
661 |
+
show_info=print,  # use print so the page does not scroll to top while generating
|
662 |
+
)
|
663 |
+
sr, audio_data = audio_out
|
664 |
+
|
665 |
+
generated_audio_segments.append(audio_data)
|
666 |
+
speech_types[current_type_name]["ref_text"] = ref_text_out
|
667 |
+
inference_meta_data += json.dumps(dict(name=name, seed=used_seed, speed=speed)) + f" {text}\n"
|
668 |
+
|
669 |
+
# Concatenate all audio segments
|
670 |
+
if generated_audio_segments:
|
671 |
+
final_audio_data = np.concatenate(generated_audio_segments)
|
672 |
+
return (
|
673 |
+
[(sr, final_audio_data)]
|
674 |
+
+ [speech_types[name]["ref_text"] for name in speech_types]
|
675 |
+
+ [inference_meta_data]
|
676 |
+
)
|
677 |
+
else:
|
678 |
+
gr.Warning("No audio generated.")
|
679 |
+
return [None] + [speech_types[name]["ref_text"] for name in speech_types] + [None]
|
680 |
+
|
681 |
+
generate_multistyle_btn.click(
|
682 |
+
generate_multistyle_speech,
|
683 |
+
inputs=[
|
684 |
+
gen_text_input_multistyle,
|
685 |
+
]
|
686 |
+
+ speech_type_names
|
687 |
+
+ speech_type_audios
|
688 |
+
+ speech_type_ref_texts
|
689 |
+
+ [
|
690 |
+
remove_silence_multistyle,
|
691 |
+
],
|
692 |
+
outputs=[audio_output_multistyle] + speech_type_ref_texts + [cherrypick_interface_multistyle],
|
693 |
+
)
|
694 |
+
|
695 |
+
# Validation function to disable Generate button if speech types are missing
|
696 |
+
def validate_speech_types(gen_text, regular_name, *args):
|
697 |
+
speech_type_names_list = args
|
698 |
+
|
699 |
+
# Collect the speech types names
|
700 |
+
speech_types_available = set()
|
701 |
+
if regular_name:
|
702 |
+
speech_types_available.add(regular_name)
|
703 |
+
for name_input in speech_type_names_list:
|
704 |
+
if name_input:
|
705 |
+
speech_types_available.add(name_input)
|
706 |
+
|
707 |
+
# Parse the gen_text to get the speech types used
|
708 |
+
segments = parse_speechtypes_text(gen_text)
|
709 |
+
speech_types_in_text = set(segment["name"] for segment in segments)
|
710 |
+
|
711 |
+
# Check if all speech types in text are available
|
712 |
+
missing_speech_types = speech_types_in_text - speech_types_available
|
713 |
+
|
714 |
+
if missing_speech_types:
|
715 |
+
# Disable the generate button
|
716 |
+
return gr.update(interactive=False)
|
717 |
+
else:
|
718 |
+
# Enable the generate button
|
719 |
+
return gr.update(interactive=True)
|
720 |
+
|
721 |
+
gen_text_input_multistyle.change(
|
722 |
+
validate_speech_types,
|
723 |
+
inputs=[gen_text_input_multistyle, regular_name] + speech_type_names,
|
724 |
+
outputs=generate_multistyle_btn,
|
725 |
+
)
|
726 |
+
|
727 |
+
|
728 |
+
with gr.Blocks() as app_chat:
|
729 |
+
gr.Markdown(
|
730 |
+
"""
|
731 |
+
# Voice Chat
|
732 |
+
Have a conversation with an AI using your reference voice!
|
733 |
+
1. Upload a reference audio clip and optionally its transcript (via text or .txt file).
|
734 |
+
2. Load the chat model.
|
735 |
+
3. Record your message through your microphone or type it.
|
736 |
+
4. The AI will respond using the reference voice.
|
737 |
+
"""
|
738 |
+
)
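The handlers wired below implement this flow. As a rough sketch (the names match the functions defined later in this tab; argument values are illustrative, and the chat model must already be loaded):

```python
# Sketch of the voice-chat pipeline: user input -> LLM reply -> TTS
conv = process_audio_input([], audio_path=None, text="Hello there!")
conv = generate_text_response(conv, system_prompt="You are a friendly pirate.")
audio, ref_text_out, used_seed = generate_audio_response(
    conv, ref_audio="ref.wav", ref_text="", remove_silence=True,
    randomize_seed=True, seed_input=0,
)
```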
|
739 |
+
|
740 |
+
chat_model_name_list = [
|
741 |
+
"Qwen/Qwen2.5-3B-Instruct",
|
742 |
+
"microsoft/Phi-4-mini-instruct",
|
743 |
+
]
|
744 |
+
|
745 |
+
@gpu_decorator
|
746 |
+
def load_chat_model(chat_model_name):
|
747 |
+
show_info = gr.Info
|
748 |
+
global chat_model_state, chat_tokenizer_state
|
749 |
+
if chat_model_state is not None:
|
750 |
+
chat_model_state = None
|
751 |
+
chat_tokenizer_state = None
|
752 |
+
gc.collect()
|
753 |
+
torch.cuda.empty_cache()
|
754 |
+
|
755 |
+
show_info(f"Loading chat model: {chat_model_name}")
|
756 |
+
chat_model_state = AutoModelForCausalLM.from_pretrained(chat_model_name, torch_dtype="auto", device_map="auto")
|
757 |
+
chat_tokenizer_state = AutoTokenizer.from_pretrained(chat_model_name)
|
758 |
+
show_info(f"Chat model {chat_model_name} loaded successfully!")
|
759 |
+
|
760 |
+
return gr.update(visible=False), gr.update(visible=True)
|
761 |
+
|
762 |
+
if USING_SPACES:
|
763 |
+
load_chat_model(chat_model_name_list[0])
|
764 |
+
|
765 |
+
chat_model_name_input = gr.Dropdown(
|
766 |
+
choices=chat_model_name_list,
|
767 |
+
value=chat_model_name_list[0],
|
768 |
+
label="Chat Model Name",
|
769 |
+
info="Enter the name of a HuggingFace chat model",
|
770 |
+
allow_custom_value=not USING_SPACES,
|
771 |
+
)
|
772 |
+
load_chat_model_btn = gr.Button("Load Chat Model", variant="primary", visible=not USING_SPACES)
|
773 |
+
chat_interface_container = gr.Column(visible=USING_SPACES)
|
774 |
+
|
775 |
+
chat_model_name_input.change(
|
776 |
+
lambda: gr.update(visible=True),
|
777 |
+
None,
|
778 |
+
load_chat_model_btn,
|
779 |
+
show_progress="hidden",
|
780 |
+
)
|
781 |
+
load_chat_model_btn.click(
|
782 |
+
load_chat_model, inputs=[chat_model_name_input], outputs=[load_chat_model_btn, chat_interface_container]
|
783 |
+
)
|
784 |
+
|
785 |
+
with chat_interface_container:
|
786 |
+
with gr.Row():
|
787 |
+
with gr.Column():
|
788 |
+
ref_audio_chat = gr.Audio(label="Reference Audio", type="filepath")
|
789 |
+
with gr.Column():
|
790 |
+
with gr.Accordion("Advanced Settings", open=False):
|
791 |
+
with gr.Row():
|
792 |
+
ref_text_chat = gr.Textbox(
|
793 |
+
label="Reference Text",
|
794 |
+
info="Optional: Leave blank to auto-transcribe",
|
795 |
+
lines=2,
|
796 |
+
scale=3,
|
797 |
+
)
|
798 |
+
ref_text_file_chat = gr.File(
|
799 |
+
label="Load Reference Text from File (.txt)", file_types=[".txt"], scale=1
|
800 |
+
)
|
801 |
+
with gr.Row():
|
802 |
+
randomize_seed_chat = gr.Checkbox(
|
803 |
+
label="Randomize Seed",
|
804 |
+
value=True,
|
805 |
+
info="Uncheck to use the seed specified.",
|
806 |
+
scale=3,
|
807 |
+
)
|
808 |
+
seed_input_chat = gr.Number(show_label=False, value=0, precision=0, scale=1)
|
809 |
+
remove_silence_chat = gr.Checkbox(
|
810 |
+
label="Remove Silences",
|
811 |
+
value=True,
|
812 |
+
)
|
813 |
+
system_prompt_chat = gr.Textbox(
|
814 |
+
label="System Prompt",
|
815 |
+
value="You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.",
|
816 |
+
lines=2,
|
817 |
+
)
|
818 |
+
|
819 |
+
chatbot_interface = gr.Chatbot(label="Conversation", type="messages")
|
820 |
+
|
821 |
+
with gr.Row():
|
822 |
+
with gr.Column():
|
823 |
+
audio_input_chat = gr.Microphone(
|
824 |
+
label="Speak your message",
|
825 |
+
type="filepath",
|
826 |
+
)
|
827 |
+
audio_output_chat = gr.Audio(autoplay=True)
|
828 |
+
with gr.Column():
|
829 |
+
text_input_chat = gr.Textbox(
|
830 |
+
label="Type your message",
|
831 |
+
lines=1,
|
832 |
+
)
|
833 |
+
send_btn_chat = gr.Button("Send Message")
|
834 |
+
clear_btn_chat = gr.Button("Clear Conversation")
|
835 |
+
|
836 |
+
# Handle user input (recorded audio or typed text) and append it to the conversation
|
837 |
+
@gpu_decorator
|
838 |
+
def process_audio_input(conv_state, audio_path, text):
|
839 |
+
"""Handle audio or text input from user"""
|
840 |
+
|
841 |
+
if not audio_path and not text.strip():
|
842 |
+
return conv_state
|
843 |
+
|
844 |
+
if audio_path:
|
845 |
+
text = preprocess_ref_audio_text(audio_path, text)[1]
|
846 |
+
if not text.strip():
|
847 |
+
return conv_state
|
848 |
+
|
849 |
+
conv_state.append({"role": "user", "content": text})
|
850 |
+
return conv_state
|
851 |
+
|
852 |
+
# Use model and tokenizer from state to get text response
|
853 |
+
@gpu_decorator
|
854 |
+
def generate_text_response(conv_state, system_prompt):
|
855 |
+
"""Generate text response from AI"""
|
856 |
+
|
857 |
+
system_prompt_state = [{"role": "system", "content": system_prompt}]
|
858 |
+
response = chat_model_inference(system_prompt_state + conv_state, chat_model_state, chat_tokenizer_state)
|
859 |
+
|
860 |
+
conv_state.append({"role": "assistant", "content": response})
|
861 |
+
return conv_state
|
862 |
+
|
863 |
+
@gpu_decorator
|
864 |
+
def generate_audio_response(conv_state, ref_audio, ref_text, remove_silence, randomize_seed, seed_input):
|
865 |
+
"""Generate TTS audio for AI response"""
|
866 |
+
if not conv_state or not ref_audio:
|
867 |
+
return None, ref_text, seed_input
|
868 |
+
|
869 |
+
last_ai_response = conv_state[-1]["content"]
|
870 |
+
if not last_ai_response or conv_state[-1]["role"] != "assistant":
|
871 |
+
return None, ref_text, seed_input
|
872 |
+
|
873 |
+
if randomize_seed:
|
874 |
+
seed_input = np.random.randint(0, 2**31 - 1)
|
875 |
+
|
876 |
+
audio_result, _, ref_text_out, used_seed = infer(
|
877 |
+
ref_audio,
|
878 |
+
ref_text,
|
879 |
+
last_ai_response,
|
880 |
+
tts_model_choice,
|
881 |
+
remove_silence,
|
882 |
+
seed=seed_input,
|
883 |
+
cross_fade_duration=0.15,
|
884 |
+
speed=1.0,
|
885 |
+
show_info=print,  # use print so the page does not scroll to top while generating
|
886 |
+
)
|
887 |
+
return audio_result, ref_text_out, used_seed
|
888 |
+
|
889 |
+
def clear_conversation():
|
890 |
+
"""Reset the conversation"""
|
891 |
+
return [], None
|
892 |
+
|
893 |
+
ref_text_file_chat.upload(
|
894 |
+
load_text_from_file,
|
895 |
+
inputs=[ref_text_file_chat],
|
896 |
+
outputs=[ref_text_chat],
|
897 |
+
)
|
898 |
+
|
899 |
+
for user_operation in [audio_input_chat.stop_recording, text_input_chat.submit, send_btn_chat.click]:
|
900 |
+
user_operation(
|
901 |
+
process_audio_input,
|
902 |
+
inputs=[chatbot_interface, audio_input_chat, text_input_chat],
|
903 |
+
outputs=[chatbot_interface],
|
904 |
+
).then(
|
905 |
+
generate_text_response,
|
906 |
+
inputs=[chatbot_interface, system_prompt_chat],
|
907 |
+
outputs=[chatbot_interface],
|
908 |
+
).then(
|
909 |
+
generate_audio_response,
|
910 |
+
inputs=[
|
911 |
+
chatbot_interface,
|
912 |
+
ref_audio_chat,
|
913 |
+
ref_text_chat,
|
914 |
+
remove_silence_chat,
|
915 |
+
randomize_seed_chat,
|
916 |
+
seed_input_chat,
|
917 |
+
],
|
918 |
+
outputs=[audio_output_chat, ref_text_chat, seed_input_chat],
|
919 |
+
).then(
|
920 |
+
lambda: [None, None],
|
921 |
+
None,
|
922 |
+
[audio_input_chat, text_input_chat],
|
923 |
+
)
|
924 |
+
|
925 |
+
# Handle clear button or system prompt change and reset conversation
|
926 |
+
for user_operation in [clear_btn_chat.click, system_prompt_chat.change, chatbot_interface.clear]:
|
927 |
+
user_operation(
|
928 |
+
clear_conversation,
|
929 |
+
outputs=[chatbot_interface, audio_output_chat],
|
930 |
+
)
|
931 |
+
|
932 |
+
|
933 |
+
with gr.Blocks() as app_credits:
|
934 |
+
gr.Markdown("""
|
935 |
+
# Credits
|
936 |
+
|
937 |
+
* [mrfakename](https://github.com/fakerybakery) for the original [online demo](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
|
938 |
+
* [RootingInLoad](https://github.com/RootingInLoad) for initial chunk generation and podcast app exploration
|
939 |
+
* [jpgallegoar](https://github.com/jpgallegoar) for multiple speech-type generation & voice chat
|
940 |
+
""")
|
941 |
+
|
942 |
+
|
943 |
+
with gr.Blocks() as app:
|
944 |
+
gr.Markdown(
|
945 |
+
f"""
|
946 |
+
# E2/F5 TTS
|
947 |
+
|
948 |
+
This is {"a local web UI for [F5 TTS](https://github.com/SWivid/F5-TTS)" if not USING_SPACES else "an online demo for [F5-TTS](https://github.com/SWivid/F5-TTS)"} with advanced batch processing support. This app supports the following TTS models:
|
949 |
+
|
950 |
+
* [F5-TTS](https://arxiv.org/abs/2410.06885) (A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching)
|
951 |
+
* [E2 TTS](https://arxiv.org/abs/2406.18009) (Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS)
|
952 |
+
|
953 |
+
The checkpoints currently support English and Chinese.
|
954 |
+
|
955 |
+
If you're having issues, try converting your reference audio to WAV or MP3, clipping it to 12s with the ✂ button in the bottom right corner (otherwise the auto-trimmed result may be suboptimal).
|
956 |
+
|
957 |
+
**NOTE: Reference text will be automatically transcribed with Whisper if not provided. For best results, keep your reference clips short (<12s). Ensure the audio is fully uploaded before generating.**
|
958 |
+
"""
|
959 |
+
)
|
960 |
+
|
961 |
+
last_used_custom = files("f5_tts").joinpath("infer/.cache/last_used_custom_model_info_v1.txt")
|
962 |
+
|
963 |
+
def load_last_used_custom():
|
964 |
+
try:
|
965 |
+
custom = []
|
966 |
+
with open(last_used_custom, "r", encoding="utf-8") as f:
|
967 |
+
for line in f:
|
968 |
+
custom.append(line.strip())
|
969 |
+
return custom
|
970 |
+
except FileNotFoundError:
|
971 |
+
last_used_custom.parent.mkdir(parents=True, exist_ok=True)
|
972 |
+
return DEFAULT_TTS_MODEL_CFG
|
973 |
+
|
974 |
+
def switch_tts_model(new_choice):
|
975 |
+
global tts_model_choice
|
976 |
+
if new_choice == "Custom": # override in case webpage is refreshed
|
977 |
+
custom_ckpt_path, custom_vocab_path, custom_model_cfg = load_last_used_custom()
|
978 |
+
tts_model_choice = ("Custom", custom_ckpt_path, custom_vocab_path, custom_model_cfg)
|
979 |
+
return (
|
980 |
+
gr.update(visible=True, value=custom_ckpt_path),
|
981 |
+
gr.update(visible=True, value=custom_vocab_path),
|
982 |
+
gr.update(visible=True, value=custom_model_cfg),
|
983 |
+
)
|
984 |
+
else:
|
985 |
+
tts_model_choice = new_choice
|
986 |
+
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
987 |
+
|
988 |
+
def set_custom_model(custom_ckpt_path, custom_vocab_path, custom_model_cfg):
|
989 |
+
global tts_model_choice
|
990 |
+
tts_model_choice = ("Custom", custom_ckpt_path, custom_vocab_path, custom_model_cfg)
|
991 |
+
with open(last_used_custom, "w", encoding="utf-8") as f:
|
992 |
+
f.write(custom_ckpt_path + "\n" + custom_vocab_path + "\n" + custom_model_cfg + "\n")
|
993 |
+
|
994 |
+
with gr.Row():
|
995 |
+
if not USING_SPACES:
|
996 |
+
choose_tts_model = gr.Radio(
|
997 |
+
choices=[DEFAULT_TTS_MODEL, "E2-TTS", "Custom"], label="Choose TTS Model", value=DEFAULT_TTS_MODEL
|
998 |
+
)
|
999 |
+
else:
|
1000 |
+
choose_tts_model = gr.Radio(
|
1001 |
+
choices=[DEFAULT_TTS_MODEL, "E2-TTS"], label="Choose TTS Model", value=DEFAULT_TTS_MODEL
|
1002 |
+
)
|
1003 |
+
custom_ckpt_path = gr.Dropdown(
|
1004 |
+
choices=[DEFAULT_TTS_MODEL_CFG[0]],
|
1005 |
+
value=load_last_used_custom()[0],
|
1006 |
+
allow_custom_value=True,
|
1007 |
+
label="Model: local_path | hf://user_id/repo_id/model_ckpt",
|
1008 |
+
visible=False,
|
1009 |
+
)
|
1010 |
+
custom_vocab_path = gr.Dropdown(
|
1011 |
+
choices=[DEFAULT_TTS_MODEL_CFG[1]],
|
1012 |
+
value=load_last_used_custom()[1],
|
1013 |
+
allow_custom_value=True,
|
1014 |
+
label="Vocab: local_path | hf://user_id/repo_id/vocab_file",
|
1015 |
+
visible=False,
|
1016 |
+
)
|
1017 |
+
custom_model_cfg = gr.Dropdown(
|
1018 |
+
choices=[
|
1019 |
+
DEFAULT_TTS_MODEL_CFG[2],
|
1020 |
+
json.dumps(
|
1021 |
+
dict(
|
1022 |
+
dim=1024,
|
1023 |
+
depth=22,
|
1024 |
+
heads=16,
|
1025 |
+
ff_mult=2,
|
1026 |
+
text_dim=512,
|
1027 |
+
text_mask_padding=False,
|
1028 |
+
conv_layers=4,
|
1029 |
+
pe_attn_head=1,
|
1030 |
+
)
|
1031 |
+
),
|
1032 |
+
json.dumps(
|
1033 |
+
dict(
|
1034 |
+
dim=768,
|
1035 |
+
depth=18,
|
1036 |
+
heads=12,
|
1037 |
+
ff_mult=2,
|
1038 |
+
text_dim=512,
|
1039 |
+
text_mask_padding=False,
|
1040 |
+
conv_layers=4,
|
1041 |
+
pe_attn_head=1,
|
1042 |
+
)
|
1043 |
+
),
|
1044 |
+
],
|
1045 |
+
value=load_last_used_custom()[2],
|
1046 |
+
allow_custom_value=True,
|
1047 |
+
label="Config: in a dictionary form",
|
1048 |
+
visible=False,
|
1049 |
+
)
|
1050 |
+
|
1051 |
+
choose_tts_model.change(
|
1052 |
+
switch_tts_model,
|
1053 |
+
inputs=[choose_tts_model],
|
1054 |
+
outputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg],
|
1055 |
+
show_progress="hidden",
|
1056 |
+
)
|
1057 |
+
custom_ckpt_path.change(
|
1058 |
+
set_custom_model,
|
1059 |
+
inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg],
|
1060 |
+
show_progress="hidden",
|
1061 |
+
)
|
1062 |
+
custom_vocab_path.change(
|
1063 |
+
set_custom_model,
|
1064 |
+
inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg],
|
1065 |
+
show_progress="hidden",
|
1066 |
+
)
|
1067 |
+
custom_model_cfg.change(
|
1068 |
+
set_custom_model,
|
1069 |
+
inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg],
|
1070 |
+
show_progress="hidden",
|
1071 |
+
)
|
1072 |
+
|
1073 |
+
gr.TabbedInterface(
|
1074 |
+
[app_tts, app_multistyle, app_chat, app_credits],
|
1075 |
+
["Basic-TTS", "Multi-Speech", "Voice-Chat", "Credits"],
|
1076 |
+
)
|
1077 |
+
|
1078 |
+
|
1079 |
+
@click.command()
|
1080 |
+
@click.option("--port", "-p", default=None, type=int, help="Port to run the app on")
|
1081 |
+
@click.option("--host", "-H", default=None, help="Host to run the app on")
|
1082 |
+
@click.option(
|
1083 |
+
"--share",
|
1084 |
+
"-s",
|
1085 |
+
default=False,
|
1086 |
+
is_flag=True,
|
1087 |
+
help="Share the app via Gradio share link",
|
1088 |
+
)
|
1089 |
+
@click.option("--api", "-a", default=True, is_flag=True, help="Allow API access")
|
1090 |
+
@click.option(
|
1091 |
+
"--root_path",
|
1092 |
+
"-r",
|
1093 |
+
default=None,
|
1094 |
+
type=str,
|
1095 |
+
help='The root path (or "mount point") of the application, if it\'s not served from the root ("/") of the domain. Often used when the application is behind a reverse proxy that forwards requests to the application, e.g. set "/myapp" or full URL for application served at "https://example.com/myapp".',
|
1096 |
+
)
|
1097 |
+
@click.option(
|
1098 |
+
"--inbrowser",
|
1099 |
+
"-i",
|
1100 |
+
is_flag=True,
|
1101 |
+
default=False,
|
1102 |
+
help="Automatically launch the interface in the default web browser",
|
1103 |
+
)
|
1104 |
+
def main(port, host, share, api, root_path, inbrowser):
|
1105 |
+
global app
|
1106 |
+
print("Starting app...")
|
1107 |
+
app.queue(api_open=api).launch(
|
1108 |
+
server_name=host,
|
1109 |
+
server_port=port,
|
1110 |
+
share=share,
|
1111 |
+
show_api=api,
|
1112 |
+
root_path=root_path,
|
1113 |
+
inbrowser=inbrowser,
|
1114 |
+
)
|
1115 |
+
|
1116 |
+
|
1117 |
+
if __name__ == "__main__":
|
1118 |
+
if not USING_SPACES:
|
1119 |
+
main()
|
1120 |
+
else:
|
1121 |
+
app.queue().launch()
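For reference, the same launch that the click CLI performs can be done directly (a minimal sketch; the values shown are illustrative):

```python
# Sketch: launch the Gradio app without going through the click CLI
app.queue(api_open=True).launch(server_name="0.0.0.0", server_port=7860, share=False)
```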
|
src/f5_tts/infer/speech_edit.py
ADDED
@@ -0,0 +1,205 @@
1 |
+
import os
|
2 |
+
|
3 |
+
|
4 |
+
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # for MPS device compatibility
|
5 |
+
|
6 |
+
from importlib.resources import files
|
7 |
+
|
8 |
+
import torch
|
9 |
+
import torch.nn.functional as F
|
10 |
+
import torchaudio
|
11 |
+
from cached_path import cached_path
|
12 |
+
from hydra.utils import get_class
|
13 |
+
from omegaconf import OmegaConf
|
14 |
+
|
15 |
+
from f5_tts.infer.utils_infer import load_checkpoint, load_vocoder, save_spectrogram
|
16 |
+
from f5_tts.model import CFM
|
17 |
+
from f5_tts.model.utils import convert_char_to_pinyin, get_tokenizer
|
18 |
+
|
19 |
+
|
20 |
+
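# Select the best available accelerator: CUDA > XPU > MPS > CPU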
device = (
|
21 |
+
"cuda"
|
22 |
+
if torch.cuda.is_available()
|
23 |
+
else "xpu"
|
24 |
+
if torch.xpu.is_available()
|
25 |
+
else "mps"
|
26 |
+
if torch.backends.mps.is_available()
|
27 |
+
else "cpu"
|
28 |
+
)
|
29 |
+
|
30 |
+
|
31 |
+
# ---------------------- infer setting ---------------------- #
|
32 |
+
|
33 |
+
seed = None # int | None
|
34 |
+
|
35 |
+
exp_name = "F5TTS_v1_Base" # F5TTS_v1_Base | E2TTS_Base
|
36 |
+
ckpt_step = 1250000
|
37 |
+
|
38 |
+
nfe_step = 32 # 16, 32
|
39 |
+
cfg_strength = 2.0
|
40 |
+
ode_method = "euler" # euler | midpoint
|
41 |
+
sway_sampling_coef = -1.0
|
42 |
+
speed = 1.0
|
43 |
+
target_rms = 0.1
|
44 |
+
|
45 |
+
|
46 |
+
model_cfg = OmegaConf.load(str(files("f5_tts").joinpath(f"configs/{exp_name}.yaml")))
|
47 |
+
model_cls = get_class(f"f5_tts.model.{model_cfg.model.backbone}")
|
48 |
+
model_arc = model_cfg.model.arch
|
49 |
+
|
50 |
+
dataset_name = model_cfg.datasets.name
|
51 |
+
tokenizer = model_cfg.model.tokenizer
|
52 |
+
|
53 |
+
mel_spec_type = model_cfg.model.mel_spec.mel_spec_type
|
54 |
+
target_sample_rate = model_cfg.model.mel_spec.target_sample_rate
|
55 |
+
n_mel_channels = model_cfg.model.mel_spec.n_mel_channels
|
56 |
+
hop_length = model_cfg.model.mel_spec.hop_length
|
57 |
+
win_length = model_cfg.model.mel_spec.win_length
|
58 |
+
n_fft = model_cfg.model.mel_spec.n_fft
|
59 |
+
|
60 |
+
|
61 |
+
# ckpt_path = str(files("f5_tts").joinpath("../../")) + f"/ckpts/{exp_name}/model_{ckpt_step}.safetensors"
|
62 |
+
ckpt_path = str(cached_path(f"hf://SWivid/F5-TTS/{exp_name}/model_{ckpt_step}.safetensors"))
|
63 |
+
output_dir = "tests"
|
64 |
+
|
65 |
+
|
66 |
+
# [leverage https://github.com/MahmoudAshraf97/ctc-forced-aligner to get char level alignment]
|
67 |
+
# pip install git+https://github.com/MahmoudAshraf97/ctc-forced-aligner.git
|
68 |
+
# [write the origin_text into a file, e.g. tests/test_edit.txt]
|
69 |
+
# ctc-forced-aligner --audio_path "src/f5_tts/infer/examples/basic/basic_ref_en.wav" --text_path "tests/test_edit.txt" --language "zho" --romanize --split_size "char"
|
70 |
+
# [result will be saved at same path of audio file]
|
71 |
+
# [--language "zho" for Chinese, "eng" for English]
|
72 |
+
# [if local ckpt, set --alignment_model "../checkpoints/mms-300m-1130-forced-aligner"]
|
73 |
+
|
74 |
+
audio_to_edit = str(files("f5_tts").joinpath("infer/examples/basic/basic_ref_en.wav"))
|
75 |
+
origin_text = "Some call me nature, others call me mother nature."
|
76 |
+
target_text = "Some call me optimist, others call me realist."
|
77 |
+
parts_to_edit = [
|
78 |
+
[1.42, 2.44],
|
79 |
+
[4.04, 4.9],
|
80 |
+
]  # start & end times of "nature" & "mother nature", in seconds
|
81 |
+
fix_duration = [
|
82 |
+
1.2,
|
83 |
+
1,
|
84 |
+
] # fix duration for "optimist" & "realist", in seconds
|
85 |
+
|
86 |
+
# audio_to_edit = "src/f5_tts/infer/examples/basic/basic_ref_zh.wav"
|
87 |
+
# origin_text = "对,这就是我,万人敬仰的太乙真人。"
|
88 |
+
# target_text = "对,那就是你,万人敬仰的太白金星。"
|
89 |
+
# parts_to_edit = [[0.84, 1.4], [1.92, 2.4], [4.26, 6.26], ]
|
90 |
+
# fix_duration = None # use origin text duration
|
91 |
+
|
92 |
+
|
93 |
+
# -------------------------------------------------#
|
94 |
+
|
95 |
+
use_ema = True
|
96 |
+
|
97 |
+
if not os.path.exists(output_dir):
|
98 |
+
os.makedirs(output_dir)
|
99 |
+
|
100 |
+
# Vocoder model
|
101 |
+
local = False
|
102 |
+
if mel_spec_type == "vocos":
|
103 |
+
vocoder_local_path = "../checkpoints/charactr/vocos-mel-24khz"
|
104 |
+
elif mel_spec_type == "bigvgan":
|
105 |
+
vocoder_local_path = "../checkpoints/bigvgan_v2_24khz_100band_256x"
|
106 |
+
vocoder = load_vocoder(vocoder_name=mel_spec_type, is_local=local, local_path=vocoder_local_path)
|
107 |
+
|
108 |
+
# Tokenizer
|
109 |
+
vocab_char_map, vocab_size = get_tokenizer(dataset_name, tokenizer)
|
110 |
+
|
111 |
+
# Model
|
112 |
+
model = CFM(
|
113 |
+
transformer=model_cls(**model_arc, text_num_embeds=vocab_size, mel_dim=n_mel_channels),
|
114 |
+
mel_spec_kwargs=dict(
|
115 |
+
n_fft=n_fft,
|
116 |
+
hop_length=hop_length,
|
117 |
+
win_length=win_length,
|
118 |
+
n_mel_channels=n_mel_channels,
|
119 |
+
target_sample_rate=target_sample_rate,
|
120 |
+
mel_spec_type=mel_spec_type,
|
121 |
+
),
|
122 |
+
odeint_kwargs=dict(
|
123 |
+
method=ode_method,
|
124 |
+
),
|
125 |
+
vocab_char_map=vocab_char_map,
|
126 |
+
).to(device)
|
127 |
+
|
128 |
+
dtype = torch.float32 if mel_spec_type == "bigvgan" else None
|
129 |
+
model = load_checkpoint(model, ckpt_path, device, dtype=dtype, use_ema=use_ema)
|
130 |
+
|
131 |
+
# Audio
|
132 |
+
audio, sr = torchaudio.load(audio_to_edit)
|
133 |
+
if audio.shape[0] > 1:
|
134 |
+
audio = torch.mean(audio, dim=0, keepdim=True)
|
135 |
+
rms = torch.sqrt(torch.mean(torch.square(audio)))
|
136 |
+
if rms < target_rms:
|
137 |
+
audio = audio * target_rms / rms
|
138 |
+
if sr != target_sample_rate:
|
139 |
+
resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
|
140 |
+
audio = resampler(audio)
|
141 |
+
offset = 0
|
142 |
+
audio_ = torch.zeros(1, 0)
|
143 |
+
edit_mask = torch.zeros(1, 0, dtype=torch.bool)
|
144 |
+
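# Build the edit input: keep original audio outside the edit spans, and insert
# silence (zeros) of the target duration inside them. edit_mask is True on
# frames to keep as-is and False on frames the model must regenerate.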
for part in parts_to_edit:
|
145 |
+
start, end = part
|
146 |
+
part_dur = end - start if fix_duration is None else fix_duration.pop(0)
|
147 |
+
part_dur = part_dur * target_sample_rate
|
148 |
+
start = start * target_sample_rate
|
149 |
+
audio_ = torch.cat((audio_, audio[:, round(offset) : round(start)], torch.zeros(1, round(part_dur))), dim=-1)
|
150 |
+
edit_mask = torch.cat(
|
151 |
+
(
|
152 |
+
edit_mask,
|
153 |
+
torch.ones(1, round((start - offset) / hop_length), dtype=torch.bool),
|
154 |
+
torch.zeros(1, round(part_dur / hop_length), dtype=torch.bool),
|
155 |
+
),
|
156 |
+
dim=-1,
|
157 |
+
)
|
158 |
+
offset = end * target_sample_rate
|
159 |
+
audio = torch.cat((audio_, audio[:, round(offset) :]), dim=-1)
|
160 |
+
edit_mask = F.pad(edit_mask, (0, audio.shape[-1] // hop_length - edit_mask.shape[-1] + 1), value=True)
|
161 |
+
audio = audio.to(device)
|
162 |
+
edit_mask = edit_mask.to(device)
|
163 |
+
|
164 |
+
# Text
|
165 |
+
text_list = [target_text]
|
166 |
+
if tokenizer == "pinyin":
|
167 |
+
final_text_list = convert_char_to_pinyin(text_list)
|
168 |
+
else:
|
169 |
+
final_text_list = [text_list]
|
170 |
+
print(f"text : {text_list}")
|
171 |
+
print(f"pinyin: {final_text_list}")
|
172 |
+
|
173 |
+
# Duration
|
174 |
+
ref_audio_len = 0
|
175 |
+
duration = audio.shape[-1] // hop_length
|
176 |
+
|
177 |
+
# Inference
|
178 |
+
with torch.inference_mode():
|
179 |
+
generated, trajectory = model.sample(
|
180 |
+
cond=audio,
|
181 |
+
text=final_text_list,
|
182 |
+
duration=duration,
|
183 |
+
steps=nfe_step,
|
184 |
+
cfg_strength=cfg_strength,
|
185 |
+
sway_sampling_coef=sway_sampling_coef,
|
186 |
+
seed=seed,
|
187 |
+
edit_mask=edit_mask,
|
188 |
+
)
|
189 |
+
print(f"Generated mel: {generated.shape}")
|
190 |
+
|
191 |
+
# Final result
|
192 |
+
generated = generated.to(torch.float32)
|
193 |
+
generated = generated[:, ref_audio_len:, :]
|
194 |
+
gen_mel_spec = generated.permute(0, 2, 1)
|
195 |
+
if mel_spec_type == "vocos":
|
196 |
+
generated_wave = vocoder.decode(gen_mel_spec).cpu()
|
197 |
+
elif mel_spec_type == "bigvgan":
|
198 |
+
generated_wave = vocoder(gen_mel_spec).squeeze(0).cpu()
|
199 |
+
|
200 |
+
if rms < target_rms:
|
201 |
+
generated_wave = generated_wave * rms / target_rms
|
202 |
+
|
203 |
+
save_spectrogram(gen_mel_spec[0].cpu().numpy(), f"{output_dir}/speech_edit_out.png")
|
204 |
+
torchaudio.save(f"{output_dir}/speech_edit_out.wav", generated_wave, target_sample_rate)
|
205 |
+
print(f"Generated wav: {generated_wave.shape}")
|
src/f5_tts/infer/utils_infer.py
ADDED
@@ -0,0 +1,605 @@
1 |
+
# A unified script for inference process
|
2 |
+
# Make adjustments inside functions, and consider both the gradio and cli scripts if you need to change a function's output format
|
3 |
+
import os
|
4 |
+
import sys
|
5 |
+
from concurrent.futures import ThreadPoolExecutor
|
6 |
+
|
7 |
+
|
8 |
+
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # for MPS device compatibility
|
9 |
+
sys.path.append(f"{os.path.dirname(os.path.abspath(__file__))}/../../third_party/BigVGAN/")
|
10 |
+
|
11 |
+
import hashlib
|
12 |
+
import re
|
13 |
+
import tempfile
|
14 |
+
from importlib.resources import files
|
15 |
+
|
16 |
+
import matplotlib
|
17 |
+
|
18 |
+
|
19 |
+
matplotlib.use("Agg")
|
20 |
+
|
21 |
+
import matplotlib.pylab as plt
|
22 |
+
import numpy as np
|
23 |
+
import torch
|
24 |
+
import torchaudio
|
25 |
+
import tqdm
|
26 |
+
from huggingface_hub import hf_hub_download
|
27 |
+
from pydub import AudioSegment, silence
|
28 |
+
from transformers import pipeline
|
29 |
+
from vocos import Vocos
|
30 |
+
|
31 |
+
from f5_tts.model import CFM
|
32 |
+
from f5_tts.model.utils import convert_char_to_pinyin, get_tokenizer
|
33 |
+
|
34 |
+
|
35 |
+
_ref_audio_cache = {}
|
36 |
+
_ref_text_cache = {}
|
37 |
+
|
38 |
+
device = (
|
39 |
+
"cuda"
|
40 |
+
if torch.cuda.is_available()
|
41 |
+
else "xpu"
|
42 |
+
if torch.xpu.is_available()
|
43 |
+
else "mps"
|
44 |
+
if torch.backends.mps.is_available()
|
45 |
+
else "cpu"
|
46 |
+
)
|
47 |
+
|
48 |
+
tempfile_kwargs = {"delete_on_close": False} if sys.version_info >= (3, 12) else {"delete": False}
|
49 |
+
|
50 |
+
# -----------------------------------------
|
51 |
+
|
52 |
+
target_sample_rate = 24000
|
53 |
+
n_mel_channels = 100
|
54 |
+
hop_length = 256
|
55 |
+
win_length = 1024
|
56 |
+
n_fft = 1024
|
57 |
+
mel_spec_type = "vocos"
|
58 |
+
target_rms = 0.1
|
59 |
+
cross_fade_duration = 0.15
|
60 |
+
ode_method = "euler"
|
61 |
+
nfe_step = 32 # 16, 32
|
62 |
+
cfg_strength = 2.0
|
63 |
+
sway_sampling_coef = -1.0
|
64 |
+
speed = 1.0
|
65 |
+
fix_duration = None
|
66 |
+
|
67 |
+
# -----------------------------------------
|
68 |
+
|
69 |
+
|
70 |
+
# chunk text into smaller pieces
|
71 |
+
|
72 |
+
|
73 |
+
def chunk_text(text, max_chars=135):
|
74 |
+
"""
|
75 |
+
Splits the input text into chunks, each with a maximum number of characters.
|
76 |
+
|
77 |
+
Args:
|
78 |
+
text (str): The text to be split.
|
79 |
+
max_chars (int): The maximum number of characters per chunk.
|
80 |
+
|
81 |
+
Returns:
|
82 |
+
List[str]: A list of text chunks.
|
83 |
+
"""
|
84 |
+
chunks = []
|
85 |
+
current_chunk = ""
|
86 |
+
# Split the text into sentences based on punctuation followed by whitespace
|
87 |
+
sentences = re.split(r"(?<=[;:,.!?])\s+|(?<=[;:,。!?])", text)
|
88 |
+
|
89 |
+
for sentence in sentences:
|
90 |
+
if len(current_chunk.encode("utf-8")) + len(sentence.encode("utf-8")) <= max_chars:
|
91 |
+
current_chunk += sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence
|
92 |
+
else:
|
93 |
+
if current_chunk:
|
94 |
+
chunks.append(current_chunk.strip())
|
95 |
+
current_chunk = sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence
|
96 |
+
|
97 |
+
if current_chunk:
|
98 |
+
chunks.append(current_chunk.strip())
|
99 |
+
|
100 |
+
return chunks
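For example (a worked illustration of the chunking above; byte counts include the separator spaces):

```python
chunks = chunk_text("First sentence. Second one! Third?", max_chars=20)
# -> ['First sentence.', 'Second one! Third?']
# 'First sentence. ' (16 bytes) + 'Second one!' (11 bytes) would exceed 20,
# so a new chunk starts; 'Second one! ' + 'Third?' (18 bytes) still fits.
```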
|
101 |
+
|
102 |
+
|
103 |
+
# load vocoder
|
104 |
+
def load_vocoder(vocoder_name="vocos", is_local=False, local_path="", device=device, hf_cache_dir=None):
|
105 |
+
if vocoder_name == "vocos":
|
106 |
+
# vocoder = Vocos.from_pretrained("charactr/vocos-mel-24khz").to(device)
|
107 |
+
if is_local:
|
108 |
+
print(f"Load vocos from local path {local_path}")
|
109 |
+
config_path = f"{local_path}/config.yaml"
|
110 |
+
model_path = f"{local_path}/pytorch_model.bin"
|
111 |
+
else:
|
112 |
+
print("Download Vocos from huggingface charactr/vocos-mel-24khz")
|
113 |
+
repo_id = "charactr/vocos-mel-24khz"
|
114 |
+
config_path = hf_hub_download(repo_id=repo_id, cache_dir=hf_cache_dir, filename="config.yaml")
|
115 |
+
model_path = hf_hub_download(repo_id=repo_id, cache_dir=hf_cache_dir, filename="pytorch_model.bin")
|
116 |
+
vocoder = Vocos.from_hparams(config_path)
|
117 |
+
state_dict = torch.load(model_path, map_location="cpu", weights_only=True)
|
118 |
+
from vocos.feature_extractors import EncodecFeatures
|
119 |
+
|
120 |
+
if isinstance(vocoder.feature_extractor, EncodecFeatures):
|
121 |
+
encodec_parameters = {
|
122 |
+
"feature_extractor.encodec." + key: value
|
123 |
+
for key, value in vocoder.feature_extractor.encodec.state_dict().items()
|
124 |
+
}
|
125 |
+
state_dict.update(encodec_parameters)
|
126 |
+
vocoder.load_state_dict(state_dict)
|
127 |
+
vocoder = vocoder.eval().to(device)
|
128 |
+
elif vocoder_name == "bigvgan":
|
129 |
+
try:
|
130 |
+
from third_party.BigVGAN import bigvgan
|
131 |
+
except ImportError:
|
132 |
+
print("You need to follow the README to init submodule and change the BigVGAN source code.")
|
133 |
+
if is_local:
|
134 |
+
# download generator from https://huggingface.co/nvidia/bigvgan_v2_24khz_100band_256x/tree/main
|
135 |
+
vocoder = bigvgan.BigVGAN.from_pretrained(local_path, use_cuda_kernel=False)
|
136 |
+
else:
|
137 |
+
vocoder = bigvgan.BigVGAN.from_pretrained(
|
138 |
+
"nvidia/bigvgan_v2_24khz_100band_256x", use_cuda_kernel=False, cache_dir=hf_cache_dir
|
139 |
+
)
|
140 |
+
|
141 |
+
vocoder.remove_weight_norm()
|
142 |
+
vocoder = vocoder.eval().to(device)
|
143 |
+
return vocoder
|
144 |
+
|
145 |
+
|
146 |
+
# load asr pipeline
|
147 |
+
|
148 |
+
asr_pipe = None
|
149 |
+
|
150 |
+
|
151 |
+
def initialize_asr_pipeline(device: str = device, dtype=None):
|
152 |
+
if dtype is None:
|
153 |
+
dtype = (
|
154 |
+
torch.float16
|
155 |
+
if "cuda" in device
|
156 |
+
and torch.cuda.get_device_properties(device).major >= 7
|
157 |
+
and not torch.cuda.get_device_name().endswith("[ZLUDA]")
|
158 |
+
else torch.float32
|
159 |
+
)
|
160 |
+
global asr_pipe
|
161 |
+
asr_pipe = pipeline(
|
162 |
+
"automatic-speech-recognition",
|
163 |
+
model="openai/whisper-large-v3-turbo",
|
164 |
+
torch_dtype=dtype,
|
165 |
+
device=device,
|
166 |
+
)
|
167 |
+
|
168 |
+
|
169 |
+
# transcribe
|
170 |
+
|
171 |
+
|
172 |
+
def transcribe(ref_audio, language=None):
|
173 |
+
global asr_pipe
|
174 |
+
if asr_pipe is None:
|
175 |
+
initialize_asr_pipeline(device=device)
|
176 |
+
return asr_pipe(
|
177 |
+
ref_audio,
|
178 |
+
chunk_length_s=30,
|
179 |
+
batch_size=128,
|
180 |
+
generate_kwargs={"task": "transcribe", "language": language} if language else {"task": "transcribe"},
|
181 |
+
return_timestamps=False,
|
182 |
+
)["text"].strip()
|
183 |
+
|
184 |
+
|
185 |
+
# load model checkpoint for inference
|
186 |
+
|
187 |
+
|
188 |
+
def load_checkpoint(model, ckpt_path, device: str, dtype=None, use_ema=True):
|
189 |
+
if dtype is None:
|
190 |
+
dtype = (
|
191 |
+
torch.float16
|
192 |
+
if "cuda" in device
|
193 |
+
and torch.cuda.get_device_properties(device).major >= 7
|
194 |
+
and not torch.cuda.get_device_name().endswith("[ZLUDA]")
|
195 |
+
else torch.float32
|
196 |
+
)
|
197 |
+
model = model.to(dtype)
|
198 |
+
|
199 |
+
ckpt_type = ckpt_path.split(".")[-1]
|
200 |
+
if ckpt_type == "safetensors":
|
201 |
+
from safetensors.torch import load_file
|
202 |
+
|
203 |
+
checkpoint = load_file(ckpt_path, device=device)
|
204 |
+
else:
|
205 |
+
checkpoint = torch.load(ckpt_path, map_location=device, weights_only=True)
|
206 |
+
|
207 |
+
if use_ema:
|
208 |
+
if ckpt_type == "safetensors":
|
209 |
+
checkpoint = {"ema_model_state_dict": checkpoint}
|
210 |
+
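# EMA weights are stored under an "ema_model." prefix; strip it and drop the
# EMA bookkeeping keys ("initted", "step")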
checkpoint["model_state_dict"] = {
|
211 |
+
k.replace("ema_model.", ""): v
|
212 |
+
for k, v in checkpoint["ema_model_state_dict"].items()
|
213 |
+
if k not in ["initted", "step"]
|
214 |
+
}
|
215 |
+
|
216 |
+
# patch for backward compatibility, 305e3ea
|
217 |
+
for key in ["mel_spec.mel_stft.mel_scale.fb", "mel_spec.mel_stft.spectrogram.window"]:
|
218 |
+
if key in checkpoint["model_state_dict"]:
|
219 |
+
del checkpoint["model_state_dict"][key]
|
220 |
+
|
221 |
+
model.load_state_dict(checkpoint["model_state_dict"])
|
222 |
+
else:
|
223 |
+
if ckpt_type == "safetensors":
|
224 |
+
checkpoint = {"model_state_dict": checkpoint}
|
225 |
+
model.load_state_dict(checkpoint["model_state_dict"])
|
226 |
+
|
227 |
+
del checkpoint
|
228 |
+
torch.cuda.empty_cache()
|
229 |
+
|
230 |
+
return model.to(device)
|
231 |
+
|
232 |
+
|
233 |
+
# load model for inference
|
234 |
+
|
235 |
+
|
236 |
+
def load_model(
|
237 |
+
model_cls,
|
238 |
+
model_cfg,
|
239 |
+
ckpt_path,
|
240 |
+
mel_spec_type=mel_spec_type,
|
241 |
+
vocab_file="",
|
242 |
+
ode_method=ode_method,
|
243 |
+
use_ema=True,
|
244 |
+
device=device,
|
245 |
+
):
|
246 |
+
if vocab_file == "":
|
247 |
+
vocab_file = str(files("f5_tts").joinpath("infer/examples/vocab.txt"))
|
248 |
+
tokenizer = "custom"
|
249 |
+
|
250 |
+
print("\nvocab : ", vocab_file)
|
251 |
+
print("token : ", tokenizer)
|
252 |
+
print("model : ", ckpt_path, "\n")
|
253 |
+
|
254 |
+
vocab_char_map, vocab_size = get_tokenizer(vocab_file, tokenizer)
|
255 |
+
model = CFM(
|
256 |
+
transformer=model_cls(**model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels),
|
257 |
+
mel_spec_kwargs=dict(
|
258 |
+
n_fft=n_fft,
|
259 |
+
hop_length=hop_length,
|
260 |
+
win_length=win_length,
|
261 |
+
n_mel_channels=n_mel_channels,
|
262 |
+
target_sample_rate=target_sample_rate,
|
263 |
+
mel_spec_type=mel_spec_type,
|
264 |
+
),
|
265 |
+
odeint_kwargs=dict(
|
266 |
+
method=ode_method,
|
267 |
+
),
|
268 |
+
vocab_char_map=vocab_char_map,
|
269 |
+
).to(device)
|
270 |
+
|
271 |
+
dtype = torch.float32 if mel_spec_type == "bigvgan" else None
|
272 |
+
model = load_checkpoint(model, ckpt_path, device, dtype=dtype, use_ema=use_ema)
|
273 |
+
|
274 |
+
return model
|
275 |
+
|
276 |
+
|
277 |
+
def remove_silence_edges(audio, silence_threshold=-42):
|
278 |
+
# Remove silence from the start
|
279 |
+
non_silent_start_idx = silence.detect_leading_silence(audio, silence_threshold=silence_threshold)
|
280 |
+
audio = audio[non_silent_start_idx:]
|
281 |
+
|
282 |
+
# Remove silence from the end
|
283 |
+
non_silent_end_duration = audio.duration_seconds
|
284 |
+
for ms in reversed(audio):
|
285 |
+
if ms.dBFS > silence_threshold:
|
286 |
+
break
|
287 |
+
non_silent_end_duration -= 0.001
|
288 |
+
trimmed_audio = audio[: int(non_silent_end_duration * 1000)]
|
289 |
+
|
290 |
+
return trimmed_audio
|
291 |
+
|
292 |
+
|
293 |
+
# preprocess reference audio and text
|
294 |
+
|
295 |
+
|
296 |
+
def preprocess_ref_audio_text(ref_audio_orig, ref_text, show_info=print):
|
297 |
+
show_info("Converting audio...")
|
298 |
+
|
299 |
+
# Compute a hash of the reference audio file
|
300 |
+
with open(ref_audio_orig, "rb") as audio_file:
|
301 |
+
audio_data = audio_file.read()
|
302 |
+
audio_hash = hashlib.md5(audio_data).hexdigest()
|
303 |
+
|
304 |
+
global _ref_audio_cache
|
305 |
+
|
306 |
+
if audio_hash in _ref_audio_cache:
|
307 |
+
show_info("Using cached preprocessed reference audio...")
|
308 |
+
ref_audio = _ref_audio_cache[audio_hash]
|
309 |
+
|
310 |
+
else: # first pass, do preprocess
|
311 |
+
with tempfile.NamedTemporaryFile(suffix=".wav", **tempfile_kwargs) as f:
|
312 |
+
temp_path = f.name
|
313 |
+
|
314 |
+
aseg = AudioSegment.from_file(ref_audio_orig)
|
315 |
+
|
316 |
+
# 1. try to find long silence for clipping
|
317 |
+
non_silent_segs = silence.split_on_silence(
|
318 |
+
aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=1000, seek_step=10
|
319 |
+
)
|
320 |
+
non_silent_wave = AudioSegment.silent(duration=0)
|
321 |
+
for non_silent_seg in non_silent_segs:
|
322 |
+
if len(non_silent_wave) > 6000 and len(non_silent_wave + non_silent_seg) > 12000:
|
323 |
+
show_info("Audio is over 12s, clipping short. (1)")
|
324 |
+
break
|
325 |
+
non_silent_wave += non_silent_seg
|
326 |
+
|
327 |
+
# 2. try to find short silence for clipping if 1. failed
|
328 |
+
if len(non_silent_wave) > 12000:
|
329 |
+
non_silent_segs = silence.split_on_silence(
|
330 |
+
aseg, min_silence_len=100, silence_thresh=-40, keep_silence=1000, seek_step=10
|
331 |
+
)
|
332 |
+
non_silent_wave = AudioSegment.silent(duration=0)
|
333 |
+
for non_silent_seg in non_silent_segs:
|
334 |
+
if len(non_silent_wave) > 6000 and len(non_silent_wave + non_silent_seg) > 12000:
|
335 |
+
show_info("Audio is over 12s, clipping short. (2)")
|
336 |
+
break
|
337 |
+
non_silent_wave += non_silent_seg
|
338 |
+
|
339 |
+
aseg = non_silent_wave
|
340 |
+
|
341 |
+
# 3. if no proper silence found for clipping
|
342 |
+
if len(aseg) > 12000:
|
343 |
+
aseg = aseg[:12000]
|
344 |
+
show_info("Audio is over 12s, clipping short. (3)")
|
345 |
+
|
346 |
+
aseg = remove_silence_edges(aseg) + AudioSegment.silent(duration=50)
|
347 |
+
aseg.export(temp_path, format="wav")
|
348 |
+
ref_audio = temp_path
|
349 |
+
|
350 |
+
# Cache the processed reference audio
|
351 |
+
_ref_audio_cache[audio_hash] = ref_audio
|
352 |
+
|
353 |
+
if not ref_text.strip():
|
354 |
+
global _ref_text_cache
|
355 |
+
if audio_hash in _ref_text_cache:
|
356 |
+
# Use cached asr transcription
|
357 |
+
show_info("Using cached reference text...")
|
358 |
+
ref_text = _ref_text_cache[audio_hash]
|
359 |
+
else:
|
360 |
+
show_info("No reference text provided, transcribing reference audio...")
|
361 |
+
ref_text = transcribe(ref_audio)
|
362 |
+
# Cache the transcribed text (custom ref_text is not cached, so users can still tweak it manually)
|
363 |
+
_ref_text_cache[audio_hash] = ref_text
|
364 |
+
else:
|
365 |
+
show_info("Using custom reference text...")
|
366 |
+
|
367 |
+
# Ensure ref_text ends with a proper sentence-ending punctuation
|
368 |
+
if not ref_text.endswith(". ") and not ref_text.endswith("。"):
|
369 |
+
if ref_text.endswith("."):
|
370 |
+
ref_text += " "
|
371 |
+
else:
|
372 |
+
ref_text += ". "
|
373 |
+
|
374 |
+
print("\nref_text ", ref_text)
|
375 |
+
|
376 |
+
return ref_audio, ref_text
|
377 |
+
|
378 |
+
|
379 |
+
# infer process: chunk text -> infer batches [i.e. infer_batch_process()]
|
380 |
+
|
381 |
+
|
382 |
+
def infer_process(
|
383 |
+
ref_audio,
|
384 |
+
ref_text,
|
385 |
+
gen_text,
|
386 |
+
model_obj,
|
387 |
+
vocoder,
|
388 |
+
mel_spec_type=mel_spec_type,
|
389 |
+
show_info=print,
|
390 |
+
progress=tqdm,
|
391 |
+
target_rms=target_rms,
|
392 |
+
cross_fade_duration=cross_fade_duration,
|
393 |
+
nfe_step=nfe_step,
|
394 |
+
cfg_strength=cfg_strength,
|
395 |
+
sway_sampling_coef=sway_sampling_coef,
|
396 |
+
speed=speed,
|
397 |
+
fix_duration=fix_duration,
|
398 |
+
device=device,
|
399 |
+
):
|
400 |
+
# Split the input text into batches
|
401 |
+
audio, sr = torchaudio.load(ref_audio)
|
402 |
+
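# Heuristic character budget per batch: reference bytes-per-second times the
# seconds left in a ~22 s clip, scaled by the requested speed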
max_chars = int(len(ref_text.encode("utf-8")) / (audio.shape[-1] / sr) * (22 - audio.shape[-1] / sr) * speed)
|
403 |
+
gen_text_batches = chunk_text(gen_text, max_chars=max_chars)
|
404 |
+
for i, gen_text in enumerate(gen_text_batches):
|
405 |
+
print(f"gen_text {i}", gen_text)
|
406 |
+
print("\n")
|
407 |
+
|
408 |
+
show_info(f"Generating audio in {len(gen_text_batches)} batches...")
|
409 |
+
return next(
|
410 |
+
infer_batch_process(
|
411 |
+
(audio, sr),
|
412 |
+
ref_text,
|
413 |
+
gen_text_batches,
|
414 |
+
model_obj,
|
415 |
+
vocoder,
|
416 |
+
mel_spec_type=mel_spec_type,
|
417 |
+
progress=progress,
|
418 |
+
target_rms=target_rms,
|
419 |
+
cross_fade_duration=cross_fade_duration,
|
420 |
+
nfe_step=nfe_step,
|
421 |
+
cfg_strength=cfg_strength,
|
422 |
+
sway_sampling_coef=sway_sampling_coef,
|
423 |
+
speed=speed,
|
424 |
+
fix_duration=fix_duration,
|
425 |
+
device=device,
|
426 |
+
)
|
427 |
+
)
|
428 |
+
|
429 |
+
|
430 |
+
# infer batches
|
431 |
+
|
432 |
+
|
433 |
+
def infer_batch_process(
|
434 |
+
ref_audio,
|
435 |
+
ref_text,
|
436 |
+
gen_text_batches,
|
437 |
+
model_obj,
|
438 |
+
vocoder,
|
439 |
+
mel_spec_type="vocos",
|
440 |
+
progress=tqdm,
|
441 |
+
target_rms=0.1,
|
442 |
+
cross_fade_duration=0.15,
|
443 |
+
nfe_step=32,
|
444 |
+
cfg_strength=2.0,
|
445 |
+
sway_sampling_coef=-1,
|
446 |
+
speed=1,
|
447 |
+
fix_duration=None,
|
448 |
+
device=None,
|
449 |
+
streaming=False,
|
450 |
+
chunk_size=2048,
|
451 |
+
):
|
452 |
+
audio, sr = ref_audio
|
453 |
+
if audio.shape[0] > 1:
|
454 |
+
audio = torch.mean(audio, dim=0, keepdim=True)
|
455 |
+
|
456 |
+
rms = torch.sqrt(torch.mean(torch.square(audio)))
|
457 |
+
if rms < target_rms:
|
458 |
+
audio = audio * target_rms / rms
|
459 |
+
if sr != target_sample_rate:
|
460 |
+
resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
|
461 |
+
audio = resampler(audio)
|
462 |
+
audio = audio.to(device)
|
463 |
+
|
464 |
+
generated_waves = []
|
465 |
+
spectrograms = []
|
466 |
+
|
467 |
+
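# If ref_text ends with a 1-byte (ASCII) character, append a space so it does
# not run straight into gen_text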
if len(ref_text[-1].encode("utf-8")) == 1:
|
468 |
+
ref_text = ref_text + " "
|
469 |
+
|
470 |
+
def process_batch(gen_text):
|
471 |
+
local_speed = speed
|
472 |
+
if len(gen_text.encode("utf-8")) < 10:
|
473 |
+
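# very short texts get a much slower local speed, so enough duration (frames) is allocated below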
local_speed = 0.3
|
474 |
+
|
475 |
+
# Prepare the text
|
476 |
+
text_list = [ref_text + gen_text]
|
477 |
+
final_text_list = convert_char_to_pinyin(text_list)
|
478 |
+
|
479 |
+
ref_audio_len = audio.shape[-1] // hop_length
|
480 |
+
if fix_duration is not None:
|
481 |
+
duration = int(fix_duration * target_sample_rate / hop_length)
|
482 |
+
else:
|
483 |
+
# Calculate duration
|
484 |
+
ref_text_len = len(ref_text.encode("utf-8"))
|
485 |
+
gen_text_len = len(gen_text.encode("utf-8"))
|
486 |
+
duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / local_speed)
|
487 |
+
|
488 |
+
# inference
|
489 |
+
with torch.inference_mode():
|
490 |
+
generated, _ = model_obj.sample(
|
491 |
+
cond=audio,
|
492 |
+
text=final_text_list,
|
493 |
+
duration=duration,
|
494 |
+
steps=nfe_step,
|
495 |
+
cfg_strength=cfg_strength,
|
496 |
+
sway_sampling_coef=sway_sampling_coef,
|
497 |
+
)
|
498 |
+
del _
|
499 |
+
|
500 |
+
generated = generated.to(torch.float32) # generated mel spectrogram
|
501 |
+
generated = generated[:, ref_audio_len:, :]
|
502 |
+
generated = generated.permute(0, 2, 1)
|
503 |
+
if mel_spec_type == "vocos":
|
504 |
+
generated_wave = vocoder.decode(generated)
|
505 |
+
elif mel_spec_type == "bigvgan":
|
506 |
+
generated_wave = vocoder(generated)
|
507 |
+
if rms < target_rms:
|
508 |
+
generated_wave = generated_wave * rms / target_rms
|
509 |
+
|
510 |
+
# wav -> numpy
|
511 |
+
generated_wave = generated_wave.squeeze().cpu().numpy()
|
512 |
+
|
513 |
+
if streaming:
|
514 |
+
for j in range(0, len(generated_wave), chunk_size):
|
515 |
+
yield generated_wave[j : j + chunk_size], target_sample_rate
|
516 |
+
else:
|
517 |
+
generated_cpu = generated[0].cpu().numpy()
|
518 |
+
del generated
|
519 |
+
yield generated_wave, generated_cpu
|
520 |
+
|
521 |
+
if streaming:
|
522 |
+
for gen_text in progress.tqdm(gen_text_batches) if progress is not None else gen_text_batches:
|
523 |
+
for chunk in process_batch(gen_text):
|
524 |
+
yield chunk
|
525 |
+
else:
|
526 |
+
with ThreadPoolExecutor() as executor:
|
527 |
+
futures = [executor.submit(process_batch, gen_text) for gen_text in gen_text_batches]
|
528 |
+
for future in progress.tqdm(futures) if progress is not None else futures:
|
529 |
+
result = future.result()
|
530 |
+
if result:
|
531 |
+
generated_wave, generated_mel_spec = next(result)
|
532 |
+
generated_waves.append(generated_wave)
|
533 |
+
spectrograms.append(generated_mel_spec)
|
534 |
+
|
535 |
+
if generated_waves:
|
536 |
+
if cross_fade_duration <= 0:
|
537 |
+
# Simply concatenate
|
538 |
+
final_wave = np.concatenate(generated_waves)
|
539 |
+
else:
|
540 |
+
# Combine all generated waves with cross-fading
|
541 |
+
final_wave = generated_waves[0]
|
542 |
+
for i in range(1, len(generated_waves)):
|
543 |
+
prev_wave = final_wave
|
544 |
+
next_wave = generated_waves[i]
|
545 |
+
|
546 |
+
# Calculate cross-fade samples, ensuring it does not exceed wave lengths
|
547 |
+
cross_fade_samples = int(cross_fade_duration * target_sample_rate)
|
548 |
+
cross_fade_samples = min(cross_fade_samples, len(prev_wave), len(next_wave))
|
549 |
+
|
550 |
+
if cross_fade_samples <= 0:
|
551 |
+
# No overlap possible, concatenate
|
552 |
+
final_wave = np.concatenate([prev_wave, next_wave])
|
553 |
+
continue
|
554 |
+
|
555 |
+
# Overlapping parts
|
556 |
+
prev_overlap = prev_wave[-cross_fade_samples:]
|
557 |
+
next_overlap = next_wave[:cross_fade_samples]
|
558 |
+
|
559 |
+
# Fade out and fade in
|
560 |
+
fade_out = np.linspace(1, 0, cross_fade_samples)
|
561 |
+
fade_in = np.linspace(0, 1, cross_fade_samples)
|
562 |
+
|
563 |
+
# Cross-faded overlap
|
564 |
+
cross_faded_overlap = prev_overlap * fade_out + next_overlap * fade_in
|
565 |
+
|
566 |
+
# Combine
|
567 |
+
new_wave = np.concatenate(
|
568 |
+
[prev_wave[:-cross_fade_samples], cross_faded_overlap, next_wave[cross_fade_samples:]]
|
569 |
+
)
|
570 |
+
|
571 |
+
final_wave = new_wave
|
572 |
+
|
573 |
+
# Create a combined spectrogram
|
574 |
+
combined_spectrogram = np.concatenate(spectrograms, axis=1)
|
575 |
+
|
576 |
+
yield final_wave, target_sample_rate, combined_spectrogram
|
577 |
+
|
578 |
+
else:
|
579 |
+
yield None, target_sample_rate, None
|
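As a reading aid, here is a minimal sketch of consuming this generator in its default (non-streaming) mode, where it yields a single `(final_wave, sample_rate, combined_spectrogram)` triple. This is not part of the commit: `model_obj` and `vocoder` are assumed to be instances already loaded by the helper functions earlier in this file, and the file paths and texts are placeholders.

```python
# Hedged consumption sketch (assumptions: model_obj is a loaded CFM model,
# vocoder a loaded Vocos/BigVGAN instance; "ref.wav" and texts are placeholders).
import soundfile as sf
import torchaudio

ref_audio = torchaudio.load("ref.wav")  # (waveform tensor, sample rate) tuple
ref_text = "Transcript of the reference audio. "
gen_text_batches = ["First sentence to synthesize.", "Second sentence."]

wave, sr, spec = next(
    infer_batch_process(
        ref_audio,
        ref_text,
        gen_text_batches,
        model_obj,
        vocoder,
        mel_spec_type="vocos",
        progress=None,  # or a tqdm-style object exposing .tqdm
        cross_fade_duration=0.15,
        device="cuda",
    )
)
if wave is not None:
    sf.write("out.wav", wave, sr)
```

The duration heuristic above budgets generated frames proportionally to text length: with a reference of 500 mel frames, a 50-byte `ref_text`, a 100-byte `gen_text`, and speed 1, `duration = 500 + int(500 / 50 * 100 / 1) = 1500` frames in total.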
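With `streaming=True`, the same call instead yields `(chunk, sample_rate)` pairs of at most `chunk_size` samples, batch by batch. A short sketch of draining them, under the same assumed setup as the previous example:

```python
import numpy as np

# Same assumed model_obj / vocoder / reference setup as the sketch above.
chunks = []
for chunk, sr in infer_batch_process(
    ref_audio,
    ref_text,
    gen_text_batches,
    model_obj,
    vocoder,
    progress=None,
    streaming=True,
    chunk_size=2048,  # samples per yielded chunk
    device="cuda",
):
    chunks.append(chunk)  # in a server, hand each chunk to the audio callback instead
full_wave = np.concatenate(chunks)
```

Note that the streaming path bypasses the cross-fading stage entirely, so boundaries between batches are plain concatenations.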
+# remove silence from generated wav
+
+
+def remove_silence_for_generated_wav(filename):
+    aseg = AudioSegment.from_file(filename)
+    non_silent_segs = silence.split_on_silence(
+        aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500, seek_step=10
+    )
+    non_silent_wave = AudioSegment.silent(duration=0)
+    for non_silent_seg in non_silent_segs:
+        non_silent_wave += non_silent_seg
+    aseg = non_silent_wave
+    aseg.export(filename, format="wav")
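For orientation on the pydub call above: `min_silence_len`, `keep_silence`, and `seek_step` are in milliseconds, while `silence_thresh` is in dBFS, so these settings drop pauses longer than one second quieter than -50 dBFS and keep half a second of padding around each retained segment. The edit is destructive:

```python
# Trims long silences in place; the file at this path is overwritten.
remove_silence_for_generated_wav("out.wav")
```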
+# save spectrogram
+
+
+def save_spectrogram(spectrogram, path):
+    plt.figure(figsize=(12, 4))
+    plt.imshow(spectrogram, origin="lower", aspect="auto")
+    plt.colorbar()
+    plt.savefig(path)
+    plt.close()
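The cross-fade in `infer_batch_process` above is a plain linear (equal-gain) fade. A tiny self-contained demo of the same arithmetic on toy arrays, independent of any model code:

```python
import numpy as np

# Toy version of the cross-fade used when joining batch waveforms:
# over n overlapping samples, fade_out ramps 1 -> 0 while fade_in ramps
# 0 -> 1, so each overlap sample is a convex combination of both waves.
prev_wave = np.ones(8)
next_wave = np.zeros(8)
n = 4  # cross_fade_samples

fade_out = np.linspace(1, 0, n)  # [1.0, 0.667, 0.333, 0.0]
fade_in = np.linspace(0, 1, n)   # [0.0, 0.333, 0.667, 1.0]
overlap = prev_wave[-n:] * fade_out + next_wave[:n] * fade_in

joined = np.concatenate([prev_wave[:-n], overlap, next_wave[n:]])
print(len(joined))  # 12: the joined wave is n samples shorter than 8 + 8
print(joined)       # [1. 1. 1. 1. 1. 0.667 0.333 0. 0. 0. 0. 0.] (approx.)
```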
src/f5_tts/model/__init__.py
ADDED
@@ -0,0 +1,8 @@
+from f5_tts.model.backbones.dit import DiT
+from f5_tts.model.backbones.mmdit import MMDiT
+from f5_tts.model.backbones.unett import UNetT
+from f5_tts.model.cfm import CFM
+from f5_tts.model.trainer import Trainer
+
+
+__all__ = ["CFM", "UNetT", "DiT", "MMDiT", "Trainer"]
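These re-exports flatten the package surface, so downstream code (including the infer and eval scripts in this repo) can pull everything from `f5_tts.model` directly:

```python
# Equivalent to importing from the individual submodules listed above.
from f5_tts.model import CFM, DiT, MMDiT, UNetT, Trainer
```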
src/f5_tts/model/backbones/README.md
ADDED
@@ -0,0 +1,20 @@
+## Backbones quick introduction
+
+
+### unett.py
+- flat unet transformer
+- structure same as in e2-tts & voicebox paper, except using rotary pos emb
+- possible abs pos emb & convnextv2 blocks for embedded text before concat
+
+### dit.py
+- adaln-zero dit
+- embedded timestep as condition
+- concatted noised_input + masked_cond + embedded_text, linear proj in
+- possible abs pos emb & convnextv2 blocks for embedded text before concat
+- possible long skip connection (first layer to last layer)
+
+### mmdit.py
+- stable diffusion 3 block structure
+- timestep as condition
+- left stream: text embedded and applied an abs pos emb
+- right stream: masked_cond & noised_input concatted, with the same conv pos emb as unett