Staticaliza committed on
Commit
cd19b02
·
verified ·
1 Parent(s): 30febb1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -43
app.py CHANGED
@@ -1,48 +1,28 @@
1
- import spaces
2
- @spaces.GPU(duration=15)
3
- def gpu():
4
- print("[GPU] | GPU maintained.")
5
 
6
- import os
7
- import sys
8
- import subprocess
9
- import multiprocessing
10
- import gradio as gr
11
 
12
- bitnet_dir = os.path.join(os.getcwd(), "bitnet")
13
- if not os.path.isdir(bitnet_dir):
14
- subprocess.run(["git", "clone", "--depth", "1", "--recursive", "https://github.com/microsoft/BitNet.git", "bitnet"], check=True)
15
- build_dir = os.path.join(bitnet_dir, "build")
16
- if not os.path.isdir(build_dir):
17
- os.makedirs(build_dir, exist_ok=True)
18
- subprocess.run(["cmake", ".."], cwd=build_dir, check=True)
19
- subprocess.run(["cmake", "--build", ".", "--config", "Release", "--parallel", str(multiprocessing.cpu_count())], cwd=build_dir, check=True)
 
20
 
21
- script_path = os.path.join(bitnet_dir, "run_inference.py")
22
- model_path = os.environ.get("MODEL_PATH", "models/BitNet-b1.58-2B-4T/ggml-model-i2_s.gguf")
23
-
24
- def generate(prompt, max_tokens=128, temperature=0.7):
25
- cmd = [
26
- sys.executable,
27
- script_path,
28
- "-m", model_path,
29
- "-p", prompt,
30
- "-n", str(max_tokens),
31
- "-temp", str(temperature)
32
- ]
33
- proc = subprocess.run(cmd, cwd=bitnet_dir, capture_output=True, text=True)
34
- return proc.stdout.strip() if proc.returncode == 0 else proc.stderr.strip()
35
-
36
- iface = gr.Interface(
37
- fn=generate,
38
- inputs=[
39
- gr.Textbox(lines=2, placeholder="enter your prompt here", label="prompt"),
40
- gr.Slider(1, 512, value=128, step=1, label="max tokens"),
41
- gr.Slider(0.0, 1.0, value=0.7, step=0.01, label="temperature")
42
- ],
43
- outputs=gr.Textbox(label="completion"),
44
- title="bitnet.cpp completion demo"
45
- )
46
 
47
  if __name__ == "__main__":
48
- iface.launch()
 
1
# app.py
#
# Hugging Face Spaces demo: zero-shot voice cloning with IndexTTS-1.5.
# Module level only downloads the checkpoint and declares the (lazy) engine;
# the model itself is constructed inside the GPU worker (see synth).
import os

import gradio as gr
import spaces
import torch
from huggingface_hub import snapshot_download
from indextts.infer import IndexTTS

# Fetch the IndexTTS-1.5 checkpoint into ./checkpoints once at startup.
# NOTE: the original passed local_dir_use_symlinks=False, which is
# deprecated and ignored by recent huggingface_hub releases (files are
# always materialized under local_dir), so it is omitted here.
model_dir = snapshot_download("IndexTeam/IndexTTS-1.5", local_dir="checkpoints")
cfg_path = os.path.join(model_dir, "config.yaml")

# Lazily-constructed TTS engine; stays None until the first synth() call.
tts = None
 
 
9
 
10
@spaces.GPU
def synth(audio_path, text):
    """Synthesize *text* in the voice of the reference audio clip.

    Parameters
    ----------
    audio_path : str
        Filesystem path to the reference voice recording (from gr.Audio
        with type="filepath").
    text : str
        Text prompt to synthesize.

    Returns
    -------
    str
        Path to the generated WAV file.
    """
    import tempfile  # local import keeps this fix self-contained

    global tts
    # Lazy one-time initialization: build the model inside the Spaces GPU
    # worker (under @spaces.GPU) rather than at import time.
    if tts is None:
        tts = IndexTTS(model_dir=model_dir, cfg_path=cfg_path)
        if torch.cuda.is_available():
            # NOTE(review): assumes IndexTTS exposes a .to() device method —
            # confirm against the indextts API.
            tts.to("cuda")
    # Unique output file per request: the original wrote a fixed
    # "output.wav", which concurrent Gradio requests would clobber.
    fd, out_path = tempfile.mkstemp(suffix=".wav")
    os.close(fd)  # IndexTTS writes by path; keep no dangling descriptor
    tts.infer(audio_path, text, out_path)
    return out_path
19
 
20
# Gradio UI: one page with a text prompt, a reference-voice upload, and a
# generate button wired to synth(). Component creation order (and thus
# layout) matches the original exactly.
with gr.Blocks() as demo:
    gr.Markdown("# index-tts 1.5 (english)")
    prompt_box = gr.Textbox(label="text prompt")
    reference_audio = gr.Audio(label="reference voice", type="filepath")
    generated_audio = gr.Audio(label="generated speech", type="filepath")
    generate_btn = gr.Button("generate")
    generate_btn.click(synth, [reference_audio, prompt_box], generated_audio)


if __name__ == "__main__":
    demo.launch()