ahk-d committed · verified
Commit 10d4684 · Parent(s): 8814906

Update app.py

Files changed (1): app.py (+6 -14)
app.py CHANGED
@@ -6,7 +6,6 @@ import torchaudio
 from demucs.pretrained import get_model
 from demucs.apply import apply_model
 import os
-import base64
 
 # --- Setup the model ---
 print("Setting up the model...")
@@ -18,17 +17,11 @@ model = model.to(device)
 model.eval()
 print("Model loaded successfully.")
 
-# --- Helper function to convert WAV to base64 data URI ---
-def file_to_data_uri(path):
-    with open(path, "rb") as f:
-        data = f.read()
-    return f"data:audio/wav;base64,{base64.b64encode(data).decode()}"
-
 # --- Separation function ---
 def separate_stems(audio_path):
     """
     Separates an audio file into drums, bass, other, and vocals.
-    Returns base64-encoded audio URIs for frontend playback.
+    Returns FILE PATHS (not base64).
     """
     if audio_path is None:
         return None, None, None, None, "Please upload an audio file."
@@ -48,19 +41,19 @@ def separate_stems(audio_path):
         sources = apply_model(model, wav[None], device=device, progress=True)[0]
         print("Separation complete.")
 
-        # Save stems temporarily & encode to base64 URIs
+        # Save stems temporarily
         stem_names = ["drums", "bass", "other", "vocals"]
         output_dir = "separated_stems"
         os.makedirs(output_dir, exist_ok=True)
 
-        output_uris = []
+        output_paths = []
        for i, name in enumerate(stem_names):
             out_path = os.path.join(output_dir, f"{name}.wav")
             torchaudio.save(out_path, sources[i].cpu(), sr)
-            output_uris.append(file_to_data_uri(out_path))
-            print(f"Encoded {name} to base64 URI")
+            output_paths.append(out_path)
+            print(f"✅ Saved {name} to {out_path}")
 
-        return output_uris[0], output_uris[1], output_uris[2], output_uris[3], "✅ Separation successful!"
+        return output_paths[0], output_paths[1], output_paths[2], output_paths[3], "✅ Separation successful!"
 
     except Exception as e:
         print(f"Error: {e}")
@@ -92,5 +85,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     gr.Markdown("---\n<p style='text-align: center; font-size: small;'>Powered by HT Demucs</p>")
 
-# ✅ Enable API for Next.js
 demo.launch(share=True)
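
The hunks above touch only separate_stems(); the Blocks layout that consumes its five return values is not part of this diff. Below is a minimal sketch of how the path outputs could be wired, assuming hypothetical component names (audio_in, drums_out, and so on) that do not appear in this commit:

import gradio as gr

# separate_stems is the function defined above in app.py.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Hypothetical layout; the real app.py may name and arrange these differently.
    audio_in = gr.Audio(type="filepath", label="Upload a song")
    separate_btn = gr.Button("Separate")

    # With type="filepath", each output component plays the WAV straight from
    # the path returned by separate_stems(); Gradio serves the file itself,
    # so no base64 encoding is needed.
    drums_out = gr.Audio(type="filepath", label="Drums")
    bass_out = gr.Audio(type="filepath", label="Bass")
    other_out = gr.Audio(type="filepath", label="Other")
    vocals_out = gr.Audio(type="filepath", label="Vocals")
    status_box = gr.Textbox(label="Status")

    separate_btn.click(
        fn=separate_stems,
        inputs=audio_in,
        outputs=[drums_out, bass_out, other_out, vocals_out, status_box],
    )

Returning paths rather than data URIs keeps the response payload small; Gradio turns file outputs into downloadable URLs on its own.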
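
On the consumer side (the removed comment mentions a Next.js frontend), file-path outputs are also what programmatic clients receive. A sketch using gradio_client, assuming a recent version that provides handle_file; the Space ID and api_name below are placeholders, not values from this commit:

from gradio_client import Client, handle_file

client = Client("ahk-d/<space-name>")      # placeholder Space ID
drums, bass, other, vocals, status = client.predict(
    handle_file("song.mp3"),               # any local audio file
    api_name="/separate_stems",            # depends on how the event is registered in app.py
)
print(status)   # e.g. "✅ Separation successful!"
print(drums)    # local path of the downloaded drums.wav

gradio_client downloads each file output to a temporary directory and hands back the local path, so the caller never deals with base64 at all.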