Staticaliza committed on
Commit
60de375
·
verified ·
1 Parent(s): 90d6ab1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -12
app.py CHANGED
@@ -17,11 +17,7 @@ print(f"[SYSTEM] | Using {DEVICE} type compute device.")
17
  DEFAULT_TASK = "transcribe"
18
  BATCH_SIZE = 8
19
 
20
- # repo = pipeline(task="automatic-speech-recognition", model="deepdml/faster-whisper-large-v3-turbo-ct2", chunk_length_s=30, device=DEVICE)
21
-
22
- snapshot_download(repo_id="deepdml/faster-whisper-large-v3-turbo-ct2", local_dir="faster-whisper-large-v3-turbo-ct2", repo_type="model")
23
-
24
- repo = WhisperModel("faster-whisper-large-v3-turbo-ct2")
25
 
26
  css = '''
27
  .gradio-container{max-width: 560px !important}
@@ -35,13 +31,7 @@ footer {
35
  def transcribe(input=None, task=DEFAULT_TASK):
36
  print(input)
37
  if input is None: raise gr.Error("Invalid input.")
38
-
39
- segments, info = repo.transcribe(input)
40
- for segment in segments:
41
- print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
42
- print(segments)
43
- print(info)
44
- # output = repo(input, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
45
  return 0
46
 
47
  def cloud():
 
17
  DEFAULT_TASK = "transcribe"
18
  BATCH_SIZE = 8
19
 
20
+ repo = pipeline(task="automatic-speech-recognition", model="onnx-community/whisper-large-v3-turbo", chunk_length_s=30, device=DEVICE)
 
 
 
 
21
 
22
  css = '''
23
  .gradio-container{max-width: 560px !important}
 
31
  def transcribe(input=None, task=DEFAULT_TASK):
32
  print(input)
33
  if input is None: raise gr.Error("Invalid input.")
34
+ output = repo(input, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
 
 
 
 
 
 
35
  return 0
36
 
37
  def cloud():