ricardo-lsantos committed
Commit 82a7fca
1 Parent(s): 7b2c089

Fixed subs and whisper device

Files changed (2):
  1. models/subtitles.py +10 -2
  2. models/whisper.py +3 -3
models/subtitles.py CHANGED
@@ -3,7 +3,15 @@ def save_subtitles(prediction, output_file):
     ## Save prediction to subtitles file format
     with open(output_file, "w") as f:
         for i, chunk in enumerate(prediction):
-            f.write(f"{i+1}")
-            f.write(f"{chunk['timestamp'][0]} --> {chunk['timestamp'][1]}")
+            start_time = format_time(chunk['timestamp'][0])
+            end_time = format_time(chunk['timestamp'][1])
+            f.write(f"{i+1}\n")
+            f.write(f"{start_time} --> {end_time}\n")
             f.write(f"{chunk['text']}\n")
             f.write("\n")
+
+def format_time(seconds):
+    hours = int(seconds // 3600)
+    minutes = int((seconds % 3600) // 60)
+    seconds = seconds % 60
+    return f"{hours:02d}:{minutes:02d}:{seconds:04.1f}"
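The change above makes save_subtitles emit SubRip-style blocks (index, time range, text, blank line) with formatted timestamps instead of raw float seconds with missing newlines. A minimal sketch of exercising the fixed function, assuming models/ is importable as a package; the prediction chunks and the output.srt file name are made up for illustration, in the shape the Whisper pipeline returns with return_timestamps=True:

```python
# Illustrative only: fabricated chunks with a (start, end) timestamp tuple in
# seconds and the transcribed text, as produced by the ASR pipeline.
from models.subtitles import save_subtitles

prediction = [
    {"timestamp": (0.0, 3.5), "text": " Hello there."},
    {"timestamp": (3.5, 7.25), "text": " This is a test subtitle."},
]

save_subtitles(prediction, "output.srt")

# output.srt then contains blocks like:
# 1
# 00:00:00.0 --> 00:00:03.5
#  Hello there.
#
# 2
# 00:00:03.5 --> 00:00:07.2
#  This is a test subtitle.
```

Note that format_time writes tenths of a second with a dot (HH:MM:SS.s), while strict SubRip timing uses comma-separated milliseconds (HH:MM:SS,mmm), so very strict players or parsers may still reject the output.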
models/whisper.py CHANGED
@@ -1,12 +1,12 @@
-import torch_directml
+# import torch_directml
 from transformers import pipeline
 
 MODEL_CHECKPOINT = "openai/whisper-small"
 CHUNK_LENGTH_S = 30
 
 def get_device():
-    # return "cpu"
-    return torch_directml.device()
+    return "cpu"
+    # return torch_directml.device()
 
 def get_pipe(device, model_checkpoint=MODEL_CHECKPOINT, chunk_length_s=CHUNK_LENGTH_S):
     return pipeline(
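This change pins the pipeline to the CPU and comments out the DirectML path. If the goal were to keep DirectML as an optional accelerator rather than removing it outright, one possible sketch is a guarded fallback (an illustrative variant, not the code in this commit):

```python
# Sketch of a guarded device lookup: prefer DirectML when torch_directml is
# installed and initialises, otherwise fall back to the "cpu" string that
# transformers.pipeline accepts. Illustrative variant, not this commit's code.
def get_device():
    try:
        import torch_directml  # optional dependency, present only on DirectML setups
        return torch_directml.device()
    except (ImportError, RuntimeError):
        return "cpu"
```

With a fallback like this, get_pipe(get_device()) would pick whichever backend is available without further code changes.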