
mskov committed
Commit b7f64d1 · 1 parent: d63c230

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -46,7 +46,7 @@ def inference(audio):
     input_ids = tokenizer(result.text, return_tensors="pt").input_ids
     print("inputs ", input_ids)
     generated_outputs = gpt2.generate(input_ids, do_sample=True, num_return_sequences=3, output_scores=True)
-    print("outputs generated ", generated_outputs)
+    print("outputs generated ", generated_outputs[0])
     # only use id's that were generated
     # gen_sequences has shape [3, 15]
     gen_sequences = generated_outputs.sequences[:, input_ids.shape[-1]:]
@@ -58,7 +58,7 @@ def inference(audio):
     # now we need to collect the probability of the generated token
     # we need to add a dummy dim in the end to make gather work
     gen_probs = torch.gather(probs, 2, gen_sequences[:, :, None]).squeeze(-1)
-
+    print("gen probs result: ", gen_probs)
     # now we can do all kinds of things with the probs
 
     # 1) the probs that exactly those sequences are generated again
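
For context, the two print calls touched by this commit sit inside the usual transformers recipe for recovering the probability of each sampled token. Below is a minimal, self-contained sketch of that recipe, not app.py itself: the "gpt2" checkpoint, the fixed prompt, and max_new_tokens=15 are assumptions standing in for the transcript (result.text) the Space actually feeds in, and return_dict_in_generate=True is passed explicitly here because .sequences and .scores are only exposed when it is set.

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Hypothetical stand-ins: app.py builds input_ids from a transcript (result.text);
# a fixed prompt is used here instead.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
gpt2 = GPT2LMHeadModel.from_pretrained("gpt2")
input_ids = tokenizer("Today is a nice day and", return_tensors="pt").input_ids

# return_dict_in_generate=True is what exposes .sequences and .scores;
# max_new_tokens=15 is an assumed value chosen to match the [3, 15] shape in the comment.
generated_outputs = gpt2.generate(
    input_ids,
    do_sample=True,
    num_return_sequences=3,
    max_new_tokens=15,
    output_scores=True,
    return_dict_in_generate=True,
)

# only use ids that were generated, i.e. drop the prompt tokens
gen_sequences = generated_outputs.sequences[:, input_ids.shape[-1]:]

# stack the per-step scores into [num_return_sequences, gen_len, vocab_size]
# and turn them into probabilities
probs = torch.stack(generated_outputs.scores, dim=1).softmax(-1)

# collect the probability of each generated token;
# the dummy last dim makes gather work
gen_probs = torch.gather(probs, 2, gen_sequences[:, :, None]).squeeze(-1)

print("outputs generated ", generated_outputs[0])  # first field of the output object, i.e. .sequences
print("gen probs result: ", gen_probs)             # shape [num_return_sequences, gen_len]

Indexing the generate output as generated_outputs[0], as the updated print does, returns the same tensor as generated_outputs.sequences, so the change trims the log to just the generated ids rather than the full object including per-step scores.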