jason-moore committed on
Commit
d85b2a7
·
1 Parent(s): ca4db09

Add attn mask

Browse files
Files changed (1) hide show
  1. app.py +10 -4
app.py CHANGED
@@ -14,7 +14,6 @@ def load_model():
14
 
15
  return model, tokenizer
16
 
17
- # Function to generate SOAP notes
18
  def generate_soap_note(doctor_patient_conversation):
19
  if not doctor_patient_conversation.strip():
20
  return "Please enter a doctor-patient conversation."
@@ -26,11 +25,18 @@ Please generate a structured SOAP (Subjective, Objective, Assessment, Plan) note
26
  {doctor_patient_conversation}
27
  <|assistant|>"""
28
 
29
- # Tokenize and generate
30
- inputs = tokenizer(prompt, return_tensors="pt")
 
 
 
 
 
 
31
 
32
  generate_ids = model.generate(
33
- inputs.input_ids,
 
34
  max_length=2048,
35
  num_beams=5,
36
  no_repeat_ngram_size=2,
 
14
 
15
  return model, tokenizer
16
 
 
17
  def generate_soap_note(doctor_patient_conversation):
18
  if not doctor_patient_conversation.strip():
19
  return "Please enter a doctor-patient conversation."
 
25
  {doctor_patient_conversation}
26
  <|assistant|>"""
27
 
28
+ # Tokenize and generate with explicit padding settings
29
+ inputs = tokenizer(
30
+ prompt,
31
+ return_tensors="pt",
32
+ padding=True,
33
+ truncation=True,
34
+ max_length=tokenizer.model_max_length
35
+ )
36
 
37
  generate_ids = model.generate(
38
+ inputs.input_ids,
39
+ attention_mask=inputs.attention_mask, # Explicitly pass attention mask
40
  max_length=2048,
41
  num_beams=5,
42
  no_repeat_ngram_size=2,