gaur3009 commited on
Commit
698aba0
·
verified ·
1 Parent(s): 5a36579

Update llm.py

Browse files
Files changed (1) hide show
  1. llm.py +3 -4
llm.py CHANGED
@@ -8,13 +8,12 @@ def generate_answer(context, question):
8
  {context}
9
 
10
  Based on the above context, answer the question:
11
- Question: {question}
12
- Answer:"""
13
-
14
  inputs = tokenizer(prompt, return_tensors='pt', truncation=True, max_length=512)
15
  outputs = model.generate(
16
  **inputs,
17
  max_new_tokens=80,
18
- do_sample=False # deterministic
19
  )
20
  return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
 
8
  {context}
9
 
10
  Based on the above context, answer the question:
11
+ {question}
12
+ """
 
13
  inputs = tokenizer(prompt, return_tensors='pt', truncation=True, max_length=512)
14
  outputs = model.generate(
15
  **inputs,
16
  max_new_tokens=80,
17
+ do_sample=False
18
  )
19
  return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()