gaur3009 commited on
Commit
3d98aa9
·
verified ·
1 Parent(s): 698aba0

Update llm.py

Browse files
Files changed (1) hide show
  1. llm.py +11 -7
llm.py CHANGED
@@ -1,19 +1,23 @@
1
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
2
 
3
# Load the FLAN-T5 (small variant) checkpoint once at module import so
# every generate_answer() call reuses the same tokenizer and weights.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
5
 
6
def generate_answer(context, question):
    """Answer *question* grounded in *context* using FLAN-T5 (greedy decoding).

    Builds a plain-text prompt containing the context followed by the
    question, tokenizes it (truncated to the model's 512-token input
    window), generates deterministically, and decodes the result.

    Args:
        context: Free-form text the answer should be grounded in.
        question: The question to answer about that context.

    Returns:
        The decoded model output with special tokens removed and
        surrounding whitespace stripped.
    """
    # NOTE(review): the exact blank-line layout of this prompt was
    # reconstructed from a diff view — confirm against the original file.
    prompt = f"""Context:

{context}

Based on the above context, answer the question:
{question}
"""
    # truncation=True drops anything past 512 tokens; for very long
    # contexts the tail of the prompt (including the question) is cut off.
    inputs = tokenizer(prompt, return_tensors='pt', truncation=True, max_length=512)
    outputs = model.generate(
        **inputs,
        max_new_tokens=80,
        do_sample=False  # greedy decoding: deterministic output
    )
    # outputs[0]: the single generated sequence for the single input.
    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
 
1
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
2
 
3
# Load the FLAN-T5 (base variant) checkpoint once at module import so
# every generate_answer() call reuses the same tokenizer and weights.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
5
 
6
def generate_answer(context, question):
    """Answer *question* grounded in *context* using FLAN-T5 (greedy decoding).

    Wraps the context and question in an instruction-style prompt,
    tokenizes it (truncated to the model's 512-token input window),
    generates deterministically, and decodes the result.

    Args:
        context: Free-form text the answer should be grounded in.
        question: The question to answer about that context.

    Returns:
        The decoded model output with special tokens removed and
        surrounding whitespace stripped.
    """
    # NOTE(review): the exact blank-line layout of this prompt was
    # reconstructed from a diff view — confirm against the original file.
    # The prompt text is model input, so its bytes are behavior.
    prompt = f"""
You are a helpful AI assistant.

Context:
{context}

Question: {question}

Answer as a helpful paragraph:"""

    # truncation=True drops anything past 512 tokens; for very long
    # contexts the tail of the prompt (question + answer cue) is cut off.
    inputs = tokenizer(prompt, return_tensors='pt', truncation=True, max_length=512)
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        do_sample=False  # greedy decoding: deterministic output
    )
    # outputs[0]: the single generated sequence for the single input.
    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()