jyo01 committed on
Commit
79e26b9
·
verified ·
1 Parent(s): 98cafe0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -1
app.py CHANGED
@@ -150,7 +150,14 @@ def get_llm_response(prompt: str, model_name: str = "EleutherAI/gpt-neo-125M", m
150
  )
151
 
152
  text_gen = pipeline("text-generation", model=model, tokenizer=tokenizer)
153
- outputs = text_gen(prompt, max_new_tokens=max_new_tokens, do_sample=True, temperature=0.7)
 
 
 
 
 
 
 
154
  full_response = outputs[0]['generated_text']
155
 
156
  marker = "Answer:"
 
150
  )
151
 
152
  text_gen = pipeline("text-generation", model=model, tokenizer=tokenizer)
153
+ outputs = text_gen(
154
+ prompt,
155
+ max_new_tokens=max_new_tokens,
156
+ do_sample=True,
157
+ temperature=0.9, # Increased temperature
158
+ top_p=0.9, # Using nucleus sampling
159
+ top_k=50 # Limit to top 50 tokens per step
160
+ )
161
  full_response = outputs[0]['generated_text']
162
 
163
  marker = "Answer:"