Manasa1 committed
Commit 386fde5 · verified · 1 Parent(s): 6f13b9a

Update app.py

Files changed (1)
  1. app.py +3 -4
app.py CHANGED
@@ -4,8 +4,8 @@ import re
 
 # Load the fine-tuned model and tokenizer
 try:
-    model = GPT2LMHeadModel.from_pretrained("Manasa1/finetuned_GPTb")  # Path to your fine-tuned GPT-2 model
-    tokenizer = GPT2Tokenizer.from_pretrained("Manasa1/finetuned_GPTb")  # Path to tokenizer
+    model = GPT2LMHeadModel.from_pretrained("Manasa1/finetuned_distillGPT2")  # Path to your fine-tuned GPT-2 model
+    tokenizer = GPT2Tokenizer.from_pretrained("Manasa1/finetuned_distillGPT2")  # Path to tokenizer
     tokenizer.pad_token = tokenizer.eos_token  # Ensure pad_token is set correctly
 except Exception as e:
     print(f"Error loading model or tokenizer: {e}")
@@ -27,7 +27,7 @@ def generate_answer(question):
         no_repeat_ngram_size=2,
         top_p=0.9,
         top_k=50,
-        temperature=0.7,
+        temperature=0.8,
         do_sample=True,
         pad_token_id=tokenizer.eos_token_id
     )
@@ -75,4 +75,3 @@ with gr.Blocks() as app:
 # Run the app
 if __name__ == "__main__":
     app.launch()
-
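
For context, a minimal sketch of the full app.py this commit modifies, pieced together from the fragments visible in the diff. The model/tokenizer loading, generation parameters, and Gradio launch come directly from the diff; the prompt handling, max_new_tokens value, and UI widget names (question_box, answer_box, generate_btn) are assumptions for illustration, not the repository's actual code.

import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the fine-tuned model and tokenizer (repo ID as updated by this commit)
try:
    model = GPT2LMHeadModel.from_pretrained("Manasa1/finetuned_distillGPT2")
    tokenizer = GPT2Tokenizer.from_pretrained("Manasa1/finetuned_distillGPT2")
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")

def generate_answer(question):
    # Encode the question; the exact prompt template is not visible in the diff.
    inputs = tokenizer(question, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,   # assumed limit; not shown in the diff
        no_repeat_ngram_size=2,
        top_p=0.9,
        top_k=50,
        temperature=0.8,      # raised from 0.7 in this commit
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

with gr.Blocks() as app:
    question_box = gr.Textbox(label="Question")  # assumed widget names
    answer_box = gr.Textbox(label="Answer")
    generate_btn = gr.Button("Generate")
    generate_btn.click(generate_answer, inputs=question_box, outputs=answer_box)

# Run the app
if __name__ == "__main__":
    app.launch()

Raising temperature from 0.7 to 0.8 flattens the sampling distribution slightly, so with do_sample=True the app's answers become a bit more varied at some cost in determinism; top_p=0.9 and top_k=50 still bound the candidate tokens.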