aaliyaan committed on
Commit
6316352
·
1 Parent(s): 9905f1f

gave server more time to process

Browse files
Files changed (1) hide show
  1. app.py +10 -3
app.py CHANGED
@@ -34,9 +34,16 @@ def chat_with_model(model_choice, user_message, chat_history, file=None):
34
  # Tokenize Input
35
  inputs = tokenizer(user_message, return_tensors="pt", padding=True, truncation=True, max_length=512)
36
 
37
- # Adjust max_length for longer responses if "PDF Summarizer (T5)" is selected
38
  max_length = 512 if model_choice == "PDF Summarizer (T5)" else 150
39
- outputs = model.generate(**inputs, max_length=max_length, num_beams=5, early_stopping=True)
 
 
 
 
 
 
 
40
 
41
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
42
 
@@ -105,4 +112,4 @@ def create_chat_interface():
105
 
106
  if __name__ == "__main__":
107
  interface = create_chat_interface()
108
- interface.launch()
 
34
  # Tokenize Input
35
  inputs = tokenizer(user_message, return_tensors="pt", padding=True, truncation=True, max_length=512)
36
 
37
+ # Adjust max_length and parameters for the PDF summarizer
38
  max_length = 512 if model_choice == "PDF Summarizer (T5)" else 150
39
+ num_beams = 4 if model_choice == "PDF Summarizer (T5)" else 5
40
+ outputs = model.generate(
41
+ **inputs,
42
+ max_length=max_length,
43
+ num_beams=num_beams,
44
+ early_stopping=True,
45
+ no_repeat_ngram_size=2 # Forbid repeating any 2-gram to reduce repetitive output
46
+ )
47
 
48
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
49
 
 
112
 
113
  if __name__ == "__main__":
114
  interface = create_chat_interface()
115
+ interface.launch(server_name="0.0.0.0", server_port=7860, server_timeout=300)  # NOTE(review): gradio's launch() does not accept a server_timeout parameter — verify against the gradio version in use