aaliyaan committed on
Commit
dda6a92
·
1 Parent(s): 67a4143
Files changed (1) hide show
  1. app.py +4 -14
app.py CHANGED
@@ -33,18 +33,8 @@ def chat_with_model(model_choice, user_message, chat_history, file=None):
33
 
34
  # Tokenize Input
35
  inputs = tokenizer(user_message, return_tensors="pt", padding=True, truncation=True, max_length=512)
36
-
37
- # Adjust max_length and parameters for the PDF summarizer
38
- max_length = 512 if model_choice == "PDF Summarizer (T5)" else 150
39
- num_beams = 4 if model_choice == "PDF Summarizer (T5)" else 5
40
- outputs = model.generate(
41
- **inputs,
42
- max_length=max_length,
43
- num_beams=num_beams,
44
- early_stopping=True,
45
- no_repeat_ngram_size=2 # Allow some flexibility in repetition
46
- )
47
-
48
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
49
 
50
  # Update Chat History
@@ -60,7 +50,7 @@ def extract_text_from_pdf(file):
60
 
61
  # Interface Setup
62
  def create_chat_interface():
63
- with gr.Blocks(css="""
64
  .chatbox {
65
  background-color: #f7f7f8;
66
  border-radius: 12px;
@@ -112,4 +102,4 @@ def create_chat_interface():
112
 
113
  if __name__ == "__main__":
114
  interface = create_chat_interface()
115
- interface.launch(server_name="0.0.0.0", server_port=7860)
 
33
 
34
  # Tokenize Input
35
  inputs = tokenizer(user_message, return_tensors="pt", padding=True, truncation=True, max_length=512)
36
+ # Generate Output
37
+ outputs = model.generate(**inputs, max_length=150, num_beams=5, early_stopping=True)
 
 
 
 
 
 
 
 
 
 
38
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
39
 
40
  # Update Chat History
 
50
 
51
  # Interface Setup
52
  def create_chat_interface():
53
+ with gr.Blocks(css="""
54
  .chatbox {
55
  background-color: #f7f7f8;
56
  border-radius: 12px;
 
102
 
103
  if __name__ == "__main__":
104
  interface = create_chat_interface()
105
+ interface.launch()