PuristanLabs1 committed
Commit b362593 · verified · 1 Parent(s): ee078cd

Update app.py

Files changed (1)
  1. app.py +2 -1
app.py CHANGED
@@ -188,7 +188,8 @@ def split_text_with_optimized_overlap(text, max_tokens=1024, overlap_tokens=25):
     return chunks
 def summarize_text(text, max_input_tokens=1024, max_output_tokens=200):
     """Generates summary for a given chunk of text."""
-    inputs = tokenizer.encode("summarize: " + text, return_tensors="pt", max_length=max_input_tokens, truncation=True).to(device)
+    #inputs = tokenizer.encode("summarize: " + text, return_tensors="pt", max_length=max_input_tokens, truncation=True).to(device)
+    inputs = tokenizer.encode("summarize: " + text, return_tensors="pt", max_length=max_input_tokens, truncation=True)
     summary_ids = model.generate(inputs, max_length=max_output_tokens, min_length=50, length_penalty=2.0, num_beams=4, early_stopping=True)
     return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
 def hierarchical_summarization(text):
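
In effect, the commit drops the .to(device) call so the encoded inputs stay on the CPU, presumably to match a model that is not moved to a GPU. Below is a minimal, self-contained sketch of the patched function; the "t5-small" checkpoint and the tokenizer/model loading are assumptions for illustration and are not taken from app.py.

# Sketch of the patched summarize_text under the assumption of a T5-style
# checkpoint; app.py may load a different model and tokenizer.
from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")          # assumed checkpoint
model = T5ForConditionalGeneration.from_pretrained("t5-small")

def summarize_text(text, max_input_tokens=1024, max_output_tokens=200):
    """Generates a summary for a given chunk of text."""
    # No .to(device): the input tensor stays on the CPU, matching a CPU model.
    inputs = tokenizer.encode(
        "summarize: " + text,
        return_tensors="pt",
        max_length=max_input_tokens,
        truncation=True,
    )
    summary_ids = model.generate(
        inputs,
        max_length=max_output_tokens,
        min_length=50,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)

With this version, calling summarize_text(chunk) on each chunk produced by split_text_with_optimized_overlap returns a beam-searched summary of roughly 50 to 200 tokens, with model and inputs on the same device.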