import torch
from transformers import BartForConditionalGeneration, BartTokenizer


class TextSummarizer:
    """Abstractive text summarizer built on the facebook/bart-large-cnn model."""

    def __init__(self):
        """Load the BART model and tokenizer, moving the model to GPU if available."""
        print("Initializing Text Summarizer...")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"Using device: {self.device}")

        # Load model and tokenizer
        self.model_name = "facebook/bart-large-cnn"
        self.tokenizer = BartTokenizer.from_pretrained(self.model_name)
        self.model = BartForConditionalGeneration.from_pretrained(self.model_name).to(self.device)
        # Inference-only usage: make eval mode explicit (disables dropout).
        self.model.eval()
        print(f"Loaded {self.model_name} model and moved to {self.device}")

    def summarize(self, text, max_length=130, min_length=30):
        """Return an abstractive summary of *text*.

        Args:
            text: Input document; tokenized with truncation at BART's
                1024-token encoder limit.
            max_length: Maximum length of the generated summary, in tokens.
            min_length: Minimum length of the generated summary, in tokens.

        Returns:
            The generated summary string, or an
            "Error generating summary: ..." message on failure (string-error
            contract preserved for backward compatibility with callers).
        """
        # Guard: empty/whitespace input would otherwise yield a meaningless
        # generation or a tokenizer error.
        if not text or not text.strip():
            return "Error generating summary: input text is empty"
        try:
            # Tokenize the input text
            inputs = self.tokenizer(text, return_tensors="pt", max_length=1024, truncation=True)
            inputs = inputs.to(self.device)

            # Generate summary. no_grad(): pure inference, so skip building
            # the autograd graph (saves memory and time).
            with torch.no_grad():
                summary_ids = self.model.generate(
                    inputs["input_ids"],
                    # Pass the mask explicitly — generate() otherwise has to
                    # infer it and warns about pad/eos ambiguity.
                    attention_mask=inputs["attention_mask"],
                    max_length=max_length,
                    min_length=min_length,
                    num_beams=4,
                    length_penalty=2.0,
                    early_stopping=True,
                )

            # Decode the generated summary
            summary = self.tokenizer.decode(summary_ids[0], skip_special_tokens=True)
            return summary
        except Exception as e:
            # NOTE(review): broad catch kept deliberately to preserve the
            # original best-effort string-error contract; callers receive a
            # message rather than an exception.
            return f"Error generating summary: {str(e)}"