# NOTE: the following header lines are residue from a Hugging Face file-view
# scrape; commented out so the module parses:
#   JoeArmani — "updates through 4th iteration" — commit 300fe5d — raw / history blame — 1.7 kB
import logging

import torch
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
)
class Paraphraser:
    """Generate paraphrases of short texts with a T5-based seq2seq model.

    Wraps a Hugging Face `AutoModelForSeq2SeqLM` checkpoint (by default the
    `humarin/chatgpt_paraphraser_on_T5_base` paraphrasing model) behind a
    single `paraphrase()` call.
    """

    def __init__(self, model_name='humarin/chatgpt_paraphraser_on_T5_base'):
        """Load tokenizer and model weights for `model_name`.

        Args:
            model_name: Hugging Face model id or local path of a
                seq2seq (encoder-decoder) checkpoint.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        # Inference only: disable dropout / batch-norm updates.
        self.model.eval()

    def paraphrase(self, text, num_return_sequences=5, num_beams=5,
                   num_beam_groups=1, diversity_penalty=0.0, device=None):
        """Return up to `num_return_sequences` paraphrases of `text`.

        Args:
            text: Input sentence to paraphrase.
            num_return_sequences: Number of candidates to return.
            num_beams: Beam width for beam search.
            num_beam_groups: Beam groups for diverse beam search
                (diversity only applies when > 1).
            diversity_penalty: Penalty used by diverse beam search;
                ignored by `generate` when `num_beam_groups == 1`.
            device: Optional torch device (e.g. "cuda"); when given, the
                model and inputs are moved there before generation.

        Returns:
            A list of decoded paraphrase strings, or `[]` if generation
            fails for any reason (the error is logged, not raised).
        """
        try:
            # NOTE(review): T5 tokenizers append EOS automatically, so the
            # explicit " </s>" likely yields a doubled EOS token. Kept
            # byte-identical to preserve the model's observed behavior —
            # confirm against the checkpoint's model card before removing.
            input_text = "paraphrase: " + text + " </s>"
            # `encode_plus` is deprecated; calling the tokenizer directly
            # returns the same BatchEncoding.
            encoding = self.tokenizer(input_text, return_tensors="pt")
            input_ids = encoding["input_ids"]
            # Pass the attention mask through to generate() — dropping it
            # triggers HF warnings and can mis-handle padded inputs.
            attention_mask = encoding["attention_mask"]
            if device is not None:
                input_ids = input_ids.to(device)
                attention_mask = attention_mask.to(device)
                self.model = self.model.to(device)
            # No gradients needed at inference; avoids building the
            # autograd graph during beam search.
            with torch.no_grad():
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_length=256,
                    num_beams=num_beams,
                    num_beam_groups=num_beam_groups,
                    num_return_sequences=num_return_sequences,
                    diversity_penalty=diversity_penalty,
                    early_stopping=True,
                )
            # Bring token ids back to CPU before decoding.
            if device is not None:
                outputs = outputs.cpu()
            return [self.tokenizer.decode(output, skip_special_tokens=True)
                    for output in outputs]
        except Exception:
            # Best-effort contract: log the full traceback (print() lost
            # it entirely) and return an empty list so callers degrade
            # gracefully, matching the original interface.
            logging.getLogger(__name__).exception("Error in paraphrasing")
            return []