"""Gradio app: paraphrase input text with a T5 model from Hugging Face."""

import gradio as gr
from transformers import pipeline

# Maximum accepted input length, in whitespace-separated words.
MAX_WORDS = 700

# Load the paraphrase model once at startup (downloads weights on first run).
model_name = "AventIQ-AI/t5-paraphrase-generation"
paraphrase_pipeline = pipeline("text2text-generation", model=model_name)


def generate_paraphrase(text, temperature):
    """Generate a paraphrased version of the input text.

    Args:
        text: The text to paraphrase.
        temperature: Sampling temperature; higher values produce more
            varied output.

    Returns:
        The paraphrased text, or a user-facing warning/error string.
    """
    if not text.strip():
        return "⚠️ Please enter some text to paraphrase."

    # Reject over-long input before invoking the (expensive) model.
    if len(text.split()) > MAX_WORDS:
        return "⚠️ Input too long! Please enter a maximum of 700 words."

    try:
        result = paraphrase_pipeline(
            text,
            temperature=temperature,
            top_k=50,
            do_sample=True,
            max_new_tokens=1500,     # 🚀 Allows much longer outputs
            repetition_penalty=1.2,  # 🔄 Reduces repetition
            num_return_sequences=1,  # ✅ Ensures a single, complete paraphrase
            # FIX: removed early_stopping=False — that flag only applies to
            # beam search and is ignored (with a warning) when do_sample=True.
        )
        # Extract the generated text; fall back to a warning if the key is
        # missing so the UI never shows a raw KeyError.
        return result[0].get(
            "generated_text", "⚠️ Paraphrasing failed. Please try again."
        ).strip()
    except Exception as e:
        # Boundary handler: surface any pipeline failure to the UI rather
        # than crashing the Gradio worker.
        return f"⚠️ An error occurred: {str(e)}"


# Define Gradio Interface
description = """
## ✨ AI Paraphrasing Tool
Enter text and let AI generate a paraphrased version!
- **Creativity (Temperature)** controls how varied the output is.
- **Input is limited to 700 words.**
- **Now supports much longer paraphrased outputs!**
"""

demo = gr.Interface(
    fn=generate_paraphrase,
    inputs=[
        gr.Textbox(
            label="Enter text",
            placeholder="Type your text to paraphrase...",
            lines=10,  # Bigger input box
        ),
        gr.Slider(0.5, 1.5, value=1.0, step=0.1, label="Creativity (Temperature)"),
    ],
    outputs=gr.Textbox(label="Paraphrased Text", lines=20),  # 🔥 Expands output display
    title="📝 AI Paraphraser",
    description=description,
    # NOTE(review): string themes like "huggingface" are a Gradio 2/3-era
    # feature and are deprecated/removed in Gradio 4.x — verify against the
    # installed gradio version.
    theme="huggingface",
    # FIX: removed live=True — it re-ran a full model inference on every
    # keystroke, which is slow and wasteful; users now submit explicitly.
)

if __name__ == "__main__":
    demo.launch()