import gradio as gr
from transformers import pipeline

# Load the paraphrase model
model_name = "AventIQ-AI/t5-paraphrase-generation"
paraphrase_pipeline = pipeline("text2text-generation", model=model_name)


def generate_paraphrase(text, temperature):
    """Generate a paraphrased version of the input text."""
    if not text.strip():
        return "⚠️ Please enter some text to paraphrase."

    # Limit input to 700 words
    words = text.split()
    if len(words) > 700:
        return "⚠️ Input too long! Please enter a maximum of 700 words."

    try:
        result = paraphrase_pipeline(
            text,
            temperature=temperature,
            top_k=50,
            do_sample=True,
            max_new_tokens=500,  # 🚀 Allows longer outputs
        )
        # Ensure correct output formatting
        return result[0].get("generated_text", "⚠️ Paraphrasing failed. Please try again.")
    except Exception as e:
        return f"⚠️ An error occurred: {str(e)}"


# Define Gradio Interface
description = """
## ✨ AI Paraphrasing Tool
Enter text and let AI generate a paraphrased version!
- **Creativity (Temperature)** controls how varied the output is.
- **Input is limited to 700 words.**
"""

demo = gr.Interface(
    fn=generate_paraphrase,
    inputs=[
        gr.Textbox(label="Enter text", placeholder="Type your text to paraphrase...", lines=5),
        gr.Slider(0.5, 1.5, value=1.0, step=0.1, label="Creativity (Temperature)"),
    ],
    outputs=gr.Textbox(label="Paraphrased Text", lines=10),  # 📝 More space for longer output
    title="📝 AI Paraphraser",
    description=description,
    theme="huggingface",
    live=True,
)

if __name__ == "__main__":
    demo.launch()
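
# Usage sketch (assumptions: this file is saved as app.py and the gradio,
# transformers, and torch packages are installed; these commands are not part
# of the original script):
#
#     pip install gradio transformers torch
#     python app.py    # serves the UI at http://127.0.0.1:7860 by default
#
# Optional tweaks using standard Gradio options (left commented out here):
#     demo.queue()                 # queue concurrent requests for the model
#     demo.launch(share=True)      # expose a temporary public share link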