import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned model and tokenizer directly from the Hugging Face Hub
model = AutoModelForCausalLM.from_pretrained("Manasa1/gpt-finetuned-tweets")
tokenizer = AutoTokenizer.from_pretrained("Manasa1/gpt-finetuned-tweets")


def generate_tweet():
    prompt = "Write a concise, creative tweet reflecting the style and personality in the fine-tuned dataset."
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=100)
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=140,
        num_return_sequences=1,
        do_sample=True,  # required for top_p/temperature to take effect
        top_p=0.8,
        temperature=0.6,
        repetition_penalty=1.2,  # penalizes repetitive tokens
        pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning on GPT-style models
    )
    # Decode only the newly generated tokens so the prompt is not echoed in the output
    generated_tweet = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return generated_tweet.strip()


# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# AI Tweet Generator")
    gr.Markdown("Click the button below to generate a tweet reflecting the fine-tuned personality.")
    generate_button = gr.Button("Generate")
    output_box = gr.Textbox(label="Generated Tweet")
    generate_button.click(generate_tweet, inputs=None, outputs=output_box)

# Launch the app locally
app.launch()