# tweets_clone / app.py
# Commit 6efad01 (verified) — "Update app.py" by Manasa1
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline
# Hugging Face Hub path of the fine-tuned GPT-2 checkpoint.
# Replace 'username/your_model_name' with your Hugging Face model name.
model_dir = "Manasa1/your_model_name"

# Load the fine-tuned tokenizer and weights from the Hub (downloads on first run).
fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
fine_tuned_model = GPT2LMHeadModel.from_pretrained(model_dir)

# Text-generation pipeline wrapping the model/tokenizer pair.
generator = pipeline(
    'text-generation',
    model=fine_tuned_model,
    tokenizer=fine_tuned_tokenizer,
)
# Function to generate tweets
def generate_tweet(prompt):
    """Generate a creative tweet about *prompt* with the fine-tuned GPT-2 model.

    Args:
        prompt: Topic or phrase the tweet should be about (e.g. "AI").

    Returns:
        str: The generated tweet text, with the instruction prompt removed.
    """
    # Instruction-style prompt steering the model toward concise, engaging
    # output that includes a call to action.
    input_prompt = f"Write a creative and engaging tweet about {prompt}. Keep it concise and interesting, and include a call to action or a question for followers to engage with."
    output = generator(
        input_prompt,
        # Budget applies to NEW tokens only; max_length would also count the
        # (long) instruction prompt, leaving a tiny, variable tweet budget.
        max_new_tokens=64,
        num_return_sequences=1,
        # Without do_sample=True the pipeline decodes greedily and silently
        # ignores temperature/top_p.
        do_sample=True,
        temperature=0.7,  # Control creativity
        top_p=0.9,  # Use nucleus sampling
        pad_token_id=fine_tuned_tokenizer.eos_token_id,  # GPT-2 has no pad token
    )
    generated = output[0]['generated_text']
    # The pipeline echoes the prompt at the start of the output. Strip it as a
    # leading prefix rather than via str.replace, which would also delete any
    # later repetition of the prompt text inside the tweet itself.
    if generated.startswith(input_prompt):
        generated = generated[len(input_prompt):]
    return generated.strip()
# Gradio UI: one textbox in, the generated tweet out.
prompt_box = gr.Textbox(
    label="Prompt",
    placeholder="Enter a topic for the tweet (e.g., AI, technology)",
)
tweet_box = gr.Textbox(label="Generated Tweet")

interface = gr.Interface(
    fn=generate_tweet,
    inputs=prompt_box,
    outputs=tweet_box,
    title="AI Tweet Generator",
    description="Enter a topic or phrase, and the AI will generate a creative tweet. Powered by a fine-tuned GPT-2 model.",
)

# Start the Gradio server.
interface.launch()