# tweets_clone / app.py
# Provenance (from the Hugging Face file viewer): uploaded by Manasa1,
# commit 22a0b97 ("Update app.py", verified), 1.67 kB.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import random
from datetime import datetime
from PyPDF2 import PdfReader
import json

# Hugging Face model repo holding the fine-tuned GPT-2 weights.
# Replace 'Manasa1/your_model_name' with your actual model id.
model_dir = "Manasa1/your_model_name"

# The original code referenced GPT2LMHeadModel / GPT2Tokenizer / pipeline
# without importing them (NameError at startup). The Auto* classes imported
# above resolve to the correct GPT-2 classes from the repo's config.
fine_tuned_model = AutoModelForCausalLM.from_pretrained(model_dir)
fine_tuned_tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Create a text-generation pipeline around the fine-tuned model.
generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
# Function to generate tweets
def generate_tweet(prompt):
    """Generate a short tweet continuing from *prompt*.

    Args:
        prompt: Topic or phrase supplied by the user (str).

    Returns:
        The generated tweet text with the echoed input prompt stripped off.
    """
    input_prompt = f"{prompt}\n\nTweet:"  # Format input for clarity
    output = generator(
        input_prompt,
        # max_new_tokens bounds only the generated continuation; the original
        # max_length=50 counted the prompt tokens too, so a long prompt could
        # leave no budget for the tweet itself.
        max_new_tokens=50,
        num_return_sequences=1,
        # do_sample=True is required for temperature/top_p to take effect;
        # without it the pipeline decodes greedily and ignores both knobs.
        do_sample=True,
        temperature=0.7,  # Control creativity
        top_p=0.9,  # Use nucleus sampling
        pad_token_id=fine_tuned_tokenizer.eos_token_id,  # GPT-2 has no pad token
    )
    # Extract the generated text and remove the input prompt from the output
    generated_tweet = output[0]['generated_text'].replace(input_prompt, "").strip()
    return generated_tweet
# Gradio Interface
interface = gr.Interface(
    fn=generate_tweet,
    # gr.inputs / gr.outputs were removed in Gradio 3.x+; pass the
    # component classes directly.
    inputs=gr.Textbox(label="Prompt", placeholder="Enter a topic for the tweet (e.g., AI, technology)"),
    outputs=gr.Textbox(label="Generated Tweet"),
    title="AI Tweet Generator",
    description="Enter a topic or phrase, and the AI will generate a creative tweet. Powered by a fine-tuned GPT-2 model.",
)

# Launch the app only when run as a script (Spaces executes app.py directly).
if __name__ == "__main__":
    interface.launch()