"""Tweet Generator — Gradio app serving a fine-tuned GPT-2 model from the Hub."""
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

# Hub repo id of the fine-tuned GPT-2 checkpoint to serve.
model_dir = "Manasa1/finetuned_GPT23"

# Load the fine-tuned model and its tokenizer once at startup so every
# request reuses the same in-memory weights.
fine_tuned_model = GPT2LMHeadModel.from_pretrained(model_dir)
fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)

# Text-generation pipeline wrapping the model + tokenizer pair.
generator = pipeline(
    "text-generation",
    model=fine_tuned_model,
    tokenizer=fine_tuned_tokenizer,
)
def generate_tweet(input_question):
    """Generate a tweet answering *input_question* with the fine-tuned GPT-2.

    Args:
        input_question: Free-form prompt/question typed by the user.

    Returns:
        The generated answer text (prompt prefix removed when present).
    """
    # Mirror the "Question: ... Answer:" format used during fine-tuning.
    prompt = f"Question: {input_question} Answer:"
    # do_sample=True is required for temperature/top_p to take effect;
    # without it the pipeline decodes greedily and silently ignores them.
    # max_new_tokens budgets the continuation independently of prompt length
    # (max_length would count the prompt tokens too).
    output = generator(
        prompt,
        max_new_tokens=100,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.9,
        top_p=0.9,
    )
    text = output[0]["generated_text"]
    # The pipeline echoes the prompt; return only the generated answer.
    if text.startswith(prompt):
        text = text[len(prompt):].strip()
    return text
# Build the Gradio UI: one text input (prompt) -> one text output (tweet).
interface = gr.Interface(
    fn=generate_tweet,
    inputs=gr.Textbox(
        label="Enter a prompt/question",
        placeholder="Write a tweet about startup.",
    ),
    outputs=gr.Textbox(label="Generated Tweet"),
    title="Tweet Generator",
    description="Generate tweets based on prompts using a fine-tuned GPT-2 model.",
)

# Launch only when executed as a script, so importing this module
# (e.g. for testing) does not start a server.
if __name__ == "__main__":
    interface.launch()