import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Replace 'username/your_model_name' with your Hugging Face model name
model_dir = "Manasa1/your_model_name"
fine_tuned_model = AutoModelForCausalLM.from_pretrained(model_dir)
fine_tuned_tokenizer = AutoTokenizer.from_pretrained(model_dir)
# Create a text-generation pipeline
generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
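# Note (an assumption, not in the original script): on GPU hardware you could
# pass device=0 to the pipeline call above to run generation on the GPU,
# e.g. pipeline('text-generation', model=..., tokenizer=..., device=0).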
# Function to generate tweets
def generate_tweet(prompt):
    input_prompt = f"{prompt}\n\nTweet:"  # Format input for clarity
    output = generator(
        input_prompt,
        max_length=50,          # Limit the total length of the generated text
        num_return_sequences=1,
        temperature=0.7,        # Control creativity
        top_p=0.9,              # Use nucleus sampling
        pad_token_id=fine_tuned_tokenizer.eos_token_id,  # Avoid padding issues
    )
    # Extract the generated text and remove the input prompt from the output
    generated_tweet = output[0]['generated_text'].replace(input_prompt, "").strip()
    return generated_tweet
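# Optional sanity check (a minimal sketch; the prompt "AI" is illustrative and
# not part of the original app). Uncomment to confirm the model loads and
# generates before wiring up the UI:
# print(generate_tweet("AI"))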
# Gradio Interface
interface = gr.Interface(
    fn=generate_tweet,
    inputs=gr.Textbox(label="Prompt", placeholder="Enter a topic for the tweet (e.g., AI, technology)"),
    outputs=gr.Textbox(label="Generated Tweet"),
    title="AI Tweet Generator",
    description="Enter a topic or phrase, and the AI will generate a creative tweet. Powered by a fine-tuned GPT-2 model.",
)
# Launch the app
interface.launch()
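# Note (optional, not in the original script): when running locally rather
# than on Spaces, interface.launch(share=True) also creates a temporary
# public link for quick sharing.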