Spaces:
Sleeping
Sleeping
File size: 3,122 Bytes
26532db 5630dd5 c685a0d c2c3e4f 5630dd5 2f4a891 22a0b97 07099e3 22a0b97 c685a0d 9d629bd c685a0d 5630dd5 c685a0d 9d629bd c685a0d 9d629bd c685a0d 07099e3 5630dd5 07099e3 22a0b97 c685a0d eafd83e 5630dd5 c685a0d c2c3e4f 5630dd5 9d629bd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 |
import gradio as gr
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
import re
# Load the fine-tuned GPT-2 model and tokenizer from the Hugging Face Hub.
# NOTE(review): from_pretrained downloads weights at import time — requires
# network access on first run; subsequent runs use the local cache.
model_dir = "Manasa1/finetuned_GPT23"
fine_tuned_model = GPT2LMHeadModel.from_pretrained(model_dir)
fine_tuned_tokenizer = GPT2Tokenizer.from_pretrained(model_dir)
# Create a text-generation pipeline used by generate_tweet() below.
generator = pipeline('text-generation', model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)
# Truncate generated text to tweet length, preferring complete sentences.
def truncate_tweet(tweet, max_length=280):
    """Trim *tweet* to at most *max_length* characters.

    When truncation is needed, cut back to the last complete sentence
    (ending in '.', '!' or '?'); failing that, cut at the last space so
    the tweet never ends mid-word. The result is stripped of surrounding
    whitespace.
    """
    if len(tweet) > max_length:
        tweet = tweet[:max_length]
        # '.', '!' and '?' all end sentences — the original only checked '.'.
        sentence_end = max(tweet.rfind("."), tweet.rfind("!"), tweet.rfind("?"))
        if sentence_end != -1:
            tweet = tweet[:sentence_end + 1]
        else:
            # No sentence boundary at all: avoid a mid-word cut.
            last_space = tweet.rfind(" ")
            if last_space != -1:
                tweet = tweet[:last_space]
    return tweet.strip()
# Append topic-appropriate hashtags and an emoji based on the input question.
def add_relevant_tags(tweet, input_question):
    """Return *tweet* with up to two hashtags and one emoji appended.

    The topic is detected by a case-insensitive whole-word keyword match
    against *input_question*. If no topic matches, the tweet is returned
    unchanged apart from stripping surrounding whitespace.
    """
    # Pre-defined mappings of topics to hashtags and emojis.
    topic_to_hashtags = {
        "startup": ["#Startups", "#Innovation", "#Entrepreneurship"],
        "AI": ["#AI", "#ArtificialIntelligence", "#Tech"],
        "technology": ["#Technology", "#Future", "#Tech"],
        "future": ["#Future", "#Vision", "#Tech"],
    }
    # NOTE(review): the original emoji literals were mojibake ("π", "π€", ...);
    # reconstructed with plausible emojis — confirm against the intended set.
    topic_to_emojis = {
        "startup": "🚀",
        "AI": "🤖",
        "technology": "💻",
        "future": "🔮",
    }
    # Whole-word matching avoids false hits such as "ai" inside "said"
    # (the original used a bare substring test).
    topic = None
    for key in topic_to_hashtags:
        if re.search(rf"\b{re.escape(key)}\b", input_question, re.IGNORECASE):
            topic = key
            break
    # Add relevant hashtags and emoji only if a topic was detected.
    if topic:
        hashtags = " ".join(topic_to_hashtags[topic][:2])  # take up to 2 hashtags
        emoji = topic_to_emojis[topic]
        tweet = f"{tweet} {emoji} {hashtags}"
    return tweet.strip()
def generate_tweet(input_question):
    """Generate a tweet-length response to *input_question*.

    Runs the fine-tuned GPT-2 pipeline, strips prompt/label artifacts the
    model may echo, truncates to tweet length, and decorates the result
    with topic hashtags/emoji.
    """
    # Format the input without "Question:" and "Answer:" labels.
    prompt = input_question.strip()
    # Generate with a generous max_length; truncate_tweet() trims it later.
    output = generator(prompt, max_length=300, num_return_sequences=1,
                       temperature=0.7, top_p=0.9)
    # Extract the generated text and clean it.
    tweet = output[0]['generated_text']
    # Remove "Question:"/"Answer:"/"A:" labels the model may echo.
    # (The original pattern ended with a stray '|', creating an empty
    # alternative that matched at every position; removed.)
    tweet = re.sub(r"(Question:|Answer:|A:)", "", tweet).strip()
    # Remove any echo of the input question itself.
    tweet = tweet.replace(input_question, "").strip()
    # Truncate and add relevant hashtags and emojis.
    tweet = truncate_tweet(tweet)
    tweet = add_relevant_tags(tweet, input_question)
    return tweet
# Create the Gradio interface wiring generate_tweet() to a simple text UI.
interface = gr.Interface(
    fn=generate_tweet,
    inputs=gr.Textbox(label="Enter a prompt/question", placeholder="Write a tweet about AI."),
    outputs=gr.Textbox(label="Generated Tweet"),
    title="Tweet Generator",
    description="Generate concise, relevant tweets enriched with appropriate emojis and hashtags using a fine-tuned GPT-2 model."
)
# Launch the interface. (Removed a stray trailing '|' scrape artifact that
# made this line a syntax error.)
interface.launch()