import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned model and tokenizer directly from the Hugging Face Hub
model = AutoModelForCausalLM.from_pretrained("Manasa1/gpt-finetuned-tweets")
tokenizer = AutoTokenizer.from_pretrained("Manasa1/gpt-finetuned-tweets")


def generate_tweet():
    prompt = "Write a concise, creative tweet reflecting the style and personality in the fine-tuned dataset."
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=100)
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=140,                 # prompt tokens + generated tokens
        num_return_sequences=1,
        do_sample=True,                 # required for top_p/temperature to take effect
        top_p=0.8,
        temperature=0.6,
        repetition_penalty=1.2,         # penalizes repetitive tokens
        pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning for GPT-style models
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    generated_tweet = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return generated_tweet.strip()

# Gradio Interface
with gr.Blocks() as app:
    gr.Markdown("# AI Tweet Generator")
    gr.Markdown("Click the button below to generate a tweet reflecting the fine-tuned personality.")
    generate_button = gr.Button("Generate")
    output_box = gr.Textbox(label="Generated Tweet")
    
    generate_button.click(generate_tweet, inputs=None, outputs=output_box)

# Launch the app locally
app.launch()