# NOTE: the lines "Spaces:" / "Sleeping" below were Hugging Face Spaces
# page chrome captured when this file was copied from the web UI; they
# are not part of the program and are preserved here only as a comment.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# NOTE(review): the imports below are unused in this file; kept in case
# other code depends on this module's namespace — confirm before removing.
import json
import random
from datetime import datetime

from PyPDF2 import PdfReader

# Replace 'username/your_model_name' with your Hugging Face model name.
model_name = "username/your_model_name"

# Load the tokenizer and causal-LM weights once at import time so every
# request reuses the same in-memory model.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def generate_tweet(prompt):
    """Generate one tweet-length text continuation of *prompt*.

    Parameters
    ----------
    prompt : str
        Topic or seed words supplied by the user.

    Returns
    -------
    str
        Generated text, truncated to Twitter's 280-character limit.
    """
    # Tokenize the input prompt into model tensors.
    inputs = tokenizer(prompt, return_tensors="pt")

    # Sample one continuation from the model.
    # NOTE: `max_length` counts *tokens*, not characters, so the decoded
    # text can exceed 280 characters; the hard character limit is
    # enforced after decoding below.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],  # be explicit; avoids pad/attention warnings
        max_length=280,            # token budget for prompt + continuation
        num_return_sequences=1,    # one tweet per call
        top_k=50,                  # sample from the 50 most likely tokens
        top_p=0.95,                # nucleus sampling
        temperature=0.7,           # moderate creativity
        do_sample=True,            # enable sampling (not greedy decoding)
    )

    # Decode and enforce the 280-character tweet limit.
    tweet = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return tweet[:280]
# Minimal Gradio UI: a single text box in, generated tweet text out.
interface = gr.Interface(
    fn=generate_tweet,  # called with the text-box contents on submit
    inputs="text",
    outputs="text",
    title="AI Tweet Generator",
    description="Enter a topic or a few words, and the AI will generate a creative tweet!",
)

# Launch the app. Guarded so that importing this module (e.g. for tests)
# does not start a web server; running the script directly still does.
if __name__ == "__main__":
    interface.launch()