# Hugging Face Spaces demo app (Space status banner: "Sleeping")
import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import subprocess  # NOTE(review): currently unused; kept in case other parts of the file rely on it

# Load the pre-trained GPT-2 model and tokenizer once at startup so every
# request reuses the same weights instead of reloading them per call.
model_name = "gpt2"  # You can change this to another model if needed
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
def generate_text(prompt: str, max_length: int = 100) -> str:
    """Generate a GPT-2 continuation of *prompt* and save it to disk.

    Args:
        prompt: The input text to continue.
        max_length: Maximum total token length of the output (default 100,
            matching the original behavior).

    Returns:
        The decoded generated text (prompt plus continuation).

    Side effects:
        Overwrites ``generated_text.txt`` in the working directory with the
        generated text.
    """
    # Encode the input text and generate a continuation.
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # no_grad: inference only — avoids building an autograd graph.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=1,
            pad_token_id=50256,  # GPT-2 EOS token used as pad
        )
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

    # Save the generated text; explicit UTF-8 so the result does not depend
    # on the platform's default locale encoding.
    with open("generated_text.txt", "w", encoding="utf-8") as file:
        file.write(generated_text)

    return generated_text
# Simple text-in / text-out Gradio UI wrapping generate_text.
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
iface.launch()