SakanaAI / app.py
tamatwi's picture
Update app.py
c74d313 verified
raw
history blame
1.04 kB
from transformers import pipeline
import gradio as gr
import spaces
# Load the text-generation pipeline once at import time.
# NOTE(review): despite the original "with optimizations" comment, no
# optimizations are applied here -- this loads a 7B-parameter model with
# default settings (full precision, default device placement). Consider
# torch_dtype / device_map if memory becomes an issue.
pipe = pipeline("text-generation", model="SakanaAI/EvoLLM-JP-v1-7B")
# Define a function to generate text based on user input
# NOTE(review): dead code -- this definition is immediately shadowed by the
# @spaces.GPU-decorated generate_text defined right below it, so this
# version is never called. It should be removed.
def generate_text(prompt):
    # Take the first (and only, given num_return_sequences=1) candidate.
    result = pipe(prompt, max_length=50, num_return_sequences=1)
    return result[0]['generated_text']
# Text-generation entry point used by the Gradio interface.
@spaces.GPU
def generate_text(prompt):
    """Generate a continuation for one prompt or a batch of prompts.

    The Interface below is created with ``batch=True``, so Gradio invokes
    this function with a *list* of prompt strings and expects a list of
    output strings of the same length. A bare string is still accepted so
    direct (non-batched) calls keep working.

    Args:
        prompt: A prompt string, or a list of prompt strings when called
            by Gradio's batching queue.

    Returns:
        The generated text (``str``) for a single prompt, or a list of
        generated texts (one per input) for a batched call.
    """
    if isinstance(prompt, str):
        # Single-prompt path: pipeline returns a list of candidate dicts.
        result = pipe(prompt, max_length=50, num_return_sequences=1)
        return result[0]['generated_text']
    # Batched path: pipeline returns one candidate-list per input prompt;
    # take the single candidate from each so Gradio gets list[str] back.
    results = pipe(prompt, max_length=50, num_return_sequences=1)
    return [candidates[0]['generated_text'] for candidates in results]
# Create the Gradio interface with batching enabled: ``batch=True`` makes
# Gradio queue concurrent requests and deliver up to ``max_batch_size``
# prompts to ``fn`` at once (as a list).
iface = gr.Interface(
    fn=generate_text,
    # The ``gr.inputs`` namespace was deprecated in Gradio 3.x and removed
    # in 4.x; components now live on the top-level ``gr`` namespace.
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with SakanaAI/EvoLLM-JP-v1-7B",
    description="Enter a prompt and the model will generate a continuation of the text.",
    batch=True,
    max_batch_size=4,
)
# Launch the Gradio server only when this file is executed directly
# (guard keeps the app importable without side effects).
if __name__ == "__main__":
    iface.launch()