# SakanaAI / app.py — Hugging Face Space
# Provenance (from the HF page): last change "Update app.py" by tamatwi,
# commit 546a17c (verified), 827 bytes.
from transformers import pipeline
import gradio as gr
# Initialize the text generation pipeline with optimizations
# NOTE(review): this loads SakanaAI/EvoLLM-JP-v1-7B eagerly at import time
# (first run downloads the weights) — presumably intended for a persistent
# Space process; confirm before reusing this module elsewhere.
pipe = pipeline("text-generation", model="SakanaAI/EvoLLM-JP-v1-7B")
# Define a function to generate text based on user input
def generate_text(prompt):
    """Generate a text continuation for one prompt or a batch of prompts.

    The Gradio interface below is created with ``batch=True``, which means
    Gradio invokes this function with a *list* of prompts and expects a list
    of outputs back. The original implementation only handled a single
    string, so batched calls returned the wrong shape. This version supports
    both, keeping the single-string behavior unchanged for direct callers.

    Args:
        prompt: A prompt string, or a list of prompt strings when called by
            Gradio in batched mode.

    Returns:
        The generated text (``str``) for a single prompt, or a list of
        generated texts (one per input) for a batch.
    """
    if isinstance(prompt, list):
        # The pipeline returns one list of candidate dicts per input prompt.
        results = pipe(prompt, max_length=50, num_return_sequences=1)
        return [candidates[0]['generated_text'] for candidates in results]
    result = pipe(prompt, max_length=50, num_return_sequences=1)
    return result[0]['generated_text']
# Create a Gradio interface with batching enabled.
# Fixes: gr.inputs.Textbox is the removed pre-3.x namespace (crashes on
# current Gradio) — use gr.Textbox; the title named a different model
# (DiscoPOP-zephyr-7b-gemma, a template leftover) than the one loaded above.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with EvoLLM-JP-v1-7B",
    description="Enter a prompt and the model will generate a continuation of the text.",
    # batch=True makes Gradio pass a list of prompts to fn (up to
    # max_batch_size at a time) and expect a list of outputs back.
    batch=True,
    max_batch_size=4,
)
# Launch the interface
# Standard script entry point: start the Gradio server only when this file
# is executed directly, not when it is imported as a module.
if __name__ == "__main__":
    iface.launch()