import gradio as gr
import torch
from transformers import AutoTokenizer, pipeline

# Load the model and tokenizer
model_name = "akjindal53244/Llama-3.1-Storm-8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
pipe = pipeline(
    "text-generation",
    model=model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
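# Assumption (not part of the original app): bfloat16 weights need hardware
# with bf16 support, e.g. recent GPUs. A minimal CPU fallback sketch, if needed:
#     dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
#     pipe = pipeline("text-generation", model=model_name,
#                     torch_dtype=dtype, device_map="auto")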
# HTML content
HTML_CONTENT = """
<h1>Llama-3.1-Storm-8B Text Generation</h1>
<p>Generate text using the powerful Llama-3.1-Storm-8B model. Enter a prompt and let the AI create!</p>
<div class="llama-image">
    <img src="https://cdn-uploads.huggingface.co/production/uploads/64c75c1237333ccfef30a602/tmOlbERGKP7JSODa6T06J.jpeg" alt="Llama" style="width:200px; border-radius:10px;">
</div>
"""
def generate_text(prompt, max_length, temperature):
    # Wrap the user prompt in the model's chat template
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    formatted_prompt = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False
    )
    outputs = pipe(
        formatted_prompt,
        max_new_tokens=max_length,
        do_sample=True,
        temperature=temperature,
        top_k=100,
        top_p=0.95,
    )
    # The pipeline output includes the prompt; return only the completion
    return outputs[0]["generated_text"][len(formatted_prompt):]
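# Usage sketch (hypothetical prompt; runs only after the pipeline above loads):
#     generate_text("Write a haiku about storms.", max_length=64, temperature=0.7)
# returns just the generated continuation, since the prompt prefix is sliced off.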
with gr.Blocks() as demo:
    gr.HTML(HTML_CONTENT)
    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(label="Prompt", lines=5)
            max_length = gr.Slider(minimum=1, maximum=500, value=128, step=1, label="Max Length")
            temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
            submit_button = gr.Button("Generate")
        with gr.Column(scale=2):
            output = gr.Textbox(label="Generated Text", lines=10)

    submit_button.click(generate_text, inputs=[prompt, max_length, temperature], outputs=[output])
if __name__ == "__main__":
    demo.launch()
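# Note: on Hugging Face Spaces the plain demo.launch() above is all that is
# needed; when testing locally, Gradio also accepts demo.launch(share=True)
# to expose a temporary public link.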