# NOTE: the lines "Spaces: / Sleeping / Sleeping" were Hugging Face Spaces
# page-header residue from a copy-paste, not code; kept here as a comment.
import os

import gradio as gr
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline

# Model served by this Space; gated models (like Gemma) require an HF token,
# injected via the Space's secrets as HUGGINGFACE_TOKEN (None if unset).
model_id = "google/gemma-3-12b-it"
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
# Model loading + inference happen inside the handler: on ZeroGPU a GPU is
# attached only while a @spaces.GPU-decorated function is running, so the
# model must be placed on device within that window. Without the decorator
# the function would run without a GPU allocation.
@spaces.GPU
def generate(prompt):
    """Continue *prompt* with up to 100 sampled tokens from the Gemma model.

    Returns the full generated text (pipeline output includes the prompt).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token, device_map="auto")
    pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
    # Sampled decoding; temperature 0.7 trades determinism for variety.
    output = pipeline(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    return output[0]["generated_text"]
# Build and launch the Gradio UI. Title fixed to match the configured
# model (google/gemma-3-12b-it), which is 12B — not 27B as originally shown.
gr.Interface(
    fn=generate,
    inputs=gr.Text(label="Enter your prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Gemma-3-12B Inference (ZeroGPU)",
).launch()