AR_Testing / app.py
import os
import gradio as gr
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
model_id = "google/gemma-3-12b-it"
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
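# Gemma checkpoints are gated on the Hugging Face Hub, so HUGGINGFACE_TOKEN must be
# set (e.g. as a Space secret) for the downloads below to succeed.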
# εŒ…ε«ζ¨‘εž‹εŠ θ½½ + ζŽ¨η†
@spaces.GPU
def generate(prompt):
    # Load the tokenizer and model (reloaded on every request), placing weights automatically
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token, device_map="auto")
    # Sampled generation; return the full generated string (prompt + completion)
    pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
    output = pipeline(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    return output[0]["generated_text"]
# ζž„ε»Ίη•Œι’
gr.Interface(
    fn=generate,
    inputs=gr.Text(label="Enter your prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Gemma-3-12B Inference (ZeroGPU)"
).launch()