LoufAn committed on
Commit 7010950 · 1 Parent(s): 15ca679

Update app.py

Files changed (1)
  1. app.py +19 -12
app.py CHANGED
@@ -1,20 +1,27 @@
-import spaces
-from diffusers import DiffusionPipeline
+import os
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
 
+# Use the Secret you added in Spaces
+hf_token = os.environ.get("HUGGINGFACE_TOKEN")
+
+# Load the tokenizer and model (with the token)
 model_id = "google/gemma-3-27b-it"
+tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
+model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token, device_map="auto")
 
-pipe = DiffusionPipeline.from_pretrained(
-    model_id,
-    device_map="balanced"
-)
-pipe.to('cuda')
+# Build the inference pipeline
+pipe = TextGenerationPipeline(model=model, tokenizer=tokenizer)
 
-@spaces.GPU
+# Inference function
 def generate(prompt):
-    return pipe(prompt).images
+    output = pipe(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
+    return output[0]["generated_text"]
 
+# Gradio UI
 gr.Interface(
     fn=generate,
-    inputs=gr.Text(),
-    outputs=gr.Gallery(),
-).launch()
+    inputs=gr.Text(label="Enter your prompt"),
+    outputs=gr.Textbox(label="Generated Text"),
+    title="Gemma-3-27B Text Generation"
+).launch()
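
A possible follow-up, not part of this commit: gemma-3-27b-it is an instruction-tuned model, so wrapping the user prompt in the tokenizer's chat template before calling the pipeline usually produces better responses than passing raw text. A minimal sketch of such a generate function, reusing the tokenizer and pipe objects defined in app.py above; the sampling parameters simply mirror the ones in the diff:

def generate(prompt):
    # Format the raw prompt with Gemma's chat template so the model sees
    # the conversation format it was instruction-tuned on.
    messages = [{"role": "user", "content": prompt}]
    chat_prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    output = pipe(chat_prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    return output[0]["generated_text"]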