Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,52 +1,7 @@
|
|
1 |
-
import os
|
2 |
import gradio as gr
|
3 |
-
from huggingface_hub import login
|
4 |
-
from transformers import pipeline
|
5 |
|
6 |
-
|
7 |
-
|
8 |
-
# Authenticate with the Hugging Face Hub so gated models (such as Gemma) can
# be downloaded.
# BUGFIX: `token` was referenced below but never assigned anywhere in this
# file, so `if token:` raised NameError at import time.  Read it from the
# HF_TOKEN environment variable (which the warning message already names).
token = os.getenv("HF_TOKEN")
if token:
    login(token)
else:
    print("讗讝讛专讛: 诇讗 谞诪爪讗 诪驻转讞 HF_TOKEN!")

# Load the model.
model_name = "google/gemma-3n-E2B-it-litert-preview"
print(f"讟讜注谉 诪讜讚诇 {model_name}...")

try:
    # Build a text-generation pipeline; device_map/torch_dtype="auto" let
    # transformers pick placement and precision, and the model is requested
    # in 4-bit quantized form to fit small hardware.
    generator = pipeline(
        "text-generation",
        model=model_name,
        device_map="auto",
        torch_dtype="auto",
        model_kwargs={"quantization_config": {"load_in_4bit": True}},
    )
    print("讛诪讜讚诇 谞讟注谉 讘讛爪诇讞讛!")
except Exception as e:
    # Keep the app importable even when loading fails; ask_model() checks
    # for None and reports the failure to the user instead of crashing.
    print(f"砖讙讬讗讛 讘讟注讬谞转 讛诪讜讚诇: {str(e)}")
    generator = None
|
29 |
-
|
30 |
-
def ask_model(prompt):
    """Send *prompt* to the loaded text-generation pipeline and return its reply.

    Returns a (Hebrew) error string instead of raising when the model failed
    to load or when generation itself throws.
    """
    # Guard clause: `generator` is set to None at module level when the
    # pipeline could not be constructed.
    if generator is None:
        return "讛诪讜讚诇 诇讗 谞讟注谉 讻专讗讜讬. 讘讚讜拽 讗转 讛诇讜讙讬诐."

    chat = [{"role": "user", "content": prompt}]
    try:
        responses = generator(
            chat,
            max_new_tokens=200,
            return_full_text=False,
        )
    except Exception as e:
        # Surface the failure as a user-visible string rather than a traceback.
        return f"砖讙讬讗讛 讘拽讘诇转 转砖讜讘讛: {str(e)}"
    return responses[0]["generated_text"]
|
43 |
-
|
44 |
-
# Legacy UI wiring: a three-line textbox feeding ask_model(), with the answer
# shown in a labelled output box.  Options are gathered in one place and
# unpacked into the Interface constructor.
_ui_options = dict(
    fn=ask_model,
    inputs=gr.Textbox(lines=3, placeholder="砖讗诇 砖讗诇讛..."),
    outputs=gr.Textbox(label="转砖讜讘讛"),
    title="Genie - 爪'讗讟讘讜讟 Gemma 3n",
    description="爪'讗讟讘讜讟 诪讘讜住住 注诇 诪讜讚诇 Gemma 3n 砖诇 Google",
)
demo = gr.Interface(**_ui_options)
demo.launch()
|
|
|
|
|
1 |
import gradio as gr
|
|
|
|
|
2 |
|
3 |
+
def hello(name):
    """Return the greeting string for *name* (used as the Gradio handler)."""
    greeting = f"砖诇讜诐 {name}!"
    return greeting
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
+
demo = gr.Interface(fn=hello, inputs="text", outputs="text")
|
7 |
demo.launch()
|