dror201031 committed on
Commit
d0b32be
verified
1 Parent(s): f068274

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -32
app.py CHANGED
@@ -1,58 +1,69 @@
 
1
  import sys
2
  import subprocess
3
- import os
 
 
 
4
 
5
- # 讛转拽谞转 讞讘讬诇讜转 讞住专讜转
6
- print("诪转拽讬谉 讞讘讬诇讜转 谞讚专砖讜转...")
7
  try:
8
- subprocess.check_call([sys.executable, "-m", "pip", "install",
9
- "transformers>=4.38.0",
10
- "huggingface_hub>=0.20.0",
11
- "torch>=2.0.0",
12
- "accelerate>=0.25.0",
13
- "bitsandbytes>=0.40.0"])
 
 
 
 
 
 
 
 
 
14
  print("讛转拽谞转 讛讞讘讬诇讜转 讛讜砖诇诪讛 讘讛爪诇讞讛!")
15
- except Exception as e:
16
- print(f"砖讙讬讗讛 讘讛转拽谞转 讛讞讘讬诇讜转: {str(e)}")
17
 
18
- # 讬讘讜讗 砖诇 讛住驻专讬讜转 讛谞讚专砖讜转
19
  try:
20
- import gradio as gr
21
  from huggingface_hub import login
22
- from transformers import pipeline
 
23
  print("讬讘讜讗 讛住驻专讬讜转 讛爪诇讬讞!")
24
- except Exception as e:
25
  print(f"砖讙讬讗讛 讘讬讘讜讗 讛住驻专讬讜转: {str(e)}")
26
  sys.exit(1)
27
 
28
- # 讛转讞讘专讜转 诇-Hugging Face 注诐 诪驻转讞 诪讛讙讚专讜转 讛住讜讚讜转
29
  token = os.environ.get("HF_TOKEN")
30
  if token:
31
  login(token)
32
  print("讛转讞讘专讜转 诇-Hugging Face 讛爪诇讬讞讛!")
33
  else:
34
- print("讗讝讛专讛: 诇讗 谞诪爪讗 诪驻转讞 HF_TOKEN!")
35
 
36
- # 讟注讬谞转 讛诪讜讚诇
37
  model_name = "google/gemma-3n-E2B-it-litert-preview"
38
- print(f"讟讜注谉 诪讜讚诇 {model_name}...")
39
 
 
 
 
40
  try:
41
- generator = pipeline(
42
  "text-generation",
43
  model=model_name,
44
  device_map="auto",
45
  torch_dtype="auto",
46
  model_kwargs={"quantization_config": {"load_in_4bit": True}}
47
  )
48
- print("讛诪讜讚诇 谞讟注谉 讘讛爪诇讞讛!")
49
  except Exception as e:
50
  print(f"砖讙讬讗讛 讘讟注讬谞转 讛诪讜讚诇: {str(e)}")
51
- generator = None
52
 
53
  def ask_model(prompt):
54
  if generator is None:
55
- return "讛诪讜讚诇 诇讗 谞讟注谉 讻专讗讜讬. 讘讚讜拽 讗转 讛诇讜讙讬诐."
56
 
57
  try:
58
  outputs = generator(
@@ -62,16 +73,32 @@ def ask_model(prompt):
62
  )
63
  return outputs[0]["generated_text"]
64
  except Exception as e:
65
- return f"砖讙讬讗讛 讘拽讘诇转 转砖讜讘讛: {str(e)}"
66
 
67
- # 讬爪讬专转 诪诪砖拽 讛诪砖转诪砖
68
- demo = gr.Interface(
69
- fn=ask_model,
70
- inputs=gr.Textbox(lines=3, placeholder="砖讗诇 砖讗诇讛..."),
71
- outputs=gr.Textbox(label="转砖讜讘讛"),
72
- title="Genie - 爪'讗讟讘讜讟 Gemma 3n",
73
- description="爪'讗讟讘讜讟 诪讘讜住住 注诇 诪讜讚诇 Gemma 3n 砖诇 Google"
74
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
  # 讛驻注诇转 讛诪诪砖拽
77
  demo.launch()
 
1
import os
import sys
import subprocess
import datetime

# NOTE(review): gradio is imported here but is not in the install list below,
# so a missing gradio fails before the installer runs — confirm it is
# preinstalled in the deployment environment.
import gradio as gr

# Install the required packages if any of them are missing.
# Fix: the startup banner previously shelled out via os.popen('date ...'),
# which is non-portable (no `date` binary on Windows) and leaves the pipe
# unclosed; stdlib datetime produces the identical "%Y-%m-%d %H:%M:%S" text.
print("===== Application Startup at",
      datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
      "=====")

try:
    import transformers
    import huggingface_hub
    import torch
    import accelerate
    import bitsandbytes
except ImportError:
    print("诪转拽讬谉 讞讘讬诇讜转 谞讚专砖讜转...")
    packages = [
        "transformers>=4.38.0",
        "huggingface_hub>=0.20.0",
        "torch>=2.0.0",
        "accelerate>=0.25.0",
        "bitsandbytes>=0.40.0",
    ]
    subprocess.check_call([sys.executable, "-m", "pip", "install"] + packages)
    print("讛转拽谞转 讛讞讘讬诇讜转 讛讜砖诇诪讛 讘讛爪诇讞讛!")
 
 
26
 
27
# Import the required libraries (installed above if they were missing);
# abort the app entirely when any of them still cannot be imported.
try:
    import transformers
    from huggingface_hub import login
    import torch
    import gradio as gr
except ImportError as err:
    print(f"砖讙讬讗讛 讘讬讘讜讗 讛住驻专讬讜转: {str(err)}")
    sys.exit(1)
else:
    print("讬讘讜讗 讛住驻专讬讜转 讛爪诇讬讞!")
37
 
38
# Authenticate with Hugging Face using the token from the environment.
hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    # Without a token, gated models may be inaccessible — warn and continue.
    print("讗讝讛专讛: 讟讜拽谉 HF_TOKEN 诇讗 诪讜讙讚专 讘住讘讬讘转 讛注讘讜讚讛. 讬讬转讻谉 砖诇讗 转讛讬讛 讙讬砖讛 诇诪讜讚诇.")
else:
    login(hf_token)
    print("讛转讞讘专讜转 诇-Hugging Face 讛爪诇讬讞讛!")
45
 
46
# The model to serve.
model_name = "google/gemma-3n-E2B-it-litert-preview"


def _load_generator(name):
    """Build a 4-bit text-generation pipeline for *name*; return None on failure."""
    try:
        pipe = transformers.pipeline(
            "text-generation",
            model=name,
            device_map="auto",
            torch_dtype="auto",
            model_kwargs={"quantization_config": {"load_in_4bit": True}},
        )
    except Exception as err:
        print(f"砖讙讬讗讛 讘讟注讬谞转 讛诪讜讚诇: {str(err)}")
        return None
    print("讟注讬谞转 讛诪讜讚诇 讛爪诇讬讞讛!")
    return pipe


# Load the model; `generator` stays None on failure so the UI can report it.
print(f"讟讜注谉 诪讜讚诇 {model_name}...")
generator = _load_generator(model_name)
 
63
 
64
  def ask_model(prompt):
65
  if generator is None:
66
+ return "讛诪讜讚诇 诇讗 谞讟注谉 讘讛爪诇讞讛. 讘讚讜拽 讗转 诇讜讙 讛砖讙讬讗讜转."
67
 
68
  try:
69
  outputs = generator(
 
73
  )
74
  return outputs[0]["generated_text"]
75
  except Exception as e:
76
+ return f"砖讙讬讗讛 讘讛驻注诇转 讛诪讜讚诇: {str(e)}"
77
 
78
# Build the user interface: prompt box, answer box, submit button, examples.
with gr.Blocks() as demo:
    gr.Markdown("# 诪讜讚诇 讙'诪讛 3n")
    gr.Markdown("讻转讜讘 砖讗诇讛 讗讜 讘拽砖讛 讜讛诪讜讚诇 讬注谞讛:")

    with gr.Row():
        input_text = gr.Textbox(
            placeholder="讻转讜讘 讻讗谉 讗转 讛砖讗诇讛 砖诇讱...",
            lines=3,
            label="拽诇讟",
        )
        output_text = gr.Textbox(label="转砖讜讘转 讛诪讜讚诇", lines=10)

    submit_btn = gr.Button("砖诇讞")
    submit_btn.click(fn=ask_model, inputs=input_text, outputs=output_text)

    example_prompts = [
        "诪讛 讚注转讱 注诇 讘讬谞讛 诪诇讗讻讜转讬转?",
        "讻转讜讘 住讬驻讜专 拽爪专 注诇 讞转讜诇 讜讻诇讘 砖讛诐 讞讘专讬诐",
        "讛住讘专 诇讬 讗转 转讜专转 讛讬讞住讜转 讘爪讜专讛 驻砖讜讟讛",
    ]
    gr.Examples(example_prompts, input_text)

# Start the interface.
demo.launch()