dror201031 committed on
Commit
2d6369f
verified
1 Parent(s): 2f394e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -53
app.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  import sys
3
  import subprocess
4
  import gradio as gr
 
5
 
6
  # 诪转拽讬谉 讞讘讬诇讜转 谞讚专砖讜转 讗诐 讞住专讜转
7
  print("===== Application Startup at", os.popen('date "+%Y-%m-%d %H:%M:%S"').read().strip(), "=====")
@@ -32,6 +33,17 @@ except ImportError as e:
32
  print(f"砖讙讬讗讛 讘讬讘讜讗 讛住驻专讬讜转: {str(e)}")
33
  sys.exit(1)
34
 
 
 
 
 
 
 
 
 
 
 
 
35
  # 诪转讞讘专 诇讞砖讘讜谉 Hugging Face
36
  token = os.environ.get("HF_TOKEN")
37
  if token:
@@ -40,90 +52,189 @@ if token:
40
  else:
41
  print("讗讝讛专讛: 讟讜拽谉 HF_TOKEN 诇讗 诪讜讙讚专 讘住讘讬讘转 讛注讘讜讚讛. 讬讬转讻谉 砖诇讗 转讛讬讛 讙讬砖讛 诇诪讜讚诇讬诐 诪讜讙讘诇讬诐.")
42
 
43
- # 讘讞讬专转 诪讜讚诇 驻转讜讞 诇讞诇讜讟讬谉
44
- model_name = "facebook/opt-125m" # 诪讜讚诇 拽讟谉 讜驻转讜讞 诇讙诪专讬 诇诇讗 讛讙讘诇讜转 讙讬砖讛
 
45
 
46
- # 谞讬住讬讜谉 讟注讬谞讛 砖诇 讛诪讜讚诇
47
- print(f"讟讜注谉 诪讜讚诇 {model_name}...")
48
  generator = None
49
- try:
50
- generator = transformers.pipeline(
51
- "text-generation",
52
- model=model_name,
53
- device_map="auto",
54
- torch_dtype="auto"
55
- )
56
- print("讟注讬谞转 讛诪讜讚诇 讛爪诇讬讞讛!")
57
- except Exception as e:
58
- print(f"砖讙讬讗讛 讘讟注讬谞转 讛诪讜讚诇: {str(e)}")
59
- # 谞讬住讬讜谉 诇讟注讜谉 诪讜讚诇 驻转讜讞 驻砖讜讟 讬讜转专 讘诪拽专讛 砖诇 讻讬砖诇讜谉
60
  try:
61
- print("诪谞住讛 诇讟注讜谉 诪讜讚诇 讞诇讜驻讬...")
62
- model_name = "distilbert-base-uncased" # 诪讜讚诇 拽讟谉 注讜讚 讬讜转专 讜驻转讜讞
63
- generator = transformers.pipeline(
64
- "text-generation",
65
- model=model_name,
66
- device_map="auto",
67
- torch_dtype="auto"
68
- )
69
- print("讟注讬谞转 讛诪讜讚诇 讛讞诇讜驻讬 讛爪诇讬讞讛!")
70
- except Exception as e2:
71
- print(f"砖讙讬讗讛 讘讟注讬谞转 讛诪讜讚诇 讛讞诇讜驻讬: {str(e2)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  def ask_model(prompt):
 
 
74
  if generator is None:
75
- return "讛诪讜讚诇 诇讗 谞讟注谉 讘讛爪诇讞讛. 讘讚讜拽 讗转 诇讜讙 讛砖讙讬讗讜转."
76
 
77
  try:
78
- outputs = generator(
79
- prompt,
80
- max_new_tokens=100,
81
- do_sample=True,
82
- temperature=0.7,
83
- top_p=0.95,
84
- return_full_text=False
85
- )
86
-
87
- # 诪讞讝讬专 讗转 讛讟拽住讟 砖谞讜爪专
88
- if isinstance(outputs, list) and len(outputs) > 0:
89
- return outputs[0]["generated_text"]
 
 
 
 
 
 
 
 
 
90
  else:
91
- return str(outputs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  except Exception as e:
93
  return f"砖讙讬讗讛 讘讛驻注诇转 讛诪讜讚诇: {str(e)}"
94
 
95
  # 讬爪讬专转 诪诪砖拽 诪砖转诪砖
96
  with gr.Blocks() as demo:
97
- gr.Markdown("# 爪'讗讟 讞讻诐")
98
- gr.Markdown("讻转讜讘 砖讗诇讛 讗讜 讘拽砖讛 讜讛诪讜讚诇 讬注谞讛:")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
 
 
100
  with gr.Row():
101
  input_text = gr.Textbox(
102
  placeholder="讻转讜讘 讻讗谉 讗转 讛砖讗诇讛 砖诇讱...",
103
  lines=3,
104
- label="拽诇讟"
105
  )
106
  output_text = gr.Textbox(label="转砖讜讘转 讛诪讜讚诇", lines=10)
107
 
108
- submit_btn = gr.Button("砖诇讞")
109
  submit_btn.click(fn=ask_model, inputs=input_text, outputs=output_text)
110
 
111
- gr.Examples(
 
 
112
  [
113
  "住驻专 诇讬 注诇 讘讬谞讛 诪诇讗讻讜转讬转",
114
- "讻转讜讘 住讬驻讜专 拽爪专",
115
- "诪讛讜 驻讬讬转讜谉?",
116
- "诪讛诐 讬转专讜谞讜转 砖诇 诇诪讬讚转 诪讻讜谞讛?"
 
 
117
  ],
118
  input_text
119
  )
120
 
121
  gr.Markdown("""
122
  ### 讛注专讜转:
123
- - 讛诪注专讻转 诪砖转诪砖转 讘诪讜讚诇 砖驻讛 驻转讜讞 诇讞诇讜讟讬谉
124
- - 讛拽诇讬讚讜 砖讗诇讛 讗讜 讘拽砖讛 讜诇讞爪讜 注诇 '砖诇讞'
125
- - 讛转砖讜讘讜转 谞讜爪专讜转 讘讝诪谉 讗诪转
 
126
  """)
127
 
128
- # 讛驻注诇转 讛诪诪砖拽
129
- demo.launch()
 
2
  import sys
3
  import subprocess
4
  import gradio as gr
5
+ import time
6
 
7
  # 诪转拽讬谉 讞讘讬诇讜转 谞讚专砖讜转 讗诐 讞住专讜转
8
  print("===== Application Startup at", os.popen('date "+%Y-%m-%d %H:%M:%S"').read().strip(), "=====")
 
33
  print(f"砖讙讬讗讛 讘讬讘讜讗 讛住驻专讬讜转: {str(e)}")
34
  sys.exit(1)
35
 
36
# Registry of the small Hebrew-capable / multilingual models offered in the UI.
# Maps Hugging Face model id -> short human-readable description shown to the
# user. Insertion order matters: it is the order shown in the dropdown, and
# the first key is the default model.
MODELS = {
    "facebook/opt-125m": "诪讜讚诇 拽讟谉 (125M) 讻诇诇讬 - 诪讛讬专",
    "onlplab/alephbert-base": "诪讜讚诇 BERT 讘注讘专讬转",
    "avichr/heBERT": "诪讜讚诇 BERT 注讘专讬 谞讜住祝",
    "google/mt5-small": "诪讜讚诇 T5 专讘-诇砖讜谞讬 转讜诪讱 注讘专讬转",
    "xlm-roberta-base": "诪讜讚诇 专讘-诇砖讜谞讬 转讜诪讱 注讘专讬转",
    "google/flan-t5-small": "诪讜讚诇 讛谞讞讬讜转 拽讟谉 转讜诪讱 讘诪讙讜讜谉 砖驻讜转",
    "distilgpt2": "诪讜讚诇 GPT-2 拽讟谉 讜诪讛讬专",
}
46
+
47
  # 诪转讞讘专 诇讞砖讘讜谉 Hugging Face
48
  token = os.environ.get("HF_TOKEN")
49
  if token:
 
52
  else:
53
  print("讗讝讛专讛: 讟讜拽谉 HF_TOKEN 诇讗 诪讜讙讚专 讘住讘讬讘转 讛注讘讜讚讛. 讬讬转讻谉 砖诇讗 转讛讬讛 讙讬砖讛 诇诪讜讚诇讬诐 诪讜讙讘诇讬诐.")
54
 
55
# Model loaded at application startup.
default_model_name = "facebook/opt-125m"
# Id of the model currently backing `generator`; updated by load_model().
current_model_name = default_model_name

# Global handle to the active transformers pipeline (None until a model loads).
generator = None
61
+
62
def load_model(model_name, status_box=None):
    """Load a transformers pipeline for *model_name* into the global `generator`.

    The pipeline task is chosen from the model name:
      - BERT-family (name contains "bert" but not "bart") -> "fill-mask",
        because encoder-only models cannot free-generate text;
      - T5-family (name contains "t5" — covers mt5 and flan-t5 too) ->
        "text2text-generation";
      - everything else -> "text-generation".

    Parameters:
        model_name: Hugging Face model id to load.
        status_box: optional; when given, a gr.Markdown describing progress is
            created and returned in the second slot. NOTE(review): components
            constructed inside a handler are not attached to the live UI —
            this is kept only for interface compatibility with callers.

    Returns:
        (message, status_box): a human-readable status string (success or
        error text) and the possibly-replaced status_box value.
    """
    global generator, current_model_name
    current_model_name = model_name

    if status_box is not None:
        status_box = gr.Markdown(f"讟讜注谉 讗转 讛诪讜讚诇: {model_name}...")

    try:
        name = model_name.lower()
        # Pick the task once instead of duplicating the pipeline call per branch.
        if "bert" in name and "bart" not in name:
            # Encoder-only BERT-style models: serve them as masked-word fill.
            task = "fill-mask"
        elif "t5" in name:
            # "t5" also matches "mt5" and "flan-t5", so one test suffices.
            task = "text2text-generation"
        else:
            task = "text-generation"

        generator = transformers.pipeline(
            task,
            model=model_name,
            device_map="auto",
            torch_dtype="auto"
        )

        if status_box is not None:
            status_box = gr.Markdown(f"**讛诪讜讚诇 {model_name} 谞讟注谉 讘讛爪诇讞讛!**")

        return f"讛诪讜讚诇 {model_name} 谞讟注谉 讘讛爪诇讞讛!", status_box

    except Exception as e:
        error_msg = f"砖讙讬讗讛 讘讟注讬谞转 讛诪讜讚诇 {model_name}: {str(e)}"
        print(error_msg)

        if status_box is not None:
            status_box = gr.Markdown(f"**砖讙讬讗讛:** {error_msg}")

        return error_msg, status_box
112
+
113
# Load the default model once at startup so the app answers immediately.
print(f"讟讜注谉 诪讜讚诇 讘专讬专转 诪讞讚诇 {default_model_name}...")
load_model(default_model_name)
116
 
117
def ask_model(prompt):
    """Answer *prompt* using the currently loaded pipeline.

    Dispatches on the active model name (mirrors the routing in load_model):
      - BERT-family -> fill-mask: ensure the prompt contains the tokenizer's
        mask token and return the top-5 predictions with their scores;
      - T5/flan-family -> text2text-generation;
      - otherwise -> plain text-generation.

    Returns the generated text, or an error-message string on failure.
    """
    if generator is None:
        return "讛诪讜讚诇 诇讗 谞讟注谉 讘讛爪诇讞讛. 谞住讛 诇讟注讜谉 诪讜讚诇 转讞讬诇讛."

    name = current_model_name.lower()
    try:
        if "bert" in name and "bart" not in name:
            # Fill-mask pipelines require the model's own mask token.
            # BUGFIX: "bert" in "xlm-roberta-base" is True, and xlm-roberta's
            # mask token is "<mask>", not "[MASK]" — hard-coding "[MASK]"
            # made inference fail for it. Ask the tokenizer instead.
            tokenizer = getattr(generator, "tokenizer", None)
            mask_token = getattr(tokenizer, "mask_token", None) or "[MASK]"
            if mask_token not in prompt:
                prompt = prompt + " " + mask_token
            outputs = generator(prompt, top_k=5)
            return "\n".join(
                f"{item['token_str']} (讜讚讗讜转: {item['score']:.4f})"
                for item in outputs
            )

        if "t5" in name or "flan" in name:
            # Text-to-text models take max_length and no return_full_text.
            outputs = generator(
                prompt,
                max_length=100,
                do_sample=True,
                temperature=0.7,
                top_p=0.95
            )
        else:
            outputs = generator(
                prompt,
                max_new_tokens=100,
                do_sample=True,
                temperature=0.7,
                top_p=0.95,
                return_full_text=False
            )

        # Both generation tasks return a list of dicts keyed "generated_text".
        if isinstance(outputs, list) and len(outputs) > 0:
            return outputs[0]["generated_text"]
        return str(outputs)

    except Exception as e:
        return f"砖讙讬讗讛 讘讛驻注诇转 讛诪讜讚诇: {str(e)}"
163
 
164
# Build the Gradio user interface.
with gr.Blocks() as demo:
    gr.Markdown("# 诪注专讻转 爪'讗讟 注诐 诪讜讚诇讬诐 讘注讘专讬转")

    # --- Model selection and loading area ---
    with gr.Row():
        with gr.Column(scale=3):
            # Dropdown listing the available models.
            model_dropdown = gr.Dropdown(
                choices=list(MODELS.keys()),
                value=default_model_name,
                label="讘讞专 诪讜讚诇",
                info="讘讞专 诪讜讚诇 诪讛专砖讬诪讛 讜诇讞抓 注诇 '讟注谉 诪讜讚诇'"
            )
            # Description of the currently selected model.
            model_description = gr.Markdown(f"**转讬讗讜专 讛诪讜讚诇:** {MODELS[default_model_name]}")
        with gr.Column(scale=1):
            load_button = gr.Button("讟注谉 诪讜讚诇", variant="primary")

    # Status line showing which model is active.
    model_status = gr.Markdown(f"**诪讜讚诇 谞讜讻讞讬:** {default_model_name}")

    def update_model_description(model_name):
        # Refresh the description text whenever the dropdown selection changes.
        return f"**转讬讗讜专 讛诪讜讚诇:** {MODELS[model_name]}"

    model_dropdown.change(fn=update_model_description, inputs=model_dropdown, outputs=model_description)

    def load_selected_model(model_name):
        # Load the chosen model and report the newly active model name.
        message, _ = load_model(model_name)
        return message, f"**诪讜讚诇 谞讜讻讞讬:** {model_name}"

    # First output is a hidden textbox that just swallows the status message.
    load_button.click(fn=load_selected_model, inputs=model_dropdown, outputs=[gr.Textbox(visible=False), model_status])

    gr.Markdown("---")
    gr.Markdown("### 砖讗诇 讗转 讛诪讜讚诇")

    # --- Question / answer area ---
    with gr.Row():
        input_text = gr.Textbox(
            placeholder="讻转讜讘 讻讗谉 讗转 讛砖讗诇讛 砖诇讱...",
            lines=3,
            label="砖讗诇讛/讘拽砖讛"
        )
        output_text = gr.Textbox(label="转砖讜讘转 讛诪讜讚诇", lines=10)

    submit_btn = gr.Button("砖诇讞", variant="primary")
    submit_btn.click(fn=ask_model, inputs=input_text, outputs=output_text)

    # Sample prompts the user can click to fill the input box.
    gr.Markdown("### 讚讜讙诪讗讜转 诇砖讗诇讜转:")
    examples = gr.Examples(
        [
            "住驻专 诇讬 注诇 讘讬谞讛 诪诇讗讻讜转讬转",
            "诪讛 讚注转讱 注诇 讛砖驻讛 讛注讘专讬转?",
            "讻转讜讘 住讬驻讜专 拽爪专 注诇 讬专讜砖诇讬诐",
            "诪讛诐 讛讬转专讜谞讜转 砖诇 诇诪讬讚转 诪讻讜谞讛?",
            "转专讙诐 讗转 讛诪砖驻讟 讛讝讛 诇讗谞讙诇讬转: 砖诇讜诐, 诪讛 砖诇讜诪讱 讛讬讜诐?",
            "讛砖诇诐 讗转 讛诪砖驻讟: 讬专讜砖诇讬诐 讛讬讗 [MASK]"
        ],
        input_text
    )

    gr.Markdown("""
    ### 讛注专讜转:
    - 讘讞专 诪讜讚诇 诪讛专砖讬诪讛 讜诇讞抓 注诇 '讟注谉 诪讜讚诇' 讻讚讬 诇讛讞诇讬祝 讗转 讛诪讜讚诇 讛谞讜讻讞讬
    - 讛诪讜讚诇讬诐 诪住讜讙 BERT (讻诪讜 AlephBERT 讜-heBERT) 诪转讗讬诪讬诐 诇讛砖诇诪转 诪讬诇讬诐 讜诇讗 诇砖讬讞讛 诪诇讗讛
    - 讛诪讜讚诇讬诐 诪住讜讙 T5 讜-OPT 讟讜讘讬诐 讬讜转专 诇砖讬讞讛 讜讬爪讬专转 转讜讻谉
    - 讛讟注讬谞讛 讛专讗砖讜谞讬转 砖诇 讻诇 诪讜讚诇 注砖讜讬讛 诇拽讞转 诪住驻专 砖谞讬讜转 注讚 讚拽讜转
    """)

# Launch the interface; show_error surfaces handler exceptions in the UI.
demo.launch(show_error=True)