sundea committed · Commit ef16cf3 · 1 Parent(s): 85bf895

Update app.py

Files changed (1)
  1. app.py  +247  -13
app.py CHANGED
@@ -1,16 +1,128 @@
+ # import argparse
+ # import os
+ # from importlib import import_module
+
+ # import gradio as gr
+ # from tqdm import tqdm
+ # import models.TextCNN
+ # import torch
+ # import pickle as pkl
+ # from utils import build_dataset
+
+ # classes = ['金融类', '房地产类', '股票类', '教育类', '科技类', '社会类', '政治类', '体育类', '游戏类',
+ #            '娱乐类']
+
+ # MAX_VOCAB_SIZE = 10000  # vocabulary size limit
+ # UNK, PAD = '<UNK>', '<PAD>'  # unknown-character and padding symbols
+
+
+ # def build_vocab(file_path, tokenizer, max_size, min_freq):
+ #     vocab_dic = {}
+ #     with open(file_path, 'r', encoding='UTF-8') as f:
+ #         for line in tqdm(f):
+ #             lin = line.strip()
+ #             if not lin:
+ #                 continue
+ #             content = lin.split('\t')[0]
+ #             for word in tokenizer(content):
+ #                 vocab_dic[word] = vocab_dic.get(word, 0) + 1
+ #         vocab_list = sorted([_ for _ in vocab_dic.items() if _[1] >= min_freq], key=lambda x: x[1], reverse=True)[
+ #                      :max_size]
+ #         vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}
+ #         vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
+ #     return vocab_dic
+
+
+
+
+
+
+
+
+ # def greet(text):
+ #     parser = argparse.ArgumentParser(description='Chinese Text Classification')
+ #     parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
+ #     args = parser.parse_args()
+ #     model_name = 'TextCNN'
+ #     dataset = 'THUCNews'  # dataset
+ #     embedding = 'embedding_SougouNews.npz'
+ #     x = import_module('models.' + model_name)
+
+ #     config = x.Config(dataset, embedding)
+ #     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ #     model = models.TextCNN.Model(config)
+
+ #     # vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
+ #     model.load_state_dict(torch.load('THUCNews/saved_dict/TextCNN.ckpt', map_location=torch.device('cpu')))
+ #     model.to(device)
+ #     model.eval()
+
+ #     tokenizer = lambda x: [y for y in x]  # char-level
+ #     if os.path.exists(config.vocab_path):
+ #         vocab = pkl.load(open(config.vocab_path, 'rb'))
+ #     else:
+ #         vocab = build_vocab(config.train_path, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
+ #         pkl.dump(vocab, open(config.vocab_path, 'wb'))
+ #     # print(f"Vocab size: {len(vocab)}")
+
+ #     # content = '时评:“国学小天才”录取缘何少佳话'
+ #     content = text
+
+ #     words_line = []
+ #     token = tokenizer(content)
+ #     seq_len = len(token)
+ #     pad_size = 32
+ #     contents = []
+
+ #     if pad_size:
+ #         if len(token) < pad_size:
+ #             token.extend([PAD] * (pad_size - len(token)))
+ #         else:
+ #             token = token[:pad_size]
+ #             seq_len = pad_size
+ #     # word to id
+ #     for word in token:
+ #         words_line.append(vocab.get(word, vocab.get(UNK)))
+
+ #     contents.append((words_line, seq_len))
+ #     # print(words_line)
+ #     # input = torch.LongTensor(words_line).unsqueeze(1).to(device)  # convert words_line to LongTensor and add batch dimension
+ #     x = torch.LongTensor([_[0] for _ in contents]).to(device)
+
+ #     # length before padding (anything longer than pad_size is capped at pad_size)
+ #     seq_len = torch.LongTensor([_[1] for _ in contents]).to(device)
+ #     input = (x, seq_len)
+ #     # print(input)
+ #     with torch.no_grad():
+ #         output = model(input)
+ #         predic = torch.max(output.data, 1)[1].cpu().numpy()
+ #     # print(predic)
+ #     # print('类别为:{}'.format(classes[predic[0]]))
+ #     return classes[predic[0]]
+
+
+
+
+
+ # demo = gr.Interface(fn=greet, inputs="text", outputs="text", title="text-classification app",
+ #                     layout="vertical", description="This is a demo for text classification.")
+ # demo.launch()
+
+ # You can use CSS and HTML to customize your Gradio interface and make it more appealing. Below is an example that uses some CSS styles and HTML markup to improve the layout and look of the interface:
+
+
  import argparse
  import os
  from importlib import import_module
 
  import gradio as gr
- from tqdm import tqdm
  import models.TextCNN
  import torch
  import pickle as pkl
- from utils import build_dataset
+ from tqdm import tqdm
 
- classes = ['金融类', '房地产类', '股票类', '教育类', '科技类', '社会类', '政治类', '体育类', '游戏类',
-            '娱乐类']
+ classes = ['金融类', '房地产类', '股票类', '教育类', '科技类', '社会类', '政治类', '体育类', '游戏类', '娱乐类']
 
  MAX_VOCAB_SIZE = 10000  # vocabulary size limit
  UNK, PAD = '<UNK>', '<PAD>'  # unknown-character and padding symbols
@@ -33,12 +145,6 @@ def build_vocab(file_path, tokenizer, max_size, min_freq):
      return vocab_dic
 
 
-
-
-
-
-
-
  def greet(text):
      parser = argparse.ArgumentParser(description='Chinese Text Classification')
      parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
@@ -101,14 +207,142 @@ def greet(text)
      # print('类别为:{}'.format(classes[predic[0]]))
      return classes[predic[0]]
 
+ # Custom styles and layout
+ css = """
+ body {
+     background-color: #f8f8f8;
+     font-family: Arial, sans-serif;
+ }
+
+ .container {
+     max-width: 800px;
+     margin: 0 auto;
+     padding: 50px;
+ }
+
+ h1 {
+     font-size: 36px;
+     font-weight: bold;
+     color: #333333;
+     text-align: center;
+     margin-bottom: 50px;
+ }
+
+ .gradio-interface {
+     border: none;
+     box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.1);
+     border-radius: 10px;
+     overflow: hidden;
+     margin-bottom: 50px;
+ }
+
+ .gradio-input {
+     background-color: #ffffff;
+     border: none;
+     border-radius: 5px;
+     padding: 15px;
+     box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.1);
+     font-size: 18px;
+     color: #333333;
+     width: 100%;
+     margin-bottom: 20px;
+ }
+
+ .gradio-output {
+     background-color: #ffffff;
+     border: none;
+     border-radius: 5px;
+     padding: 15px;
+     box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.1);
+     font-size: 18px;
+     color: #333333;
+     width: 100%;
+     margin-bottom: 20px;
+ }
+
+ .gradio-interface:hover {
+     box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.2);
+ }
+
+ .gradio-interface:focus-within {
+     box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.2);
+ }
+
+ .gradio-interface .input-group {
+     margin-bottom: 20px;
+ }
+
+ .gradio-interface .input-label {
+     font-size: 24px;
+     font-weight: bold;
+     color: #333333;
+     margin-bottom: 10px;
+ }
+
+ .gradio-interface .input-description {
+     font-size: 16px;
+     color: #666666;
+     margin-bottom: 20px;
+ }
+
+ .gradio-interface .output-label {
+     font-size: 24px;
+     font-weight: bold;
+     color: #333333;
+     margin-bottom: 10px;
+ }
+
+ .gradio-interface .output-description {
+     font-size: 16px;
+     color: #666666;
+     margin-bottom: 20px;
+ }
 
+ .gradio-interface .input-group input[type="text"]::placeholder {
+     color: #999999;
+ }
 
+ .gradio-button {
+     background-color: #333333;
+     color: #ffffff;
+     border: none;
+     border-radius: 5px;
+     padding: 15px 30px;
+     font-size: 18px;
+     font-weight: bold;
+     cursor: pointer;
+     transition: background-color 0.2s ease;
+ }
 
+ .gradio-button:hover {
+     background-color: #111111;
+ }
+ """
 
- demo = gr.Interface(fn=greet, inputs="text", outputs="text", title="text-classification app",
-                     layout="vertical", description="This is a demo for text classification.")
- demo.launch()
+ html = """
+ <div class="container">
+     <h1>Text Classification</h1>
+     <div class="gradio-interface">
+         <div class="input-group">
+             <label class="input-label">Input Text</label>
+             <div class="input-description">Enter the text that you want to classify:</div>
+             <input type="text" class="gradio-input" placeholder="Enter your text here" id="input_text">
+         </div>
+         <button type="button" class="gradio-button" id="classify_button">Classify</button>
+         <div class="output-group">
+             <label class="output-label">Classification Result</label>
+             <div class="output-description">The predicted class is:</div>
+             <div class="gradio-output" id="output_text"></div>
+         </div>
+     </div>
+ </div>
+ """
 
+ iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="Text Classification App",
+                      layout="unaligned", description="This is a demo for text classification.", css=css,
+                      allow_screenshot=False, allow_flagging=False, allow_share=False, allow_download=False,
+                      examples=[["今天天气真好"], ["这个手机真不错"], ["新冠疫情对经济的影响"]])
 
+ iface.launch(html=html)
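A note on the new Gradio wiring above: several of the keyword arguments it uses (layout, allow_screenshot, allow_share, allow_download, and the html= argument given to launch()) depend on the installed Gradio version, and the html string built in the commit is not otherwise consumed by gr.Interface. A more conservative sketch that keeps the custom CSS and relies only on arguments I would expect from a Gradio 3.x-style API (the exact parameter set is an assumption, not part of this commit) could look like this:

import gradio as gr

# `greet` and `css` are the function and stylesheet defined in app.py above.
iface = gr.Interface(
    fn=greet,
    inputs=gr.Textbox(lines=3, placeholder="Enter your text here", label="Input Text"),
    outputs=gr.Textbox(label="Classification Result"),
    title="Text Classification App",
    description="This is a demo for text classification.",
    css=css,  # custom stylesheet from this commit
    examples=[["今天天气真好"], ["这个手机真不错"], ["新冠疫情对经济的影响"]],
    allow_flagging="never",  # Gradio 3.x expects a string here rather than a boolean
)

if __name__ == "__main__":
    iface.launch()  # plain launch(); the styling is carried entirely by `css`

If the standalone HTML markup is really wanted, building the page with gr.Blocks and a gr.HTML component is the usual route, rather than passing markup to launch().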
 
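For readers tracing greet() itself, its preprocessing reduces to char-level tokenization, padding or truncating to pad_size = 32, and mapping each character to a vocabulary id with an <UNK> fallback before the (ids, seq_len) pair is handed to the TextCNN model. A minimal standalone sketch of that step (the helper name encode_text and the toy vocabulary are illustrative, not part of the commit):

import torch

UNK, PAD = '<UNK>', '<PAD>'

def encode_text(text, vocab, pad_size=32):
    # Char-level tokenization, mirroring `tokenizer = lambda x: [y for y in x]` in app.py.
    tokens = list(text)
    seq_len = len(tokens)
    if len(tokens) < pad_size:
        tokens.extend([PAD] * (pad_size - len(tokens)))  # pad short inputs
    else:
        tokens = tokens[:pad_size]  # truncate long inputs
        seq_len = pad_size          # recorded length is capped at pad_size
    # Map each character to its vocabulary id, falling back to the <UNK> id.
    ids = [vocab.get(tok, vocab.get(UNK)) for tok in tokens]
    # The model expects a (token_ids, seq_len) pair of LongTensors with a leading batch dimension.
    return torch.LongTensor([ids]), torch.LongTensor([seq_len])

# Toy usage with an illustrative four-entry vocabulary:
toy_vocab = {'好': 0, '天': 1, UNK: 2, PAD: 3}
x, lengths = encode_text('今天天气真好', toy_vocab)
print(x.shape, lengths)  # torch.Size([1, 32]) tensor([6])

Inside greet the same two tensors are packed as input = (x, seq_len) and passed directly to model(input).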