jayebaku committed on
Commit 65a7bbd · verified · 1 Parent(s): 593adc9

Update app.py

Files changed (1): app.py (+80 -82)
app.py CHANGED
@@ -94,7 +94,7 @@ def load_and_classify_csv_dataframe(file, text_field, event_model, threshold):
     return result_df, result_df, gr.update(choices=sorted(filters+extra_filters),
                                            value='All',
                                            label="Filter data by label",
-                                           visible=True)
+                                           visible=True), gr.update(interactive=True), gr.update(interactive=True)
 
 
 def calculate_accuracy(flood_selections, fire_selections, none_selections, num_posts, text_field, data_df):
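Note: the two extra `gr.update(interactive=True)` values returned above are matched positionally against the `outputs` list of the `.click` handler further down; that is how this commit enables the QA buttons once a file has been classified. A minimal sketch of that Gradio pattern (component names here are illustrative, not taken from app.py):

```python
import gradio as gr

def load_data():
    # ...load and classify the uploaded file here...
    # One gr.update(...) per entry in `outputs` lets a single handler
    # reconfigure several components at once.
    return gr.update(interactive=True), gr.update(interactive=True)

with gr.Blocks() as demo:
    load_btn = gr.Button("Start Prediction")
    add_query_btn = gr.Button("Add query", interactive=False)  # disabled until data is loaded
    run_qa_btn = gr.Button("Start QA", interactive=False)
    load_btn.click(load_data, inputs=None, outputs=[add_query_btn, run_qa_btn])

demo.launch()
```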
@@ -172,7 +172,6 @@ with gr.Blocks(fill_width=True) as demo:
 
     T_data_ss_state = gr.State(value=pd.DataFrame())
 
-
     with gr.Tab("Event Type Classification"):
         gr.Markdown(
             """
@@ -213,75 +212,6 @@ with gr.Blocks(fill_width=True) as demo:
 
 
 
-    with gr.Tab("Event Type Classification Eval"):
-        gr.Markdown(
-            """
-            # T4.5 Relevance Classifier Demo
-            This is a demo created to explore flood and wildfire classification in social media posts.\n
-            Usage:\n
-            - Upload a .tsv or .csv data file (must contain a text column with social media posts).\n
-            - Next, type the name of the text column.\n
-            - Then, choose a BERT classifier model from the dropdown.\n
-            - Finally, click the 'Start Prediction' button.\n
-            Evaluation:\n
-            - To evaluate the model's accuracy, select the INCORRECT classifications using the checkboxes in front of each post.\n
-            - Then, click the 'Calculate Accuracy' button.\n
-            - Finally, click 'Download data as CSV' to get the classifications and evaluation data as a .csv file.
-            """)
-        with gr.Row():
-            with gr.Column(scale=4):
-                file_input = gr.File(label="Upload CSV or TSV File", file_types=['.tsv', '.csv'])
-
-            with gr.Column(scale=6):
-                text_field = gr.Textbox(label="Text field name", value="tweet_text")
-                event_model = gr.Dropdown(event_models, value=event_models[0], label="Select classification model")
-                ETCE_predict_button = gr.Button("Start Prediction")
-                with gr.Accordion("Prediction threshold", open=False):
-                    threshold = gr.Slider(0, 1, value=0, step=0.01, label="Prediction threshold", show_label=False,
-                                          info="This value sets a threshold by which texts classified flood or fire are accepted; \
-                                          higher values make the classifier stricter (CAUTION: a value of 1 will set all predictions to none)", interactive=True)
-
-        with gr.Row():
-            with gr.Column():
-                gr.Markdown("""### Flood-related""")
-                flood_checkbox_output = gr.CheckboxGroup(label="Select ONLY incorrect classifications", interactive=True)
-
-            with gr.Column():
-                gr.Markdown("""### Fire-related""")
-                fire_checkbox_output = gr.CheckboxGroup(label="Select ONLY incorrect classifications", interactive=True)
-
-            with gr.Column():
-                gr.Markdown("""### None""")
-                none_checkbox_output = gr.CheckboxGroup(label="Select ONLY incorrect classifications", interactive=True)
-
-        with gr.Row():
-            with gr.Column(scale=5):
-                gr.Markdown(r"""
-                Accuracy: the model's ability to make correct predictions,
-                i.e. the fraction of correct predictions out of all predictions.
-
-                $$
-                \text{Accuracy} = \frac{\text{Correct predictions}}{\text{All predictions}} \times 100
-                $$
-
-                Model Confidence: the mean probability of each case
-                belonging to its assigned class. A value of 1 is best.
-                """, latex_delimiters=[{"left": "$$", "right": "$$", "display": True}])
-                gr.Markdown("\n\n\n")
-                model_confidence = gr.Number(label="Model Confidence")
-
-            with gr.Column(scale=5):
-                correct = gr.Number(label="Number of correct classifications")
-                incorrect = gr.Number(label="Number of incorrect classifications")
-                accuracy = gr.Number(label="Model Accuracy (%)")
-
-        ETCE_accuracy_button = gr.Button("Calculate Accuracy")
-        download_csv = gr.DownloadButton(visible=False)
-        num_posts = gr.Number(visible=False)
-        data = gr.DataFrame(visible=False)
-        data_eval = gr.DataFrame(visible=False)
-
-
     qa_tab = gr.Tab("Question Answering")
     with qa_tab:
         gr.Markdown(
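Note: the slider's info text in the removed tab above states the acceptance rule only in prose. A hedged sketch of that rule as code (assumed logic; the actual thresholding lives elsewhere in app.py):

```python
def apply_threshold(label: str, score: float, threshold: float) -> str:
    # Assumed reading of the info text: flood/fire predictions whose score
    # falls below the threshold are reassigned to "none", so higher values
    # make the classifier stricter and a threshold of 1.0 rejects everything.
    if label in ("flood", "fire") and score < threshold:
        return "none"
    return label

assert apply_threshold("flood", 0.42, 0.50) == "none"  # below threshold: rejected
assert apply_threshold("fire", 0.90, 0.50) == "fire"   # above threshold: kept
assert apply_threshold("flood", 0.99, 1.00) == "none"  # threshold 1 forces "none"
```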
@@ -313,7 +243,7 @@ with gr.Blocks(fill_width=True) as demo:
         QA_run_button = gr.Button("Start QA", interactive=False)
         hsummary = gr.Textbox(label="Summary")
 
-        qa_df = gr.DataFrame()
+        qa_df = gr.DataFrame(visible=False)
 
 
     with gr.Tab("Single Text Classification"):
@@ -346,8 +276,75 @@ with gr.Blocks(fill_width=True) as demo:
         classification_score = gr.Number(label="Classification Score")
 
 
-
+    # with gr.Tab("Event Type Classification Eval"):
+    #     gr.Markdown(
+    #         """
+    #         # T4.5 Relevance Classifier Demo
+    #         This is a demo created to explore flood and wildfire classification in social media posts.\n
+    #         Usage:\n
+    #         - Upload a .tsv or .csv data file (must contain a text column with social media posts).\n
+    #         - Next, type the name of the text column.\n
+    #         - Then, choose a BERT classifier model from the dropdown.\n
+    #         - Finally, click the 'Start Prediction' button.\n
+    #         Evaluation:\n
+    #         - To evaluate the model's accuracy, select the INCORRECT classifications using the checkboxes in front of each post.\n
+    #         - Then, click the 'Calculate Accuracy' button.\n
+    #         - Finally, click 'Download data as CSV' to get the classifications and evaluation data as a .csv file.
+    #         """)
+    #     with gr.Row():
+    #         with gr.Column(scale=4):
+    #             file_input = gr.File(label="Upload CSV or TSV File", file_types=['.tsv', '.csv'])
+
+    #         with gr.Column(scale=6):
+    #             text_field = gr.Textbox(label="Text field name", value="tweet_text")
+    #             event_model = gr.Dropdown(event_models, value=event_models[0], label="Select classification model")
+    #             ETCE_predict_button = gr.Button("Start Prediction")
+    #             with gr.Accordion("Prediction threshold", open=False):
+    #                 threshold = gr.Slider(0, 1, value=0, step=0.01, label="Prediction threshold", show_label=False,
+    #                                       info="This value sets a threshold by which texts classified flood or fire are accepted; \
+    #                                       higher values make the classifier stricter (CAUTION: a value of 1 will set all predictions to none)", interactive=True)
+
+    #     with gr.Row():
+    #         with gr.Column():
+    #             gr.Markdown("""### Flood-related""")
+    #             flood_checkbox_output = gr.CheckboxGroup(label="Select ONLY incorrect classifications", interactive=True)
 
+    #         with gr.Column():
+    #             gr.Markdown("""### Fire-related""")
+    #             fire_checkbox_output = gr.CheckboxGroup(label="Select ONLY incorrect classifications", interactive=True)
+
+    #         with gr.Column():
+    #             gr.Markdown("""### None""")
+    #             none_checkbox_output = gr.CheckboxGroup(label="Select ONLY incorrect classifications", interactive=True)
+
+    #     with gr.Row():
+    #         with gr.Column(scale=5):
+    #             gr.Markdown(r"""
+    #             Accuracy: the model's ability to make correct predictions,
+    #             i.e. the fraction of correct predictions out of all predictions.
+
+    #             $$
+    #             \text{Accuracy} = \frac{\text{Correct predictions}}{\text{All predictions}} \times 100
+    #             $$
+
+    #             Model Confidence: the mean probability of each case
+    #             belonging to its assigned class. A value of 1 is best.
+    #             """, latex_delimiters=[{"left": "$$", "right": "$$", "display": True}])
+    #             gr.Markdown("\n\n\n")
+    #             model_confidence = gr.Number(label="Model Confidence")
+
+    #         with gr.Column(scale=5):
+    #             correct = gr.Number(label="Number of correct classifications")
+    #             incorrect = gr.Number(label="Number of incorrect classifications")
+    #             accuracy = gr.Number(label="Model Accuracy (%)")
+
+    #     ETCE_accuracy_button = gr.Button("Calculate Accuracy")
+    #     download_csv = gr.DownloadButton(visible=False)
+    #     num_posts = gr.Number(visible=False)
+    #     data = gr.DataFrame(visible=False)
+    #     data_eval = gr.DataFrame(visible=False)
+
+
 
 
 
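Note: the accuracy and confidence definitions in the commented-out tab reduce to simple arithmetic over the checkbox selections. A hedged, simplified sketch of what `calculate_accuracy` plausibly computes (the parameter names come from the diff; the body and the reduced return value are assumptions, since the real function also produces `data_eval` and the CSV download):

```python
def calculate_accuracy_sketch(flood_selections, fire_selections, none_selections, num_posts):
    # Each checkbox group lists the posts flagged as INCORRECT,
    # so correct = total - flagged.
    incorrect = len(flood_selections) + len(fire_selections) + len(none_selections)
    correct = num_posts - incorrect
    accuracy = correct / num_posts * 100  # Accuracy = correct / all * 100
    return incorrect, correct, accuracy

# Worked example: 3 posts flagged as misclassified out of 20 -> 85% accuracy.
assert calculate_accuracy_sketch(["post 1", "post 2"], ["post 3"], [], 20)[2] == 85.0
```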
 
@@ -360,6 +357,7 @@ with gr.Blocks(fill_width=True) as demo:
         }
 
         """
+
     # Test event listeners
     T_predict_button.click(
         load_and_classify_csv_dataframe,
@@ -382,22 +380,22 @@ with gr.Blocks(fill_width=True) as demo:
 
 
     # Button clicks ETC Eval
-    ETCE_predict_button.click(
-        load_and_classify_csv,
-        inputs=[file_input, text_field, event_model, threshold],
-        outputs=[flood_checkbox_output, fire_checkbox_output, none_checkbox_output, model_confidence, num_posts, data, QA_addqry_button, QA_run_button])
+    # ETCE_predict_button.click(
+    #     load_and_classify_csv,
+    #     inputs=[file_input, text_field, event_model, threshold],
+    #     outputs=[flood_checkbox_output, fire_checkbox_output, none_checkbox_output, model_confidence, num_posts, data, QA_addqry_button, QA_run_button])
 
-    ETCE_accuracy_button.click(
-        calculate_accuracy,
-        inputs=[flood_checkbox_output, fire_checkbox_output, none_checkbox_output, num_posts, text_field, data],
-        outputs=[incorrect, correct, accuracy, data_eval, download_csv])
+    # ETCE_accuracy_button.click(
+    #     calculate_accuracy,
+    #     inputs=[flood_checkbox_output, fire_checkbox_output, none_checkbox_output, num_posts, text_field, data],
+    #     outputs=[incorrect, correct, accuracy, data_eval, download_csv])
 
 
     # Button clicks QA
    QA_addqry_button.click(add_query, inputs=[query_inp, queries_state], outputs=[selected_queries, queries_state])
 
    QA_run_button.click(qa_summarise,
-                        inputs=[selected_queries, qa_llm_model, text_field, data], ## XXX fix text_field
+                        inputs=[selected_queries, qa_llm_model, T_text_field, T_data_ss_state],
                         outputs=[hsummary, qa_df])
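Note: the final change points the QA handler at the classification tab's shared components (`T_text_field` and the `T_data_ss_state` gr.State holding the classified DataFrame) instead of the retired eval tab's `text_field`/`data`, resolving the old `## XXX fix text_field` comment. A hedged sketch of that cross-tab `gr.State` pattern (`T_data_ss_state` is a name from the diff; the other names are illustrative):

```python
import gradio as gr
import pandas as pd

with gr.Blocks() as demo:
    # Session-scoped holder for the classified posts, shared across tabs.
    T_data_ss_state = gr.State(value=pd.DataFrame())

    with gr.Tab("Event Type Classification"):
        classify_btn = gr.Button("Start Prediction")

    with gr.Tab("Question Answering"):
        run_qa_btn = gr.Button("Start QA")
        summary_box = gr.Textbox(label="Summary")

    def classify():
        # Stand-in for load_and_classify_csv_dataframe: whatever is returned
        # for the gr.State output becomes the new shared value.
        return pd.DataFrame({"tweet_text": ["the river burst its banks"]})

    def summarise(df: pd.DataFrame) -> str:
        # Stand-in for qa_summarise: reads the shared DataFrame back out.
        return f"Summarising {len(df)} classified posts"

    classify_btn.click(classify, inputs=None, outputs=T_data_ss_state)
    run_qa_btn.click(summarise, inputs=T_data_ss_state, outputs=summary_box)

demo.launch()
```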
 
 