Michelle Lam committed · commit 32f0b26
Transfer IndieLabel demo version
- README.txt +47 -0
- audit_utils.py +1569 -0
- indie_label_svelte/.gitignore +4 -0
- indie_label_svelte/README.md +9 -0
- indie_label_svelte/package-lock.json +0 -0
- indie_label_svelte/package.json +72 -0
- indie_label_svelte/public/favicon.png +0 -0
- indie_label_svelte/public/global.css +311 -0
- indie_label_svelte/public/index.html +29 -0
- indie_label_svelte/public/logo.png +0 -0
- indie_label_svelte/rollup.config.js +87 -0
- indie_label_svelte/src/App.svelte +112 -0
- indie_label_svelte/src/AppOld.svelte +127 -0
- indie_label_svelte/src/Auditing.svelte +464 -0
- indie_label_svelte/src/ClusterResults.svelte +562 -0
- indie_label_svelte/src/CommentTable.svelte +263 -0
- indie_label_svelte/src/Explore.svelte +149 -0
- indie_label_svelte/src/HelpTooltip.svelte +18 -0
- indie_label_svelte/src/Hunch.svelte +85 -0
- indie_label_svelte/src/HypothesisPanel.svelte +608 -0
- indie_label_svelte/src/IterativeClustering.svelte +164 -0
- indie_label_svelte/src/KeywordSearch.svelte +141 -0
- indie_label_svelte/src/Labeling.svelte +374 -0
- indie_label_svelte/src/MainPanel.svelte +79 -0
- indie_label_svelte/src/ModelPerf.svelte +82 -0
- indie_label_svelte/src/OverallResults.svelte +156 -0
- indie_label_svelte/src/Results.svelte +206 -0
- indie_label_svelte/src/Section.svelte +36 -0
- indie_label_svelte/src/SelectUserDialog.svelte +66 -0
- indie_label_svelte/src/StudyLinks.svelte +59 -0
- indie_label_svelte/src/TopicTraining.svelte +236 -0
- indie_label_svelte/src/main.ts +7 -0
- indie_label_svelte/src/stores/all_users_store.js +6 -0
- indie_label_svelte/src/stores/cur_model_store.js +3 -0
- indie_label_svelte/src/stores/cur_topic_store.js +3 -0
- indie_label_svelte/src/stores/cur_user_store.js +3 -0
- indie_label_svelte/src/stores/error_type_store.js +3 -0
- indie_label_svelte/src/stores/new_evidence_store.js +3 -0
- indie_label_svelte/src/stores/open_evidence_store.js +3 -0
- indie_label_svelte/tsconfig.json +5 -0
- requirements.txt +15 -0
- server.py +797 -0
README.txt
ADDED
@@ -0,0 +1,47 @@
# IndieLabel

## Installation / Setup
- Activate your virtual environment (tested with Python 3.8)
- Install requirements:
```
$ pip install -r requirements.txt
```

- Start the Flask server:
```
$ python server.py
```

- Concurrently build and run the Svelte app in another terminal session:
```
$ cd indie_label_svelte/
$ HOST=0.0.0.0 PORT=5000 npm run dev autobuild
```

- You can now visit `localhost:5001` to view the IndieLabel app!

## Main paths
Here's a summary of the relevant pages used for each participant in our study. For easier setup and navigation, we added URL parameters for the different labeling and auditing modes used in the study.
- Participant's page: `localhost:5001/?user=<USER_NAME>`
- Labeling task pages:
    - Group-based model (group selection): `localhost:5001/?user=<USER_NAME>&tab=labeling&label_mode=3`
    - End-user model (data labeling): `localhost:5001/?user=<USER_NAME>&tab=labeling&label_mode=0`
- Tutorial page: `localhost:5001/?user=DemoUser&scaffold=tutorial`
- Auditing task pages:
    - Fixed audit, end-user model: `localhost:5001/?user=<USER_NAME>&scaffold=personal`
    - Fixed audit, group-based model: `localhost:5001/?user=<USER_NAME>&scaffold=personal_group`
    - Free-form audit, end-user model: `localhost:5001/?user=<USER_NAME>&scaffold=prompts`

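For example, substituting a concrete username into the end-user labeling path (here the `DemoUser` account that the tutorial link uses) gives:
```
localhost:5001/?user=DemoUser&tab=labeling&label_mode=0
```
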
## Setting up a new model
- Set up your username and navigate to the labeling page
    - Using a direct URL parameter
        - Go to `localhost:5001/?user=<USER_NAME>&tab=labeling&label_mode=0`, where in place of `<USER_NAME>`, you've entered your desired username
    - Using the UI
        - Go to the Labeling page and ensure that the "Create a new model" mode is selected.
        - Select the User button on the top menu and enter your desired username.

- Label all of the examples in the table
- When you're done, click the "Get Number of Comments Labeled" button to verify the number of comments that have been labeled. If there are at least 40 comments labeled, the "Train Model" button will be enabled.
- Click on the "Train Model" button and wait for the model to train.
- Then, go to the Auditing page and use your new model.
- To view the different auditing modes that we provided for our evaluation task, please refer to the URL paths listed in the "Auditing task pages" section above.
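
The "Train Model" step above hands the collected labels to the model-training utilities added in `audit_utils.py` below (presumably routed through `server.py`). The following is a minimal, hypothetical sketch of that call for orientation only; the comment strings, model name, and username are placeholders, and in the real app this happens through the web UI rather than a script.
```
# Hypothetical sketch -- not part of the app. Keys of `ratings` must be comment
# texts that exist in the dataset (they are mapped through comments_to_ids).
import audit_utils

ratings = {
    "<some comment text from the labeling table>": 0,     # 0: Not at all toxic
    "<another comment text from the labeling table>": 3,  # 3: Very toxic
    # ... in the real flow, at least 40 labeled comments
}
mae, mse, rmse, avg_diff, ratings_prev = audit_utils.train_updated_model(
    model_name="model_demouser",  # placeholder model name
    last_label_i=0,               # no previously saved label batches
    ratings=ratings,
    user="DemoUser",
)
```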
audit_utils.py
ADDED
@@ -0,0 +1,1569 @@
"""
UTILS FILE
"""
import random
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import os
import mne

from surprise import Dataset, Reader, SVD, accuracy, KNNBasic, KNNWithMeans, KNNWithZScore
from surprise.model_selection import train_test_split
from sklearn.utils import resample
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from scipy import stats
import math
import altair as alt
import matplotlib.pyplot as plt
import time
from sentence_transformers import SentenceTransformer, util
import torch
from bertopic import BERTopic

########################################
# PRE-LOADING

YOUR_COLOR = '#6CADFD'
OTHER_USERS_COLOR = '#ccc'
BINS = [0, 0.5, 1.5, 2.5, 3.5, 4]
BIN_LABELS = ['0: Not at all toxic', '1: Slightly toxic', '2: Moderately toxic', '3: Very toxic', '4: Extremely toxic']
TOXIC_THRESHOLD = 2.0

alt.renderers.enable('altair_saver', fmts=['vega-lite', 'png'])

# Data-loading
module_dir = "./"
perf_dir = f"data/perf/"

# # TEMP reset
# with open(os.path.join(module_dir, "./data/all_model_names.pkl"), "wb") as f:
#     all_model_names = []
#     pickle.dump(all_model_names, f)
# with open(f"./data/users_to_models.pkl", "wb") as f:
#     users_to_models = {}
#     pickle.dump(users_to_models, f)


with open(os.path.join(module_dir, "data/ids_to_comments.pkl"), "rb") as f:
    ids_to_comments = pickle.load(f)
with open(os.path.join(module_dir, "data/comments_to_ids.pkl"), "rb") as f:
    comments_to_ids = pickle.load(f)

all_model_names = sorted([name for name in os.listdir(os.path.join(perf_dir)) if os.path.isdir(os.path.join(perf_dir, name))])
comments_grouped_full_topic_cat = pd.read_pickle("data/comments_grouped_full_topic_cat2_persp.pkl")
sys_eval_df = pd.read_pickle(os.path.join(module_dir, "data/split_data/sys_eval_df.pkl"))
train_df = pd.read_pickle(os.path.join(module_dir, "data/split_data/train_df.pkl"))
train_df_ids = train_df["item_id"].unique().tolist()
model_eval_df = pd.read_pickle(os.path.join(module_dir, "data/split_data/model_eval_df.pkl"))
ratings_df_full = pd.read_pickle(os.path.join(module_dir, "data/ratings_df_full.pkl"))

worker_info_df = pd.read_pickle("./data/worker_info_df.pkl")

with open(f"./data/users_to_models.pkl", "rb") as f:
    users_to_models = pickle.load(f)

with open("data/perf_1000_topics.pkl", "rb") as f:
    perf_1000_topics = pickle.load(f)
with open("data/perf_1000_tox_cat.pkl", "rb") as f:
    perf_1000_tox_cat = pickle.load(f)
with open("data/perf_1000_tox_severity.pkl", "rb") as f:
    perf_1000_tox_severity = pickle.load(f)
with open("data/user_perf_metrics.pkl", "rb") as f:
    user_perf_metrics = pickle.load(f)

topic_ids = comments_grouped_full_topic_cat.topic_id
topics = comments_grouped_full_topic_cat.topic
topic_ids_to_topics = {topic_ids[i]: topics[i] for i in range(len(topic_ids))}
topics_to_topic_ids = {topics[i]: topic_ids[i] for i in range(len(topic_ids))}
unique_topics_ids = sorted(comments_grouped_full_topic_cat.topic_id.unique())
unique_topics = [topic_ids_to_topics[topic_id] for topic_id in range(len(topic_ids_to_topics) - 1)]

def get_toxic_threshold():
    return TOXIC_THRESHOLD

def get_all_model_names(user=None):
    if (user is None) or (user not in users_to_models):
        all_model_names = sorted([name for name in os.listdir(os.path.join(perf_dir)) if os.path.isdir(os.path.join(perf_dir, name))])
        return all_model_names
    else:
        # Fetch the user's models
        user_models = users_to_models[user]
        user_models.sort()
        return user_models

def get_unique_topics():
    return unique_topics

def get_large_clusters(min_n):
    counts_df = comments_grouped_full_topic_cat.groupby(by=["topic_id"]).size().reset_index(name='counts')
    counts_df = counts_df[counts_df["counts"] >= min_n]
    return [topic_ids_to_topics[t_id] for t_id in sorted(counts_df["topic_id"].tolist()[1:])]

def get_ids_to_comments():
    return ids_to_comments

def get_workers_in_group(sel_gender, sel_race, sel_relig, sel_pol, sel_lgbtq):
    df = worker_info_df.copy()
    if sel_gender != "null":
        df = df[df["gender"] == sel_gender]
    if sel_relig != "null":
        df = df[df["religion_important"] == sel_relig]
    if sel_pol != "null":
        df = df[df["political_affilation"] == sel_pol]
    if sel_lgbtq != "null":
        if sel_lgbtq == "LGBTQ+":
            df = df[(df["lgbtq_status"] == "Homosexual") | (df["lgbtq_status"] == "Bisexual")]
        else:
            df = df[df["lgbtq_status"] == "Heterosexual"]
    if sel_race != "":
        df = df.dropna(subset=['race'])
        for r in sel_race:
            # Filter to rows with the indicated race
            df = df[df["race"].str.contains(r)]
    return df, len(df)

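# Example call (added for illustration; the demographic values are hypothetical):
# passing "null" skips that filter, and sel_race is an iterable of substrings
# that must each appear in a worker's "race" field.
# grp_df, grp_size = get_workers_in_group(
#     sel_gender="Female", sel_race=["Asian"], sel_relig="null",
#     sel_pol="null", sel_lgbtq="LGBTQ+")
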
readable_to_internal = {
    "Mean Absolute Error (MAE)": "MAE",
    "Root Mean Squared Error (RMSE)": "RMSE",
    "Mean Squared Error (MSE)": "MSE",
    "Average rating difference": "avg_diff",
    "Topic": "topic",
    "Toxicity Category": "toxicity_category",
    "Toxicity Severity": "toxicity_severity",
}
internal_to_readable = {v: k for k, v in readable_to_internal.items()}

# Embeddings for neighbor retrieval
model_name = "paraphrase-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
with open("./data/comments.pkl", "rb") as f:
    comments = pickle.load(f)
embeddings = torch.load("./data/embeddings/21_10_embeddings.pt")

# Perspective API recalibration
def recalib_v1(s):
    # convert Perspective score to 0-4 toxicity score
    # map 0 persp to 0 (not at all toxic); 0.5 persp to 1 (slightly toxic), 1.0 persp to 4 (extremely toxic)
    if s < 0.5:
        return (s * 2.)
    else:
        return ((s - 0.5) * 6.) + 1

def recalib_v2(s):
    # convert Perspective score to 0-4 toxicity score
    # just 4x the perspective score
    return (s * 4.)

comments_grouped_full_topic_cat["rating_avg_orig"] = comments_grouped_full_topic_cat["rating"]
comments_grouped_full_topic_cat["rating"] = [recalib_v2(score) for score in comments_grouped_full_topic_cat["persp_score"].tolist()]

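# Worked values for the two recalibration mappings (added for illustration):
# recalib_v1 is piecewise linear with a knee at a Perspective score of 0.5,
# while recalib_v2 is a plain 4x rescaling.
#   recalib_v1(0.25) -> 0.5    recalib_v2(0.25) -> 1.0
#   recalib_v1(0.75) -> 2.5    recalib_v2(0.75) -> 3.0
#   recalib_v1(1.0)  -> 4.0    recalib_v2(1.0)  -> 4.0
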
def get_comments_grouped_full_topic_cat():
    return comments_grouped_full_topic_cat

########################################
# General utils
def get_metric_ind(metric):
    if metric == "MAE":
        ind = 0
    elif metric == "MSE":
        ind = 1
    elif metric == "RMSE":
        ind = 2
    elif metric == "avg_diff":
        ind = 3
    return ind

def my_bootstrap(vals, n_boot, alpha):
    bs_samples = []
    sample_size = len(vals)
    for i in range(n_boot):
        samp = resample(vals, n_samples=sample_size)
        bs_samples.append(np.median(samp))

    p = ((1.0 - alpha) / 2.0) * 100
    ci_low = np.percentile(bs_samples, p)
    p = (alpha + ((1.0 - alpha) / 2.0)) * 100
    ci_high = np.percentile(bs_samples, p)
    return bs_samples, (ci_low, ci_high)

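# Note (added for clarity): with alpha=0.95, the value the callers below pass,
# the percentile cut points in my_bootstrap evaluate to p = 2.5 and p = 97.5,
# i.e. a two-sided 95% interval around the bootstrapped median.
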
########################################
# GET_AUDIT utils
def other_users_perf(perf_metrics, metric, user_metric, alpha=0.95, n_boot=501):
    ind = get_metric_ind(metric)

    metric_vals = [metric_vals[ind] for metric_vals in perf_metrics.values()]
    metric_avg = np.median(metric_vals)

    # Future: use provided sample to perform bootstrap sampling
    ci_1 = mne.stats.bootstrap_confidence_interval(np.array(metric_vals), ci=alpha, n_bootstraps=n_boot, stat_fun="median")

    bs_samples, ci = my_bootstrap(metric_vals, n_boot, alpha)

    # Get user's percentile
    percentile = stats.percentileofscore(bs_samples, user_metric)

    return metric_avg, ci, percentile, metric_vals

def plot_metric_histogram(metric, user_metric, other_metric_vals, n_bins=10):
    hist, bin_edges = np.histogram(other_metric_vals, bins=n_bins, density=False)
    data = pd.DataFrame({
        "bin_min": bin_edges[:-1],
        "bin_max": bin_edges[1:],
        "bin_count": hist,
        "user_metric": [user_metric for i in range(len(hist))]
    })
    base = alt.Chart(data)

    bar = base.mark_bar(color=OTHER_USERS_COLOR).encode(
        x=alt.X("bin_min", bin="binned", title=internal_to_readable[metric]),
        x2='bin_max',
        y=alt.Y("bin_count", title="Number of users"),
        tooltip=[
            alt.Tooltip('bin_min', title=f'{metric} bin min', format=".2f"),
            alt.Tooltip('bin_max', title=f'{metric} bin max', format=".2f"),
            alt.Tooltip('bin_count', title=f'Number of OTHER users', format=","),
        ]
    )

    rule = base.mark_rule(color=YOUR_COLOR).encode(
        x = "mean(user_metric):Q",
        size=alt.value(2),
        tooltip=[
            alt.Tooltip('mean(user_metric)', title=f'{metric} with YOUR labels', format=".2f"),
        ]
    )

    return (bar + rule).interactive()

def get_toxicity_severity_bins(perf_metric, user_df, other_dfs, bins=BINS, bin_labels=BIN_LABELS, ci=0.95, n_boot=501):
    # Note: not using other_dfs anymore
    y_user = []
    y_other = []
    used_bins = []
    other_ci_low = []
    other_ci_high = []
    for severity_i in range(len(bin_labels)):
        metric_others = [metrics[get_metric_ind(perf_metric)] for metrics in perf_1000_tox_severity[severity_i].values() if metrics[get_metric_ind(perf_metric)]]
        ci_low, ci_high = mne.stats.bootstrap_confidence_interval(np.array(metric_others), ci=ci, n_bootstraps=n_boot, stat_fun='median')
        metric_other = np.median(metric_others)

        cur_user_df = user_df[user_df["prediction_bin"] == severity_i]
        y_true_user = cur_user_df.pred.to_numpy()  # user's label
        y_pred = cur_user_df.rating_avg.to_numpy()  # system's label (avg)

        if len(y_true_user) > 0:
            used_bins.append(bin_labels[severity_i])
            metric_user = calc_metric_user(y_true_user, y_pred, perf_metric)
            y_user.append(metric_user)
            y_other.append(metric_other)
            other_ci_low.append(ci_low)
            other_ci_high.append(ci_high)

    return y_user, y_other, used_bins, other_ci_low, other_ci_high

def get_topic_bins(perf_metric, user_df, other_dfs, n_topics, ci=0.95, n_boot=501):
    # Note: not using other_dfs anymore
    y_user = []
    y_other = []
    used_bins = []
    other_ci_low = []
    other_ci_high = []
    selected_topics = unique_topics_ids[1:(n_topics + 1)]

    for topic_id in selected_topics:
        cur_topic = topic_ids_to_topics[topic_id]
        metric_others = [metrics[get_metric_ind(perf_metric)] for metrics in perf_1000_topics[topic_id].values() if metrics[get_metric_ind(perf_metric)]]
        ci_low, ci_high = mne.stats.bootstrap_confidence_interval(np.array(metric_others), ci=ci, n_bootstraps=n_boot, stat_fun='median')
        metric_other = np.median(metric_others)

        cur_user_df = user_df[user_df["topic"] == cur_topic]
        y_true_user = cur_user_df.pred.to_numpy()  # user's label
        y_pred = cur_user_df.rating_avg.to_numpy()  # system's label (avg)

        if len(y_true_user) > 0:
            used_bins.append(cur_topic)
            metric_user = calc_metric_user(y_true_user, y_pred, perf_metric)
            y_user.append(metric_user)
            y_other.append(metric_other)
            other_ci_low.append(ci_low)
            other_ci_high.append(ci_high)

    return y_user, y_other, used_bins, other_ci_low, other_ci_high

def calc_metric_user(y_true_user, y_pred, perf_metric):
    if perf_metric == "MAE":
        metric_user = mean_absolute_error(y_true_user, y_pred)

    elif perf_metric == "MSE":
        metric_user = mean_squared_error(y_true_user, y_pred)

    elif perf_metric == "RMSE":
        metric_user = mean_squared_error(y_true_user, y_pred, squared=False)

    elif perf_metric == "avg_diff":
        metric_user = np.mean(y_true_user - y_pred)

    return metric_user

def get_toxicity_category_bins(perf_metric, user_df, other_dfs, threshold=0.5, ci=0.95, n_boot=501):
    # Note: not using other_dfs anymore; threshold from pre-calculation is 0.5
    cat_cols = ["is_profane_frac", "is_threat_frac", "is_identity_attack_frac", "is_insult_frac", "is_sexual_harassment_frac"]
    cat_labels = ["Profanity", "Threats", "Identity Attacks", "Insults", "Sexual Harassment"]
    y_user = []
    y_other = []
    used_bins = []
    other_ci_low = []
    other_ci_high = []
    for i, cur_col_name in enumerate(cat_cols):
        metric_others = [metrics[get_metric_ind(perf_metric)] for metrics in perf_1000_tox_cat[cur_col_name].values() if metrics[get_metric_ind(perf_metric)]]
        ci_low, ci_high = mne.stats.bootstrap_confidence_interval(np.array(metric_others), ci=ci, n_bootstraps=n_boot, stat_fun='median')
        metric_other = np.median(metric_others)

        # Filter to rows where a comment received an average label >= the provided threshold for the category
        cur_user_df = user_df[user_df[cur_col_name] >= threshold]
        y_true_user = cur_user_df.pred.to_numpy()  # user's label
        y_pred = cur_user_df.rating_avg.to_numpy()  # system's label (avg)

        if len(y_true_user) > 0:
            used_bins.append(cat_labels[i])
            metric_user = calc_metric_user(y_true_user, y_pred, perf_metric)
            y_user.append(metric_user)
            y_other.append(metric_other)
            other_ci_low.append(ci_low)
            other_ci_high.append(ci_high)

    return y_user, y_other, used_bins, other_ci_low, other_ci_high

def plot_class_cond_results(preds_df, breakdown_axis, perf_metric, other_ids, sort_bars, n_topics, worker_id="A"):
    # Note: preds_df already has binned results
    # Prepare dfs
    user_df = preds_df[preds_df.user_id == worker_id].sort_values(by=["item_id"]).reset_index()
    other_dfs = [preds_df[preds_df.user_id == other_id].sort_values(by=["item_id"]).reset_index() for other_id in other_ids]

    if breakdown_axis == "toxicity_severity":
        y_user, y_other, used_bins, other_ci_low, other_ci_high = get_toxicity_severity_bins(perf_metric, user_df, other_dfs)
    elif breakdown_axis == "topic":
        y_user, y_other, used_bins, other_ci_low, other_ci_high = get_topic_bins(perf_metric, user_df, other_dfs, n_topics)
    elif breakdown_axis == "toxicity_category":
        y_user, y_other, used_bins, other_ci_low, other_ci_high = get_toxicity_category_bins(perf_metric, user_df, other_dfs)

    diffs = list(np.array(y_user) - np.array(y_other))

    # Generate bar chart
    data = pd.DataFrame({
        "metric_val": y_user + y_other,
        "Labeler": ["You" for _ in range(len(y_user))] + ["Other users" for _ in range(len(y_user))],
        "used_bins": used_bins + used_bins,
        "diffs": diffs + diffs,
        "lower_cis": y_user + other_ci_low,
        "upper_cis": y_user + other_ci_high,
    })

    color_domain = ['You', 'Other users']
    color_range = [YOUR_COLOR, OTHER_USERS_COLOR]

    base = alt.Chart()
    chart_title=f"{internal_to_readable[breakdown_axis]} Results"
    x_axis = alt.X("Labeler:O", sort=("You", "Other users"), title=None, axis=None)
    y_axis = alt.Y("metric_val:Q", title=internal_to_readable[perf_metric])
    if sort_bars:
        col_content = alt.Column("used_bins:O", sort=alt.EncodingSortField(field="diffs", op="mean", order='descending'))
    else:
        col_content = alt.Column("used_bins:O")

    if n_topics is not None and n_topics > 10:
        # Change to horizontal bar chart
        bar = base.mark_bar(lineBreak="_").encode(
            y=x_axis,
            x=y_axis,
            color=alt.Color("Labeler:O", scale=alt.Scale(domain=color_domain, range=color_range)),
            tooltip=[
                alt.Tooltip('Labeler:O', title='Labeler'),
                alt.Tooltip('metric_val:Q', title=perf_metric, format=".3f"),
            ]
        )
        error_bars = base.mark_errorbar().encode(
            y=x_axis,
            x = alt.X("lower_cis:Q", title=internal_to_readable[perf_metric]),
            x2 = alt.X2("upper_cis:Q", title=None),
            tooltip=[
                alt.Tooltip('lower_cis:Q', title='Lower CI', format=".3f"),
                alt.Tooltip('upper_cis:Q', title='Upper CI', format=".3f"),
            ]
        )
        combined = alt.layer(
            bar, error_bars, data=data
        ).facet(
            row=col_content
        ).properties(
            title=chart_title,
        ).interactive()
    else:
        bar = base.mark_bar(lineBreak="_").encode(
            x=x_axis,
            y=y_axis,
            color=alt.Color("Labeler:O", scale=alt.Scale(domain=color_domain, range=color_range)),
            tooltip=[
                alt.Tooltip('Labeler:O', title='Labeler'),
                alt.Tooltip('metric_val:Q', title=perf_metric, format=".3f"),
            ]
        )
        error_bars = base.mark_errorbar().encode(
            x=x_axis,
            y = alt.Y("lower_cis:Q", title=internal_to_readable[perf_metric]),
            y2 = alt.Y2("upper_cis:Q", title=None),
            tooltip=[
                alt.Tooltip('lower_cis:Q', title='Lower CI', format=".3f"),
                alt.Tooltip('upper_cis:Q', title='Upper CI', format=".3f"),
            ]
        )
        combined = alt.layer(
            bar, error_bars, data=data
        ).facet(
            column=col_content
        ).properties(
            title=chart_title,
        ).interactive()

    return combined

def show_overall_perf(variant, error_type, cur_user, threshold=TOXIC_THRESHOLD, breakdown_axis=None, topic_vis_method="median"):
    # Your perf (calculate using model and testset)
    breakdown_axis = readable_to_internal[breakdown_axis]

    if breakdown_axis is not None:
        with open(os.path.join(module_dir, f"data/preds_dfs/{variant}.pkl"), "rb") as f:
            preds_df = pickle.load(f)

    # Read from file
    chart_dir = "./data/charts"
    chart_file = os.path.join(chart_dir, f"{cur_user}_{variant}.pkl")
    if os.path.isfile(chart_file):
        with open(chart_file, "r") as f:
            topic_overview_plot_json = json.load(f)
    else:
        preds_df_mod = preds_df.merge(comments_grouped_full_topic_cat, on="item_id", how="left", suffixes=('_', '_avg'))
        if topic_vis_method == "median":
            preds_df_mod_grp = preds_df_mod.groupby(["topic_", "user_id"]).median()
        elif topic_vis_method == "mean":
            preds_df_mod_grp = preds_df_mod.groupby(["topic_", "user_id"]).mean()
        topic_overview_plot_json = plot_overall_vis(preds_df=preds_df_mod_grp, n_topics=200, threshold=threshold, error_type=error_type, cur_user=cur_user, cur_model=variant)

    return {
        "topic_overview_plot_json": json.loads(topic_overview_plot_json),
    }

########################################
# GET_CLUSTER_RESULTS utils
def get_overall_perf3(preds_df, perf_metric, other_ids, worker_id="A"):
    # Prepare dataset to calculate performance
    # Note: true is user and pred is system
    y_true = preds_df[preds_df["user_id"] == worker_id].pred.to_numpy()
    y_pred_user = preds_df[preds_df["user_id"] == worker_id].rating_avg.to_numpy()

    y_true_others = y_pred_others = [preds_df[preds_df["user_id"] == other_id].pred.to_numpy() for other_id in other_ids]
    y_pred_others = [preds_df[preds_df["user_id"] == other_id].rating_avg.to_numpy() for other_id in other_ids]

    # Get performance for user's model and for other users
    if perf_metric == "MAE":
        user_perf = mean_absolute_error(y_true, y_pred_user)
        other_perfs = [mean_absolute_error(y_true_others[i], y_pred_others[i]) for i in range(len(y_true_others))]
    elif perf_metric == "MSE":
        user_perf = mean_squared_error(y_true, y_pred_user)
        other_perfs = [mean_squared_error(y_true_others[i], y_pred_others[i]) for i in range(len(y_true_others))]
    elif perf_metric == "RMSE":
        user_perf = mean_squared_error(y_true, y_pred_user, squared=False)
        other_perfs = [mean_squared_error(y_true_others[i], y_pred_others[i], squared=False) for i in range(len(y_true_others))]
    elif perf_metric == "avg_diff":
        user_perf = np.mean(y_true - y_pred_user)
        other_perfs = [np.mean(y_true_others[i] - y_pred_others[i]) for i in range(len(y_true_others))]

    other_perf = np.mean(other_perfs)  # average across all other users
    return user_perf, other_perf

def style_color_difference(row):
    full_opacity_diff = 3.
    pred_user_col = "Your predicted rating"
    pred_other_col = "Other users' predicted rating"
    pred_system_col = "Status-quo system rating"
    diff_user = row[pred_user_col] - row[pred_system_col]
    diff_other = row[pred_other_col] - row[pred_system_col]
    red = "234, 133, 125"
    green = "142, 205, 162"
    bkgd_user = green if diff_user < 0 else red  # red if more toxic; green if less toxic
    opac_user = min(abs(diff_user / full_opacity_diff), 1.)
    bkgd_other = green if diff_other < 0 else red  # red if more toxic; green if less toxic
    opac_other = min(abs(diff_other / full_opacity_diff), 1.)
    return ["", f"background-color: rgba({bkgd_user}, {opac_user});", f"background-color: rgba({bkgd_other}, {opac_other});", "", ""]

def display_examples_cluster(preds_df, other_ids, num_examples, sort_ascending, worker_id="A"):
    user_df = preds_df[preds_df.user_id == worker_id].sort_values(by=["item_id"]).reset_index()
    others_df = preds_df[preds_df.user_id == other_ids[0]]
    for i in range(1, len(other_ids)):
        others_df.append(preds_df[preds_df.user_id == other_ids[i]])
    others_df.groupby(["item_id"]).mean()
    others_df = others_df.sort_values(by=["item_id"]).reset_index()

    df = pd.merge(user_df, others_df, on="item_id", how="left", suffixes=('_user', '_other'))
    df["Comment"] = df["comment_user"]
    df["Your predicted rating"] = df["pred_user"]
    df["Other users' predicted rating"] = df["pred_other"]
    df["Status-quo system rating"] = df["rating_avg_user"]
    df["Status-quo system std dev"] = df["rating_stddev_user"]
    df = df[["Comment", "Your predicted rating", "Other users' predicted rating", "Status-quo system rating", "Status-quo system std dev"]]

    # Add styling
    df = df.sort_values(by=['Status-quo system std dev'], ascending=sort_ascending)
    n_to_sample = np.min([num_examples, len(df)])
    df = df.sample(n=n_to_sample).reset_index(drop=True)
    return df.style.apply(style_color_difference, axis=1).render()

def calc_odds_ratio(df, comparison_group, toxic_threshold=1.5, worker_id="A", debug=False, smoothing_factor=1):
    if comparison_group == "status_quo":
        other_pred_col = "rating_avg"
        # Get unique comments, but fetch average labeler rating
        num_toxic_other = len(df[(df.user_id == "A") & (df[other_pred_col] >= toxic_threshold)]) + smoothing_factor
        num_nontoxic_other = len(df[(df.user_id == "A") & (df[other_pred_col] < toxic_threshold)]) + smoothing_factor
    elif comparison_group == "other_users":
        other_pred_col = "pred"
        num_toxic_other = len(df[(df.user_id != "A") & (df[other_pred_col] >= toxic_threshold)]) + smoothing_factor
        num_nontoxic_other = len(df[(df.user_id != "A") & (df[other_pred_col] < toxic_threshold)]) + smoothing_factor

    num_toxic_user = len(df[(df.user_id == "A") & (df.pred >= toxic_threshold)]) + smoothing_factor
    num_nontoxic_user = len(df[(df.user_id == "A") & (df.pred < toxic_threshold)]) + smoothing_factor

    toxic_ratio = num_toxic_user / num_toxic_other
    nontoxic_ratio = num_nontoxic_user / num_nontoxic_other
    odds_ratio = toxic_ratio / nontoxic_ratio

    if debug:
        print(f"Odds ratio: {odds_ratio}")
        print(f"num_toxic_user: {num_toxic_user}, num_nontoxic_user: {num_nontoxic_user}")
        print(f"num_toxic_other: {num_toxic_other}, num_nontoxic_other: {num_nontoxic_other}")

    contingency_table = [[num_toxic_user, num_nontoxic_user], [num_toxic_other, num_nontoxic_other]]
    odds_ratio, p_val = stats.fisher_exact(contingency_table, alternative='two-sided')
    if debug:
        print(f"Odds ratio: {odds_ratio}, p={p_val}")

    return odds_ratio

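# Worked example for calc_odds_ratio (added for illustration; counts are hypothetical):
# if the user's model labels 10 comments toxic and 30 non-toxic while the comparison
# group labels 5 toxic and 35 non-toxic, smoothing_factor=1 gives the contingency
# table [[11, 31], [6, 36]], and Fisher's exact test returns an odds ratio of
# (11 * 36) / (31 * 6) ~= 2.13.
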
# Neighbor search
def get_match(comment_inds, K=20, threshold=None, debug=False):
    match_ids = []
    rows = []
    for i in comment_inds:
        if debug:
            print(f"\nComment: {comments[i]}")
        query_embedding = model.encode(comments[i], convert_to_tensor=True)
        hits = util.semantic_search(query_embedding, embeddings, score_function=util.cos_sim, top_k=K)
        # print(hits[0])
        for hit in hits[0]:
            c_id = hit['corpus_id']
            score = np.round(hit['score'], 3)
            if threshold is None or score > threshold:
                match_ids.append(c_id)
                if debug:
                    print(f"\t(ID={c_id}, Score={score}): {comments[c_id]}")
                rows.append([c_id, score, comments[c_id]])

    df = pd.DataFrame(rows, columns=["id", "score", "comment"])
    return match_ids

def display_examples_auto_cluster(preds_df, cluster, other_ids, perf_metric, sort_ascending=True, worker_id="A", num_examples=10):
    # Overall performance
    topic_df = preds_df
    topic_df = topic_df[topic_df["topic"] == cluster]
    user_perf, other_perf = get_overall_perf3(topic_df, perf_metric, other_ids)

    user_direction = "LOWER" if user_perf < 0 else "HIGHER"
    other_direction = "LOWER" if other_perf < 0 else "HIGHER"
    print(f"Your ratings are on average {np.round(abs(user_perf), 3)} {user_direction} than the existing system for this cluster")
    print(f"Others' ratings (based on {len(other_ids)} users) are on average {np.round(abs(other_perf), 3)} {other_direction} than the existing system for this cluster")

    # Display example comments
    df = display_examples_cluster(preds_df, other_ids, num_examples, sort_ascending)
    return df


# function to get results for a new provided cluster
def display_examples_manual_cluster(preds_df, cluster_comments, other_ids, perf_metric, sort_ascending=True, worker_id="A"):
    # Overall performance
    cluster_df = preds_df[preds_df["comment"].isin(cluster_comments)]
    user_perf, other_perf = get_overall_perf3(cluster_df, perf_metric, other_ids)

    user_direction = "LOWER" if user_perf < 0 else "HIGHER"
    other_direction = "LOWER" if other_perf < 0 else "HIGHER"
    print(f"Your ratings are on average {np.round(abs(user_perf), 3)} {user_direction} than the existing system for this cluster")
    print(f"Others' ratings (based on {len(other_ids)} users) are on average {np.round(abs(other_perf), 3)} {other_direction} than the existing system for this cluster")

    user_df = preds_df[preds_df.user_id == worker_id].sort_values(by=["item_id"]).reset_index()
    others_df = preds_df[preds_df.user_id == other_ids[0]]
    for i in range(1, len(other_ids)):
        others_df.append(preds_df[preds_df.user_id == other_ids[i]])
    others_df.groupby(["item_id"]).mean()
    others_df = others_df.sort_values(by=["item_id"]).reset_index()

    # Get cluster_comments
    user_df = user_df[user_df["comment"].isin(cluster_comments)]
    others_df = others_df[others_df["comment"].isin(cluster_comments)]

    df = pd.merge(user_df, others_df, on="item_id", how="left", suffixes=('_user', '_other'))
    df["pred_system"] = df["rating_avg_user"]
    df["pred_system_stddev"] = df["rating_stddev_user"]
    df = df[["item_id", "comment_user", "pred_user", "pred_other", "pred_system", "pred_system_stddev"]]

    # Add styling
    df = df.sort_values(by=['pred_system_stddev'], ascending=sort_ascending)
    df = df.style.apply(style_color_difference, axis=1).render()
    return df

########################################
# GET_LABELING utils
def create_example_sets(comments_df, n_label_per_bin, score_bins, keyword=None, topic=None):
    # Restrict to the keyword, if provided
    df = comments_df.copy()
    if keyword != None:
        df = df[df["comment"].str.contains(keyword)]

    if topic != None:
        df = df[df["topic"] == topic]

    # Try to choose n values from each provided score bin
    ex_to_label = []
    bin_names = []
    bin_label_counts = []
    for i, score_bin in enumerate(score_bins):
        min_score, max_score = score_bin
        cur_df = df[(df["rating"] >= min_score) & (df["rating"] < max_score) & (df["item_id"].isin(train_df_ids))]
        # sample rows for label
        comment_ids = cur_df.item_id.tolist()
        cur_n_label_per_bin = n_label_per_bin[i]
        cap = min(len(comment_ids), (cur_n_label_per_bin))
        to_label = np.random.choice(comment_ids, cap, replace=False)
        ex_to_label.extend(to_label)
        bin_names.append(f"[{min_score}, {max_score})")
        bin_label_counts.append(len(to_label))

    return ex_to_label

def get_grp_model_labels(comments_df, n_label_per_bin, score_bins, grp_ids):
    df = comments_df.copy()

    train_df_grp = train_df[train_df["user_id"].isin(grp_ids)]
    train_df_grp_avg = train_df_grp.groupby(by=["item_id"]).median().reset_index()
    train_df_grp_avg_ids = train_df_grp_avg["item_id"].tolist()

    ex_to_label = []  # IDs of comments to use for group model training
    for i, score_bin in enumerate(score_bins):
        min_score, max_score = score_bin
        # get eligible comments to sample
        cur_df = df[(df["rating"] >= min_score) & (df["rating"] < max_score) & (df["item_id"].isin(train_df_grp_avg_ids))]
        comment_ids = cur_df.item_id.unique().tolist()
        # sample comments
        cur_n_label_per_bin = n_label_per_bin[i]
        cap = min(len(comment_ids), (cur_n_label_per_bin))
        to_label = np.random.choice(comment_ids, cap, replace=False)
        ex_to_label.extend((to_label))

    train_df_grp_avg = train_df_grp_avg[train_df_grp_avg["item_id"].isin(ex_to_label)]

    ratings_grp = {ids_to_comments[int(r["item_id"])]: r["rating"] for _, r in train_df_grp_avg.iterrows()}

    return ratings_grp

########################################
# GET_PERSONALIZED_MODEL utils
def fetch_existing_data(model_name, last_label_i):
    # Check if we have cached model performance
    perf_dir = f"./data/perf/{model_name}"
    label_dir = f"./data/labels/{model_name}"
    if os.path.isdir(os.path.join(module_dir, perf_dir)):
        # Fetch cached results
        last_i = len([name for name in os.listdir(os.path.join(module_dir, perf_dir)) if os.path.isfile(os.path.join(module_dir, perf_dir, name))])
        with open(os.path.join(module_dir, perf_dir, f"{last_i}.pkl"), "rb") as f:
            mae, mse, rmse, avg_diff = pickle.load(f)
    else:
        # Fetch results from trained model
        with open(os.path.join(module_dir, f"./data/trained_models/{model_name}.pkl"), "rb") as f:
            cur_model = pickle.load(f)
        mae, mse, rmse, avg_diff = users_perf(cur_model)
        # Cache results
        os.mkdir(os.path.join(module_dir, perf_dir))
        with open(os.path.join(module_dir, perf_dir, "1.pkl"), "wb") as f:
            pickle.dump((mae, mse, rmse, avg_diff), f)

    # Fetch previous user-provided labels
    ratings_prev = None
    if last_label_i > 0:
        with open(os.path.join(module_dir, label_dir, f"{last_i}.pkl"), "rb") as f:
            ratings_prev = pickle.load(f)
    return mae, mse, rmse, avg_diff, ratings_prev

def train_updated_model(model_name, last_label_i, ratings, user, top_n=20, topic=None):
    # Check if there is previously-labeled data; if so, combine it with this data
    perf_dir = f"./data/perf/{model_name}"
    label_dir = f"./data/labels/{model_name}"
    labeled_df = format_labeled_data(ratings)  # Treat ratings as full batch of all ratings
    ratings_prev = None

    # Filter out rows with "unsure" (-1)
    labeled_df = labeled_df[labeled_df["rating"] != -1]

    # Filter to top N for user study
    if topic is None:
        # labeled_df = labeled_df.head(top_n)
        labeled_df = labeled_df.tail(top_n)
    else:
        # For topic tuning, need to fetch old labels
        if (last_label_i > 0):
            # Concatenate previous set of labels with this new batch of labels
            with open(os.path.join(module_dir, label_dir, f"{last_label_i}.pkl"), "rb") as f:
                ratings_prev = pickle.load(f)
            labeled_df_prev = format_labeled_data(ratings_prev)
            labeled_df_prev = labeled_df_prev[labeled_df_prev["rating"] != -1]
            ratings.update(ratings_prev)  # append old ratings to ratings
            labeled_df = pd.concat([labeled_df_prev, labeled_df])

    print("len ratings for training:", len(labeled_df))

    cur_model, perf, _, _ = train_user_model(ratings_df=labeled_df)

    user_perf_metrics[model_name] = users_perf(cur_model)

    mae, mse, rmse, avg_diff = user_perf_metrics[model_name]

    cur_preds_df = get_preds_df(cur_model, ["A"], sys_eval_df=ratings_df_full, topic=topic, model_name=model_name)  # Just get results for user

    # Save this batch of labels
    with open(os.path.join(module_dir, label_dir, f"{last_label_i + 1}.pkl"), "wb") as f:
        pickle.dump(ratings, f)

    # Save model results
    with open(os.path.join(module_dir, f"./data/preds_dfs/{model_name}.pkl"), "wb") as f:
        pickle.dump(cur_preds_df, f)

    if model_name not in all_model_names:
        all_model_names.append(model_name)
        with open(os.path.join(module_dir, "./data/all_model_names.pkl"), "wb") as f:
            pickle.dump(all_model_names, f)

    # Handle user
    if user not in users_to_models:
        users_to_models[user] = []  # New user
    if model_name not in users_to_models[user]:
        users_to_models[user].append(model_name)  # New model
    with open(f"./data/users_to_models.pkl", "wb") as f:
        pickle.dump(users_to_models, f)

    with open(os.path.join(module_dir, "./data/user_perf_metrics.pkl"), "wb") as f:
        pickle.dump(user_perf_metrics, f)
    with open(os.path.join(module_dir, f"./data/trained_models/{model_name}.pkl"), "wb") as f:
        pickle.dump(cur_model, f)

    # Cache performance results
    if not os.path.isdir(os.path.join(module_dir, perf_dir)):
        os.mkdir(os.path.join(module_dir, perf_dir))
    last_perf_i = len([name for name in os.listdir(os.path.join(module_dir, perf_dir)) if os.path.isfile(os.path.join(module_dir, perf_dir, name))])
    with open(os.path.join(module_dir, perf_dir, f"{last_perf_i + 1}.pkl"), "wb") as f:
        pickle.dump((mae, mse, rmse, avg_diff), f)

    ratings_prev = ratings
    return mae, mse, rmse, avg_diff, ratings_prev

def format_labeled_data(ratings, worker_id="A", debug=False):
    all_rows = []
    for comment, rating in ratings.items():
        comment_id = comments_to_ids[comment]
        row = [worker_id, comment_id, int(rating)]
        all_rows.append(row)

    df = pd.DataFrame(all_rows, columns=["user_id", "item_id", "rating"])
    return df

def users_perf(model, sys_eval_df=sys_eval_df, avg_ratings_df=comments_grouped_full_topic_cat, worker_id="A"):
    # Load the full empty dataset
    sys_eval_comment_ids = sys_eval_df.item_id.unique().tolist()
    empty_ratings_rows = [[worker_id, c_id, 0] for c_id in sys_eval_comment_ids]
    empty_ratings_df = pd.DataFrame(empty_ratings_rows, columns=["user_id", "item_id", "rating"])

    # Compute predictions for full dataset
    reader = Reader(rating_scale=(0, 4))
    eval_set_data = Dataset.load_from_df(empty_ratings_df, reader)
    _, testset = train_test_split(eval_set_data, test_size=1.)
    predictions = model.test(testset)

    df = empty_ratings_df  # user_id, item_id, rating
    user_item_preds = get_predictions_by_user_and_item(predictions)
    df["pred"] = df.apply(lambda row: user_item_preds[(row.user_id, row.item_id)] if (row.user_id, row.item_id) in user_item_preds else np.nan, axis=1)

    df = df.merge(avg_ratings_df, on="item_id", how="left", suffixes=('_', '_avg'))
    df.dropna(subset = ["pred"], inplace=True)
    df["rating_"] = df.rating_.astype("int32")

    perf_metrics = get_overall_perf(df, "A")  # mae, mse, rmse, avg_diff
    return perf_metrics

def get_overall_perf(preds_df, user_id):
    # Prepare dataset to calculate performance
    y_pred = preds_df[preds_df["user_id"] == user_id].rating_avg.to_numpy()  # Assume system is just average of true labels
    y_true = preds_df[preds_df["user_id"] == user_id].pred.to_numpy()

    # Get performance for user's model
    mae = mean_absolute_error(y_true, y_pred)
    mse = mean_squared_error(y_true, y_pred)
    rmse = mean_squared_error(y_true, y_pred, squared=False)
    avg_diff = np.mean(y_true - y_pred)

    return mae, mse, rmse, avg_diff

def get_predictions_by_user_and_item(predictions):
    user_item_preds = {}
    for uid, iid, true_r, est, _ in predictions:
        user_item_preds[(uid, iid)] = est
    return user_item_preds

def get_preds_df(model, user_ids, orig_df=ratings_df_full, avg_ratings_df=comments_grouped_full_topic_cat, sys_eval_df=sys_eval_df, bins=BINS, topic=None, model_name=None):
    # Prep dataframe for all predictions we'd like to request
    start = time.time()
    sys_eval_comment_ids = sys_eval_df.item_id.unique().tolist()

    empty_ratings_rows = []
    for user_id in user_ids:
        empty_ratings_rows.extend([[user_id, c_id, 0] for c_id in sys_eval_comment_ids])
    empty_ratings_df = pd.DataFrame(empty_ratings_rows, columns=["user_id", "item_id", "rating"])
    print("setup", time.time() - start)

    # Evaluate model to get predictions
    start = time.time()
    reader = Reader(rating_scale=(0, 4))
    eval_set_data = Dataset.load_from_df(empty_ratings_df, reader)
    _, testset = train_test_split(eval_set_data, test_size=1.)
    predictions = model.test(testset)
    print("train_test_split", time.time() - start)

    # Update dataframe with predictions
    start = time.time()
    df = empty_ratings_df.copy()  # user_id, item_id, rating
    user_item_preds = get_predictions_by_user_and_item(predictions)
    df["pred"] = df.apply(lambda row: user_item_preds[(row.user_id, row.item_id)] if (row.user_id, row.item_id) in user_item_preds else np.nan, axis=1)
    df = df.merge(avg_ratings_df, on="item_id", how="left", suffixes=('_', '_avg'))
    df.dropna(subset = ["pred"], inplace=True)
    df["rating_"] = df.rating_.astype("int32")

    # Get binned predictions (based on user prediction)
    df["prediction_bin"], out_bins = pd.cut(df["pred"], bins, labels=False, retbins=True)
    df = df.sort_values(by=["item_id"])

    return df

def train_user_model(ratings_df, train_df=train_df, model_eval_df=model_eval_df, train_frac=0.75, model_type="SVD", sim_type=None, user_based=True):
    # Sample from shuffled labeled dataframe and add batch to train set; specified set size to model_eval set
    labeled = ratings_df.sample(frac=1)
    batch_size = math.floor(len(labeled) * train_frac)
    labeled_train = labeled[:batch_size]
    labeled_model_eval = labeled[batch_size:]

    train_df_ext = train_df.append(labeled_train)
    model_eval_df_ext = model_eval_df.append(labeled_model_eval)

    # Train model and show model eval set results
    model, perf = train_model(train_df_ext, model_eval_df_ext, model_type=model_type, sim_type=sim_type, user_based=user_based)

    return model, perf, labeled_train, labeled_model_eval

def train_model(train_df, model_eval_df, model_type="SVD", sim_type=None, user_based=True):
    # Train model
    reader = Reader(rating_scale=(0, 4))
    train_data = Dataset.load_from_df(train_df, reader)
    model_eval_data = Dataset.load_from_df(model_eval_df, reader)

    train_set = train_data.build_full_trainset()
    _, model_eval_set = train_test_split(model_eval_data, test_size=1.)

    sim_options = {
        "name": sim_type,
        "user_based": user_based,  # compute similarity between users or items
    }
    if model_type == "SVD":
        algo = SVD()  # SVD doesn't have similarity metric
    elif model_type == "KNNBasic":
        algo = KNNBasic(sim_options=sim_options)
    elif model_type == "KNNWithMeans":
        algo = KNNWithMeans(sim_options=sim_options)
    elif model_type == "KNNWithZScore":
        algo = KNNWithZScore(sim_options=sim_options)
    algo.fit(train_set)

    predictions = algo.test(model_eval_set)
    rmse = accuracy.rmse(predictions)
    fcp = accuracy.fcp(predictions)
    mae = accuracy.mae(predictions)
    mse = accuracy.mse(predictions)

    print(f"MAE: {mae}, MSE: {mse}, RMSE: {rmse}, FCP: {fcp}")
    perf = [mae, mse, rmse, fcp]

    return algo, perf

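# Hypothetical usage (added for illustration; the item IDs are placeholders):
# labeled = pd.DataFrame(
#     [["A", 101, 3], ["A", 205, 0], ["A", 310, 2]],
#     columns=["user_id", "item_id", "rating"])
# model, perf, labeled_train, labeled_model_eval = train_user_model(ratings_df=labeled)
# With the default model_type="SVD", `model` is a fitted Surprise SVD estimator and
# `perf` is [mae, mse, rmse, fcp] on the held-out model-eval split.
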
913 |
+
def plot_train_perf_results2(model_name):
    # Open labels
    label_dir = f"./data/labels/{model_name}"
    n_label_files = len([name for name in os.listdir(os.path.join(module_dir, label_dir)) if os.path.isfile(os.path.join(module_dir, label_dir, name))])

    all_rows = []
    with open(os.path.join(module_dir, label_dir, f"{n_label_files}.pkl"), "rb") as f:
        ratings = pickle.load(f)

    labeled_df = format_labeled_data(ratings)
    labeled_df = labeled_df[labeled_df["rating"] != -1]

    # Iterate through batches of 5 labels
    n_batches = int(np.ceil(len(labeled_df) / 5.))
    for i in range(n_batches):
        start = time.time()
        n_to_sample = np.min([5 * (i + 1), len(labeled_df)])
        cur_model, _, _, _ = train_user_model(ratings_df=labeled_df.head(n_to_sample))
        mae, mse, rmse, avg_diff = users_perf(cur_model)
        all_rows.append([n_to_sample, mae, "MAE"])
        print(f"iter {i}: {time.time() - start}")

    print("all_rows", all_rows)

    df = pd.DataFrame(all_rows, columns=["n_to_sample", "perf", "metric"])
    chart = alt.Chart(df).mark_line(point=True).encode(
        x=alt.X("n_to_sample:Q", title="Number of Comments Labeled"),
        y="perf",
        color="metric",
        tooltip=[
            alt.Tooltip('n_to_sample:Q', title="Number of Comments Labeled"),
            alt.Tooltip('metric:N', title="Metric"),
            alt.Tooltip('perf:Q', title="Metric Value", format=".3f"),
        ],
    ).properties(
        title=f"Performance over number of examples: {model_name}",
        width=500,
    )
    return chart

def plot_train_perf_results(model_name, mae):
    perf_dir = f"./data/perf/{model_name}"
    n_perf_files = len([name for name in os.listdir(os.path.join(module_dir, perf_dir)) if os.path.isfile(os.path.join(module_dir, perf_dir, name))])

    all_rows = []
    for i in range(1, n_perf_files + 1):
        with open(os.path.join(module_dir, perf_dir, f"{i}.pkl"), "rb") as f:
            mae, mse, rmse, avg_diff = pickle.load(f)
            all_rows.append([i, mae, "Your MAE"])

    df = pd.DataFrame(all_rows, columns=["version", "perf", "metric"])
    chart = alt.Chart(df).mark_line(point=True).encode(
        x="version:O",
        y="perf",
        color=alt.Color("metric", title="Performance metric"),
        tooltip=[
            alt.Tooltip('version:O', title='Version'),
            alt.Tooltip('metric:N', title="Metric"),
            alt.Tooltip('perf:Q', title="Metric Value", format=".3f"),
        ],
    ).properties(
        title=f"Performance over model versions: {model_name}",
        width=500,
    )

    PCT_50 = 0.591
    PCT_75 = 0.662
    PCT_90 = 0.869

    plot_dim_width = 500
    domain_min = 0.0
    domain_max = 1.0
    bkgd = alt.Chart(pd.DataFrame({
        "start": [PCT_90, PCT_75, domain_min],
        "stop": [domain_max, PCT_90, PCT_75],
        "bkgd": ["Needs improvement (< top 90%)", "Okay (top 90%)", "Good (top 75%)"],
    })).mark_rect(opacity=0.2).encode(
        y=alt.Y("start:Q", scale=alt.Scale(domain=[0, domain_max])),
        y2=alt.Y2("stop:Q"),
        x=alt.value(0),
        x2=alt.value(plot_dim_width),
        color=alt.Color("bkgd:O", scale=alt.Scale(
            domain=["Needs improvement (< top 90%)", "Okay (top 90%)", "Good (top 75%)"],
            range=["red", "yellow", "green"]),
            title="How good is your MAE?"
        )
    )

    plot = (bkgd + chart).properties(width=plot_dim_width).resolve_scale(color='independent')
    mae_status = None
    if mae < PCT_75:
        mae_status = "Your MAE is in the <b>Good</b> range, which means that it's in the top 75% of scores compared to other users. Your model looks good to go."
    elif mae < PCT_90:
        mae_status = "Your MAE is in the <b>Okay</b> range, which means that it's in the top 90% of scores compared to other users. Your model can be used, but you can provide additional labels to improve it."
    else:
        mae_status = "Your MAE is in the <b>Needs improvement</b> range, which means that it's below the top 90% of scores compared to other users. Your model may need additional labels to improve."
    return plot, mae_status

########################################
# New visualizations
# Constants
VIS_BINS = np.round(np.arange(0, 4.01, 0.05), 3)
VIS_BINS_LABELS = [np.round(np.mean([x, y]), 3) for x, y in zip(VIS_BINS[:-1], VIS_BINS[1:])]

def get_key(sys, user, threshold):
    if sys <= threshold and user <= threshold:
        return "System agrees: Non-toxic"
    elif sys > threshold and user > threshold:
        return "System agrees: Toxic"
    else:
        if abs(sys - threshold) > 1.5:
            return "System differs: Error > 1.5"
        elif abs(sys - threshold) > 1.0:
            return "System differs: Error > 1.0"
        elif abs(sys - threshold) > 0.5:
            return "System differs: Error > 0.5"
        else:
            return "System differs: Error <=0.5"

def get_key_no_model(sys, threshold):
    if sys <= threshold:
        return "System says: Non-toxic"
    else:
        return "System says: Toxic"

def get_user_color(user, threshold):
    if user <= threshold:
        return "#FFF"  # white
    else:
        return "#808080"  # grey

def get_system_color(sys, user, threshold):
    if sys <= threshold and user <= threshold:
        return "#FFF"  # white
    elif sys > threshold and user > threshold:
        return "#808080"  # grey
    else:
        if abs(sys - threshold) > 1.5:
            return "#d62728"  # red
        elif abs(sys - threshold) > 1.0:
            return "#ff7a5c"  # med red
        elif abs(sys - threshold) > 0.5:
            return "#ffa894"  # light red
        else:
            return "#ffd1c7"  # very light red

def get_error_type(sys, user, threshold):
    if sys <= threshold and user <= threshold:
        return "No error (agree non-toxic)"
    elif sys > threshold and user > threshold:
        return "No error (agree toxic)"
    elif sys <= threshold and user > threshold:
        return "System may be under-sensitive"
    elif sys > threshold and user <= threshold:
        return "System may be over-sensitive"

def get_error_type_radio(sys, user, threshold):
    if sys <= threshold and user <= threshold:
        return "Show errors and non-errors"
    elif sys > threshold and user > threshold:
        return "Show errors and non-errors"
    elif sys <= threshold and user > threshold:
        return "System is under-sensitive"
    elif sys > threshold and user <= threshold:
        return "System is over-sensitive"

def get_error_magnitude(sys, user, threshold):
    if sys <= threshold and user <= threshold:
        return 0  # no classification error
    elif sys > threshold and user > threshold:
        return 0  # no classification error
    elif sys <= threshold and user > threshold:
        return abs(sys - user)
    elif sys > threshold and user <= threshold:
        return abs(sys - user)

def get_error_size(sys, user, threshold):
    if sys <= threshold and user <= threshold:
        return 0  # no classification error
    elif sys > threshold and user > threshold:
        return 0  # no classification error
    elif sys <= threshold and user > threshold:
        return sys - user
    elif sys > threshold and user <= threshold:
        return sys - user

def get_decision(rating, threshold):
    if rating <= threshold:
        return "Non-toxic"
    else:
        return "Toxic"

def get_category(row, threshold=0.3):
    k_to_category = {
        "is_profane_frac_": "Profanity",
        "is_threat_frac_": "Threat",
        "is_identity_attack_frac_": "Identity Attack",
        "is_insult_frac_": "Insult",
        "is_sexual_harassment_frac_": "Sexual Harassment",
    }
    categories = []
    for k in ["is_profane_frac_", "is_threat_frac_", "is_identity_attack_frac_", "is_insult_frac_", "is_sexual_harassment_frac_"]:
        if row[k] > threshold:
            categories.append(k_to_category[k])

    if len(categories) > 0:
        return ", ".join(categories)
    else:
        return ""

def get_comment_url(row):
    return f"#{row['item_id']}/#comment"

def get_topic_url(row):
    return f"#{row['topic_']}/#topic"

def plot_overall_vis(preds_df, error_type, cur_user, cur_model, n_topics=None, bins=VIS_BINS, threshold=TOXIC_THRESHOLD, bin_step=0.05):
    df = preds_df.copy().reset_index()

    if n_topics is not None:
        df = df[df["topic_id_"] < n_topics]

    df["vis_pred_bin"], out_bins = pd.cut(df["pred"], bins, labels=VIS_BINS_LABELS, retbins=True)
    df = df[df["user_id"] == "A"].sort_values(by=["item_id"]).reset_index()
    df["system_label"] = [("toxic" if r > threshold else "non-toxic") for r in df["rating"].tolist()]
    df["threshold"] = [threshold for r in df["rating"].tolist()]
    df["key"] = [get_key(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]
    df["url"] = df.apply(lambda row: get_topic_url(row), axis=1)

    # Plot sizing
    domain_min = 0
    domain_max = 4

    plot_dim_height = 500
    plot_dim_width = 750
    max_items = np.max(df["vis_pred_bin"].value_counts().tolist())
    mark_size = np.round(plot_dim_height / max_items) * 8
    if mark_size > 75:
        mark_size = 75
        plot_dim_height = 13 * max_items

    # Main chart
    chart = alt.Chart(df).mark_square(opacity=0.8, size=mark_size, stroke="grey", strokeWidth=0.5).transform_window(
        groupby=['vis_pred_bin'],
        sort=[{'field': 'rating'}],
        id='row_number()',
        ignorePeers=True,
    ).encode(
        x=alt.X('vis_pred_bin:Q', title="Our prediction of your rating", scale=alt.Scale(domain=(domain_min, domain_max))),
        y=alt.Y('id:O', title="Comments (ordered by System toxicity rating)", axis=alt.Axis(values=list(range(0, max_items, 5))), sort='descending'),
        color=alt.Color("key:O", scale=alt.Scale(
            domain=["System agrees: Non-toxic", "System agrees: Toxic", "System differs: Error > 1.5", "System differs: Error > 1.0", "System differs: Error > 0.5", "System differs: Error <=0.5"],
            range=["white", "#cbcbcb", "red", "#ff7a5c", "#ffa894", "#ffd1c7"]),
            title="System rating (box color)"
        ),
        href="url:N",
        tooltip=[
            alt.Tooltip("topic_:N", title="Topic"),
            alt.Tooltip("system_label:N", title="System label"),
            alt.Tooltip("rating:Q", title="System rating", format=".2f"),
            alt.Tooltip("pred:Q", title="Your rating", format=".2f")
        ]
    )

    # Filter to specified error type
    if error_type == "System is under-sensitive":
        # FN: system rates non-toxic, but user rates toxic
        chart = chart.transform_filter(
            alt.FieldGTPredicate(field="pred", gt=threshold)
        )
    elif error_type == "System is over-sensitive":
        # FP: system rates toxic, but user rates non-toxic
        chart = chart.transform_filter(
            alt.FieldLTEPredicate(field="pred", lte=threshold)
        )

    # Threshold line
    rule = alt.Chart(pd.DataFrame({
        "threshold": [threshold],
        "System threshold": [f"Threshold = {threshold}"]
    })).mark_rule().encode(
        x=alt.X("mean(threshold):Q", scale=alt.Scale(domain=(domain_min, domain_max)), title=""),
        color=alt.Color("System threshold:N", scale=alt.Scale(domain=[f"Threshold = {threshold}"], range=["grey"])),
        size=alt.value(2),
    )

    # Plot region annotations
    nontoxic_x = (domain_min + threshold) / 2.
    toxic_x = (domain_max + threshold) / 2.
    annotation = alt.Chart(pd.DataFrame({
        "annotation_text": ["Non-toxic", "Toxic"],
        "x": [nontoxic_x, toxic_x],
        "y": [max_items, max_items],
    })).mark_text(
        align="center",
        baseline="middle",
        fontSize=16,
        dy=10,
        color="grey"
    ).encode(
        x=alt.X("x", title=""),
        y=alt.Y("y", title="", axis=None),
        text="annotation_text"
    )

    # Plot region background colors
    bkgd = alt.Chart(pd.DataFrame({
        "start": [domain_min, threshold],
        "stop": [threshold, domain_max],
        "bkgd": ["Non-toxic (L side)", "Toxic (R side)"],
    })).mark_rect(opacity=1.0, stroke="grey", strokeWidth=0.25).encode(
        x=alt.X("start:Q", scale=alt.Scale(domain=[domain_min, domain_max])),
        x2=alt.X2("stop:Q"),
        y=alt.value(0),
        y2=alt.value(plot_dim_height),
        color=alt.Color("bkgd:O", scale=alt.Scale(
            domain=["Non-toxic (L side)", "Toxic (R side)"],
            range=["white", "#cbcbcb"]),
            title="Your rating (background color)"
        )
    )

    plot = (bkgd + annotation + chart + rule).properties(height=(plot_dim_height), width=plot_dim_width).resolve_scale(color='independent').to_json()

    # Save to file
    chart_dir = "./data/charts"
    chart_file = os.path.join(chart_dir, f"{cur_user}_{cur_model}.pkl")
    with open(chart_file, "w") as f:
        json.dump(plot, f)

    return plot

def get_cluster_overview_plot(preds_df, error_type, threshold=TOXIC_THRESHOLD, use_model=True):
    preds_df_mod = preds_df.merge(comments_grouped_full_topic_cat, on="item_id", how="left", suffixes=('_', '_avg'))

    if use_model:
        return plot_overall_vis_cluster(preds_df_mod, error_type=error_type, n_comments=500, threshold=threshold)
    else:
        return plot_overall_vis_cluster2(preds_df_mod, error_type=error_type, n_comments=500, threshold=threshold)

def plot_overall_vis_cluster2(preds_df, error_type, n_comments=None, bins=VIS_BINS, threshold=TOXIC_THRESHOLD, bin_step=0.05):
    df = preds_df.copy().reset_index()

    df["vis_pred_bin"], out_bins = pd.cut(df["rating"], bins, labels=VIS_BINS_LABELS, retbins=True)
    df = df[df["user_id"] == "A"].sort_values(by=["rating"]).reset_index()
    df["system_label"] = [("toxic" if r > threshold else "non-toxic") for r in df["rating"].tolist()]
    df["key"] = [get_key_no_model(sys, threshold) for sys in df["rating"].tolist()]
    print("len(df)", len(df))  # always 0 for some reason (from keyword search)
    df["category"] = df.apply(lambda row: get_category(row), axis=1)
    df["url"] = df.apply(lambda row: get_comment_url(row), axis=1)

    if n_comments is not None:
        n_to_sample = np.min([n_comments, len(df)])
        df = df.sample(n=n_to_sample)

    # Plot sizing
    domain_min = 0
    domain_max = 4
    plot_dim_height = 500
    plot_dim_width = 750
    max_items = np.max(df["vis_pred_bin"].value_counts().tolist())
    mark_size = np.round(plot_dim_height / max_items) * 8
    if mark_size > 75:
        mark_size = 75
        plot_dim_height = 13 * max_items

    # Main chart
    chart = alt.Chart(df).mark_square(opacity=0.8, size=mark_size, stroke="grey", strokeWidth=0.25).transform_window(
        groupby=['vis_pred_bin'],
        sort=[{'field': 'rating'}],
        id='row_number()',
        ignorePeers=True
    ).encode(
        x=alt.X('vis_pred_bin:Q', title="System toxicity rating", scale=alt.Scale(domain=(domain_min, domain_max))),
        y=alt.Y('id:O', title="Comments (ordered by System toxicity rating)", axis=alt.Axis(values=list(range(0, max_items, 5))), sort='descending'),
        color=alt.Color("key:O", scale=alt.Scale(
            domain=["System says: Non-toxic", "System says: Toxic"],
            range=["white", "#cbcbcb"]),
            title="System rating",
            legend=None,
        ),
        href="url:N",
        tooltip=[
            alt.Tooltip("comment_:N", title="comment"),
            alt.Tooltip("rating:Q", title="System rating", format=".2f"),
        ]
    )

    # Threshold line
    rule = alt.Chart(pd.DataFrame({
        "threshold": [threshold],
    })).mark_rule(color='grey').encode(
        x=alt.X("mean(threshold):Q", scale=alt.Scale(domain=[domain_min, domain_max]), title=""),
        size=alt.value(2),
    )

    # Plot region annotations
    nontoxic_x = (domain_min + threshold) / 2.
    toxic_x = (domain_max + threshold) / 2.
    annotation = alt.Chart(pd.DataFrame({
        "annotation_text": ["Non-toxic", "Toxic"],
        "x": [nontoxic_x, toxic_x],
        "y": [max_items, max_items],
    })).mark_text(
        align="center",
        baseline="middle",
        fontSize=16,
        dy=10,
        color="grey"
    ).encode(
        x=alt.X("x", title=""),
        y=alt.Y("y", title="", axis=None),
        text="annotation_text"
    )

    # Plot region background colors
    bkgd = alt.Chart(pd.DataFrame({
        "start": [domain_min, threshold],
        "stop": [threshold, domain_max],
        "bkgd": ["Non-toxic", "Toxic"],
    })).mark_rect(opacity=1.0, stroke="grey", strokeWidth=0.25).encode(
        x=alt.X("start:Q", scale=alt.Scale(domain=[domain_min, domain_max])),
        x2=alt.X2("stop:Q"),
        y=alt.value(0),
        y2=alt.value(plot_dim_height),
        color=alt.Color("bkgd:O", scale=alt.Scale(
            domain=["Non-toxic", "Toxic"],
            range=["white", "#cbcbcb"]),
            title="System rating"
        )
    )

    final_plot = (bkgd + annotation + chart + rule).properties(height=(plot_dim_height), width=plot_dim_width).resolve_scale(color='independent').to_json()

    return final_plot, df

def plot_overall_vis_cluster(preds_df, error_type, n_comments=None, bins=VIS_BINS, threshold=TOXIC_THRESHOLD, bin_step=0.05):
    df = preds_df.copy().reset_index(drop=True)
    # df = df[df["topic_"] == topic]

    df["vis_pred_bin"], out_bins = pd.cut(df["pred"], bins, labels=VIS_BINS_LABELS, retbins=True)
    df = df[df["user_id"] == "A"].sort_values(by=["rating"]).reset_index(drop=True)
    df["system_label"] = [("toxic" if r > threshold else "non-toxic") for r in df["rating"].tolist()]
    df["key"] = [get_key(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]
    print("len(df)", len(df))  # always 0 for some reason (from keyword search)
    # print("columns", df.columns)
    df["category"] = df.apply(lambda row: get_category(row), axis=1)
    df["url"] = df.apply(lambda row: get_comment_url(row), axis=1)

    if n_comments is not None:
        n_to_sample = np.min([n_comments, len(df)])
        df = df.sample(n=n_to_sample)

    # Plot sizing
    domain_min = 0
    domain_max = 4
    plot_dim_height = 500
    plot_dim_width = 750
    max_items = np.max(df["vis_pred_bin"].value_counts().tolist())
    mark_size = np.round(plot_dim_height / max_items) * 8
    if mark_size > 75:
        mark_size = 75
        plot_dim_height = 13 * max_items

    # Main chart
    chart = alt.Chart(df).mark_square(opacity=0.8, size=mark_size, stroke="grey", strokeWidth=0.25).transform_window(
        groupby=['vis_pred_bin'],
        sort=[{'field': 'rating'}],
        id='row_number()',
        ignorePeers=True
    ).encode(
        x=alt.X('vis_pred_bin:Q', title="Our prediction of your rating", scale=alt.Scale(domain=(domain_min, domain_max))),
        y=alt.Y('id:O', title="Comments (ordered by System toxicity rating)", axis=alt.Axis(values=list(range(0, max_items, 5))), sort='descending'),
        color=alt.Color("key:O", scale=alt.Scale(
            domain=["System agrees: Non-toxic", "System agrees: Toxic", "System differs: Error > 1.5", "System differs: Error > 1.0", "System differs: Error > 0.5", "System differs: Error <=0.5"],
            range=["white", "#cbcbcb", "red", "#ff7a5c", "#ffa894", "#ffd1c7"]),
            title="System rating (box color)"
        ),
        href="url:N",
        tooltip=[
            alt.Tooltip("comment_:N", title="comment"),
            alt.Tooltip("rating:Q", title="System rating", format=".2f"),
            alt.Tooltip("pred:Q", title="Your rating", format=".2f"),
            alt.Tooltip("category:N", title="Potential toxicity categories")
        ]
    )

    # Filter to specified error type
    if error_type == "System is under-sensitive":
        # FN: system rates non-toxic, but user rates toxic
        chart = chart.transform_filter(
            alt.FieldGTPredicate(field="pred", gt=threshold)
        )
    elif error_type == "System is over-sensitive":
        # FP: system rates toxic, but user rates non-toxic
        chart = chart.transform_filter(
            alt.FieldLTEPredicate(field="pred", lte=threshold)
        )

    # Threshold line
    rule = alt.Chart(pd.DataFrame({
        "threshold": [threshold],
    })).mark_rule(color='grey').encode(
        x=alt.X("mean(threshold):Q", scale=alt.Scale(domain=[domain_min, domain_max]), title=""),
        size=alt.value(2),
    )

    # Plot region annotations
    nontoxic_x = (domain_min + threshold) / 2.
    toxic_x = (domain_max + threshold) / 2.
    annotation = alt.Chart(pd.DataFrame({
        "annotation_text": ["Non-toxic", "Toxic"],
        "x": [nontoxic_x, toxic_x],
        "y": [max_items, max_items],
    })).mark_text(
        align="center",
        baseline="middle",
        fontSize=16,
        dy=10,
        color="grey"
    ).encode(
        x=alt.X("x", title=""),
        y=alt.Y("y", title="", axis=None),
        text="annotation_text"
    )

    # Plot region background colors
    bkgd = alt.Chart(pd.DataFrame({
        "start": [domain_min, threshold],
        "stop": [threshold, domain_max],
        "bkgd": ["Non-toxic (L side)", "Toxic (R side)"],
    })).mark_rect(opacity=1.0, stroke="grey", strokeWidth=0.25).encode(
        x=alt.X("start:Q", scale=alt.Scale(domain=[domain_min, domain_max])),
        x2=alt.X2("stop:Q"),
        y=alt.value(0),
        y2=alt.value(plot_dim_height),
        color=alt.Color("bkgd:O", scale=alt.Scale(
            domain=["Non-toxic (L side)", "Toxic (R side)"],
            range=["white", "#cbcbcb"]),
            title="Your rating (background color)"
        )
    )

    final_plot = (bkgd + annotation + chart + rule).properties(height=(plot_dim_height), width=plot_dim_width).resolve_scale(color='independent').to_json()

    return final_plot, df

def get_cluster_comments(df, error_type, threshold=TOXIC_THRESHOLD, worker_id="A", num_examples=50, use_model=True):
    df["user_color"] = [get_user_color(user, threshold) for user in df["pred"].tolist()]  # get cell colors
    df["system_color"] = [get_user_color(sys, threshold) for sys in df["rating"].tolist()]  # get cell colors
    df["error_color"] = [get_system_color(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]  # get cell colors
    df["error_type"] = [get_error_type(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]  # get error type in words
    df["error_amt"] = [abs(sys - threshold) for sys in df["rating"].tolist()]  # get raw error
    df["judgment"] = ["" for _ in range(len(df))]  # template for "agree" or "disagree" buttons

    if use_model:
        df = df.sort_values(by=["error_amt"], ascending=False)  # surface largest errors first
    else:
        print("get_cluster_comments; not using model")
        df = df.sort_values(by=["rating"], ascending=True)

    df["id"] = df["item_id"]
    # df["comment"] already exists
    df["comment"] = df["comment_"]
    df["toxicity_category"] = df["category"]
    df["user_rating"] = df["pred"]
    df["user_decision"] = [get_decision(rating, threshold) for rating in df["pred"].tolist()]
    df["system_rating"] = df["rating"]
    df["system_decision"] = [get_decision(rating, threshold) for rating in df["rating"].tolist()]
    df["error_type"] = df["error_type"]
    df = df.head(num_examples)
    df = df.round(decimals=2)

    # Filter to specified error type
    if error_type == "System is under-sensitive":
        # FN: system rates non-toxic, but user rates toxic
        df = df[df["error_type"] == "System may be under-sensitive"]
    elif error_type == "System is over-sensitive":
        # FP: system rates toxic, but user rates non-toxic
        df = df[df["error_type"] == "System may be over-sensitive"]
    elif error_type == "Both":
        df = df[(df["error_type"] == "System may be under-sensitive") | (df["error_type"] == "System may be over-sensitive")]

    return df.to_json(orient="records")

# PERSONALIZED CLUSTERS utils
def get_disagreement_comments(preds_df, mode, n=10_000, threshold=TOXIC_THRESHOLD):
    # Get difference between user rating and system rating
    df = preds_df.copy()
    df["diff"] = [get_error_size(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]
    df["error_type"] = [get_error_type(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]
    # asc = low to high; lowest = sys lower than user (under-sensitive)
    # desc = high to low; lowest = sys higher than user (over-sensitive)
    if mode == "under-sensitive":
        df = df[df["error_type"] == "System may be under-sensitive"]
        asc = True
    elif mode == "over-sensitive":
        df = df[df["error_type"] == "System may be over-sensitive"]
        asc = False
    df = df.sort_values(by=["diff"], ascending=asc)
    df = df.head(n)

    return df["comment_"].tolist(), df

def get_personal_clusters(model, n=3):
    personal_cluster_file = f"./data/personal_cluster_dfs/{model}.pkl"
    if (os.path.isfile(personal_cluster_file)):
        with open(personal_cluster_file, "rb") as f:
            cluster_df = pickle.load(f)
        cluster_df = cluster_df.sort_values(by=["topic_id"])
        topics_under = cluster_df[cluster_df["error_type"] == "System may be under-sensitive"]["topic"].unique().tolist()
        topics_under = topics_under[1:(n + 1)]
        topics_over = cluster_df[cluster_df["error_type"] == "System may be over-sensitive"]["topic"].unique().tolist()
        topics_over = topics_over[1:(n + 1)]
        return topics_under, topics_over
    else:
        topics_under_top = []
        topics_over_top = []
        preds_df_file = f"./data/preds_dfs/{model}.pkl"
        if (os.path.isfile(preds_df_file)):
            with open(preds_df_file, "rb") as f:
                preds_df = pickle.load(f)
            preds_df_mod = preds_df.merge(comments_grouped_full_topic_cat, on="item_id", how="left", suffixes=('_', '_avg')).reset_index()
            preds_df_mod = preds_df_mod[preds_df_mod["user_id"] == "A"]

            comments_under, comments_under_df = get_disagreement_comments(preds_df_mod, mode="under-sensitive", n=1000)
            if len(comments_under) > 0:
                topics_under = BERTopic(embedding_model="paraphrase-MiniLM-L6-v2").fit(comments_under)
                topics_under_top = topics_under.get_topic_info().head(n)["Name"].tolist()
                print("topics_under", topics_under_top)
                # Get topics per comment
                topics_assigned, _ = topics_under.transform(comments_under)
                comments_under_df["topic_id"] = topics_assigned
                cur_topic_ids = topics_under.get_topic_info().Topic
                topic_short_names = topics_under.get_topic_info().Name
                topic_ids_to_names = {cur_topic_ids[i]: topic_short_names[i] for i in range(len(cur_topic_ids))}
                comments_under_df["topic"] = [topic_ids_to_names[topic_id] for topic_id in comments_under_df["topic_id"].tolist()]

            comments_over, comments_over_df = get_disagreement_comments(preds_df_mod, mode="over-sensitive", n=1000)
            if len(comments_over) > 0:
                topics_over = BERTopic(embedding_model="paraphrase-MiniLM-L6-v2").fit(comments_over)
                topics_over_top = topics_over.get_topic_info().head(n)["Name"].tolist()
                print("topics_over", topics_over_top)
                # Get topics per comment
                topics_assigned, _ = topics_over.transform(comments_over)
                comments_over_df["topic_id"] = topics_assigned
                cur_topic_ids = topics_over.get_topic_info().Topic
                topic_short_names = topics_over.get_topic_info().Name
                topic_ids_to_names = {cur_topic_ids[i]: topic_short_names[i] for i in range(len(cur_topic_ids))}
                comments_over_df["topic"] = [topic_ids_to_names[topic_id] for topic_id in comments_over_df["topic_id"].tolist()]

            cluster_df = pd.concat([comments_under_df, comments_over_df])
            with open(f"./data/personal_cluster_dfs/{model}.pkl", "wb") as f:
                pickle.dump(cluster_df, f)

            return topics_under_top, topics_over_top
        return [], []
indie_label_svelte/.gitignore
ADDED
@@ -0,0 +1,4 @@
/node_modules/
/public/build/

.DS_Store
indie_label_svelte/README.md
ADDED
@@ -0,0 +1,9 @@
# Sample Project

This example illustrates how svelte-vega can be used in your application.
If you want to build something using our `Vega` component, you just need to install `svelte-vega` to be able to import it.

## Dependencies

To run this project, `svelte-vega` needs to be built first.
It is easiest to use from the [main repository](https://github.com/vega/svelte-vega).
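For context, a minimal sketch of how a svelte-vega component is typically dropped into a Svelte file (not part of this commit; the `spec`/`data` props and the sample values are assumptions for illustration):

```svelte
<script lang="ts">
    import { VegaLite } from "svelte-vega";

    // Hypothetical named dataset and Vega-Lite spec, for illustration only.
    const data = { table: [{ x: 1, y: 2 }, { x: 2, y: 5 }] };
    const spec = {
        data: { name: "table" },
        mark: "line",
        encoding: {
            x: { field: "x", type: "quantitative" },
            y: { field: "y", type: "quantitative" }
        }
    };
</script>

<!-- Render the chart from the spec, binding the named dataset -->
<VegaLite {data} {spec} />
```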
indie_label_svelte/package-lock.json
ADDED
The diff for this file is too large to render; see the raw diff.
indie_label_svelte/package.json
ADDED
@@ -0,0 +1,72 @@
{
  "name": "svelte-vega-sample",
  "version": "0.0.0",
  "author": "Alex Bäuerle <[email protected]> (https://a13x.io)",
  "repository": "[email protected]:vega/svelte-vega.git",
  "bugs": {
    "url": "https://github.com/vega/svelte-vega/issues"
  },
  "private": true,
  "scripts": {
    "build": "rollup -c",
    "dev": "rollup -c -w",
    "start": "sirv public --no-clear",
    "validate": "svelte-check"
  },
  "devDependencies": {
    "@babel/core": "^7.16.0",
    "@material/typography": "^13.0.0",
    "@rollup/plugin-commonjs": "^21.0.1",
    "@rollup/plugin-json": "^4.1.0",
    "@rollup/plugin-node-resolve": "^13.0.6",
    "@rollup/plugin-typescript": "^8.3.0",
    "@smui/button": "^6.0.0-beta.0",
    "@smui/card": "^6.0.0-beta.4",
    "@smui/checkbox": "^6.0.0-beta.2",
    "@smui/chips": "^6.0.0-beta.2",
    "@smui/circular-progress": "^6.0.0-beta.4",
    "@smui/common": "^6.0.0-beta.2",
    "@smui/data-table": "^6.0.0-beta.2",
    "@smui/drawer": "^6.0.0-beta.4",
    "@smui/form-field": "^6.0.0-beta.2",
    "@smui/icon-button": "^6.0.0-beta.4",
    "@smui/layout-grid": "^6.0.0-beta.2",
    "@smui/linear-progress": "^6.0.0-beta.2",
    "@smui/list": "^6.0.0-beta.4",
    "@smui/radio": "^6.0.0-beta.2",
    "@smui/select": "^6.0.0-beta.0",
    "@smui/switch": "^6.0.0-beta.4",
    "@smui/tab": "^6.0.0-beta.2",
    "@smui/tab-bar": "^6.0.0-beta.2",
    "@smui/textfield": "^6.0.0-beta.2",
    "@smui/tooltip": "^6.0.0-beta.4",
    "@tsconfig/svelte": "^2.0.1",
    "autoprefixer": "^10.4.0",
    "coffeescript": "^2.6.1",
    "less": "^4.1.2",
    "postcss": "^8.3.11",
    "postcss-load-config": "^3.1.0",
    "pug": "^3.0.2",
    "rollup": "^2.59.0",
    "rollup-plugin-css-only": "^3.1.0",
    "rollup-plugin-livereload": "^2.0.5",
    "rollup-plugin-svelte": "^7.0.0",
    "rollup-plugin-terser": "^7.0.0",
    "sass": "^1.43.4",
    "stylus": "^0.55.0",
    "sugarss": "^4.0.1",
    "svelte-check": "^2.2.8",
    "svelte-preprocess": "^4.9.8",
    "typescript": "^4.5.2"
  },
  "dependencies": {
    "sirv-cli": "^1.0.14",
    "svelecte": "^3.4.0",
    "svelte": "^3.44.1",
    "svelte-material-ui": "^6.0.0-beta.0",
    "svelte-vega": "^1.0.0",
    "tslib": "^2.3.1",
    "vega": "^5.21.0",
    "vega-lite": "^5.1.0"
  }
}
indie_label_svelte/public/favicon.png
ADDED
(binary file)
indie_label_svelte/public/global.css
ADDED
@@ -0,0 +1,311 @@
html, body {
    position: relative;
    width: 100%;
    height: 100%;
}

* {
    font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif !important;
}

.material-icons {
    font-family: 'Material Icons' !important;
}

body {
    color: #333;
    margin: 0;
    /* padding: 8px; */
    box-sizing: border-box;
    /* font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; */
    /* font-family: Roboto, "Helvetica Neue", sans-serif; */
}

a {
    color: rgb(0,100,200);
    text-decoration: none;
}

p, li {
    line-height: 150%;
}

a:hover {
    text-decoration: underline;
}

a:visited {
    color: rgb(0,80,160);
}

label {
    display: block;
}

input, button, select, textarea {
    font-family: inherit;
    font-size: inherit;
    -webkit-padding: 0.4em 0;
    padding: 0.4em;
    margin: 0 0 0.5em 0;
    box-sizing: border-box;
    border: 1px solid #ccc;
    border-radius: 2px;
}

input:disabled {
    color: #ccc;
}

button {
    color: #333;
    background-color: #f4f4f4;
    outline: none;
}

button:disabled {
    color: #999;
}

button:not(:disabled):active {
    background-color: #ddd;
}

button:focus {
    border-color: #666;
}

/* IndieLabel custom styles */
main {
    text-align: left;
    /* padding: 1em; */
    max-width: 240px;
    margin: 0 0;
}
h3 {
    color: rgb(80, 80, 80);
    font-size: 30px;
}
h5 {
    color: rgb(80, 80, 80);
    font-size: 25px;
}
h6 {
    margin-top: 50px;
    margin-bottom: 20px;
    text-transform: uppercase;
    font-size: 18px;
}

.head_3 {
    color: rgb(80, 80, 80);
    font-size: 30px;
    font-weight: bold;
    margin-top: 30px;
    margin-bottom: 30px;
}

.head_5 {
    color: rgb(80, 80, 80);
    font-size: 24px;
    font-weight: bold;
    margin-top: 25px;
    margin-bottom: 25px;
}

.head_6 {
    color: rgb(80, 80, 80);
    text-transform: uppercase;
    font-weight: bold;
    font-size: 18px;
    margin-top: 25px;
    margin-bottom: 25px;
}

.head_6_non_cap {
    color: rgb(80, 80, 80);
    font-weight: bold;
    font-size: 18px;
    margin-top: 25px;
    margin-bottom: 25px;
}

.head_6_highlight {
    color: #6200ee;
    text-transform: uppercase;
    font-weight: 400;
    font-size: 14px;
    margin-top: 25px;
    letter-spacing: 0.75px;
}

@media (min-width: 640px) {
    main {
        max-width: none;
    }
}

table {
    font-size: 14px;
}

.bold {
    font-weight: bold;
}

.bold-large {
    font-weight: bold;
    font-size: 16px;
}

.custom-blue {
    color: #3c94ff;
}

.mdc-data-table__cell {
    white-space: normal;
    word-break: break-word;
    padding: 10px;
}

.app_loading {
    width: 40%;
    padding-top: 20px;
    padding-bottom: 100px;
}

.app_loading_fullwidth {
    width: 100%;
    padding-top: 20px;
    padding-bottom: 100px;
}

.button_float_right {
    float:right;
    margin-left: 10px;
}

.spacing_vert {
    /* margin: 20px 0;
    padding: 10px 0; */
    margin: 10px 0;
}

.spacing_vert_20 {
    margin: 20px 0;
}

.spacing_vert_40 {
    margin: 40px 0;
}

.spacing_vert_60 {
    margin: 60px 0;
}

.page_title {
    font-size: 30px;
    font-weight: bold;
    /* color: #945ec9; */
    /* color: #6200ee; */
    color: #7826ed;
    padding: 0 20px;
    margin: 15px 0;
}

.hypothesis_panel {
    width: 30%;
    height: 100%;
    position: fixed;
    z-index: 10;
    top: 0;
    left: 0;
    overflow-x: hidden;
    overflow-y: hidden;
    background: #f3f3f3;
    /* border-right: 1px solid grey; */
}

.auditing_panel {
    position: fixed;
    width: 70%;
    height: 100%;
    left: 30%;
    overflow-y: scroll;
}

.tab_header {
    position: fixed;
    background-color: #e3d6fd;
    z-index: 1001;
    width: 70%;
    /* border-bottom: 1px solid grey; */
}

.label_table {
    height: 750px;
    overflow-y: scroll;
    width: 90%;
}

.label_table_expandable {
    width: 90%;
}

.edit_button_row {
    display: flex;
    align-items: center;
}

.edit_button_row_input {
    flex-grow: 1;
}

.grey_button {
    color: grey !important;
}

.white_button {
    color: white !important;
}

.grey_text {
    color: grey;
}

.section_indent {
    margin-left: 40px;
}

.audit_section {
    padding-top: 40px;
}

.comment_table_small {
    /* width: 150%; */
}

.comment_table_small .mdc-data-table__header-cell, .comment_table_small .mdc-data-table__cell, .comment_table_small .mdc-data-table__cell span {
    font-size: 13px !important;
}

/* Material UI Tab formatting */
.mdc-tab {
    height: 85px !important;
}

.mdc-tab-indicator .mdc-tab-indicator__content--underline {
    border-top-width: 3px !important;
}

.comment_table .mdc-data-table__header-cell, .comment_table .mdc-data-table {
    background-color: transparent !important;
}

.label_table th.mdc-data-table__header-cell {
    border-top: 1px solid rgb(224 224 224);
}

.mdc-drawer .mdc-deprecated-list-item {
    height: 64px !important;
}
indie_label_svelte/public/index.html
ADDED
@@ -0,0 +1,29 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset='utf-8'>
    <meta name='viewport' content='width=device-width,initial-scale=1'>

    <title>Svelte app</title>

    <link rel='icon' type='image/png' href='/favicon.png'>
    <link rel='stylesheet' href='/global.css'>
    <link rel='stylesheet' href='/build/bundle.css'>
    <link rel='stylesheet' href='/build/extra.css'>
    <!-- <link rel="stylesheet" href="../node_modules/svelte-material-ui/bare.css" /> -->
    <!-- <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/bare.min.css" /> -->
    <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" />

    <link
        rel="stylesheet"
        href="https://unpkg.com/@material/[email protected]/dist/mdc.typography.css"
    />

    <script defer src='/build/bundle.js'></script>
</head>

<body>
</body>

</html>
indie_label_svelte/public/logo.png
ADDED
(binary file)
indie_label_svelte/rollup.config.js
ADDED
@@ -0,0 +1,87 @@
import svelte from "rollup-plugin-svelte";
import commonjs from "@rollup/plugin-commonjs";
import resolve from "@rollup/plugin-node-resolve";
import livereload from "rollup-plugin-livereload";
import { terser } from "rollup-plugin-terser";
import sveltePreprocess from "svelte-preprocess";
import typescript from "@rollup/plugin-typescript";
import css from "rollup-plugin-css-only";
import json from "@rollup/plugin-json";
import * as child from "child_process";

const production = !process.env.ROLLUP_WATCH;

function serve() {
    let server;

    function toExit() {
        if (server) server.kill(0);
    }

    return {
        writeBundle() {
            if (server) return;
            server = child.spawn("npm", ["run", "start", "--", "--dev"], {
                stdio: ["ignore", "inherit", "inherit"],
                shell: true,
            });

            process.on("SIGTERM", toExit);
            process.on("exit", toExit);
        },
    };
}

export default {
    input: "src/main.ts",
    output: {
        sourcemap: true,
        format: "iife",
        name: "app",
        file: "public/build/bundle.js",
    },
    plugins: [
        svelte({
            preprocess: sveltePreprocess({ sourceMap: !production }),
            compilerOptions: {
                // enable run-time checks when not in production
                dev: !production,
            },
        }),
        // we'll extract any component CSS out into
        // a separate file - better for performance
        css({ output: "bundle.css" }),
        // css({ output: 'public/build/extra.css' }),

        // If you have external dependencies installed from
        // npm, you'll most likely need these plugins. In
        // some cases you'll need additional configuration -
        // consult the documentation for details:
        // https://github.com/rollup/plugins/tree/master/packages/commonjs
        resolve({
            browser: true,
            dedupe: ["svelte"],
        }),
        commonjs(),
        typescript({
            sourceMap: !production,
            inlineSources: !production,
        }),
        json(),

        // In dev mode, call `npm run start` once
        // the bundle has been generated
        !production && serve(),

        // Watch the `public` directory and refresh the
        // browser on changes when not in production
        !production && livereload("public"),

        // If we're building for production (npm run build
        // instead of npm run dev), minify
        production && terser(),
    ],
    watch: {
        clearScreen: false,
    },
};
indie_label_svelte/src/App.svelte
ADDED
@@ -0,0 +1,112 @@
<script lang="ts">
    import { onMount } from "svelte";
    import "../node_modules/svelte-material-ui/bare.css";

    import HypothesisPanel from "./HypothesisPanel.svelte";
    import MainPanel from "./MainPanel.svelte";
    import SelectUserDialog from "./SelectUserDialog.svelte";
    import Explore from "./Explore.svelte";
    import Results from "./Results.svelte";
    import StudyLinks from "./StudyLinks.svelte";
    import { user } from './stores/cur_user_store.js';
    import { users } from "./stores/all_users_store.js";

    let personalized_model;
    let personalized_models = [];

    // let topic = "";

    let error_type_options = ['Both', 'System is under-sensitive', 'System is over-sensitive', 'Show errors and non-errors'];
    let error_type = error_type_options[0];

    // Handle routing
    let searchParams = new URLSearchParams(window.location.search);
    let mode = searchParams.get("mode");
    let cur_user = searchParams.get("user");

    // Set cur_user if it's provided in URL params
    if (cur_user !== null) {
        user.update((value) => cur_user);
    }

    // Handle user dialog
    let user_dialog_open = false;
    user.subscribe(value => {
        cur_user = value;
    });

    // Handle all users
    let all_users = [];
    async function getUsers() {
        const response = await fetch("./get_users");
        const text = await response.text();
        const data = JSON.parse(text);
        all_users = data["users"];
        users.update((value) => all_users);
    }

    function getAuditSettings() {
        let req_params = {
            user: cur_user,
        };
        let params = new URLSearchParams(req_params).toString();
        fetch("./audit_settings?" + params)
            .then((r) => r.text())
            .then(function (r_orig) {
                let r = JSON.parse(r_orig);
                personalized_models = r["personalized_models"];
                personalized_model = personalized_models[0]; // TEMP
                console.log("personalized_model", personalized_model);
                // personalized_model = "model_1632886687_iterA";
                // let clusters = r["clusters"];
                // topic = clusters[0]; // TEMP
            });

        // fetch("./audit_settings")
        //     .then((r) => r.text())
        //     .then(function (r_orig) {
        //         let r = JSON.parse(r_orig);
        //         personalized_models = r["personalized_models"];
        //         personalized_model = personalized_models[0]; // TEMP
        //         // personalized_model = "model_1632886687_iterA";
        //         let clusters = r["clusters"];
        //         topic = clusters[0]; // TEMP
        //     });
    }
    onMount(async () => {
        getAuditSettings();
        getUsers();
    });
</script>

<svelte:head>
    <title>IndieLabel</title>
</svelte:head>

<main>
    {#if mode == "explore"}
        <div>
            <Explore />
        </div>
    {:else if mode == "results"}
        <div>
            <Results />
        </div>
    {:else if mode == "study_links"}
        <div>
            <StudyLinks />
        </div>
    {:else }
        <SelectUserDialog bind:open={user_dialog_open} cur_user={cur_user} />
        <div>
            {#key personalized_model }
                <HypothesisPanel model={personalized_model} bind:user_dialog_open={user_dialog_open}/>
            {/key}

            <MainPanel bind:model={personalized_model} bind:error_type={error_type} on:change />
        </div>
    {/if}
</main>

<style>
</style>
indie_label_svelte/src/AppOld.svelte
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<svelte:head>
|
2 |
+
<title>IndieLabel</title>
|
3 |
+
</svelte:head>
|
4 |
+
|
5 |
+
<script lang="ts">
|
6 |
+
import { onMount } from "svelte";
|
7 |
+
import Section from "./Section.svelte";
|
8 |
+
import IterativeClustering from "./IterativeClustering.svelte";
|
9 |
+
import OverallResults from "./OverallResults.svelte";
|
10 |
+
import Labeling from "./Labeling.svelte";
|
11 |
+
import HypothesisPanel from "./HypothesisPanel.svelte"
|
12 |
+
|
13 |
+
let personalized_model;
|
14 |
+
let personalized_models = [];
|
15 |
+
let breakdown_category;
|
16 |
+
let breakdown_categories = [];
|
17 |
+
let systems = ["Perspective comment toxicity classifier"]; // Only one system for now
|
18 |
+
let clusters = [];
|
19 |
+
let promise = Promise.resolve(null);
|
20 |
+
|
21 |
+
function getAuditSettings() {
|
22 |
+
fetch("./audit_settings")
|
23 |
+
.then((r) => r.text())
|
24 |
+
.then(function (r_orig) {
|
25 |
+
let r = JSON.parse(r_orig);
|
26 |
+
breakdown_categories = r["breakdown_categories"];
|
27 |
+
breakdown_category = breakdown_categories[0];
|
28 |
+
personalized_models = r["personalized_models"];
|
29 |
+
personalized_model = personalized_models[0];
|
30 |
+
clusters = r["clusters"];
|
31 |
+
});
|
32 |
+
}
|
33 |
+
onMount(async () => {
|
34 |
+
getAuditSettings();
|
35 |
+
});
|
36 |
+
|
37 |
+
function handleAuditButton() {
|
38 |
+
promise = getAudit();
|
39 |
+
}
|
40 |
+
|
41 |
+
async function getAudit() {
|
42 |
+
let req_params = {
|
43 |
+
pers_model: personalized_model,
|
44 |
+
breakdown_axis: breakdown_category,
|
45 |
+
perf_metric: "avg_diff",
|
46 |
+
breakdown_sort: "difference",
|
47 |
+
n_topics: 10,
|
48 |
+
};
|
49 |
+
let params = new URLSearchParams(req_params).toString();
|
50 |
+
const response = await fetch("./get_audit?" + params);
|
51 |
+
const text = await response.text();
|
52 |
+
const data = JSON.parse(text);
|
53 |
+
return data;
|
54 |
+
}
|
55 |
+
|
56 |
+
</script>
|
57 |
+
|
58 |
+
<main>
|
59 |
+
<HypothesisPanel model={personalized_model} />
|
60 |
+
|
61 |
+
<Labeling />
|
62 |
+
|
63 |
+
<IterativeClustering clusters={clusters} ind={1} personalized_model={personalized_model} />
|
64 |
+
|
65 |
+
<div id="audit-settings" class="section">
|
66 |
+
<h5>Audit settings</h5>
|
67 |
+
<Section
|
68 |
+
section_id="systems"
|
69 |
+
section_title="What status-quo system would you like to audit?"
|
70 |
+
section_opts={systems}
|
71 |
+
bind:value={systems[0]}
|
72 |
+
/>
|
73 |
+
<Section
|
74 |
+
section_id="personalized_model"
|
75 |
+
section_title="What model would you like to use to represent your views?"
|
76 |
+
section_opts={personalized_models}
|
77 |
+
bind:value={personalized_model}
|
78 |
+
/>
|
79 |
+
<Section
|
80 |
+
section_id="breakdown_category"
|
81 |
+
section_title="How would you like to explore the performance of the system?"
|
82 |
+
section_opts={breakdown_categories}
|
83 |
+
bind:value={breakdown_category}
|
84 |
+
/>
|
85 |
+
<button on:click={handleAuditButton}> Generate results </button>
|
86 |
+
<div>
|
87 |
+
Personalized model: {personalized_model}, Breakdown category: {breakdown_category}
|
88 |
+
</div>
|
89 |
+
</div>
|
90 |
+
|
91 |
+
{#await promise}
|
92 |
+
<p>...waiting</p>
|
93 |
+
{:then audit_results}
|
94 |
+
{#if audit_results}
|
95 |
+
<OverallResults data={audit_results} clusters={clusters} personalized_model={personalized_model} />
|
96 |
+
{/if}
|
97 |
+
{:catch error}
|
98 |
+
<p style="color: red">{error.message}</p>
|
99 |
+
{/await}
|
100 |
+
</main>
|
101 |
+
|
102 |
+
<style>
|
103 |
+
main {
|
104 |
+
text-align: left;
|
105 |
+
padding: 1em;
|
106 |
+
max-width: 240px;
|
107 |
+
margin: 0 0;
|
108 |
+
}
|
109 |
+
h3 {
|
110 |
+
color: rgb(80, 80, 80);
|
111 |
+
font-size: 30px;
|
112 |
+
}
|
113 |
+
h5 {
|
114 |
+
color: rgb(80, 80, 80);
|
115 |
+
font-size: 25px;
|
116 |
+
}
|
117 |
+
h6 {
|
118 |
+
margin-top: 50px;
|
119 |
+
text-transform: uppercase;
|
120 |
+
font-size: 14px;
|
121 |
+
}
|
122 |
+
@media (min-width: 640px) {
|
123 |
+
main {
|
124 |
+
max-width: none;
|
125 |
+
}
|
126 |
+
}
|
127 |
+
</style>
|
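The getAuditSettings() and getAudit() functions above repeat the same fetch → text → JSON.parse sequence that most of the components below also use for their Flask endpoints. The sketch below factors that pattern out; fetchJson is a hypothetical helper name and is not part of this repository.

```
// Hypothetical helper (not in this repo): the fetch -> text -> JSON.parse pattern
// used by getAuditSettings() and getAudit() above, factored into one function.
async function fetchJson(
    endpoint: string,
    req_params: Record<string, string | number | boolean> = {}
): Promise<any> {
    // Stringify values explicitly so numbers (e.g. n_topics: 10) serialize predictably.
    const pairs = Object.entries(req_params).map(([k, v]) => [k, String(v)]);
    const params = new URLSearchParams(pairs).toString();
    const response = await fetch(params ? `${endpoint}?${params}` : endpoint);
    const text = await response.text();
    return JSON.parse(text);
}

// Example mirroring getAudit() above:
// const data = await fetchJson("./get_audit", {
//     pers_model: personalized_model,
//     breakdown_axis: breakdown_category,
//     perf_metric: "avg_diff",
//     breakdown_sort: "difference",
//     n_topics: 10,
// });
```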
indie_label_svelte/src/Auditing.svelte
ADDED
@@ -0,0 +1,464 @@
1 |
+
<script lang="ts">
|
2 |
+
import { onMount } from "svelte";
|
3 |
+
import Section from "./Section.svelte";
|
4 |
+
import KeywordSearch from "./KeywordSearch.svelte";
|
5 |
+
import OverallResults from "./OverallResults.svelte";
|
6 |
+
import ClusterResults from "./ClusterResults.svelte";
|
7 |
+
import HelpTooltip from "./HelpTooltip.svelte";
|
8 |
+
import TopicTraining from "./TopicTraining.svelte";
|
9 |
+
|
10 |
+
import { user } from './stores/cur_user_store.js';
|
11 |
+
import { error_type } from './stores/error_type_store.js';
|
12 |
+
import { topic_chosen } from './stores/cur_topic_store.js';
|
13 |
+
import { model_chosen } from './stores/cur_model_store.js';
|
14 |
+
|
15 |
+
import Button, { Label } from "@smui/button";
|
16 |
+
import LinearProgress from "@smui/linear-progress";
|
17 |
+
import LayoutGrid, { Cell } from "@smui/layout-grid";
|
18 |
+
import Radio from '@smui/radio';
|
19 |
+
import FormField from '@smui/form-field';
|
20 |
+
import Card, { Content } from '@smui/card';
|
21 |
+
import { Wrapper } from '@smui/tooltip';
|
22 |
+
import IconButton from '@smui/icon-button';
|
23 |
+
import Select, { Option } from "@smui/select";
|
24 |
+
import Svelecte from '../node_modules/svelecte/src/Svelecte.svelte';
|
25 |
+
|
26 |
+
export let personalized_model;
|
27 |
+
// export let topic;
|
28 |
+
export let cur_error_type = "Both";
|
29 |
+
|
30 |
+
let evidence = [];
|
31 |
+
let show_audit_settings = false;
|
32 |
+
|
33 |
+
let error_type_options = [
|
34 |
+
{
|
35 |
+
"opt": 'Both',
|
36 |
+
"descr": '(System is under- or over-sensitive)',
|
37 |
+
"help": "View both types of potential system errors"
|
38 |
+
},
|
39 |
+
{
|
40 |
+
"opt": 'System is under-sensitive',
|
41 |
+
"descr": '(Incorrectly rates as non-toxic)',
|
42 |
+
"help": "Focus on system errors where the system labeled content as Non-toxic when it should have been labeled as Toxic."
|
43 |
+
},
|
44 |
+
{
|
45 |
+
"opt": 'System is over-sensitive',
|
46 |
+
"descr": '(Incorrectly rates as toxic)',
|
47 |
+
"help": "Focus on system errors where the system labeled content as Toxic when it should have been labeled as Non-toxic."
|
48 |
+
},
|
49 |
+
{
|
50 |
+
"opt": 'Show errors and non-errors',
|
51 |
+
"descr": '',
|
52 |
+
"help": "Also show cases that are not likely to be potential errors"
|
53 |
+
},
|
54 |
+
]
|
55 |
+
|
56 |
+
let personalized_models = [];
|
57 |
+
let breakdown_category;
|
58 |
+
let breakdown_categories = [];
|
59 |
+
let systems = ["YouSocial comment toxicity classifier"]; // Only one system for now
|
60 |
+
let clusters = [];
|
61 |
+
let clusters_for_tuning = [];
|
62 |
+
let promise = Promise.resolve(null);
|
63 |
+
|
64 |
+
// Handle routing
|
65 |
+
let searchParams = new URLSearchParams(window.location.search);
|
66 |
+
let scaffold_method = searchParams.get("scaffold");
|
67 |
+
let mode = searchParams.get("mode");
|
68 |
+
let topic_vis_method = searchParams.get("topic_vis_method");
|
69 |
+
|
70 |
+
// Set audit type
|
71 |
+
let audit_types = [
|
72 |
+
"All topic exploration",
|
73 |
+
"Single topic exploration"
|
74 |
+
];
|
75 |
+
let audit_type;
|
76 |
+
if (scaffold_method == "fixed" || scaffold_method == "personal" || scaffold_method == "personal_group" || scaffold_method == "personal_test" || scaffold_method == "personal_cluster" || scaffold_method == "topic_train" || scaffold_method == "prompts") {
|
77 |
+
audit_type = audit_types[1];
|
78 |
+
// audit_type = audit_types[0];
|
79 |
+
} else {
|
80 |
+
// No scaffolding mode or tutorial
|
81 |
+
audit_type = audit_types[0];
|
82 |
+
}
|
83 |
+
|
84 |
+
let show_topic_training = false;
|
85 |
+
if (scaffold_method == "topic_train") {
|
86 |
+
show_topic_training = true;
|
87 |
+
}
|
88 |
+
|
89 |
+
// Handle non-model mode
|
90 |
+
let use_model = true;
|
91 |
+
if (mode == "no_model") {
|
92 |
+
use_model = false;
|
93 |
+
cur_error_type = "Show errors and non-errors";
|
94 |
+
}
|
95 |
+
|
96 |
+
// Handle group model
|
97 |
+
let use_group_model = false;
|
98 |
+
if (scaffold_method == "personal_group") {
|
99 |
+
use_group_model = true;
|
100 |
+
}
|
101 |
+
|
102 |
+
// TEMP
|
103 |
+
let promise_cluster = Promise.resolve(null);
|
104 |
+
|
105 |
+
// Get current user from store
|
106 |
+
let cur_user;
|
107 |
+
user.subscribe(value => {
|
108 |
+
if (value != cur_user) {
|
109 |
+
cur_user = value;
|
110 |
+
personalized_model = "";
|
111 |
+
getAuditSettings();
|
112 |
+
}
|
113 |
+
});
|
114 |
+
|
115 |
+
// Get current topic from store
|
116 |
+
let topic;
|
117 |
+
topic_chosen.subscribe(value => {
|
118 |
+
topic = value;
|
119 |
+
handleClusterButton(); // re-render cluster results
|
120 |
+
});
|
121 |
+
|
122 |
+
// Get current model from store
|
123 |
+
model_chosen.subscribe(value => {
|
124 |
+
personalized_model = value;
|
125 |
+
// Add to personalized_models if not there
|
126 |
+
if (!personalized_models.includes(personalized_model)) {
|
127 |
+
personalized_models.push(personalized_model);
|
128 |
+
}
|
129 |
+
|
130 |
+
handleClusterButton(); // re-render cluster results
|
131 |
+
});
|
132 |
+
|
133 |
+
// Save current error type
|
134 |
+
async function updateErrorType() {
|
135 |
+
error_type.update((value) => cur_error_type);
|
136 |
+
handleAuditButton();
|
137 |
+
handleClusterButton();
|
138 |
+
}
|
139 |
+
|
140 |
+
// Handle topic-specific training
|
141 |
+
// let topic_training = null;
|
142 |
+
|
143 |
+
async function updateTopicChosen() {
|
144 |
+
if (topic != null) {
|
145 |
+
console.log("updateTopicChosen", topic)
|
146 |
+
topic_chosen.update((value) => topic);
|
147 |
+
}
|
148 |
+
}
|
149 |
+
|
150 |
+
function getAuditSettings() {
|
151 |
+
let req_params = {
|
152 |
+
user: cur_user,
|
153 |
+
scaffold_method: scaffold_method,
|
154 |
+
};
|
155 |
+
let params = new URLSearchParams(req_params).toString();
|
156 |
+
fetch("./audit_settings?" + params)
|
157 |
+
.then((r) => r.text())
|
158 |
+
.then(function (r_orig) {
|
159 |
+
let r = JSON.parse(r_orig);
|
160 |
+
breakdown_categories = r["breakdown_categories"];
|
161 |
+
breakdown_category = breakdown_categories[0];
|
162 |
+
personalized_models = r["personalized_models"];
|
163 |
+
if (use_group_model) {
|
164 |
+
let personalized_model_grp = r["personalized_model_grp"];
|
165 |
+
personalized_model = personalized_model_grp[0];
|
166 |
+
} else {
|
167 |
+
personalized_model = personalized_models[0]; // TEMP
|
168 |
+
}
|
169 |
+
|
170 |
+
model_chosen.update((value) => personalized_model);
|
171 |
+
clusters = r["clusters"];
|
172 |
+
clusters_for_tuning = r["clusters_for_tuning"];
|
173 |
+
console.log("clusters", clusters); // TEMP
|
174 |
+
topic = clusters[0]["options"][0]["text"];
|
175 |
+
topic_chosen.update((value) => topic);
|
176 |
+
handleAuditButton(); // TEMP
|
177 |
+
handleClusterButton(); // TEMP
|
178 |
+
});
|
179 |
+
}
|
180 |
+
onMount(async () => {
|
181 |
+
getAuditSettings();
|
182 |
+
});
|
183 |
+
|
184 |
+
function handleAuditButton() {
|
185 |
+
model_chosen.update((value) => personalized_model);
|
186 |
+
promise = getAudit();
|
187 |
+
}
|
188 |
+
|
189 |
+
async function getAudit() {
|
190 |
+
let req_params = {
|
191 |
+
pers_model: personalized_model,
|
192 |
+
breakdown_axis: breakdown_category,
|
193 |
+
perf_metric: "avg_diff",
|
194 |
+
breakdown_sort: "difference",
|
195 |
+
n_topics: 10,
|
196 |
+
error_type: "Both", // Only allow both error types
|
197 |
+
cur_user: cur_user,
|
198 |
+
topic_vis_method: topic_vis_method,
|
199 |
+
};
|
200 |
+
let params = new URLSearchParams(req_params).toString();
|
201 |
+
const response = await fetch("./get_audit?" + params);
|
202 |
+
const text = await response.text();
|
203 |
+
const data = JSON.parse(text);
|
204 |
+
return data;
|
205 |
+
}
|
206 |
+
|
207 |
+
function handleClusterButton() {
|
208 |
+
promise_cluster = getCluster();
|
209 |
+
}
|
210 |
+
|
211 |
+
async function getCluster() {
|
212 |
+
if (personalized_model == "" || personalized_model == undefined) {
|
213 |
+
return null;
|
214 |
+
}
|
215 |
+
let req_params = {
|
216 |
+
cluster: topic,
|
217 |
+
topic_df_ids: [],
|
218 |
+
n_examples: 500, // TEMP
|
219 |
+
pers_model: personalized_model,
|
220 |
+
example_sort: "descending", // TEMP
|
221 |
+
comparison_group: "status_quo", // TEMP
|
222 |
+
search_type: "cluster",
|
223 |
+
keyword: "",
|
224 |
+
n_neighbors: 0,
|
225 |
+
error_type: cur_error_type,
|
226 |
+
use_model: use_model,
|
227 |
+
scaffold_method: scaffold_method,
|
228 |
+
};
|
229 |
+
let params = new URLSearchParams(req_params).toString();
|
230 |
+
const response = await fetch("./get_cluster_results?" + params);
|
231 |
+
const text = await response.text();
|
232 |
+
const data = JSON.parse(text);
|
233 |
+
console.log(topic);
|
234 |
+
return data;
|
235 |
+
}
|
236 |
+
</script>
|
237 |
+
|
238 |
+
<div>
|
239 |
+
<!-- 0: Audit settings -->
|
240 |
+
<div>
|
241 |
+
<div style="margin-top: 30px">
|
242 |
+
<span class="head_3">Auditing</span>
|
243 |
+
<IconButton
|
244 |
+
class="material-icons grey_button"
|
245 |
+
size="normal"
|
246 |
+
on:click={() => (show_audit_settings = !show_audit_settings)}
|
247 |
+
>
|
248 |
+
help_outline
|
249 |
+
</IconButton>
|
250 |
+
</div>
|
251 |
+
<div style="width: 80%">
|
252 |
+
<p>In this section, we'll be auditing the content moderation system. Here, you’ll be aided by a personalized model that will help direct your attention towards potential problem areas in the system’s performance. This model isn’t meant to be perfect, but is designed to help you better focus on areas that need human review.</p>
|
253 |
+
</div>
|
254 |
+
|
255 |
+
{#if show_audit_settings}
|
256 |
+
<div class="audit_section">
|
257 |
+
<div class="head_5">Audit settings</div>
|
258 |
+
<div style="width: 50%">
|
259 |
+
<p>Choose your audit settings here. These settings will affect all of the visualizations that follow, so you can return here later to make changes.</p>
|
260 |
+
</div>
|
261 |
+
<div class="section_indent">
|
262 |
+
<Section
|
263 |
+
section_id="systems"
|
264 |
+
section_title="What status-quo system would you like to audit?"
|
265 |
+
section_opts={systems}
|
266 |
+
bind:value={systems[0]}
|
267 |
+
width_pct={40}
|
268 |
+
/>
|
269 |
+
{#key personalized_model}
|
270 |
+
<Section
|
271 |
+
section_id="personalized_model"
|
272 |
+
section_title="What model would you like to use to represent your views?"
|
273 |
+
section_opts={personalized_models}
|
274 |
+
bind:value={personalized_model}
|
275 |
+
width_pct={40}
|
276 |
+
on:change
|
277 |
+
/>
|
278 |
+
{/key}
|
279 |
+
|
280 |
+
<Section
|
281 |
+
section_id="audit_type"
|
282 |
+
section_title="What type of audit are you conducting?"
|
283 |
+
section_opts={audit_types}
|
284 |
+
bind:value={audit_type}
|
285 |
+
width_pct={40}
|
286 |
+
on:change
|
287 |
+
/>
|
288 |
+
|
289 |
+
<LayoutGrid>
|
290 |
+
<Cell span={7}>
|
291 |
+
<Button
|
292 |
+
on:click={handleAuditButton}
|
293 |
+
variant="outlined"
|
294 |
+
class="button_float_right"
|
295 |
+
>
|
296 |
+
<Label>Start your audit</Label>
|
297 |
+
</Button>
|
298 |
+
</Cell>
|
299 |
+
</LayoutGrid>
|
300 |
+
</div>
|
301 |
+
</div>
|
302 |
+
<p>Current model: {personalized_model}</p>
|
303 |
+
{/if}
|
304 |
+
</div>
|
305 |
+
|
306 |
+
<!-- 1: All topics overview -->
|
307 |
+
{#if audit_type == audit_types[0]}
|
308 |
+
<div class="audit_section">
|
309 |
+
<div class="head_5">Overview of all topics</div>
|
310 |
+
<p>First, browse the system's performance across the different auto-generated comment topic areas.</p>
|
311 |
+
|
312 |
+
<div class="section_indent">
|
313 |
+
{#await promise}
|
314 |
+
<div class="app_loading">
|
315 |
+
<LinearProgress indeterminate />
|
316 |
+
</div>
|
317 |
+
{:then audit_results}
|
318 |
+
{#if audit_results}
|
319 |
+
<OverallResults
|
320 |
+
data={audit_results}
|
321 |
+
clusters={clusters}
|
322 |
+
personalized_model={personalized_model}
|
323 |
+
cluster={topic}
|
324 |
+
/>
|
325 |
+
{/if}
|
326 |
+
{:catch error}
|
327 |
+
<p style="color: red">{error.message}</p>
|
328 |
+
{/await}
|
329 |
+
</div>
|
330 |
+
</div>
|
331 |
+
{/if}
|
332 |
+
|
333 |
+
<!-- 2a: Topic training -->
|
334 |
+
{#if show_topic_training}
|
335 |
+
<div class="audit_section">
|
336 |
+
<div class="head_5">Topic model training</div>
|
337 |
+
<p></p>
|
338 |
+
<div class="section_indent">
|
339 |
+
<div>
|
340 |
+
<p>In what topic area would you like to tune your model?</p>
|
341 |
+
<Svelecte
|
342 |
+
options={clusters_for_tuning}
|
343 |
+
labelAsValue={true}
|
344 |
+
bind:value={topic}
|
345 |
+
placeholder="Select topic"
|
346 |
+
on:change={null}
|
347 |
+
style="width: 50%"
|
348 |
+
>
|
349 |
+
</Svelecte>
|
350 |
+
</div>
|
351 |
+
|
352 |
+
<div style="padding-top: 30px">
|
353 |
+
<!-- Labeling -->
|
354 |
+
<h6>Comments to label</h6>
|
355 |
+
<ul>
|
356 |
+
<li>
|
357 |
+
Comments with scores <b>0</b> and <b>1</b> will be allowed to <b>remain</b> on the platform.
|
358 |
+
</li>
|
359 |
+
<li>
|
360 |
+
Comments with scores <b>2</b>, <b>3</b>, or <b>4</b> will be <b>deleted</b> from the platform.
|
361 |
+
</li>
|
362 |
+
<li>
|
363 |
+
Given that some comments may lack context, if you're not sure, feel free to mark the <b>unsure</b> option to skip a comment.
|
364 |
+
</li>
|
365 |
+
</ul>
|
366 |
+
{#key topic}
|
367 |
+
<TopicTraining topic={topic} />
|
368 |
+
{/key}
|
369 |
+
</div>
|
370 |
+
|
371 |
+
</div>
|
372 |
+
</div>
|
373 |
+
{/if}
|
374 |
+
|
375 |
+
<!-- 2: Topic overview -->
|
376 |
+
<div class="audit_section">
|
377 |
+
<div class="head_5">Topic exploration</div>
|
378 |
+
<p></p>
|
379 |
+
<div class="section_indent">
|
380 |
+
<div>
|
381 |
+
<div>
|
382 |
+
<p><b>What topic would you like to explore further?</b></p>
|
383 |
+
<Svelecte
|
384 |
+
options={clusters}
|
385 |
+
labelAsValue={true}
|
386 |
+
bind:value={topic}
|
387 |
+
placeholder="Select topic"
|
388 |
+
on:change={updateTopicChosen}
|
389 |
+
style="width: 50%"
|
390 |
+
>
|
391 |
+
</Svelecte>
|
392 |
+
</div>
|
393 |
+
|
394 |
+
{#if use_model}
|
395 |
+
<div style="padding-top: 30px">
|
396 |
+
<p><b>What kind of system errors do you want to focus on?</b></p>
|
397 |
+
{#each error_type_options as e}
|
398 |
+
<div style="display: flex; align-items: center;">
|
399 |
+
<Wrapper rich>
|
400 |
+
<FormField>
|
401 |
+
<Radio bind:group={cur_error_type} value={e.opt} on:change={updateErrorType} color="secondary" />
|
402 |
+
<span slot="label">
|
403 |
+
<b>{e.opt}</b> {e.descr}
|
404 |
+
<IconButton class="material-icons" size="button" disabled>help_outline</IconButton>
|
405 |
+
</span>
|
406 |
+
</FormField>
|
407 |
+
<HelpTooltip text={e.help} />
|
408 |
+
</Wrapper>
|
409 |
+
</div>
|
410 |
+
{/each}
|
411 |
+
</div>
|
412 |
+
{/if}
|
413 |
+
</div>
|
414 |
+
|
415 |
+
<div style="padding-top: 30px">
|
416 |
+
{#await promise_cluster}
|
417 |
+
<div class="app_loading">
|
418 |
+
<LinearProgress indeterminate />
|
419 |
+
</div>
|
420 |
+
{:then cluster_results}
|
421 |
+
{#if cluster_results}
|
422 |
+
{#if topic}
|
423 |
+
<ClusterResults
|
424 |
+
cluster={topic}
|
425 |
+
clusters={clusters}
|
426 |
+
model={personalized_model}
|
427 |
+
data={cluster_results}
|
428 |
+
table_width_pct={90}
|
429 |
+
table_id={"main"}
|
430 |
+
use_model={use_model}
|
431 |
+
bind:evidence={evidence}
|
432 |
+
on:change
|
433 |
+
/>
|
434 |
+
{/if}
|
435 |
+
{/if}
|
436 |
+
{:catch error}
|
437 |
+
<p style="color: red">{error.message}</p>
|
438 |
+
{/await}
|
439 |
+
</div>
|
440 |
+
|
441 |
+
</div>
|
442 |
+
</div>
|
443 |
+
|
444 |
+
<!-- 3: Gather evidence -->
|
445 |
+
<div class="audit_section">
|
446 |
+
<div class="head_5">Gather additional evidence</div>
|
447 |
+
<p>Next, you can optionally search for more comments to serve as evidence through manual keyword search (for individual words or phrases).</p>
|
448 |
+
<div class="section_indent">
|
449 |
+
{#key error_type}
|
450 |
+
<KeywordSearch clusters={clusters} personalized_model={personalized_model} bind:evidence={evidence} use_model={use_model} on:change/>
|
451 |
+
{/key}
|
452 |
+
</div>
|
453 |
+
</div>
|
454 |
+
|
455 |
+
<!-- 4: Test hunch -->
|
456 |
+
<div class="audit_section">
|
457 |
+
<div class="head_5">Finalize your current report</div>
|
458 |
+
<p>Finally, review the report you've generated on the side panel and provide a brief summary of the problem you see. You may also list suggestions or insights into addressing this problem if you have ideas. This report will be directly used by the model developers to address the issue you've raised.</p>
|
459 |
+
</div>
|
460 |
+
|
461 |
+
</div>
|
462 |
+
|
463 |
+
<style>
|
464 |
+
</style>
|
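getAuditSettings() in Auditing.svelte above reads several fields off the ./audit_settings response. The interface below sketches that response shape as the component consumes it; the field types are inferred from usage in this file only (not from the server code), so treat them as assumptions.

```
// Sketch of the ./audit_settings response as read by getAuditSettings() above.
// Types are inferred from this component's usage, not from server.py (assumption).
interface AuditSettingsResponse {
    breakdown_categories: string[];      // first entry becomes the default breakdown_category
    personalized_models: string[];       // first entry becomes the default personalized_model
    personalized_model_grp?: string[];   // used instead when scaffold_method == "personal_group"
    clusters: Array<{ options: Array<{ text: string }> }>;            // clusters[0]["options"][0]["text"] seeds the topic
    clusters_for_tuning: Array<{ options: Array<{ text: string }> }>; // passed to Svelecte for topic tuning
}
```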
indie_label_svelte/src/ClusterResults.svelte
ADDED
@@ -0,0 +1,562 @@
1 |
+
<script lang="ts">
|
2 |
+
import { VegaLite } from "svelte-vega";
|
3 |
+
import type { View } from "svelte-vega";
|
4 |
+
import DataTable, {
|
5 |
+
Head,
|
6 |
+
Body,
|
7 |
+
Row,
|
8 |
+
Cell,
|
9 |
+
Pagination,
|
10 |
+
} from "@smui/data-table";
|
11 |
+
import Select, { Option } from "@smui/select";
|
12 |
+
import IconButton from "@smui/icon-button";
|
13 |
+
import Button from "@smui/button";
|
14 |
+
import { Label } from "@smui/common";
|
15 |
+
import Checkbox from '@smui/checkbox';
|
16 |
+
import Radio from '@smui/radio';
|
17 |
+
import FormField from '@smui/form-field';
|
18 |
+
import Tooltip, { Wrapper } from '@smui/tooltip';
|
19 |
+
import LayoutGrid, { Cell as LGCell } from "@smui/layout-grid";
|
20 |
+
import Card, { Content } from '@smui/card';
|
21 |
+
|
22 |
+
import HelpTooltip from "./HelpTooltip.svelte";
|
23 |
+
import { topic_chosen } from './stores/cur_topic_store.js';
|
24 |
+
import { new_evidence } from './stores/new_evidence_store.js';
|
25 |
+
import { open_evidence } from './stores/open_evidence_store.js';
|
26 |
+
|
27 |
+
export let data;
|
28 |
+
export let cluster;
|
29 |
+
export let clusters = null;
|
30 |
+
export let model;
|
31 |
+
export let show_vis = true;
|
32 |
+
export let show_checkboxes = true;
|
33 |
+
export let table_width_pct = 80;
|
34 |
+
export let rowsPerPage = 10;
|
35 |
+
export let evidence;
|
36 |
+
export let table_id;
|
37 |
+
export let use_model = true;
|
38 |
+
|
39 |
+
let N_COMMENTS = 500;
|
40 |
+
let show_num_ratings = false;
|
41 |
+
let show_your_decision_ratings = false;
|
42 |
+
let show_step2_info = false;
|
43 |
+
|
44 |
+
let comment_table_style;
|
45 |
+
if (show_checkboxes) {
|
46 |
+
comment_table_style = ""
|
47 |
+
} else {
|
48 |
+
comment_table_style = "comment_table_small"
|
49 |
+
}
|
50 |
+
|
51 |
+
// Handle Altair selections
|
52 |
+
let selected_comment_id = 0;
|
53 |
+
window.addEventListener("popstate", function (event) {
|
54 |
+
// On browser history change, check the URL hash to jump to a comment or switch topics
|
55 |
+
let cur_url = window.location.href;
|
56 |
+
let cur_url_elems = cur_url.split("#");
|
57 |
+
// console.log(cur_url_elems)
|
58 |
+
if (cur_url_elems.length > 0) {
|
59 |
+
let path = cur_url_elems[2];
|
60 |
+
if (path == "comment") {
|
61 |
+
let comment_id = cur_url_elems[1].split("/")[0];
|
62 |
+
console.log("comment_id", comment_id)
|
63 |
+
selected_comment_id = parseInt(comment_id);
|
64 |
+
let table_ind = null;
|
65 |
+
for (let i = 0; i < items.length; i++) {
|
66 |
+
if (items[i]["id"] == selected_comment_id) {
|
67 |
+
table_ind = i;
|
68 |
+
break;
|
69 |
+
}
|
70 |
+
}
|
71 |
+
currentPage = table_ind / rowsPerPage;
|
72 |
+
} else if (path == "topic") {
|
73 |
+
let topic = cur_url_elems[1].split("/")[0];
|
74 |
+
topic_chosen.update((value) => topic); // update in store
|
75 |
+
}
|
76 |
+
// window.history.replaceState({}, document.title, "/"); // remove URL parameter
|
77 |
+
}
|
78 |
+
});
|
79 |
+
|
80 |
+
// Cluster Overview Plot
|
81 |
+
let cluster_overview_data = null;
|
82 |
+
let cluster_overview_spec = null;
|
83 |
+
let cluster_overview_view = null;
|
84 |
+
if (show_vis) {
|
85 |
+
let cluster_overview_json = data["cluster_overview_plot_json"];
|
86 |
+
cluster_overview_data =
|
87 |
+
cluster_overview_json["datasets"][
|
88 |
+
cluster_overview_json["layer"][0]["data"]["name"]
|
89 |
+
];
|
90 |
+
cluster_overview_spec = cluster_overview_json;
|
91 |
+
cluster_overview_view = null;
|
92 |
+
}
|
93 |
+
|
94 |
+
type ClusterComment = {
|
95 |
+
id: number;
|
96 |
+
comment: string;
|
97 |
+
user_decision: string;
|
98 |
+
user_rating: number;
|
99 |
+
system_decision: string;
|
100 |
+
system_rating: number;
|
101 |
+
user_color: string;
|
102 |
+
system_color: string;
|
103 |
+
error_type: string;
|
104 |
+
error_color: string;
|
105 |
+
judgment: string;
|
106 |
+
toxicity_category: string;
|
107 |
+
};
|
108 |
+
let items: ClusterComment[];
|
109 |
+
|
110 |
+
let selected = [];
|
111 |
+
|
112 |
+
// Pagination
|
113 |
+
let currentPage = 0;
|
114 |
+
$: start = currentPage * rowsPerPage;
|
115 |
+
$: end = Math.min(start + rowsPerPage, items.length);
|
116 |
+
$: slice = items.slice(start, end);
|
117 |
+
$: lastPage = Math.max(Math.ceil(items.length / rowsPerPage) - 1, 0);
|
118 |
+
|
119 |
+
$: if (currentPage > lastPage) {
|
120 |
+
currentPage = lastPage;
|
121 |
+
}
|
122 |
+
|
123 |
+
let set_length = 0;
|
124 |
+
// if (typeof(data["cluster_comments"] == "string")) {
|
125 |
+
if (show_checkboxes) {
|
126 |
+
items = JSON.parse(data["cluster_comments"]);
|
127 |
+
set_length = data["topic_df_ids"].length;
|
128 |
+
} else {
|
129 |
+
items = data["cluster_comments"];
|
130 |
+
set_length = items.length;
|
131 |
+
}
|
132 |
+
// console.log(set_length);
|
133 |
+
|
134 |
+
let cur_open_evidence;
|
135 |
+
open_evidence.subscribe(value => {
|
136 |
+
cur_open_evidence = value;
|
137 |
+
});
|
138 |
+
|
139 |
+
function saveToEvidence() {
|
140 |
+
new_evidence.update((value) => []); // clear prior evidence
|
141 |
+
selected.forEach(function(s) {
|
142 |
+
if (!cur_open_evidence.includes(s)) {
|
143 |
+
new_evidence.update((value) => s); // update in store
|
144 |
+
}
|
145 |
+
});
|
146 |
+
selected = [];
|
147 |
+
|
148 |
+
// Clear highlighted rows
|
149 |
+
let rows = document.getElementsByTagName("tr");
|
150 |
+
let row_list = Array.prototype.slice.call(rows);
|
151 |
+
row_list.forEach(function(r) {
|
152 |
+
r.classList.remove("mdc-data-table__row--selected");
|
153 |
+
});
|
154 |
+
|
155 |
+
let checkbox_header_divs = document.getElementsByClassName("mdc-data-table__header-row-checkbox");
|
156 |
+
let checkbox_header_list = Array.prototype.slice.call(checkbox_header_divs);
|
157 |
+
checkbox_header_list.forEach(function(c) {
|
158 |
+
let c_input = c.getElementsByTagName("input");
|
159 |
+
for (let i = 0; i < c_input.length; i++) {
|
160 |
+
c_input[i].setAttribute("data-indeterminate", "false");
|
161 |
+
c_input[i].indeterminate = false;
|
162 |
+
}
|
163 |
+
});
|
164 |
+
}
|
165 |
+
|
166 |
+
function handleAdd(comment_to_add) {
|
167 |
+
new_evidence.update((value) => []); // clear prior evidence
|
168 |
+
if (!cur_open_evidence.includes(comment_to_add)) {
|
169 |
+
new_evidence.update((value) => comment_to_add); // update in store
|
170 |
+
}
|
171 |
+
}
|
172 |
+
|
173 |
+
function handleRemove(comment_to_remove) {
|
174 |
+
// Update local open evidence
|
175 |
+
cur_open_evidence = cur_open_evidence.filter(item => item.comment != comment_to_remove)
|
176 |
+
// Update open evidence in store
|
177 |
+
open_evidence.update((value) => cur_open_evidence);
|
178 |
+
}
|
179 |
+
</script>
|
180 |
+
|
181 |
+
<div class="padding-top: 30px;">
|
182 |
+
{#if show_vis}
|
183 |
+
<div>
|
184 |
+
<span class="head_6">Topic overview: {cluster}</span>
|
185 |
+
<IconButton
|
186 |
+
class="material-icons grey_button"
|
187 |
+
size="normal"
|
188 |
+
on:click={() => (show_step2_info = !show_step2_info)}
|
189 |
+
>
|
190 |
+
help_outline
|
191 |
+
</IconButton>
|
192 |
+
</div>
|
193 |
+
{#if N_COMMENTS < set_length}
|
194 |
+
<p>Showing a random sample of {N_COMMENTS} comments (out of {set_length} comments)</p>
|
195 |
+
{:else}
|
196 |
+
<p>Showing all {set_length} comments</p>
|
197 |
+
{/if}
|
198 |
+
|
199 |
+
{#if show_step2_info}
|
200 |
+
<LayoutGrid>
|
201 |
+
<LGCell span={8}>
|
202 |
+
<div class="card-container">
|
203 |
+
<Card variant="outlined" padded>
|
204 |
+
<p class="mdc-typography--button"><b>Interpreting this visualization</b></p>
|
205 |
+
<ul>
|
206 |
+
<li>
|
207 |
+
This plot has the same layout as the <b>All Topics</b> visualization, but now, each <b>box</b> in this plot represents an <b>individual comment</b> that belongs to your <b>selected topic area</b>.
|
208 |
+
</li>
|
209 |
+
<li>
|
210 |
+
The <b>x-axis</b> represents our prediction of <b>your</b> toxicity rating for each comment (we'll call these "your ratings")
|
211 |
+
<ul>
|
212 |
+
<li>
|
213 |
+
The <b>left side</b> (white background) is the <b>Non-toxic</b> side (comments that'll be allowed to remain)
|
214 |
+
</li>
|
215 |
+
<li>
|
216 |
+
The <b>right side</b> (grey background) is the <b>Toxic</b> side (comments that will be deleted)
|
217 |
+
</li>
|
218 |
+
<li>
|
219 |
+
Comment boxes are plotted along the x-axis based on our prediction of your toxicity rating for that comment
|
220 |
+
</li>
|
221 |
+
</ul>
|
222 |
+
</li>
|
223 |
+
|
224 |
+
<li>
|
225 |
+
The <b>color</b> of the box indicates the <b>system's rating</b> for the same comment; you may want to focus on the <b>red-colored boxes</b> that indicate <b>disagreements</b> between "your ratings" and the system's ratings
|
226 |
+
</li>
|
227 |
+
</ul>
|
228 |
+
</Card>
|
229 |
+
</div>
|
230 |
+
</LGCell>
|
231 |
+
</LayoutGrid>
|
232 |
+
{/if}
|
233 |
+
|
234 |
+
<div class="row">
|
235 |
+
<div class="col s8">
|
236 |
+
<VegaLite
|
237 |
+
{cluster_overview_data}
|
238 |
+
spec={cluster_overview_spec}
|
239 |
+
bind:view={cluster_overview_view}
|
240 |
+
/>
|
241 |
+
</div>
|
242 |
+
</div>
|
243 |
+
{/if}
|
244 |
+
|
245 |
+
{#if show_checkboxes}
|
246 |
+
<h6>Comments</h6>
|
247 |
+
{/if}
|
248 |
+
<!-- Display options -->
|
249 |
+
{#if show_checkboxes}
|
250 |
+
<div>
|
251 |
+
Numerical ratings:
|
252 |
+
<FormField>
|
253 |
+
<Radio bind:group={show_num_ratings} value={true} color="secondary" />
|
254 |
+
<span slot="label">Show</span>
|
255 |
+
</FormField>
|
256 |
+
<FormField>
|
257 |
+
<Radio bind:group={show_num_ratings} value={false} color="secondary" />
|
258 |
+
<span slot="label">Hide</span>
|
259 |
+
</FormField>
|
260 |
+
</div>
|
261 |
+
{#if use_model}
|
262 |
+
<div>
|
263 |
+
Our prediction of your decision + ratings:
|
264 |
+
<FormField>
|
265 |
+
<Radio bind:group={show_your_decision_ratings} value={true} color="secondary" />
|
266 |
+
<span slot="label">Show</span>
|
267 |
+
</FormField>
|
268 |
+
<FormField>
|
269 |
+
<Radio bind:group={show_your_decision_ratings} value={false} color="secondary" />
|
270 |
+
<span slot="label">Hide</span>
|
271 |
+
</FormField>
|
272 |
+
</div>
|
273 |
+
{/if}
|
274 |
+
|
275 |
+
<!-- <Wrapper>
|
276 |
+
<IconButton class="material-icons" size="button" disabled>help_outline</IconButton>
|
277 |
+
<Tooltip>White = Non-toxic, Grey = Toxic</Tooltip>
|
278 |
+
</Wrapper> -->
|
279 |
+
{/if}
|
280 |
+
|
281 |
+
{#key evidence}
|
282 |
+
<div class="comment_table {comment_table_style}">
|
283 |
+
<DataTable
|
284 |
+
table$aria-label="Comments in the topic cluster"
|
285 |
+
style="width: {table_width_pct}%;"
|
286 |
+
>
|
287 |
+
<Head>
|
288 |
+
<Row>
|
289 |
+
<!-- {#if show_checkboxes}
|
290 |
+
<Cell checkbox>
|
291 |
+
<Checkbox />
|
292 |
+
</Cell>
|
293 |
+
{/if} -->
|
294 |
+
|
295 |
+
<Cell style="width: 50%">Comment</Cell>
|
296 |
+
|
297 |
+
{#if show_your_decision_ratings}
|
298 |
+
<Cell>Our prediction<br>of your decision</Cell>
|
299 |
+
{#if show_num_ratings}
|
300 |
+
<Cell>Our prediction<br>of your rating</Cell>
|
301 |
+
{/if}
|
302 |
+
{/if}
|
303 |
+
|
304 |
+
<Cell>
|
305 |
+
System<br>decision<br>
|
306 |
+
{#if show_checkboxes}
|
307 |
+
<span style="font-size:12px; max-width:125px">White = Non-toxic, <br>Grey = Toxic</span>
|
308 |
+
{/if}
|
309 |
+
</Cell>
|
310 |
+
{#if show_num_ratings}
|
311 |
+
<Cell>System<br>rating</Cell>
|
312 |
+
{/if}
|
313 |
+
|
314 |
+
{#if show_checkboxes}
|
315 |
+
{#if use_model}
|
316 |
+
<Cell>
|
317 |
+
Potential error<br>type<br>
|
318 |
+
{#if show_checkboxes}
|
319 |
+
<span style="font-size:12px; max-width:125px">Darker red = Greater <br>potential system error</span>
|
320 |
+
{/if}
|
321 |
+
</Cell>
|
322 |
+
|
323 |
+
<Cell>Potential toxicity<br>categories</Cell>
|
324 |
+
{/if}
|
325 |
+
{/if}
|
326 |
+
|
327 |
+
<Cell>Do you agree<br>with the system?</Cell>
|
328 |
+
|
329 |
+
{#if !show_checkboxes}
|
330 |
+
<Cell>Remove</Cell>
|
331 |
+
{/if}
|
332 |
+
|
333 |
+
{#if show_checkboxes}
|
334 |
+
<Cell>Add<br>Evidence</Cell>
|
335 |
+
{/if}
|
336 |
+
</Row>
|
337 |
+
</Head>
|
338 |
+
<Body>
|
339 |
+
{#each slice as item (item.id + table_id)}
|
340 |
+
<Row>
|
341 |
+
<!-- {#if show_checkboxes}
|
342 |
+
<Cell checkbox>
|
343 |
+
<Checkbox
|
344 |
+
bind:group={selected}
|
345 |
+
value={{
|
346 |
+
"comment": item.comment,
|
347 |
+
"user_color": item.user_color,
|
348 |
+
"user_decision": item.user_decision,
|
349 |
+
"user_rating": item.user_rating,
|
350 |
+
"system_color": item.system_color,
|
351 |
+
"system_decision": item.system_decision,
|
352 |
+
"system_rating": item.system_rating,
|
353 |
+
"error_type": item.error_type,
|
354 |
+
"error_color": item.error_color,
|
355 |
+
"toxicity_category": item.toxicity_category,
|
356 |
+
"judgment": item.judgment,
|
357 |
+
"id": item.id
|
358 |
+
}}
|
359 |
+
valueKey={item.comment}
|
360 |
+
/>
|
361 |
+
</Cell>
|
362 |
+
{/if} -->
|
363 |
+
|
364 |
+
<Cell>
|
365 |
+
{item.comment}
|
366 |
+
</Cell>
|
367 |
+
|
368 |
+
{#if show_your_decision_ratings}
|
369 |
+
<Cell style="background-color: {item.user_color}; border-left: 1px solid rgba(0,0,0,.12); border-right: 1px solid rgba(0,0,0,.12); border-collapse: collapse;">
|
370 |
+
{item.user_decision}
|
371 |
+
</Cell>
|
372 |
+
{#if show_num_ratings}
|
373 |
+
<Cell style="background-color: {item.user_color}; border-left: 1px solid rgba(0,0,0,.12); border-right: 1px solid rgba(0,0,0,.12); border-collapse: collapse;">
|
374 |
+
{item.user_rating}
|
375 |
+
</Cell>
|
376 |
+
{/if}
|
377 |
+
{/if}
|
378 |
+
|
379 |
+
<Cell style="background-color: {item.system_color}; border-left: 1px solid rgba(0,0,0,.12); border-right: 1px solid rgba(0,0,0,.12); border-collapse: collapse;">
|
380 |
+
{item.system_decision}
|
381 |
+
</Cell>
|
382 |
+
{#if show_num_ratings}
|
383 |
+
<Cell style="background-color: {item.system_color}; border-left: 1px solid rgba(0,0,0,.12); border-right: 1px solid rgba(0,0,0,.12); border-collapse: collapse;">
|
384 |
+
{item.system_rating}
|
385 |
+
</Cell>
|
386 |
+
{/if}
|
387 |
+
|
388 |
+
{#if show_checkboxes}
|
389 |
+
{#if use_model}
|
390 |
+
<Cell style="background-color: {item.error_color}; border-left: 1px solid rgba(0,0,0,.12); border-right: 1px solid rgba(0,0,0,.12); border-collapse: collapse;">
|
391 |
+
{item.error_type}
|
392 |
+
</Cell>
|
393 |
+
|
394 |
+
<Cell>
|
395 |
+
{item.toxicity_category}
|
396 |
+
</Cell>
|
397 |
+
{/if}
|
398 |
+
{/if}
|
399 |
+
|
400 |
+
<Cell>
|
401 |
+
<div>
|
402 |
+
<FormField>
|
403 |
+
<Radio
|
404 |
+
bind:group={item.judgment}
|
405 |
+
value={"Agree"}
|
406 |
+
/>
|
407 |
+
<span slot="label">Agree</span>
|
408 |
+
</FormField>
|
409 |
+
</div>
|
410 |
+
<div>
|
411 |
+
<FormField>
|
412 |
+
<Radio
|
413 |
+
bind:group={item.judgment}
|
414 |
+
value={"Disagree"}
|
415 |
+
/>
|
416 |
+
<span slot="label">Disagree</span>
|
417 |
+
</FormField>
|
418 |
+
</div>
|
419 |
+
</Cell>
|
420 |
+
|
421 |
+
{#if !show_checkboxes}
|
422 |
+
<Cell>
|
423 |
+
<IconButton class="material-icons grey_button" on:click={() => handleRemove(item.comment)}>
|
424 |
+
remove_circle_outline
|
425 |
+
</IconButton>
|
426 |
+
</Cell>
|
427 |
+
{/if}
|
428 |
+
|
429 |
+
{#if show_checkboxes}
|
430 |
+
<Cell>
|
431 |
+
<IconButton class="material-icons grey_button" on:click={() => handleAdd(item)}>
|
432 |
+
add_circle_outline
|
433 |
+
</IconButton>
|
434 |
+
</Cell>
|
435 |
+
{/if}
|
436 |
+
</Row>
|
437 |
+
{/each}
|
438 |
+
</Body>
|
439 |
+
|
440 |
+
<!-- Table pagination -->
|
441 |
+
<Pagination slot="paginate">
|
442 |
+
<svelte:fragment slot="rowsPerPage">
|
443 |
+
<Label>Rows Per Page</Label>
|
444 |
+
<Select variant="outlined" bind:value={rowsPerPage} noLabel>
|
445 |
+
<Option value={5}>5</Option>
|
446 |
+
<Option value={10}>10</Option>
|
447 |
+
<Option value={25}>25</Option>
|
448 |
+
<Option value={100}>100</Option>
|
449 |
+
</Select>
|
450 |
+
</svelte:fragment>
|
451 |
+
<svelte:fragment slot="total">
|
452 |
+
{start + 1}-{end} of {items.length}
|
453 |
+
</svelte:fragment>
|
454 |
+
|
455 |
+
<IconButton
|
456 |
+
class="material-icons"
|
457 |
+
action="first-page"
|
458 |
+
title="First page"
|
459 |
+
on:click={() => (currentPage = 0)}
|
460 |
+
disabled={currentPage === 0}>first_page</IconButton
|
461 |
+
>
|
462 |
+
<IconButton
|
463 |
+
class="material-icons"
|
464 |
+
action="prev-page"
|
465 |
+
title="Prev page"
|
466 |
+
on:click={() => currentPage--}
|
467 |
+
disabled={currentPage === 0}>chevron_left</IconButton
|
468 |
+
>
|
469 |
+
<IconButton
|
470 |
+
class="material-icons"
|
471 |
+
action="next-page"
|
472 |
+
title="Next page"
|
473 |
+
on:click={() => currentPage++}
|
474 |
+
disabled={currentPage === lastPage}
|
475 |
+
>chevron_right</IconButton
|
476 |
+
>
|
477 |
+
<IconButton
|
478 |
+
class="material-icons"
|
479 |
+
action="last-page"
|
480 |
+
title="Last page"
|
481 |
+
on:click={() => (currentPage = lastPage)}
|
482 |
+
disabled={currentPage === lastPage}>last_page</IconButton
|
483 |
+
>
|
484 |
+
</Pagination>
|
485 |
+
</DataTable>
|
486 |
+
</div>
|
487 |
+
{/key}
|
488 |
+
|
489 |
+
<!-- {#if show_checkboxes}
|
490 |
+
<div class="spacing_vert">
|
491 |
+
<Button on:click={saveToEvidence} disabled={selected.length == 0} variant="outlined">
|
492 |
+
<Label>Save {selected.length} to evidence</Label>
|
493 |
+
</Button>
|
494 |
+
</div>
|
495 |
+
{/if} -->
|
496 |
+
|
497 |
+
<!-- Old visualization -->
|
498 |
+
<!-- {#if show_vis}
|
499 |
+
<div style="margin-top: 500px">
|
500 |
+
<table>
|
501 |
+
<tbody>
|
502 |
+
<tr class="custom-blue">
|
503 |
+
<td class="bold">
|
504 |
+
Compared to the system, YOUR labels are on average...
|
505 |
+
</td>
|
506 |
+
<td>
|
507 |
+
<span class="bold-large"
|
508 |
+
>{data["user_perf_rounded"]} points
|
509 |
+
{data["user_direction"]}</span
|
510 |
+
>
|
511 |
+
for this cluster
|
512 |
+
</td>
|
513 |
+
</tr>
|
514 |
+
<tr>
|
515 |
+
<td class="bold">
|
516 |
+
Compared to the system, OTHER USERS' labels are on
|
517 |
+
average...
|
518 |
+
</td>
|
519 |
+
<td>
|
520 |
+
<span class="bold-large"
|
521 |
+
>{data["other_perf_rounded"]} points
|
522 |
+
{data["other_direction"]}</span
|
523 |
+
>
|
524 |
+
for this cluster (based on {data["n_other_users"]} randomly-sampled
|
525 |
+
users)
|
526 |
+
</td>
|
527 |
+
</tr>
|
528 |
+
<tr>
|
529 |
+
<td class="bold"> Odds ratio </td>
|
530 |
+
<td>
|
531 |
+
<span class="bold-large">{data["odds_ratio"]}</span><br />
|
532 |
+
{data["odds_ratio_explanation"]}
|
533 |
+
</td>
|
534 |
+
</tr>
|
535 |
+
</tbody>
|
536 |
+
</table>
|
537 |
+
|
538 |
+
<h6>Cluster examples</h6>
|
539 |
+
<div class="row">
|
540 |
+
<div class="col s12">
|
541 |
+
<div id="cluster_results_elem">
|
542 |
+
{@html data["cluster_examples"]}
|
543 |
+
</div>
|
544 |
+
</div>
|
545 |
+
</div>
|
546 |
+
</div>
|
547 |
+
{/if} -->
|
548 |
+
</div>
|
549 |
+
|
550 |
+
<style>
|
551 |
+
/* Styles for table */
|
552 |
+
:global(html) {
|
553 |
+
height: auto;
|
554 |
+
width: auto;
|
555 |
+
position: static;
|
556 |
+
}
|
557 |
+
:global(#sapper),
|
558 |
+
:global(body) {
|
559 |
+
display: block;
|
560 |
+
height: auto;
|
561 |
+
}
|
562 |
+
</style>
|
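The popstate listener at the top of ClusterResults.svelte splits window.location.href on "#" and reads elements [1] and [2], so it expects in-page links of the form base#&lt;comment_id&gt;/#comment or base#&lt;topic&gt;/#topic (presumably emitted as href targets by the Vega/Altair plot). The standalone sketch below shows that parsing on its own; the function name and example URL are hypothetical.

```
// Parsing sketch for the hash format expected by the popstate handler above.
// Example URLs are hypothetical; the real links come from the plot's href targets.
function parseAuditHash(href: string): { kind: "comment" | "topic"; value: string } | null {
    const parts = href.split("#");        // ".../#42/#comment" -> ["...", "42/", "comment"]
    if (parts.length < 3) return null;
    const value = parts[1].split("/")[0]; // comment id or topic name
    const kind = parts[2];
    return kind === "comment" || kind === "topic" ? { kind, value } : null;
}

// parseAuditHash("https://example.com/#42/#comment") -> { kind: "comment", value: "42" }
```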
indie_label_svelte/src/CommentTable.svelte
ADDED
@@ -0,0 +1,263 @@
1 |
+
<script lang="ts">
|
2 |
+
import { onMount } from "svelte";
|
3 |
+
import ModelPerf from "./ModelPerf.svelte";
|
4 |
+
import Button, { Label } from "@smui/button";
|
5 |
+
import DataTable, { Head, Body, Row, Cell } from "@smui/data-table";
|
6 |
+
import LinearProgress from '@smui/linear-progress';
|
7 |
+
import IconButton from '@smui/icon-button';
|
8 |
+
import { user } from './stores/cur_user_store.js';
|
9 |
+
|
10 |
+
export let mode;
|
11 |
+
export let model_name;
|
12 |
+
|
13 |
+
let to_label = {};
|
14 |
+
let promise = Promise.resolve(null);
|
15 |
+
let n_complete_ratings;
|
16 |
+
let n_unsure_ratings;
|
17 |
+
|
18 |
+
// Get current user
|
19 |
+
let cur_user;
|
20 |
+
user.subscribe(value => {
|
21 |
+
cur_user = value;
|
22 |
+
});
|
23 |
+
|
24 |
+
function getCommentsToLabel(cur_mode, n) {
|
25 |
+
if (cur_mode == "train") {
|
26 |
+
let req_params = {
|
27 |
+
n: n,
|
28 |
+
};
|
29 |
+
let params = new URLSearchParams(req_params).toString();
|
30 |
+
fetch("./get_comments_to_label?" + params)
|
31 |
+
.then((r) => r.text())
|
32 |
+
.then(function (r_orig) {
|
33 |
+
let r = JSON.parse(r_orig);
|
34 |
+
r["to_label"].forEach((key) => (to_label[key] = null));
|
35 |
+
});
|
36 |
+
} else if (cur_mode == "view") {
|
37 |
+
if (model_name != "" && model_name != undefined) {
|
38 |
+
promise = getModel(cur_mode);
|
39 |
+
}
|
40 |
+
}
|
41 |
+
}
|
42 |
+
onMount(async () => {
|
43 |
+
getCommentsToLabel(mode, 40);
|
44 |
+
});
|
45 |
+
|
46 |
+
function handleLoadCommentsButton(n = 5) {
|
47 |
+
getCommentsToLabel("train", n);
|
48 |
+
}
|
49 |
+
|
50 |
+
function handleTrainModelButton() {
|
51 |
+
promise = getModel("train");
|
52 |
+
}
|
53 |
+
|
54 |
+
function getCompleteRatings() {
|
55 |
+
let ratings = getRatings();
|
56 |
+
let complete_ratings = Object.entries(ratings).filter(([key, value]) => value != "-1");
|
57 |
+
let unsure_ratings = Object.entries(ratings).filter(([key, value]) => value == "-1");
|
58 |
+
n_complete_ratings = complete_ratings.length;
|
59 |
+
n_unsure_ratings = unsure_ratings.length;
|
60 |
+
}
|
61 |
+
|
62 |
+
function getRatings() {
|
63 |
+
// Get rating for each comment
|
64 |
+
let ratings = {};
|
65 |
+
Object.entries(to_label).forEach(function ([comment, orig_rating], i) {
|
66 |
+
var radio_btns = document.getElementsByName(
|
67 |
+
"comment_" + i.toString()
|
68 |
+
);
|
69 |
+
let length = radio_btns.length;
|
70 |
+
for (var i = 0; i < length; i++) {
|
71 |
+
if (radio_btns[i].checked) {
|
72 |
+
ratings[comment] = radio_btns[i].value;
|
73 |
+
break;
|
74 |
+
}
|
75 |
+
}
|
76 |
+
});
|
77 |
+
return ratings;
|
78 |
+
}
|
79 |
+
|
80 |
+
async function getModel(cur_mode) {
|
81 |
+
let ratings = null;
|
82 |
+
if (cur_mode == "train") {
|
83 |
+
ratings = getRatings();
|
84 |
+
ratings = JSON.stringify(ratings);
|
85 |
+
}
|
86 |
+
|
87 |
+
let req_params = {
|
88 |
+
model_name: model_name,
|
89 |
+
ratings: ratings,
|
90 |
+
mode: cur_mode,
|
91 |
+
user: cur_user,
|
92 |
+
};
|
93 |
+
let params = new URLSearchParams(req_params).toString();
|
94 |
+
const response = await fetch("./get_personalized_model?" + params);
|
95 |
+
const text = await response.text();
|
96 |
+
const data = JSON.parse(text);
|
97 |
+
to_label = data["ratings_prev"];
|
98 |
+
console.log(data);
|
99 |
+
return data;
|
100 |
+
}
|
101 |
+
</script>
|
102 |
+
|
103 |
+
<div>
|
104 |
+
<div class="label_table spacing_vert">
|
105 |
+
<DataTable
|
106 |
+
table$aria-label="Comments to label"
|
107 |
+
style="width: 100%;"
|
108 |
+
stickyHeader
|
109 |
+
>
|
110 |
+
<Head>
|
111 |
+
<Row>
|
112 |
+
<Cell style="width: 50%">Comment</Cell>
|
113 |
+
<Cell style="background-color: #c3ecdb">
|
114 |
+
0: <br>Not-at-all toxic<br>(Keep)<br>
|
115 |
+
</Cell>
|
116 |
+
<Cell style="background-color: white">
|
117 |
+
1: <br>Slightly toxic<br>(Keep)<br>
|
118 |
+
</Cell>
|
119 |
+
<Cell style="background-color: #ffa894">
|
120 |
+
2: <br>Moderately toxic<br>(Delete)<br>
|
121 |
+
</Cell>
|
122 |
+
<Cell style="background-color: #ff7a5c">
|
123 |
+
3: <br>Very toxic<br>(Delete)<br>
|
124 |
+
</Cell>
|
125 |
+
<Cell style="background-color: #d62728">
|
126 |
+
4: <br>Extremely toxic<br>(Delete)<br>
|
127 |
+
</Cell>
|
128 |
+
<Cell style="background-color: #808080">
|
129 |
+
<br>Unsure<br>(Skip)<br>
|
130 |
+
</Cell>
|
131 |
+
</Row>
|
132 |
+
</Head>
|
133 |
+
<Body>
|
134 |
+
{#if to_label}
|
135 |
+
{#each Object.keys(to_label) as comment, i}
|
136 |
+
<Row>
|
137 |
+
<Cell>
|
138 |
+
<div class="spacing_vert">{comment}</div>
|
139 |
+
</Cell>
|
140 |
+
<Cell>
|
141 |
+
<label>
|
142 |
+
<input
|
143 |
+
name="comment_{i}"
|
144 |
+
type="radio"
|
145 |
+
value="0"
|
146 |
+
checked={to_label[comment] == "0"}
|
147 |
+
/>
|
148 |
+
<span />
|
149 |
+
</label>
|
150 |
+
</Cell>
|
151 |
+
<Cell>
|
152 |
+
<label>
|
153 |
+
<input
|
154 |
+
name="comment_{i}"
|
155 |
+
type="radio"
|
156 |
+
value="1"
|
157 |
+
checked={to_label[comment] == "1"}
|
158 |
+
/>
|
159 |
+
<span />
|
160 |
+
</label>
|
161 |
+
</Cell>
|
162 |
+
<Cell>
|
163 |
+
<label>
|
164 |
+
<input
|
165 |
+
name="comment_{i}"
|
166 |
+
type="radio"
|
167 |
+
value="2"
|
168 |
+
checked={to_label[comment] == "2"}
|
169 |
+
/>
|
170 |
+
<span />
|
171 |
+
</label>
|
172 |
+
</Cell>
|
173 |
+
<Cell>
|
174 |
+
<label>
|
175 |
+
<input
|
176 |
+
name="comment_{i}"
|
177 |
+
type="radio"
|
178 |
+
value="3"
|
179 |
+
checked={to_label[comment] == "3"}
|
180 |
+
/>
|
181 |
+
<span />
|
182 |
+
</label>
|
183 |
+
</Cell>
|
184 |
+
<Cell>
|
185 |
+
<label>
|
186 |
+
<input
|
187 |
+
name="comment_{i}"
|
188 |
+
type="radio"
|
189 |
+
value="4"
|
190 |
+
checked={to_label[comment] == "4"}
|
191 |
+
/>
|
192 |
+
<span />
|
193 |
+
</label>
|
194 |
+
</Cell>
|
195 |
+
<Cell>
|
196 |
+
<label>
|
197 |
+
<input
|
198 |
+
name="comment_{i}"
|
199 |
+
type="radio"
|
200 |
+
value="-1"
|
201 |
+
checked={to_label[comment] == "-1"}
|
202 |
+
on:click={() => handleLoadCommentsButton(1)}
|
203 |
+
/>
|
204 |
+
<span />
|
205 |
+
</label>
|
206 |
+
</Cell>
|
207 |
+
</Row>
|
208 |
+
{/each}
|
209 |
+
{/if}
|
210 |
+
</Body>
|
211 |
+
</DataTable>
|
212 |
+
</div>
|
213 |
+
|
214 |
+
{#key n_complete_ratings}
|
215 |
+
{#if n_complete_ratings}
|
216 |
+
<div class="spacing_vert_40">
|
217 |
+
<p>Number labeled: {n_complete_ratings}</p>
|
218 |
+
<p>Number unsure: {n_unsure_ratings}</p>
|
219 |
+
</div>
|
220 |
+
{/if}
|
221 |
+
{/key}
|
222 |
+
|
223 |
+
<div class="spacing_vert_40">
|
224 |
+
<Button on:click={handleTrainModelButton} variant="outlined" disabled={(!n_complete_ratings) || (n_complete_ratings < 40)}>
|
225 |
+
<Label>Train Model</Label>
|
226 |
+
</Button>
|
227 |
+
<Button on:click={getCompleteRatings} variant="outlined">
|
228 |
+
<Label>Get Number of Comments Labeled</Label>
|
229 |
+
</Button>
|
230 |
+
<Button on:click={() => handleLoadCommentsButton(5)} variant="outlined">
|
231 |
+
<Label>Fetch More Comments To Label</Label>
|
232 |
+
</Button>
|
233 |
+
</div>
|
234 |
+
|
235 |
+
|
236 |
+
<!-- Performance -->
|
237 |
+
{#await promise}
|
238 |
+
<div class="app_loading spacing_vert_20">
|
239 |
+
<LinearProgress indeterminate />
|
240 |
+
</div>
|
241 |
+
{:then perf_results}
|
242 |
+
{#if perf_results}
|
243 |
+
<div class="spacing_vert_20">
|
244 |
+
<ModelPerf data={perf_results} />
|
245 |
+
</div>
|
246 |
+
{/if}
|
247 |
+
{:catch error}
|
248 |
+
<p style="color: red">{error.message}</p>
|
249 |
+
{/await}
|
250 |
+
</div>
|
251 |
+
|
252 |
+
<style>
|
253 |
+
:global(html) {
|
254 |
+
height: auto;
|
255 |
+
width: auto;
|
256 |
+
position: static;
|
257 |
+
}
|
258 |
+
:global(#sapper),
|
259 |
+
:global(body) {
|
260 |
+
display: block;
|
261 |
+
height: auto;
|
262 |
+
}
|
263 |
+
</style>
|
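getRatings() and getModel() in CommentTable.svelte above collect the radio-button values and send them to ./get_personalized_model as a JSON string inside the query parameters. The sketch below restates that request; the Ratings type and the trainPersonalizedModel name are assumptions inferred from the component, not part of the repository.

```
// Ratings as collected by getRatings() above: comment text -> selected radio value.
// "0".."4" are the toxicity levels; "-1" marks the "Unsure (Skip)" column.
type Ratings = Record<string, "0" | "1" | "2" | "3" | "4" | "-1">;

// Hypothetical wrapper around the request built in getModel() above.
async function trainPersonalizedModel(model_name: string, user: string, ratings: Ratings) {
    const params = new URLSearchParams({
        model_name,
        ratings: JSON.stringify(ratings), // ratings travel as a JSON string, as in getModel()
        mode: "train",
        user,
    }).toString();
    const response = await fetch("./get_personalized_model?" + params);
    return JSON.parse(await response.text());
}

// Usage (hypothetical values):
// await trainPersonalizedModel("my_model", "DemoUser", { "example comment": "2" });
```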
indie_label_svelte/src/Explore.svelte
ADDED
@@ -0,0 +1,149 @@
1 |
+
<script lang="ts">
|
2 |
+
import { onMount } from "svelte";
|
3 |
+
import Button, { Label } from "@smui/button";
|
4 |
+
import LinearProgress from "@smui/linear-progress";
|
5 |
+
import DataTable, {
|
6 |
+
Head,
|
7 |
+
Body,
|
8 |
+
Row,
|
9 |
+
Cell,
|
10 |
+
Label,
|
11 |
+
SortValue,
|
12 |
+
} from "@smui/data-table";
|
13 |
+
import IconButton from '@smui/icon-button';
|
14 |
+
import Radio from "@smui/radio";
|
15 |
+
import FormField from "@smui/form-field";
|
16 |
+
|
17 |
+
let cur_examples = [];
|
18 |
+
let promise = Promise.resolve(null);
|
19 |
+
|
20 |
+
// let sort_examples = false;
|
21 |
+
let sort = "rating";
|
22 |
+
let sortDirection: Lowercase<keyof typeof SortValue> = "descending";
|
23 |
+
|
24 |
+
function handleSort() {
|
25 |
+
cur_examples.sort((a, b) => {
|
26 |
+
const [aVal, bVal] = [a[sort], b[sort]][
|
27 |
+
sortDirection === "ascending" ? "slice" : "reverse"
|
28 |
+
]();
|
29 |
+
return Number(aVal) - Number(bVal);
|
30 |
+
});
|
31 |
+
cur_examples = cur_examples;
|
32 |
+
}
|
33 |
+
|
34 |
+
onMount(async () => {
|
35 |
+
promise = getExamples();
|
36 |
+
});
|
37 |
+
|
38 |
+
function handleButton() {
|
39 |
+
promise = getExamples();
|
40 |
+
}
|
41 |
+
|
42 |
+
async function getExamples() {
|
43 |
+
let req_params = {
|
44 |
+
n_examples: 20,
|
45 |
+
};
|
46 |
+
let params = new URLSearchParams(req_params).toString();
|
47 |
+
const response = await fetch("./get_explore_examples?" + params);
|
48 |
+
const text = await response.text();
|
49 |
+
const data = JSON.parse(text);
|
50 |
+
cur_examples = JSON.parse(data["examples"]);
|
51 |
+
console.log(cur_examples); // TEMP
|
52 |
+
return true;
|
53 |
+
}
|
54 |
+
</script>
|
55 |
+
|
56 |
+
<svelte:head>
|
57 |
+
<title>Explore</title>
|
58 |
+
</svelte:head>
|
59 |
+
|
60 |
+
<div class="panel">
|
61 |
+
<div class="panel_contents">
|
62 |
+
<div>
|
63 |
+
<h3>Explore System</h3>
|
64 |
+
<div style="width: 50%">
|
65 |
+
<ul>
|
66 |
+
<li>
|
67 |
+
Take a few minutes to explore some examples of <b>comments on YouSocial</b> and the toxicity ratings provided by YouSocial's <b>content moderation system</b>.
|
68 |
+
</li>
|
69 |
+
<li>
|
70 |
+
You can optionally sort by the "System rating" by clicking on the arrow button in the header.
|
71 |
+
</li>
|
72 |
+
<li>
|
73 |
+
Feel free to click the button to fetch a new sample of examples if you'd like.
|
74 |
+
</li>
|
75 |
+
</ul>
|
76 |
+
</div>
|
77 |
+
<!-- <div>
|
78 |
+
Sort order:
|
79 |
+
<FormField>
|
80 |
+
<Radio bind:group={sort_examples} value={false} color="secondary" />
|
81 |
+
<span slot="label">None</span>
|
82 |
+
</FormField>
|
83 |
+
<FormField>
|
84 |
+
<Radio bind:group={sort_examples} value={true} color="secondary" />
|
85 |
+
<span slot="label">System toxicity rating (descending)</span>
|
86 |
+
</FormField>
|
87 |
+
</div> -->
|
88 |
+
<Button on:click={handleButton} variant="outlined" class="">
|
89 |
+
<Label>Get another sample of examples</Label>
|
90 |
+
</Button>
|
91 |
+
</div>
|
92 |
+
|
93 |
+
<div style="padding-top:50px">
|
94 |
+
{#await promise}
|
95 |
+
<div class="app_loading">
|
96 |
+
<LinearProgress indeterminate />
|
97 |
+
</div>
|
98 |
+
{:then examples}
|
99 |
+
{#if cur_examples}
|
100 |
+
<DataTable
|
101 |
+
table$aria-label="Example list"
|
102 |
+
style="max-width: 100%;"
|
103 |
+
sortable
|
104 |
+
bind:sort
|
105 |
+
bind:sortDirection
|
106 |
+
on:SMUIDataTable:sorted={handleSort}
|
107 |
+
>
|
108 |
+
<Head>
|
109 |
+
<Row>
|
110 |
+
<Cell sortable={false}>
|
111 |
+
<Label>Comment</Label>
|
112 |
+
</Cell>
|
113 |
+
<Cell sortable={false}>
|
114 |
+
<Label>System decision</Label>
|
115 |
+
</Cell>
|
116 |
+
<Cell numeric columnId="rating" sortable={true}>
|
117 |
+
<IconButton class="material-icons">arrow_upward</IconButton>
|
118 |
+
<Label>System rating</Label>
|
119 |
+
</Cell>
|
120 |
+
</Row>
|
121 |
+
</Head>
|
122 |
+
<Body>
|
123 |
+
{#each cur_examples as ex (ex.item_id)}
|
124 |
+
<Row>
|
125 |
+
<Cell>{ex.comment}</Cell>
|
126 |
+
<Cell
|
127 |
+
style="background-color: {ex.system_color}; border-left: 1px solid rgba(0,0,0,.12); border-right: 1px solid rgba(0,0,0,.12); border-collapse: collapse;"
|
128 |
+
>
|
129 |
+
{ex.system_decision}
|
130 |
+
</Cell>
|
131 |
+
<Cell numeric>{Number(ex.rating)}</Cell>
|
132 |
+
</Row>
|
133 |
+
{/each}
|
134 |
+
</Body>
|
135 |
+
</DataTable>
|
136 |
+
{/if}
|
137 |
+
{:catch error}
|
138 |
+
<p style="color: red">{error.message}</p>
|
139 |
+
{/await}
|
140 |
+
</div>
|
141 |
+
</div>
|
142 |
+
</div>
|
143 |
+
|
144 |
+
<style>
|
145 |
+
.panel {
|
146 |
+
width: 80%;
|
147 |
+
padding: 50px;
|
148 |
+
}
|
149 |
+
</style>
|
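The comparator in handleSort() above switches between ascending and descending by calling either slice() (keep [aVal, bVal] order) or reverse() (swap them) on the two-element array before subtracting. The sketch below isolates that trick on a plain array of rows; the Row type and sortRows name are made-up stand-ins for illustration.

```
// Stand-in row type for illustration; the real cur_examples items carry more fields.
type Row = { rating: number };

// Same sort-direction trick as handleSort() above, applied to a copy of the rows.
function sortRows(rows: Row[], direction: "ascending" | "descending"): Row[] {
    return [...rows].sort((a, b) => {
        // slice() keeps [a, b] order (ascending); reverse() swaps to [b, a] (descending).
        const [aVal, bVal] = [a.rating, b.rating][direction === "ascending" ? "slice" : "reverse"]();
        return Number(aVal) - Number(bVal);
    });
}
```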
indie_label_svelte/src/HelpTooltip.svelte
ADDED
@@ -0,0 +1,18 @@
1 |
+
<script>
|
2 |
+
import Tooltip, {
|
3 |
+
Wrapper,
|
4 |
+
Title,
|
5 |
+
Content,
|
6 |
+
Link,
|
7 |
+
RichActions,
|
8 |
+
} from '@smui/tooltip';
|
9 |
+
import IconButton from '@smui/icon-button';
|
10 |
+
|
11 |
+
export let text;
|
12 |
+
</script>
|
13 |
+
|
14 |
+
<Tooltip>
|
15 |
+
<Content>
|
16 |
+
{text}
|
17 |
+
</Content>
|
18 |
+
</Tooltip>
|
indie_label_svelte/src/Hunch.svelte
ADDED
@@ -0,0 +1,85 @@
<script lang="ts">
    import { onMount } from "svelte";
    import IterativeClustering from "./IterativeClustering.svelte";
    import Button, { Label } from "@smui/button";
    import Textfield from '@smui/textfield';
    import LinearProgress from "@smui/linear-progress";

    export let ind;
    export let hunch;
    export let model;
    export let topic;

    let example_block = false;
    let clusters;

    function getAuditSettings() {
        fetch("./audit_settings")
            .then((r) => r.text())
            .then(function (r_orig) {
                let r = JSON.parse(r_orig);
                clusters = r["clusters"];
            });
    }
    onMount(async () => {
        getAuditSettings();
    });

    function handleTestOnExamples() {
        example_block = true;
    }
</script>

<div>
    <div>
        <!-- <h6>Hunch {ind + 1}</h6> -->
        <h6>Topic:</h6>
        {topic}
    </div>
    <div class="spacing_vert">
        <h6>Your summary/suggestions:</h6>
        <Textfield
            style="width: 100%;"
            helperLine$style="width: 100%;"
            textarea
            bind:value={hunch}
            label="My current hunch is that..."
        >
        </Textfield>
        <!-- <Button
            on:click={handleTestOnExamples}
            class="button_float_right spacing_vert"
            variant="outlined"
        >
            <Label>Test on examples</Label>
        </Button> -->
    </div>

    <div class="spacing_vert">
        <Button on:click={null} variant="outlined">
            <Label>Save</Label>
        </Button>
        <Button on:click={null} variant="outlined">
            <Label>Submit</Label>
        </Button>
    </div>

    <!-- {#await example_block}
        <div class="app_loading">
            <LinearProgress indeterminate />
        </div>
    {:then} -->
    <!-- {#if example_block}
        <IterativeClustering clusters={clusters} ind={ind + 1} personalized_model={model} />
    {/if} -->
    <!-- {:catch error}
        <p style="color: red">{error.message}</p>
    {/await} -->
</div>

<style>
    /* * {
        z-index: 11;
        overflow-x: hidden;
    } */
</style>
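
`getAuditSettings()` assumes `./audit_settings` returns JSON with a `clusters` field and never checks the HTTP status. A small sketch of the same call written with `async`/`await` and a status check; it assumes only the response shape used above and is not part of the committed code:

```
// Sketch only: mirrors the fetch in getAuditSettings() with async/await plus a
// basic status check. The response shape ({ clusters: [...] }) is assumed from
// how this component reads r["clusters"], not from a documented API contract.
async function getAuditSettingsSafe(): Promise<unknown[]> {
    const response = await fetch("./audit_settings");
    if (!response.ok) {
        throw new Error(`audit_settings request failed: ${response.status}`);
    }
    const r = JSON.parse(await response.text());
    return r["clusters"] ?? [];
}
```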
indie_label_svelte/src/HypothesisPanel.svelte
ADDED
@@ -0,0 +1,608 @@
1 |
+
<script lang="ts">
|
2 |
+
import { onMount } from "svelte";
|
3 |
+
import ClusterResults from "./ClusterResults.svelte";
|
4 |
+
import HelpTooltip from "./HelpTooltip.svelte";
|
5 |
+
|
6 |
+
import Button, { Label } from "@smui/button";
|
7 |
+
import Textfield from '@smui/textfield';
|
8 |
+
import { user } from './stores/cur_user_store.js';
|
9 |
+
import { error_type } from './stores/error_type_store.js';
|
10 |
+
import { new_evidence } from './stores/new_evidence_store.js';
|
11 |
+
import { open_evidence } from './stores/open_evidence_store.js';
|
12 |
+
import { topic_chosen } from './stores/cur_topic_store.js';
|
13 |
+
|
14 |
+
import Drawer, {
|
15 |
+
AppContent,
|
16 |
+
Content,
|
17 |
+
Header,
|
18 |
+
Title,
|
19 |
+
Subtitle,
|
20 |
+
} from '@smui/drawer';
|
21 |
+
import List, { Item, Text, Graphic, PrimaryText, SecondaryText } from '@smui/list';
|
22 |
+
import LinearProgress from "@smui/linear-progress";
|
23 |
+
import CircularProgress from '@smui/circular-progress';
|
24 |
+
import Checkbox from '@smui/checkbox';
|
25 |
+
import FormField from '@smui/form-field';
|
26 |
+
import IconButton from "@smui/icon-button";
|
27 |
+
import{ Wrapper } from '@smui/tooltip';
|
28 |
+
import Radio from '@smui/radio';
|
29 |
+
import Switch from '@smui/switch';
|
30 |
+
|
31 |
+
export let model;
|
32 |
+
// export let topic;
|
33 |
+
export let user_dialog_open;
|
34 |
+
|
35 |
+
let all_reports = [];
|
36 |
+
|
37 |
+
let cur_user;
|
38 |
+
user.subscribe(value => {
|
39 |
+
cur_user = value;
|
40 |
+
});
|
41 |
+
|
42 |
+
let cur_topic;
|
43 |
+
topic_chosen.subscribe(value => {
|
44 |
+
cur_topic = value;
|
45 |
+
});
|
46 |
+
|
47 |
+
// Handle routing
|
48 |
+
let searchParams = new URLSearchParams(window.location.search);
|
49 |
+
let scaffold_method = searchParams.get("scaffold");
|
50 |
+
let topic_vis_method = searchParams.get("topic_vis_method");
|
51 |
+
|
52 |
+
// TODO: connect to selected["error_type"] so changes on main panel affect report panel
|
53 |
+
// let cur_error_type;
|
54 |
+
// error_type.subscribe(value => {
|
55 |
+
// cur_error_type = value;
|
56 |
+
// });
|
57 |
+
|
58 |
+
// Handle drawer
|
59 |
+
let open = false;
|
60 |
+
let selected = null;
|
61 |
+
let promise = Promise.resolve(null);
|
62 |
+
let editTitle = false;
|
63 |
+
let editErrorType = false;
|
64 |
+
let unfinished_count = 0;
|
65 |
+
|
66 |
+
function setActive(value: string) {
|
67 |
+
selected = value;
|
68 |
+
// Set local and store value of open evidence to selected report's
|
69 |
+
cur_open_evidence = selected["evidence"];
|
70 |
+
open_evidence.update((value) => cur_open_evidence);
|
71 |
+
let isolated_topic = selected["title"].replace(/^(Topic: )/,'');
|
72 |
+
console.log("selected title", selected["title"]);
|
73 |
+
console.log(selected);
|
74 |
+
|
75 |
+
// Close panel
|
76 |
+
open = false;
|
77 |
+
|
78 |
+
// Update topic if in personal mode
|
79 |
+
if (scaffold_method == "personal" || scaffold_method == "personal_group" || scaffold_method == "personal_test" || scaffold_method == "tutorial") {
|
80 |
+
topic_chosen.update((value) => isolated_topic);
|
81 |
+
}
|
82 |
+
}
|
83 |
+
|
84 |
+
onMount(async () => {
|
85 |
+
promise = getReports();
|
86 |
+
});
|
87 |
+
|
88 |
+
async function getReports() {
|
89 |
+
if (model == "" || model == undefined){
|
90 |
+
return [];
|
91 |
+
}
|
92 |
+
let req_params = {
|
93 |
+
cur_user: cur_user,
|
94 |
+
scaffold_method: scaffold_method,
|
95 |
+
model: model,
|
96 |
+
topic_vis_method: topic_vis_method,
|
97 |
+
};
|
98 |
+
let params = new URLSearchParams(req_params).toString();
|
99 |
+
const response = await fetch("./get_reports?" + params);
|
100 |
+
const text = await response.text();
|
101 |
+
const data = JSON.parse(text);
|
102 |
+
all_reports = data["reports"]
|
103 |
+
// Select first report initially
|
104 |
+
selected = all_reports[0];
|
105 |
+
setActive(selected);
|
106 |
+
cur_open_evidence = selected["evidence"];
|
107 |
+
unfinished_count = all_reports.filter(item => !item.complete_status).length
|
108 |
+
return all_reports;
|
109 |
+
}
|
110 |
+
|
111 |
+
// Handle evidence saving
|
112 |
+
let cur_open_evidence = [];
|
113 |
+
new_evidence.subscribe(value => {
|
114 |
+
if (value != []) {
|
115 |
+
// Check if any values with the same ID exist
|
116 |
+
for (let i = 0; i < cur_open_evidence.length; i++) {
|
117 |
+
if (cur_open_evidence[i]["id"] == value["id"]) {
|
118 |
+
return; // If so, don't add the item
|
119 |
+
}
|
120 |
+
}
|
121 |
+
cur_open_evidence = cur_open_evidence.concat(value); // add new evidence item
|
122 |
+
|
123 |
+
// Add to open evidence in store
|
124 |
+
open_evidence.update((value) => cur_open_evidence);
|
125 |
+
// Save to selected value
|
126 |
+
if (selected != null) {
|
127 |
+
selected["evidence"] = cur_open_evidence;
|
128 |
+
}
|
129 |
+
}
|
130 |
+
});
|
131 |
+
|
132 |
+
// Handle evidence removal
|
133 |
+
open_evidence.subscribe(value => {
|
134 |
+
if ((value != cur_open_evidence) && (value.length < cur_open_evidence.length)) {
|
135 |
+
// Update local open evidence
|
136 |
+
cur_open_evidence = value;
|
137 |
+
// Save to selected value
|
138 |
+
if (selected != null) {
|
139 |
+
selected["evidence"] = cur_open_evidence;
|
140 |
+
}
|
141 |
+
}
|
142 |
+
});
|
143 |
+
|
144 |
+
let promise_save = Promise.resolve(null);
|
145 |
+
function handleSaveReport() {
|
146 |
+
promise_save = saveReport();
|
147 |
+
}
|
148 |
+
|
149 |
+
async function saveReport() {
|
150 |
+
let req_params = {
|
151 |
+
cur_user: cur_user,
|
152 |
+
reports: JSON.stringify(all_reports),
|
153 |
+
scaffold_method: scaffold_method,
|
154 |
+
};
|
155 |
+
let params = new URLSearchParams(req_params).toString();
|
156 |
+
const response = await fetch("./save_reports?" + params);
|
157 |
+
const text = await response.text();
|
158 |
+
const data = JSON.parse(text);
|
159 |
+
return data;
|
160 |
+
}
|
161 |
+
|
162 |
+
function handleNewReport() {
|
163 |
+
let new_report = {
|
164 |
+
title: "",
|
165 |
+
error_type: "",
|
166 |
+
evidence: [],
|
167 |
+
text_entry: "",
|
168 |
+
complete_status: false,
|
169 |
+
};
|
170 |
+
all_reports = all_reports.concat(new_report);
|
171 |
+
promise = Promise.resolve(all_reports);
|
172 |
+
// Open this new report
|
173 |
+
selected = all_reports[all_reports.length - 1];
|
174 |
+
cur_open_evidence = selected["evidence"];
|
175 |
+
selected["complete_status"] = false;
|
176 |
+
unfinished_count = all_reports.filter(item => !item.complete_status).length
|
177 |
+
}
|
178 |
+
|
179 |
+
function handleDeleteReport() {
|
180 |
+
// Remove selected item from reports
|
181 |
+
all_reports = all_reports.filter(item => item != selected);
|
182 |
+
promise = Promise.resolve(all_reports);
|
183 |
+
selected = all_reports[0];
|
184 |
+
cur_open_evidence = selected["evidence"];
|
185 |
+
unfinished_count = all_reports.filter(item => !item.complete_status).length
|
186 |
+
}
|
187 |
+
|
188 |
+
function handleMarkComplete() {
|
189 |
+
selected["complete_status"] = !selected["complete_status"];
|
190 |
+
unfinished_count = all_reports.filter(item => !item.complete_status).length
|
191 |
+
handleSaveReport(); // Auto-save report
|
192 |
+
}
|
193 |
+
|
194 |
+
// Error type
|
195 |
+
let error_type_options = [
|
196 |
+
{
|
197 |
+
"opt": 'Both',
|
198 |
+
"descr": '(System is under- or over-sensitive)',
|
199 |
+
"help": "View both types of potential system errors"
|
200 |
+
},
|
201 |
+
{
|
202 |
+
"opt": 'System is under-sensitive',
|
203 |
+
"descr": '(Incorrectly rates as non-toxic)',
|
204 |
+
"help": "Focus on system errors where the system labeled content as Non-toxic when it should have been labeled as Toxic."
|
205 |
+
},
|
206 |
+
{
|
207 |
+
"opt": 'System is over-sensitive',
|
208 |
+
"descr": '(Incorrectly rates as toxic)',
|
209 |
+
"help": "Focus on system errors where the system labeled content as Toxic when it should have been labeled as Non-toxic."
|
210 |
+
},
|
211 |
+
{
|
212 |
+
"opt": 'Show errors and non-errors',
|
213 |
+
"descr": '',
|
214 |
+
"help": "Also show cases that are not likely to be potential errors"
|
215 |
+
},
|
216 |
+
]
|
217 |
+
|
218 |
+
// Save current error type
|
219 |
+
async function updateErrorType() {
|
220 |
+
// Update error type on main page to be the selected error type
|
221 |
+
// error_type.update((value) => cur_error_type);
|
222 |
+
// selected["error_type"] = cur_error_type;
|
223 |
+
editErrorType = false;
|
224 |
+
}
|
225 |
+
|
226 |
+
</script>
|
227 |
+
|
228 |
+
<div class="hypothesis_panel">
|
229 |
+
<div class="panel_header">
|
230 |
+
<div class="panel_header_content">
|
231 |
+
<div class="page_header">
|
232 |
+
<!-- <span class="page_title">IndieLabel</span> -->
|
233 |
+
<img src="/logo.png" style="height: 60px; padding: 0px 20px;" alt="IndieLabel" />
|
234 |
+
<Button on:click={() => (user_dialog_open = true)} class="user_button" color="secondary" style="margin: 12px 10px;" >
|
235 |
+
<Label>User: {cur_user}</Label>
|
236 |
+
</Button>
|
237 |
+
</div>
|
238 |
+
<div class="hypotheses_header">
|
239 |
+
<h5 style="float: left; margin: 0; padding: 5px 20px;">Your Audit Reports</h5>
|
240 |
+
<Button
|
241 |
+
on:click={() => (open = !open)}
|
242 |
+
color="primary"
|
243 |
+
style="float: right; padding: 10px; margin-right: 10px;"
|
244 |
+
>
|
245 |
+
{#if open}
|
246 |
+
<Label>Close</Label>
|
247 |
+
{:else}
|
248 |
+
{#key unfinished_count}
|
249 |
+
<Label>Unfinished reports ({unfinished_count})</Label>
|
250 |
+
{/key}
|
251 |
+
{/if}
|
252 |
+
</Button>
|
253 |
+
</div>
|
254 |
+
</div>
|
255 |
+
</div>
|
256 |
+
|
257 |
+
<div class="panel_contents">
|
258 |
+
<!-- Drawer -->
|
259 |
+
{#await promise}
|
260 |
+
<div class="app_loading_fullwidth">
|
261 |
+
<LinearProgress indeterminate />
|
262 |
+
</div>
|
263 |
+
{:then reports}
|
264 |
+
{#if reports}
|
265 |
+
<div class="drawer-container">
|
266 |
+
{#key open}
|
267 |
+
<Drawer variant="dismissible" bind:open>
|
268 |
+
<Header>
|
269 |
+
<Title>Your Reports</Title>
|
270 |
+
<Subtitle>Select a report to view.</Subtitle>
|
271 |
+
</Header>
|
272 |
+
<Content>
|
273 |
+
<List twoLine>
|
274 |
+
{#each reports as report}
|
275 |
+
<Item
|
276 |
+
href="javascript:void(0)"
|
277 |
+
on:click={() => setActive(report)}
|
278 |
+
activated={selected === report}
|
279 |
+
>
|
280 |
+
{#if report["complete_status"]}
|
281 |
+
<Graphic class="material-icons" aria-hidden="true">task_alt</Graphic>
|
282 |
+
{:else}
|
283 |
+
<Graphic class="material-icons" aria-hidden="true">radio_button_unchecked</Graphic>
|
284 |
+
{/if}
|
285 |
+
<Text>
|
286 |
+
<PrimaryText>
|
287 |
+
{report["title"]}
|
288 |
+
</PrimaryText>
|
289 |
+
<SecondaryText>
|
290 |
+
{report["error_type"]}
|
291 |
+
</SecondaryText>
|
292 |
+
</Text>
|
293 |
+
</Item>
|
294 |
+
{/each}
|
295 |
+
</List>
|
296 |
+
</Content>
|
297 |
+
</Drawer>
|
298 |
+
{/key}
|
299 |
+
<AppContent class="app-content">
|
300 |
+
<main class="main-content">
|
301 |
+
{#if selected}
|
302 |
+
<div class="head_6_highlight">
|
303 |
+
Current Report
|
304 |
+
</div>
|
305 |
+
<div class="panel_contents2">
|
306 |
+
<!-- Title -->
|
307 |
+
<div class="spacing_vert">
|
308 |
+
<div class="edit_button_row">
|
309 |
+
{#if editTitle}
|
310 |
+
<div class="edit_button_row_input">
|
311 |
+
<Textfield
|
312 |
+
bind:value={selected["title"]}
|
313 |
+
label="Your report title"
|
314 |
+
input$rows={4}
|
315 |
+
textarea
|
316 |
+
variant="outlined"
|
317 |
+
style="width: 100%;"
|
318 |
+
helperLine$style="width: 100%;"
|
319 |
+
/>
|
320 |
+
</div>
|
321 |
+
<div>
|
322 |
+
<IconButton class="material-icons grey_button" size="button" on:click={() => (editTitle = false)}>
|
323 |
+
check
|
324 |
+
</IconButton>
|
325 |
+
</div>
|
326 |
+
{:else}
|
327 |
+
{#if selected["title"] != ""}
|
328 |
+
<div class="head_5">
|
329 |
+
{selected["title"]}
|
330 |
+
</div>
|
331 |
+
{:else}
|
332 |
+
<div class="grey_text">Enter a report title</div>
|
333 |
+
{/if}
|
334 |
+
|
335 |
+
<div>
|
336 |
+
<IconButton class="material-icons grey_button" size="button" on:click={() => (editTitle = true)}>
|
337 |
+
create
|
338 |
+
</IconButton>
|
339 |
+
</div>
|
340 |
+
{/if}
|
341 |
+
</div>
|
342 |
+
</div>
|
343 |
+
|
344 |
+
<!-- Error type -->
|
345 |
+
<div class="spacing_vert_40">
|
346 |
+
<div class="head_6">
|
347 |
+
<b>Error Type</b>
|
348 |
+
</div>
|
349 |
+
<div class="edit_button_row">
|
350 |
+
{#if editErrorType}
|
351 |
+
<div>
|
352 |
+
{#each error_type_options as e}
|
353 |
+
<div style="display: flex; align-items: center;">
|
354 |
+
<!-- <Wrapper rich>
|
355 |
+
<FormField>
|
356 |
+
<Radio bind:group={selected["error_type"]} value={e.opt} on:change={updateErrorType} color="secondary" />
|
357 |
+
<span slot="label">
|
358 |
+
{e.opt}
|
359 |
+
<IconButton class="material-icons" size="button" disabled>help_outline</IconButton>
|
360 |
+
</span>
|
361 |
+
</FormField>
|
362 |
+
<HelpTooltip text={e.help} />
|
363 |
+
</Wrapper> -->
|
364 |
+
|
365 |
+
<FormField>
|
366 |
+
<Radio bind:group={selected["error_type"]} value={e.opt} on:change={updateErrorType} color="secondary" />
|
367 |
+
<span slot="label">
|
368 |
+
<b>{e.opt}</b> {e.descr}
|
369 |
+
</span>
|
370 |
+
</FormField>
|
371 |
+
</div>
|
372 |
+
{/each}
|
373 |
+
</div>
|
374 |
+
<!-- <div>
|
375 |
+
<IconButton class="material-icons grey_button" size="button" on:click={() => (editErrorType = false)}>
|
376 |
+
check
|
377 |
+
</IconButton>
|
378 |
+
</div> -->
|
379 |
+
{:else}
|
380 |
+
{#if selected["error_type"] != ""}
|
381 |
+
<div>
|
382 |
+
<p>{selected["error_type"]}</p>
|
383 |
+
</div>
|
384 |
+
{:else}
|
385 |
+
<div class="grey_text">Select an error type</div>
|
386 |
+
{/if}
|
387 |
+
|
388 |
+
<div>
|
389 |
+
<IconButton class="material-icons grey_button" size="button" on:click={() => (editErrorType = true)}>
|
390 |
+
create
|
391 |
+
</IconButton>
|
392 |
+
</div>
|
393 |
+
{/if}
|
394 |
+
</div>
|
395 |
+
</div>
|
396 |
+
|
397 |
+
<!-- Evidence -->
|
398 |
+
<div class="spacing_vert_40">
|
399 |
+
<div class="head_6">
|
400 |
+
<b>Evidence</b>
|
401 |
+
</div>
|
402 |
+
{#key cur_open_evidence}
|
403 |
+
<div>
|
404 |
+
{#if cur_open_evidence.length > 0}
|
405 |
+
<ClusterResults
|
406 |
+
cluster={cur_topic}
|
407 |
+
model={model}
|
408 |
+
data={{"cluster_comments": cur_open_evidence}}
|
409 |
+
show_vis={false}
|
410 |
+
show_checkboxes={false}
|
411 |
+
table_width_pct={100}
|
412 |
+
rowsPerPage={25}
|
413 |
+
table_id={"panel"}
|
414 |
+
/>
|
415 |
+
{:else}
|
416 |
+
<p class="grey_text">
|
417 |
+
Add examples from the main panel to see them here!
|
418 |
+
</p>
|
419 |
+
{/if}
|
420 |
+
</div>
|
421 |
+
{/key}
|
422 |
+
</div>
|
423 |
+
|
424 |
+
<div class="spacing_vert_60">
|
425 |
+
<div class="head_6">
|
426 |
+
<b>Summary/Suggestions</b>
|
427 |
+
</div>
|
428 |
+
<div class="spacing_vert">
|
429 |
+
<Textfield
|
430 |
+
style="width: 100%;"
|
431 |
+
helperLine$style="width: 100%;"
|
432 |
+
input$rows={8}
|
433 |
+
textarea
|
434 |
+
bind:value={selected["text_entry"]}
|
435 |
+
label="My current hunch is that..."
|
436 |
+
>
|
437 |
+
</Textfield>
|
438 |
+
</div>
|
439 |
+
|
440 |
+
</div>
|
441 |
+
|
442 |
+
<div class="spacing_vert_40">
|
443 |
+
<div class="head_6">
|
444 |
+
<b>Mark report as complete?</b>
|
445 |
+
<FormField>
|
446 |
+
<Checkbox checked={selected["complete_status"]} on:change={handleMarkComplete} />
|
447 |
+
</FormField>
|
448 |
+
</div>
|
449 |
+
|
450 |
+
</div>
|
451 |
+
</div>
|
452 |
+
{/if}
|
453 |
+
</main>
|
454 |
+
</AppContent>
|
455 |
+
</div>
|
456 |
+
{/if}
|
457 |
+
{:catch error}
|
458 |
+
<p style="color: red">{error.message}</p>
|
459 |
+
{/await}
|
460 |
+
</div>
|
461 |
+
|
462 |
+
<div class="panel_footer">
|
463 |
+
<div class="panel_footer_contents">
|
464 |
+
|
465 |
+
|
466 |
+
<Button
|
467 |
+
on:click={handleNewReport}
|
468 |
+
variant="outlined"
|
469 |
+
color="secondary"
|
470 |
+
style=""
|
471 |
+
>
|
472 |
+
<Label>New</Label>
|
473 |
+
</Button>
|
474 |
+
|
475 |
+
<Button
|
476 |
+
on:click={handleDeleteReport}
|
477 |
+
variant="outlined"
|
478 |
+
color="secondary"
|
479 |
+
style=""
|
480 |
+
>
|
481 |
+
<Label>Delete</Label>
|
482 |
+
</Button>
|
483 |
+
|
484 |
+
<Button
|
485 |
+
on:click={handleSaveReport}
|
486 |
+
variant="outlined"
|
487 |
+
color="secondary"
|
488 |
+
>
|
489 |
+
<Label>Save</Label>
|
490 |
+
</Button>
|
491 |
+
|
492 |
+
<div>
|
493 |
+
<span style="color: grey"><i>Last saved:
|
494 |
+
{#await promise_save}
|
495 |
+
<CircularProgress style="height: 32px; width: 32px;" indeterminate />
|
496 |
+
{:then result}
|
497 |
+
{#if result}
|
498 |
+
{new Date().toLocaleTimeString()}
|
499 |
+
{:else}
|
500 |
+
—
|
501 |
+
{/if}
|
502 |
+
{:catch error}
|
503 |
+
<p style="color: red">{error.message}</p>
|
504 |
+
{/await}
|
505 |
+
</i></span>
|
506 |
+
</div>
|
507 |
+
</div>
|
508 |
+
</div>
|
509 |
+
|
510 |
+
<!-- TEMP -->
|
511 |
+
<!-- {#key model}
|
512 |
+
<div>Model: {model}</div>
|
513 |
+
{/key} -->
|
514 |
+
</div>
|
515 |
+
|
516 |
+
<style>
|
517 |
+
/* Drawer */
|
518 |
+
/* .drawer-container {
|
519 |
+
position: relative;
|
520 |
+
display: flex;
|
521 |
+
height: 350px;
|
522 |
+
max-width: 600px;
|
523 |
+
border: 1px solid
|
524 |
+
var(--mdc-theme-text-hint-on-background, rgba(0, 0, 0, 0.1));
|
525 |
+
overflow: hidden;
|
526 |
+
z-index: 0;
|
527 |
+
}
|
528 |
+
|
529 |
+
* :global(.app-content) {
|
530 |
+
flex: auto;
|
531 |
+
overflow: auto;
|
532 |
+
position: relative;
|
533 |
+
flex-grow: 1;
|
534 |
+
}
|
535 |
+
|
536 |
+
.main-content {
|
537 |
+
overflow: auto;
|
538 |
+
padding: 16px;
|
539 |
+
height: 100%;
|
540 |
+
box-sizing: border-box;
|
541 |
+
} */
|
542 |
+
|
543 |
+
.panel_contents {
|
544 |
+
padding: 0 20px;
|
545 |
+
overflow-y: auto;
|
546 |
+
top: 150px;
|
547 |
+
position: relative;
|
548 |
+
height: 82%;
|
549 |
+
}
|
550 |
+
.panel_contents2 {
|
551 |
+
padding-left: 10px;
|
552 |
+
}
|
553 |
+
|
554 |
+
.panel_header {
|
555 |
+
position: fixed;
|
556 |
+
width: 30%;
|
557 |
+
border-bottom: 1px solid #d7d7d7; /* c5c5c5 */
|
558 |
+
background: #f3f3f3;
|
559 |
+
z-index: 11;
|
560 |
+
}
|
561 |
+
|
562 |
+
.panel_footer {
|
563 |
+
position: fixed;
|
564 |
+
width: 30%;
|
565 |
+
border-top: 1px solid #d7d7d7;
|
566 |
+
background: #f3f3f3;
|
567 |
+
z-index: 11;
|
568 |
+
bottom: 0;
|
569 |
+
padding: 15px 0px;
|
570 |
+
}
|
571 |
+
.panel_footer_contents {
|
572 |
+
/* padding: 0px 20px; */
|
573 |
+
display: flex;
|
574 |
+
justify-content: space-around;
|
575 |
+
align-items: center;
|
576 |
+
}
|
577 |
+
|
578 |
+
:global(.mdc-button.user_button) {
|
579 |
+
float: right;
|
580 |
+
margin-right: 20px;
|
581 |
+
}
|
582 |
+
|
583 |
+
.page_header {
|
584 |
+
width: 100%;
|
585 |
+
background: #e3d6fd;
|
586 |
+
/* padding: 21px 0; */
|
587 |
+
/* border-bottom: 1px solid #e3d6fd; */
|
588 |
+
padding: 10.5px 0;
|
589 |
+
position: relative;
|
590 |
+
display: inline-block;
|
591 |
+
}
|
592 |
+
|
593 |
+
.page_header:before {
|
594 |
+
content: '';
|
595 |
+
border-right: 1px solid rgb(0 0 0 / 7%);
|
596 |
+
position: absolute;
|
597 |
+
height: 80%;
|
598 |
+
top: 10%;
|
599 |
+
right: 0;
|
600 |
+
}
|
601 |
+
|
602 |
+
.hypotheses_header {
|
603 |
+
display: inline-block;
|
604 |
+
width: 100%;
|
605 |
+
padding: 10px 0;
|
606 |
+
vertical-align: middle;
|
607 |
+
}
|
608 |
+
</style>
|
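
Every report that `HypothesisPanel` creates, saves to `./save_reports`, and reloads from `./get_reports` follows the object literal built in `handleNewReport()`. A TypeScript sketch of that shape, inferred from this component alone (the element type of `evidence` is an assumption, since evidence items arrive via the `new_evidence` store):

```
// Shape inferred from handleNewReport() and the fields read in the template.
interface Report {
    title: string;
    error_type: string;
    evidence: Array<Record<string, unknown>>; // items pushed in from the new_evidence store
    text_entry: string;
    complete_status: boolean;
}

// The same bookkeeping the component repeats after every mutation.
function countUnfinished(reports: Report[]): number {
    return reports.filter((r) => !r.complete_status).length;
}
```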
indie_label_svelte/src/IterativeClustering.svelte
ADDED
@@ -0,0 +1,164 @@
1 |
+
<script>
|
2 |
+
import Section from "./Section.svelte";
|
3 |
+
import ClusterResults from "./ClusterResults.svelte";
|
4 |
+
import Button, { Label } from "@smui/button";
|
5 |
+
import Textfield from "@smui/textfield";
|
6 |
+
import LayoutGrid, { Cell } from "@smui/layout-grid";
|
7 |
+
import LinearProgress from "@smui/linear-progress";
|
8 |
+
import Chip, { Set, Text } from '@smui/chips';
|
9 |
+
|
10 |
+
export let clusters;
|
11 |
+
export let personalized_model;
|
12 |
+
export let evidence;
|
13 |
+
export let width_pct = 80;
|
14 |
+
|
15 |
+
let topic_df_ids = [];
|
16 |
+
let promise_iter_cluster = Promise.resolve(null);
|
17 |
+
let keyword = null;
|
18 |
+
let n_neighbors = null;
|
19 |
+
let cur_iter_cluster = null;
|
20 |
+
let history = [];
|
21 |
+
|
22 |
+
async function getIterCluster(search_type) {
|
23 |
+
let req_params = {
|
24 |
+
cluster: cur_iter_cluster,
|
25 |
+
topic_df_ids: topic_df_ids,
|
26 |
+
n_examples: 500, // TEMP
|
27 |
+
pers_model: personalized_model,
|
28 |
+
example_sort: "descending", // TEMP
|
29 |
+
comparison_group: "status_quo", // TEMP
|
30 |
+
search_type: search_type,
|
31 |
+
keyword: keyword,
|
32 |
+
n_neighbors: n_neighbors,
|
33 |
+
};
|
34 |
+
console.log("topic_df_ids", topic_df_ids);
|
35 |
+
let params = new URLSearchParams(req_params).toString();
|
36 |
+
const response = await fetch("./get_cluster_results?" + params);
|
37 |
+
const text = await response.text();
|
38 |
+
const data = JSON.parse(text);
|
39 |
+
// if (data["cluster_comments"] == null) {
|
40 |
+
// return false
|
41 |
+
// }
|
42 |
+
topic_df_ids = data["topic_df_ids"];
|
43 |
+
return data;
|
44 |
+
}
|
45 |
+
|
46 |
+
function findCluster() {
|
47 |
+
promise_iter_cluster = getIterCluster("cluster");
|
48 |
+
history = history.concat("bulk-add cluster: " + cur_iter_cluster);
|
49 |
+
}
|
50 |
+
|
51 |
+
function findNeighbors() {
|
52 |
+
promise_iter_cluster = getIterCluster("neighbors");
|
53 |
+
history = history.concat("find " + n_neighbors + " neighbors");
|
54 |
+
}
|
55 |
+
|
56 |
+
function findKeywords() {
|
57 |
+
promise_iter_cluster = getIterCluster("keyword");
|
58 |
+
history = history.concat("keyword search: " + keyword);
|
59 |
+
}
|
60 |
+
</script>
|
61 |
+
|
62 |
+
<div>
|
63 |
+
<div>
|
64 |
+
<!-- <h6>Hunch {ind} examples</h6> -->
|
65 |
+
<div>
|
66 |
+
<h6>Search Settings</h6>
|
67 |
+
<!-- Start with cluster -->
|
68 |
+
<!-- <div class="">
|
69 |
+
<Section
|
70 |
+
section_id="iter_cluster"
|
71 |
+
section_title="Bulk-add cluster"
|
72 |
+
section_opts={clusters}
|
73 |
+
bind:value={cur_iter_cluster}
|
74 |
+
width_pct={100}
|
75 |
+
/>
|
76 |
+
<Button
|
77 |
+
on:click={findCluster}
|
78 |
+
variant="outlined"
|
79 |
+
class="button_float_right"
|
80 |
+
disabled={cur_iter_cluster == null}
|
81 |
+
>
|
82 |
+
<Label>Search</Label>
|
83 |
+
</Button>
|
84 |
+
</div> -->
|
85 |
+
|
86 |
+
<!-- Manual keyword -->
|
87 |
+
<div class="spacing_vert">
|
88 |
+
<Textfield
|
89 |
+
bind:value={keyword}
|
90 |
+
label="Keyword search"
|
91 |
+
variant="outlined"
|
92 |
+
style="width: {width_pct}%"
|
93 |
+
/>
|
94 |
+
<Button
|
95 |
+
on:click={findKeywords}
|
96 |
+
variant="outlined"
|
97 |
+
class="button_float_right spacing_vert"
|
98 |
+
disabled={keyword == null}
|
99 |
+
>
|
100 |
+
<Label>Search</Label>
|
101 |
+
</Button>
|
102 |
+
</div>
|
103 |
+
|
104 |
+
<!-- Find neighbors of current set -->
|
105 |
+
<div class="spacing_vert">
|
106 |
+
<Textfield
|
107 |
+
bind:value={n_neighbors}
|
108 |
+
label="Number of neighbors to retrieve"
|
109 |
+
type="number"
|
110 |
+
min="1"
|
111 |
+
max="50"
|
112 |
+
variant="outlined"
|
113 |
+
style="width: {width_pct}%"
|
114 |
+
/>
|
115 |
+
<Button
|
116 |
+
on:click={findNeighbors}
|
117 |
+
variant="outlined"
|
118 |
+
class="button_float_right spacing_vert"
|
119 |
+
disabled={n_neighbors == null}
|
120 |
+
>
|
121 |
+
<Label>Search</Label>
|
122 |
+
</Button>
|
123 |
+
</div>
|
124 |
+
</div>
|
125 |
+
</div>
|
126 |
+
|
127 |
+
{#await promise_iter_cluster}
|
128 |
+
<div class="app_loading" style="width: {width_pct}%">
|
129 |
+
<LinearProgress indeterminate />
|
130 |
+
</div>
|
131 |
+
{:then iter_cluster_results}
|
132 |
+
{#if iter_cluster_results}
|
133 |
+
{#if history.length > 0}
|
134 |
+
<div class="bold" style="padding-top:40px;">Search History</div>
|
135 |
+
<Set chips={history} let:chip choice>
|
136 |
+
<Chip {chip}>
|
137 |
+
<Text>{chip}</Text>
|
138 |
+
</Chip>
|
139 |
+
</Set>
|
140 |
+
{/if}
|
141 |
+
{#if iter_cluster_results.cluster_comments != null}
|
142 |
+
<ClusterResults
|
143 |
+
cluster={""}
|
144 |
+
clusters={clusters}
|
145 |
+
model={personalized_model}
|
146 |
+
data={iter_cluster_results}
|
147 |
+
show_vis={false}
|
148 |
+
table_width_pct={80}
|
149 |
+
bind:evidence={evidence}
|
150 |
+
on:change
|
151 |
+
/>
|
152 |
+
{:else}
|
153 |
+
<div class="bold" style="padding-top:40px;">
|
154 |
+
No results found
|
155 |
+
</div>
|
156 |
+
{/if}
|
157 |
+
{/if}
|
158 |
+
{:catch error}
|
159 |
+
<p style="color: red">{error.message}</p>
|
160 |
+
{/await}
|
161 |
+
</div>
|
162 |
+
|
163 |
+
<style>
|
164 |
+
</style>
|
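
`IterativeClustering` is driven entirely by its props and the shared `./get_cluster_results` endpoint; the commented-out block in `Hunch.svelte` shows the intended call site. A usage sketch with placeholder prop values:

```
<script lang="ts">
    import IterativeClustering from "./IterativeClustering.svelte";

    let clusters = [];          // e.g. populated from ./audit_settings (placeholder)
    let model = "my_model";     // personalized model name (placeholder)
    let evidence = [];          // bound so rows selected in ClusterResults flow back up
</script>

<IterativeClustering
    clusters={clusters}
    personalized_model={model}
    bind:evidence={evidence}
/>
```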
indie_label_svelte/src/KeywordSearch.svelte
ADDED
@@ -0,0 +1,141 @@
1 |
+
<script>
|
2 |
+
import ClusterResults from "./ClusterResults.svelte";
|
3 |
+
import { error_type } from './stores/error_type_store.js';
|
4 |
+
|
5 |
+
import Button, { Label } from "@smui/button";
|
6 |
+
import Textfield from "@smui/textfield";
|
7 |
+
import LinearProgress from "@smui/linear-progress";
|
8 |
+
import Chip, { Set, Text } from '@smui/chips';
|
9 |
+
|
10 |
+
|
11 |
+
export let clusters;
|
12 |
+
export let personalized_model;
|
13 |
+
export let evidence;
|
14 |
+
export let width_pct = 80;
|
15 |
+
export let use_model = true;
|
16 |
+
|
17 |
+
let topic_df_ids = [];
|
18 |
+
let promise_iter_cluster = Promise.resolve(null);
|
19 |
+
let keyword = null;
|
20 |
+
let n_neighbors = null;
|
21 |
+
let cur_iter_cluster = null;
|
22 |
+
let history = [];
|
23 |
+
|
24 |
+
let cur_error_type;
|
25 |
+
error_type.subscribe(value => {
|
26 |
+
cur_error_type = value;
|
27 |
+
});
|
28 |
+
|
29 |
+
async function getIterCluster(search_type) {
|
30 |
+
let req_params = {
|
31 |
+
cluster: cur_iter_cluster,
|
32 |
+
topic_df_ids: topic_df_ids,
|
33 |
+
n_examples: 500, // TEMP
|
34 |
+
pers_model: personalized_model,
|
35 |
+
example_sort: "descending", // TEMP
|
36 |
+
comparison_group: "status_quo", // TEMP
|
37 |
+
search_type: search_type,
|
38 |
+
keyword: keyword,
|
39 |
+
n_neighbors: n_neighbors,
|
40 |
+
error_type: cur_error_type,
|
41 |
+
};
|
42 |
+
console.log("topic_df_ids", topic_df_ids);
|
43 |
+
let params = new URLSearchParams(req_params).toString();
|
44 |
+
const response = await fetch("./get_cluster_results?" + params);
|
45 |
+
const text = await response.text();
|
46 |
+
const data = JSON.parse(text);
|
47 |
+
// if (data["cluster_comments"] == null) {
|
48 |
+
// return false
|
49 |
+
// }
|
50 |
+
topic_df_ids = data["topic_df_ids"];
|
51 |
+
return data;
|
52 |
+
}
|
53 |
+
|
54 |
+
function findKeywords() {
|
55 |
+
history = [];
|
56 |
+
topic_df_ids = [];
|
57 |
+
promise_iter_cluster = getIterCluster("keyword");
|
58 |
+
history = history.concat("keyword search: " + keyword);
|
59 |
+
}
|
60 |
+
|
61 |
+
function clearHistory() {
|
62 |
+
history = [];
|
63 |
+
promise_iter_cluster = Promise.resolve(null);
|
64 |
+
keyword = "";
|
65 |
+
topic_df_ids = [];
|
66 |
+
}
|
67 |
+
</script>
|
68 |
+
|
69 |
+
<div>
|
70 |
+
<div>
|
71 |
+
<div>
|
72 |
+
<h6>Keyword Search</h6>
|
73 |
+
<!-- Manual keyword -->
|
74 |
+
<div class="spacing_vert edit_button_row" style="width: 90%; justify-content: space-between">
|
75 |
+
<Textfield
|
76 |
+
bind:value={keyword}
|
77 |
+
label="Your keyword or phrase"
|
78 |
+
variant="outlined"
|
79 |
+
style="width: 60%"
|
80 |
+
/>
|
81 |
+
<Button
|
82 |
+
on:click={findKeywords}
|
83 |
+
variant="outlined"
|
84 |
+
class="spacing_vert"
|
85 |
+
disabled={keyword == null}
|
86 |
+
>
|
87 |
+
<Label>Search</Label>
|
88 |
+
</Button>
|
89 |
+
<!-- <Button
|
90 |
+
on:click={clearHistory}
|
91 |
+
variant="outlined"
|
92 |
+
class="spacing_vert"
|
93 |
+
disabled={history.length == 0}
|
94 |
+
>
|
95 |
+
<Label>Clear Search</Label>
|
96 |
+
</Button> -->
|
97 |
+
</div>
|
98 |
+
|
99 |
+
<!-- {#if history.length > 0}
|
100 |
+
<div class="head_6">Search History</div>
|
101 |
+
<Set chips={history} let:chip choice>
|
102 |
+
<Chip {chip}>
|
103 |
+
<Text>{chip}</Text>
|
104 |
+
</Chip>
|
105 |
+
</Set>
|
106 |
+
{/if} -->
|
107 |
+
</div>
|
108 |
+
</div>
|
109 |
+
|
110 |
+
{#await promise_iter_cluster}
|
111 |
+
<div class="app_loading" style="width: {width_pct}%">
|
112 |
+
<LinearProgress indeterminate />
|
113 |
+
</div>
|
114 |
+
{:then iter_cluster_results}
|
115 |
+
{#if iter_cluster_results}
|
116 |
+
{#if iter_cluster_results.cluster_comments != null}
|
117 |
+
<ClusterResults
|
118 |
+
cluster={""}
|
119 |
+
clusters={clusters}
|
120 |
+
model={personalized_model}
|
121 |
+
data={iter_cluster_results}
|
122 |
+
show_vis={false}
|
123 |
+
table_width_pct={90}
|
124 |
+
table_id={"keyword"}
|
125 |
+
use_model={use_model}
|
126 |
+
bind:evidence={evidence}
|
127 |
+
on:change
|
128 |
+
/>
|
129 |
+
{:else}
|
130 |
+
<div class="bold" style="padding-top:40px;">
|
131 |
+
No results found
|
132 |
+
</div>
|
133 |
+
{/if}
|
134 |
+
{/if}
|
135 |
+
{:catch error}
|
136 |
+
<p style="color: red">{error.message}</p>
|
137 |
+
{/await}
|
138 |
+
</div>
|
139 |
+
|
140 |
+
<style>
|
141 |
+
</style>
|
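
`KeywordSearch` reuses `./get_cluster_results` with `search_type: "keyword"` and forwards the current `error_type` store value. For reference, a sketch of the query parameters as assembled in `getIterCluster()` above; this is documentation of what the component sends, not a server-side contract, and the array serialization note reflects `URLSearchParams` behavior:

```
// Fields sent to ./get_cluster_results by getIterCluster(); URLSearchParams
// coerces every value to a string (arrays become comma-joined lists).
interface ClusterResultsQuery {
    cluster: string | null;
    topic_df_ids: unknown[];            // ids carried over from the previous response
    n_examples: number;                 // hard-coded to 500 ("TEMP")
    pers_model: string;                 // personalized model name
    example_sort: "descending";         // hard-coded ("TEMP")
    comparison_group: "status_quo";     // hard-coded ("TEMP")
    search_type: "keyword" | "neighbors" | "cluster";
    keyword: string | null;
    n_neighbors: number | null;
    error_type: string;                 // current value of the error_type store
}
```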
indie_label_svelte/src/Labeling.svelte
ADDED
@@ -0,0 +1,374 @@
1 |
+
<script lang="ts">
|
2 |
+
import { onMount } from "svelte";
|
3 |
+
import Section from "./Section.svelte";
|
4 |
+
import TopicTraining from "./TopicTraining.svelte";
|
5 |
+
import CommentTable from "./CommentTable.svelte";
|
6 |
+
|
7 |
+
import Textfield from '@smui/textfield';
|
8 |
+
import Button, { Label } from "@smui/button";
|
9 |
+
import LinearProgress from '@smui/linear-progress';
|
10 |
+
import Svelecte from '../node_modules/svelecte/src/Svelecte.svelte';
|
11 |
+
import { user } from './stores/cur_user_store.js';
|
12 |
+
|
13 |
+
let model_name = "";
|
14 |
+
let personalized_models = [];
|
15 |
+
let existing_model_name;
|
16 |
+
let label_modes = [
|
17 |
+
"Create a new model",
|
18 |
+
"Edit an existing model",
|
19 |
+
"Tune your model for a topic area",
|
20 |
+
"Set up a group-based model",
|
21 |
+
];
|
22 |
+
|
23 |
+
let clusters_for_tuning = [];
|
24 |
+
let topic;
|
25 |
+
|
26 |
+
// Get current user
|
27 |
+
let cur_user;
|
28 |
+
user.subscribe(value => {
|
29 |
+
if (value != cur_user) {
|
30 |
+
cur_user = value;
|
31 |
+
personalized_models = [];
|
32 |
+
getLabeling();
|
33 |
+
}
|
34 |
+
cur_user = value;
|
35 |
+
});
|
36 |
+
|
37 |
+
// Handle routing
|
38 |
+
let label_mode = label_modes[0];
|
39 |
+
let searchParams = new URLSearchParams(window.location.search);
|
40 |
+
let req_label_mode = parseInt(searchParams.get("label_mode"));
|
41 |
+
if (req_label_mode == 0) {
|
42 |
+
label_mode = label_modes[0];
|
43 |
+
} else if (req_label_mode == 1) {
|
44 |
+
label_mode = label_modes[1];
|
45 |
+
} else if (req_label_mode == 2) {
|
46 |
+
label_mode = label_modes[2];
|
47 |
+
} else if (req_label_mode == 3) {
|
48 |
+
label_mode = label_modes[3];
|
49 |
+
}
|
50 |
+
|
51 |
+
// Handle group options
|
52 |
+
let options_pol = ['Conservative', 'Liberal', 'Independent', 'Other'];
|
53 |
+
let sel_pol;
|
54 |
+
let options_relig = ['Not important', 'Not too important', 'Somewhat important', 'Very important'];
|
55 |
+
let sel_relig;
|
56 |
+
let options_race = ["White", "Black or African American", "Hispanic", "Asian", "American Indian or Alaskan Native", "Native Hawaiian or Pacific Islander", "Other"];
|
57 |
+
let sel_race;
|
58 |
+
let options_gender = ['Male', 'Female', 'Nonbinary'];
|
59 |
+
let sel_gender;
|
60 |
+
let options_lgbtq = ["Non-LGBTQ+", "LGBTQ+"];
|
61 |
+
let sel_lgbtq;
|
62 |
+
|
63 |
+
let options_axis = ["A: Political affiliation", "B: Gender", "C: Race", "D: LGBTQ+ Identity", "E: Importance of religion"]
|
64 |
+
let selected_axis;
|
65 |
+
|
66 |
+
let group_size;
|
67 |
+
|
68 |
+
function getGroupSize() {
|
69 |
+
let req_params = {
|
70 |
+
sel_gender: sel_gender,
|
71 |
+
sel_pol: sel_pol,
|
72 |
+
sel_relig: sel_relig,
|
73 |
+
sel_race: sel_race,
|
74 |
+
sel_lgbtq: sel_lgbtq,
|
75 |
+
};
|
76 |
+
let params = new URLSearchParams(req_params).toString();
|
77 |
+
fetch("./get_group_size?" + params)
|
78 |
+
.then((r) => r.text())
|
79 |
+
.then(function (r_orig) {
|
80 |
+
let r = JSON.parse(r_orig);
|
81 |
+
group_size = r["group_size"];
|
82 |
+
});
|
83 |
+
}
|
84 |
+
|
85 |
+
let promise = Promise.resolve(null);
|
86 |
+
function handleGroupModel() {
|
87 |
+
promise = getGroupModel();
|
88 |
+
}
|
89 |
+
|
90 |
+
async function getGroupModel() {
|
91 |
+
let req_params = {
|
92 |
+
user: cur_user,
|
93 |
+
model_name: model_name,
|
94 |
+
sel_gender: sel_gender,
|
95 |
+
sel_pol: sel_pol,
|
96 |
+
sel_relig: sel_relig,
|
97 |
+
sel_race: sel_race,
|
98 |
+
sel_lgbtq: sel_lgbtq,
|
99 |
+
};
|
100 |
+
let params = new URLSearchParams(req_params).toString();
|
101 |
+
const response = await fetch("./get_group_model?" + params);
|
102 |
+
const text = await response.text();
|
103 |
+
const data = JSON.parse(text);
|
104 |
+
console.log("getGroupModel", data);
|
105 |
+
return data
|
106 |
+
}
|
107 |
+
|
108 |
+
function getLabeling() {
|
109 |
+
let req_params = {
|
110 |
+
user: cur_user,
|
111 |
+
};
|
112 |
+
let params = new URLSearchParams(req_params).toString();
|
113 |
+
fetch("./get_labeling?" + params)
|
114 |
+
.then((r) => r.text())
|
115 |
+
.then(function (r_orig) {
|
116 |
+
let r = JSON.parse(r_orig);
|
117 |
+
personalized_models = r["personalized_models"];
|
118 |
+
model_name = r["model_name_suggestion"];
|
119 |
+
clusters_for_tuning = r["clusters_for_tuning"];
|
120 |
+
topic = clusters_for_tuning[0]["text"];
|
121 |
+
existing_model_name = personalized_models[0];
|
122 |
+
});
|
123 |
+
}
|
124 |
+
onMount(async () => {
|
125 |
+
getLabeling();
|
126 |
+
});
|
127 |
+
</script>
|
128 |
+
|
129 |
+
<div>
|
130 |
+
<h3>Labeling</h3>
|
131 |
+
|
132 |
+
<!-- MODE SELECTION -->
|
133 |
+
{#if label_mode != label_modes[3]}
|
134 |
+
<div id="audit-mode" class="section">
|
135 |
+
<Section
|
136 |
+
section_id="label_mode"
|
137 |
+
section_title="What labeling mode do you want to use?"
|
138 |
+
section_opts={label_modes}
|
139 |
+
bind:value={label_mode}
|
140 |
+
width_pct={40}
|
141 |
+
/>
|
142 |
+
</div>
|
143 |
+
{/if}
|
144 |
+
|
145 |
+
{#if label_mode == label_modes[0]}
|
146 |
+
<!-- NEW MODEL -->
|
147 |
+
<div style="width: 80%">
|
148 |
+
In this section, you’ll label some example comments to give a sense of your perspectives on what is toxic or not.
|
149 |
+
We’ll then train a simple model (which we’ll refer to as "your model") that estimates what your toxicity rating would be for the full dataset (with tens of thousands of comments) based on an existing dataset of toxicity ratings provided by different users.
|
150 |
+
</div>
|
151 |
+
|
152 |
+
<div id="new-model" class="section">
|
153 |
+
<h5>Create a New Model</h5>
|
154 |
+
|
155 |
+
<Textfield
|
156 |
+
bind:value={model_name}
|
157 |
+
label="Name your personalized model"
|
158 |
+
style="width: 40%"
|
159 |
+
/>
|
160 |
+
|
161 |
+
<!-- Labeling -->
|
162 |
+
<h6>Comments to label</h6>
|
163 |
+
<ul>
|
164 |
+
<li>
|
165 |
+
Comments with scores <b>0</b> and <b>1</b> will be allowed to <b>remain</b> on the platform.
|
166 |
+
</li>
|
167 |
+
<li>
|
168 |
+
Comments with scores <b>2</b>, <b>3</b>, or <b>4</b> will be <b>deleted</b> from the platform.
|
169 |
+
</li>
|
170 |
+
<li>
|
171 |
+
Given that some comments may lack context, if you're not sure, feel free to mark the <b>unsure</b> option to skip a comment.
|
172 |
+
</li>
|
173 |
+
</ul>
|
174 |
+
|
175 |
+
<CommentTable mode={"train"} model_name={model_name}/>
|
176 |
+
</div>
|
177 |
+
{:else if label_mode == label_modes[1]}
|
178 |
+
<!-- EXISTING MODEL -->
|
179 |
+
<div id="existing-model" class="section">
|
180 |
+
<h5>Edit an Existing Model</h5>
|
181 |
+
{#key personalized_models}
|
182 |
+
<Section
|
183 |
+
section_id="personalized_model"
|
184 |
+
section_title="Select Your Personalized Model"
|
185 |
+
section_opts={personalized_models}
|
186 |
+
bind:value={existing_model_name}
|
187 |
+
width_pct={40}
|
188 |
+
/>
|
189 |
+
{/key}
|
190 |
+
|
191 |
+
<!-- Edit model -->
|
192 |
+
<h6>Comments to label</h6>
|
193 |
+
<ul>
|
194 |
+
<li>
|
195 |
+
Comments with scores <b>0</b> and <b>1</b> will be allowed to <b>remain</b> on the platform.
|
196 |
+
</li>
|
197 |
+
<li>
|
198 |
+
Comments with scores <b>2</b>, <b>3</b>, or <b>4</b> will be <b>deleted</b> from the platform.
|
199 |
+
</li>
|
200 |
+
<li>
|
201 |
+
Given that some comments may lack context, if you're not sure, feel free to mark the <b>unsure</b> option to skip a comment.
|
202 |
+
</li>
|
203 |
+
</ul>
|
204 |
+
{#key existing_model_name}
|
205 |
+
<CommentTable mode={"view"} model_name={existing_model_name}/>
|
206 |
+
{/key}
|
207 |
+
</div>
|
208 |
+
{:else if label_mode == label_modes[2]}
|
209 |
+
<!-- Topic training -->
|
210 |
+
<div class="audit_section">
|
211 |
+
<div class="head_5">Topic model training</div>
|
212 |
+
<p></p>
|
213 |
+
<div class="section_indent">
|
214 |
+
<div>
|
215 |
+
<p>In what topic area would you like to tune your model?</p>
|
216 |
+
<Svelecte
|
217 |
+
options={clusters_for_tuning}
|
218 |
+
labelAsValue={true}
|
219 |
+
bind:value={topic}
|
220 |
+
placeholder="Select topic"
|
221 |
+
on:change={null}
|
222 |
+
style="width: 50%"
|
223 |
+
>
|
224 |
+
</Svelecte>
|
225 |
+
</div>
|
226 |
+
|
227 |
+
<div style="padding-top: 30px">
|
228 |
+
<!-- Labeling -->
|
229 |
+
<h6>Comments to label</h6>
|
230 |
+
<ul>
|
231 |
+
<li>
|
232 |
+
Comments with scores <b>0</b> and <b>1</b> will be allowed to <b>remain</b> on the platform.
|
233 |
+
</li>
|
234 |
+
<li>
|
235 |
+
Comments with scores <b>2</b>, <b>3</b>, or <b>4</b> will be <b>deleted</b> from the platform.
|
236 |
+
</li>
|
237 |
+
<li>
|
238 |
+
Given that some comments may lack context, if you're not sure, feel free to mark the <b>unsure</b> option to skip a comment.
|
239 |
+
</li>
|
240 |
+
</ul>
|
241 |
+
{#key topic}
|
242 |
+
<TopicTraining topic={topic} model_name={model_name} />
|
243 |
+
{/key}
|
244 |
+
</div>
|
245 |
+
|
246 |
+
</div>
|
247 |
+
</div>
|
248 |
+
{:else if label_mode == label_modes[3]}
|
249 |
+
<!-- Group-based model setup -->
|
250 |
+
<div class="head_5">Group model training</div>
|
251 |
+
<p>Please select just <b>one</b> of these five demographic axes (A, B, C, D, or E) to identify with to set up your group-based model:</p>
|
252 |
+
|
253 |
+
<div>
|
254 |
+
<p><b>Demographic axes</b></p>
|
255 |
+
<Svelecte
|
256 |
+
options={options_axis}
|
257 |
+
labelAsValue={true}
|
258 |
+
bind:value={selected_axis}
|
259 |
+
placeholder="Select demographic axis"
|
260 |
+
on:change={null}
|
261 |
+
style="width: 50%"
|
262 |
+
>
|
263 |
+
</Svelecte>
|
264 |
+
</div>
|
265 |
+
|
266 |
+
<div class="spacing_vert_40">
|
267 |
+
<!-- {#if selected_axis != null}
|
268 |
+
<p>For this axis, please select a group that you would like to identify with to set up your group-based model:</p>
|
269 |
+
{/if} -->
|
270 |
+
<div style="{selected_axis == options_axis[0] ? 'display:initial': 'display:none'}" >
|
271 |
+
<p><b>A: Political affiliation</b></p>
|
272 |
+
<Svelecte
|
273 |
+
options={options_pol}
|
274 |
+
labelAsValue={true}
|
275 |
+
bind:value={sel_pol}
|
276 |
+
placeholder="Select political affiliation"
|
277 |
+
on:change={getGroupSize}
|
278 |
+
style="width: 50%"
|
279 |
+
>
|
280 |
+
</Svelecte>
|
281 |
+
</div>
|
282 |
+
<!-- {:else if selected_axis == options_axis[1]} -->
|
283 |
+
<div style="{selected_axis == options_axis[1] ? 'display:initial': 'display:none'}" >
|
284 |
+
<p><b>B: Gender</b></p>
|
285 |
+
<Svelecte
|
286 |
+
options={options_gender}
|
287 |
+
labelAsValue={true}
|
288 |
+
bind:value={sel_gender}
|
289 |
+
placeholder="Select gender"
|
290 |
+
on:change={getGroupSize}
|
291 |
+
style="width: 50%"
|
292 |
+
>
|
293 |
+
</Svelecte>
|
294 |
+
</div>
|
295 |
+
<!-- {:else if selected_axis == options_axis[2]} -->
|
296 |
+
<div style="{selected_axis == options_axis[2] ? 'display:initial': 'display:none'}" >
|
297 |
+
<p><b>C: Race (select all that apply)</b></p>
|
298 |
+
<Svelecte
|
299 |
+
options={options_race}
|
300 |
+
labelAsValue={true}
|
301 |
+
bind:value={sel_race}
|
302 |
+
placeholder="Select race(s)"
|
303 |
+
on:change={getGroupSize}
|
304 |
+
style="width: 50%"
|
305 |
+
multiple=true
|
306 |
+
>
|
307 |
+
</Svelecte>
|
308 |
+
</div>
|
309 |
+
<!-- {:else if selected_axis == options_axis[3]} -->
|
310 |
+
<div style="{selected_axis == options_axis[3] ? 'display:initial': 'display:none'}" >
|
311 |
+
<p><b>D: LGBTQ+ Identity</b></p>
|
312 |
+
<Svelecte
|
313 |
+
options={options_lgbtq}
|
314 |
+
labelAsValue={true}
|
315 |
+
bind:value={sel_lgbtq}
|
316 |
+
placeholder="Select LGBTQ+ identity"
|
317 |
+
on:change={getGroupSize}
|
318 |
+
style="width: 50%"
|
319 |
+
>
|
320 |
+
</Svelecte>
|
321 |
+
</div>
|
322 |
+
<!-- {:else if selected_axis == options_axis[4]} -->
|
323 |
+
<div style="{selected_axis == options_axis[4] ? 'display:initial': 'display:none'}" >
|
324 |
+
<p><b>E: Importance of religion</b></p>
|
325 |
+
<Svelecte
|
326 |
+
options={options_relig}
|
327 |
+
labelAsValue={true}
|
328 |
+
bind:value={sel_relig}
|
329 |
+
placeholder="Select importance of religion"
|
330 |
+
on:change={getGroupSize}
|
331 |
+
style="width: 50%"
|
332 |
+
>
|
333 |
+
</Svelecte>
|
334 |
+
</div>
|
335 |
+
<!-- {/if} -->
|
336 |
+
</div>
|
337 |
+
|
338 |
+
{#if group_size}
|
339 |
+
<div class="spacing_vert_40">
|
340 |
+
<b>Number of labelers with matching traits</b>: {group_size}
|
341 |
+
</div>
|
342 |
+
{/if}
|
343 |
+
|
344 |
+
<div class=spacing_vert_60>
|
345 |
+
<Button
|
346 |
+
on:click={handleGroupModel}
|
347 |
+
variant="outlined"
|
348 |
+
class=""
|
349 |
+
disabled={group_size == null}
|
350 |
+
>
|
351 |
+
<Label>Train group-based model</Label>
|
352 |
+
</Button>
|
353 |
+
</div>
|
354 |
+
|
355 |
+
<!-- Performance -->
|
356 |
+
{#await promise}
|
357 |
+
<div class="app_loading spacing_vert_20">
|
358 |
+
<LinearProgress indeterminate />
|
359 |
+
</div>
|
360 |
+
{:then group_model_res}
|
361 |
+
{#if group_model_res}
|
362 |
+
<div class="spacing_vert_20">
|
363 |
+
<p>Model for your selected group memberships has been successfully tuned.</p>
|
364 |
+
<p>MAE: {group_model_res["mae"]}</p>
|
365 |
+
</div>
|
366 |
+
{/if}
|
367 |
+
{:catch error}
|
368 |
+
<p style="color: red">{error.message}</p>
|
369 |
+
{/await}
|
370 |
+
{/if}
|
371 |
+
</div>
|
372 |
+
|
373 |
+
<style>
|
374 |
+
</style>
|
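
`getLabeling()` reads four values from the `./get_labeling` response. A sketch of the expected payload, inferred only from the lookups in that function (the element shape of `clusters_for_tuning` is assumed from the `["text"]` access and from how `Svelecte` consumes the options):

```
// Inferred from getLabeling(); these are the only fields the component reads.
interface LabelingSetup {
    personalized_models: string[];                // existing model names for this user
    model_name_suggestion: string;                // default value for the new-model name field
    clusters_for_tuning: Array<{ text: string }>; // topic options; the first entry seeds `topic`
}
```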
indie_label_svelte/src/MainPanel.svelte
ADDED
@@ -0,0 +1,79 @@
1 |
+
<script lang="ts">
|
2 |
+
import Labeling from "./Labeling.svelte";
|
3 |
+
import Auditing from "./Auditing.svelte";
|
4 |
+
import AppOld from "./AppOld.svelte";
|
5 |
+
|
6 |
+
import Tab, { Label } from "@smui/tab";
|
7 |
+
import TabBar from "@smui/tab-bar";
|
8 |
+
|
9 |
+
export let model;
|
10 |
+
// export let topic;
|
11 |
+
export let error_type;
|
12 |
+
|
13 |
+
let app_versions = ["old", "new"];
|
14 |
+
let app_version = "new";
|
15 |
+
|
16 |
+
// Handle routing
|
17 |
+
let active = "auditing";
|
18 |
+
let searchParams = new URLSearchParams(window.location.search);
|
19 |
+
let tab = searchParams.get("tab");
|
20 |
+
if (tab == "labeling") {
|
21 |
+
active = "labeling";
|
22 |
+
}
|
23 |
+
|
24 |
+
</script>
|
25 |
+
|
26 |
+
<svelte:head>
|
27 |
+
<title>IndieLabel</title>
|
28 |
+
</svelte:head>
|
29 |
+
|
30 |
+
<div class="auditing_panel">
|
31 |
+
<div class="tab_header">
|
32 |
+
<TabBar tabs={["labeling", "auditing"]} let:tab bind:active>
|
33 |
+
<Tab {tab}>
|
34 |
+
<Label>{tab}</Label>
|
35 |
+
</Tab>
|
36 |
+
</TabBar>
|
37 |
+
</div>
|
38 |
+
|
39 |
+
<div class="panel_contents">
|
40 |
+
<!-- VERSION SELECTION -->
|
41 |
+
<!-- <div>
|
42 |
+
<Section
|
43 |
+
section_id="app_version"
|
44 |
+
section_title="What app version do you want to use?"
|
45 |
+
section_opts={app_versions}
|
46 |
+
width_pct={40}
|
47 |
+
bind:value={app_version}
|
48 |
+
/>
|
49 |
+
</div> -->
|
50 |
+
|
51 |
+
{#if app_version == app_versions[0]}
|
52 |
+
<!-- OLD VERSION -->
|
53 |
+
<AppOld />
|
54 |
+
{:else if app_version == app_versions[1]}
|
55 |
+
<!-- NEW VERSION -->
|
56 |
+
<div>
|
57 |
+
<div id="labeling" hidden={active == "auditing"} >
|
58 |
+
<Labeling/>
|
59 |
+
</div>
|
60 |
+
|
61 |
+
<div id="auditing" hidden={active == "labeling"} >
|
62 |
+
<Auditing bind:personalized_model={model} bind:cur_error_type={error_type} on:change/>
|
63 |
+
</div>
|
64 |
+
</div>
|
65 |
+
{/if}
|
66 |
+
|
67 |
+
<!-- TEMP -->
|
68 |
+
<!-- {#key model}
|
69 |
+
<div>Model: {model}</div>
|
70 |
+
{/key} -->
|
71 |
+
</div>
|
72 |
+
</div>
|
73 |
+
|
74 |
+
<style>
|
75 |
+
.panel_contents {
|
76 |
+
padding: 50px;
|
77 |
+
margin-top: 50px;
|
78 |
+
}
|
79 |
+
</style>
|
indie_label_svelte/src/ModelPerf.svelte
ADDED
@@ -0,0 +1,82 @@
1 |
+
<script lang="ts">
|
2 |
+
import { VegaLite } from "svelte-vega";
|
3 |
+
import type { View } from "svelte-vega";
|
4 |
+
|
5 |
+
import LayoutGrid, { Cell } from "@smui/layout-grid";
|
6 |
+
import Card, { Content } from '@smui/card';
|
7 |
+
|
8 |
+
export let data;
|
9 |
+
|
10 |
+
let perf_plot_spec = data["perf_plot_json"];
|
11 |
+
let perf_plot_data = perf_plot_spec["datasets"][
|
12 |
+
perf_plot_spec["layer"][0]["data"]["name"]
|
13 |
+
];
|
14 |
+
let perf_plot_view: View;
|
15 |
+
|
16 |
+
// let perf_plot2_spec = data["perf_plot2_json"];
|
17 |
+
// let perf_plot2_data = perf_plot2_spec["datasets"][perf_plot2_spec["data"]["name"]];
|
18 |
+
// let perf_plot2_view: View;
|
19 |
+
</script>
|
20 |
+
|
21 |
+
<div>
|
22 |
+
<h6>Your Model Performance</h6>
|
23 |
+
<LayoutGrid>
|
24 |
+
<Cell span={8}>
|
25 |
+
<div class="card-container">
|
26 |
+
<Card variant="outlined" padded>
|
27 |
+
<p class="mdc-typography--button"><b>Interpreting your model performance</b></p>
|
28 |
+
<ul>
|
29 |
+
<li>
|
30 |
+
The <b>Mean Absolute Error (MAE)</b> metric indicates the average absolute difference between your model's rating and your actual rating on a held-out set of comments.
|
31 |
+
</li>
|
32 |
+
<li>
|
33 |
+
You want your model to have a <b>lower</b> MAE (indicating <b>less error</b>).
|
34 |
+
</li>
|
35 |
+
<li>
|
36 |
+
<b>Your current MAE: {data["mae"]}</b>
|
37 |
+
<ul>
|
38 |
+
<li>{@html data["mae_status"]}</li>
|
39 |
+
<!-- <li>
|
40 |
+
This is <b>better</b> (lower) than the average MAE for other users, so your model appears to <b>better capture</b> your views than the typical user model.
|
41 |
+
</li> -->
|
42 |
+
</ul>
|
43 |
+
</li>
|
44 |
+
</ul>
|
45 |
+
</Card>
|
46 |
+
</div>
|
47 |
+
</Cell>
|
48 |
+
</LayoutGrid>
|
49 |
+
<div>
|
50 |
+
<!-- Overall -->
|
51 |
+
<!-- <table>
|
52 |
+
<tbody>
|
53 |
+
<tr>
|
54 |
+
<td>
|
55 |
+
<span class="bold">Mean Absolute Error (MAE)</span><br>
|
56 |
+
|
57 |
+
</td>
|
58 |
+
<td>
|
59 |
+
<span class="bold-large">{data["mae"]}</span>
|
60 |
+
</td>
|
61 |
+
</tr>
|
62 |
+
<tr>
|
63 |
+
<td>
|
64 |
+
<span class="bold">Average rating difference</span><br>
|
65 |
+
This metric indicates the average difference between your model's rating and your actual rating on a held-out set of comments.
|
66 |
+
</td>
|
67 |
+
<td>
|
68 |
+
<span class="bold-large">{data["avg_diff"]}</span>
|
69 |
+
</td>
|
70 |
+
</tr>
|
71 |
+
</tbody>
|
72 |
+
</table> -->
|
73 |
+
|
74 |
+
<!-- Performance visualization -->
|
75 |
+
<div>
|
76 |
+
<VegaLite {perf_plot_data} spec={perf_plot_spec} bind:view={perf_plot_view}/>
|
77 |
+
</div>
|
78 |
+
</div>
|
79 |
+
</div>
|
80 |
+
|
81 |
+
<style>
|
82 |
+
</style>
|
indie_label_svelte/src/OverallResults.svelte
ADDED
@@ -0,0 +1,156 @@
<script lang="ts">
    import { VegaLite } from "svelte-vega";
    import type { View } from "svelte-vega";

    import IconButton from '@smui/icon-button';
    import LayoutGrid, { Cell } from "@smui/layout-grid";
    import Card, { Content } from '@smui/card';

    export let data;
    export let clusters;
    export let personalized_model;
    export let cluster = "";

    let show_step1_info = false;

    // Topic Overview Plot
    let topic_overview_json = data["overall_perf"]["topic_overview_plot_json"];
    let topic_overview_data = topic_overview_json["datasets"][topic_overview_json["layer"][0]["data"]["name"]];
    let topic_overview_spec = topic_overview_json;
    let topic_overview_view: View;

    // // Overall Histogram
    // let overall_hist_json = data["overall_perf"]["overall_hist_json"];
    // let overall_hist_data = overall_hist_json["datasets"][overall_hist_json["data"]["name"]];
    // let overall_hist_spec = overall_hist_json;
    // let overall_hist_view: View;

    // // Class-conditional Histogram
    // let class_cond_plot_json = data["overall_perf"]["class_cond_plot_json"];
    // let class_cond_plot_data = class_cond_plot_json["datasets"][class_cond_plot_json["data"]["name"]];
    // let class_cond_plot_spec = class_cond_plot_json;
    // let class_cond_plot_view: View;

</script>

<div>
    <div>
        <span class="head_6">All topics</span>
        <IconButton
            class="material-icons grey_button"
            size="normal"
            on:click={() => (show_step1_info = !show_step1_info)}
        >
            help_outline
        </IconButton>
    </div>
    {#if show_step1_info}
        <LayoutGrid>
            <Cell span={8}>
                <div class="card-container">
                    <Card variant="outlined" padded>
                        <p class="mdc-typography--button"><b>Interpreting this visualization</b></p>
                        <ul>
                            <li>
                                Each <b>box</b> in this plot represents a set of comments that belong in a given <b>topic area</b>
                            </li>
                            <li>
                                The <b>x-axis</b> represents our prediction of <b>your</b> toxicity rating for each topic (we'll call these "your ratings")
                                <ul>
                                    <li>
                                        The <b>left side</b> (white background) is the <b>Non-toxic</b> side (comments that'll be allowed to remain)
                                    </li>
                                    <li>
                                        The <b>right side</b> (grey background) is the <b>Toxic</b> side (comments that will be deleted)
                                    </li>
                                    <li>
                                        Comment topic area boxes are plotted along the x-axis based on our prediction of your <b>average</b> toxicity rating for comments in that set
                                    </li>
                                </ul>
                            </li>

                            <li>
                                The <b>color</b> of the box indicates the <b>system's rating</b> for the same comment topic; you may want to focus on the <b>red-colored boxes</b> that indicate <b>disagreements</b> between "your ratings" and the system's ratings
                            </li>
                        </ul>
                    </Card>
                </div>
            </Cell>
        </LayoutGrid>
    {/if}
    <div class="row">
        <div class="col s8">
            <VegaLite {topic_overview_data} spec={topic_overview_spec} bind:view={topic_overview_view}/>
        </div>
    </div>

    <!-- Old visualizations -->
    <!-- <div style="margin-top: 500px">
        <h6>Overall Performance</h6>
        <div class="row">
            <div class="col s12">
                <div id="overall_perf">
                    <table>
                        <tbody>
                            <tr class="custom-blue">
                                <td class="bold"
                                    >System {data[
                                        "overall_perf"
                                    ]["metric"]} with YOUR labels</td
                                >
                                <td>
                                    <span class="bold-large"
                                        >{data[
                                            "overall_perf"
                                        ]["user_metric"]}</span
                                    >
                                    (Percentile: {data[
                                        "overall_perf"
                                    ]["user_percentile"]})
                                </td>
                            </tr>
                            <tr>
                                <td class="bold"
                                    >System {data[
                                        "overall_perf"
                                    ]["metric"]} with OTHER USERS' labels</td
                                >
                                <td>
                                    <span class="bold-large"
                                        >{data[
                                            "overall_perf"
                                        ]["other_metric"]}</span
                                    >
                                    (95% CI: [{data[
                                        "overall_perf"
                                    ]["other_ci_low"]}, {data[
                                        "overall_perf"
                                    ]["other_ci_high"]}])
                                </td>
                            </tr>
                        </tbody>
                    </table>
                </div>
            </div>
        </div>
        <div class="row">
            <div class="col s8">
                <VegaLite {overall_hist_data} spec={overall_hist_spec} bind:view={overall_hist_view}/>
            </div>
        </div>

        <h6>Performance Breakdown</h6>
        <div class="row">
            <div class="col s12">
                <div class="row">
                    <div class="col s12">
                        <VegaLite {class_cond_plot_data} spec={class_cond_plot_spec} bind:view={class_cond_plot_view} />
                    </div>
                </div>
            </div>
        </div>
    </div> -->

</div>
<style>
</style>
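For orientation, a hypothetical sketch (not part of this commit) of how a parent view might mount `OverallResults`; the prop names come from the `export let` declarations above, while `audit_results`, `all_clusters`, `model_chosen`, and `cur_topic` are assumed to exist in the parent:

```svelte
<OverallResults
    data={audit_results}
    clusters={all_clusters}
    personalized_model={model_chosen}
    cluster={cur_topic}
/>
```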
indie_label_svelte/src/Results.svelte
ADDED
@@ -0,0 +1,206 @@
<script lang="ts">
    import { onMount } from "svelte";
    import ClusterResults from "./ClusterResults.svelte";

    import Button, { Label } from "@smui/button";
    import LinearProgress from "@smui/linear-progress";
    import Checkbox from '@smui/checkbox';
    import DataTable, {
        Head,
        Body,
        Row,
        Cell,
        Label,
        SortValue,
    } from "@smui/data-table";
    import FormField from "@smui/form-field";

    let cur_examples = [];
    let promise = Promise.resolve(null);

    let scaffold_methods = ["personal", "personal_group", "prompts"];

    let all_users = [];
    async function getUsers() {
        const response = await fetch("./get_users");
        const text = await response.text();
        const data = JSON.parse(text);
        all_users = data["users"];
        promise = getResults();
    }

    onMount(async () => {
        getUsers()
    });

    async function getResults() {
        let req_params = {
            users: all_users
        };
        let params = new URLSearchParams(req_params).toString();
        const response = await fetch("./get_results?" + params);
        const text = await response.text();
        const data = JSON.parse(text);

        let results = data["results"];
        return results;
    }

    function get_complete_ratio(reports) {
        let total = reports.length;
        let complete = reports.filter(item => item.complete_status).length;
        return "" + complete + "/" + total + " complete";
    }

    function get_complete_count(reports) {
        return reports.filter(item => item.complete_status).length;
    }

    function get_summary(reports) {
        let summary = "";
        let total_audits = 0
        for (const scaffold_method of scaffold_methods) {
            if (reports[scaffold_method]) {
                let cur_reports = reports[scaffold_method];
                let cur_ratio = get_complete_ratio(cur_reports);
                let cur_result = "<li><b>" + scaffold_method + "</b>: " + cur_ratio + "</li>";
                summary += cur_result;
                let cur_complete = get_complete_count(cur_reports);
                total_audits += cur_complete;
            }
        }

        let top_summary = "<li><b>Total audits</b>: " + total_audits + "</li>";
        summary = "<ul>" + top_summary + summary + "</ul>";
        return summary;
    }

    function get_url(user, scaffold_method) {
        return "http://localhost:5001/?user=" + user + "&scaffold=" + scaffold_method;
    }
</script>

<svelte:head>
    <title>Results</title>
</svelte:head>

<div class="panel">
    <div class="panel_contents">
        <div>
            <h3>Results</h3>
        </div>

        <div style="padding-top:50px">
            {#await promise}
                <div class="app_loading">
                    <LinearProgress indeterminate />
                </div>
            {:then results}
                {#if results}
                    {#each results as user_report}
                        <div class="head_3">{user_report["user"]}</div>
                        <div class="section_indent">
                            <div class="head_5">Summary</div>
                            <div>{@html get_summary(user_report)}</div>
                            <ul>
                                <li>Labeling pages
                                    <ul>
                                        <li>
                                            <a href="http://localhost:5001/?user={user_report["user"]}&tab=labeling&label_mode=3" target="_blank">Group-based model</a>
                                        </li>
                                        <li>
                                            <a href="http://localhost:5001/?user={user_report["user"]}&tab=labeling&label_mode=0" target="_blank">Personalized model</a>
                                        </li>
                                    </ul>
                                </li>
                                <li>Auditing pages
                                    <ul>
                                        <li>
                                            <a href="http://localhost:5001/?user={user_report["user"]}&scaffold=personal_group" target="_blank">Group-based audit - personal scaffold</a>
                                        </li>
                                        <li>
                                            <a href="http://localhost:5001/?user={user_report["user"]}&scaffold=personal" target="_blank">Individual audit - personal scaffold</a>
                                        </li>
                                        <li>
                                            <a href="http://localhost:5001/?user={user_report["user"]}&scaffold=prompts" target="_blank">Individual audit - prompt scaffold</a>
                                        </li>
                                    </ul>
                                </li>
                            </ul>
                        </div>
                        {#each scaffold_methods as scaffold_method}
                            {#if user_report[scaffold_method]}
                                <div class="spacing_vert_60 section_indent">
                                    <div class="head_5">
                                        {scaffold_method} ({get_complete_ratio(user_report[scaffold_method])})
                                        [<a href={get_url(user_report["user"], scaffold_method)} target="_blank">link</a>]
                                    </div>
                                    {#each user_report[scaffold_method] as report}
                                        <div class="spacing_vert_40 section_indent">
                                            <div class="head_6_non_cap">
                                                {report["title"]}
                                            </div>

                                            <div class="spacing_vert_20">
                                                <div class="">
                                                    <b>Error type</b>
                                                </div>
                                                {report["error_type"]}
                                            </div>

                                            <div class="spacing_vert_20">
                                                <div class="">
                                                    <b>Evidence</b>
                                                </div>
                                                {#if report["evidence"].length > 0}
                                                    <ClusterResults
                                                        cluster={null}
                                                        model={null}
                                                        data={{"cluster_comments": report["evidence"]}}
                                                        show_vis={false}
                                                        show_checkboxes={false}
                                                        table_width_pct={100}
                                                        rowsPerPage={10}
                                                        table_id={"panel"}
                                                    />
                                                {:else}
                                                    <p class="grey_text">
                                                        No examples added
                                                    </p>
                                                {/if}
                                            </div>

                                            <div class="spacing_vert_20">
                                                <div class="">
                                                    <b>Summary/Suggestions</b>
                                                </div>
                                                {report["text_entry"]}
                                            </div>

                                            <div class="spacing_vert_20">
                                                <b>Completed</b>
                                                <FormField>
                                                    <Checkbox checked={report["complete_status"]} disabled/>
                                                </FormField>
                                            </div>

                                        </div>
                                    {/each}
                                </div>
                            {/if}
                        {/each}
                    {/each}
                {/if}
            {:catch error}
                <p style="color: red">{error.message}</p>
            {/await}
        </div>
    </div>
</div>

<style>
    .panel {
        width: 80%;
        padding: 50px;
    }
</style>
indie_label_svelte/src/Section.svelte
ADDED
@@ -0,0 +1,36 @@
<script>
    import { createEventDispatcher } from 'svelte';

    import Select, { Option } from "@smui/select";
    export let section_id;
    export let section_title;
    export let section_opts;
    export let value;
    export let width_pct;

    const dispatch = createEventDispatcher();

    function sendChange() {
        dispatch("select_changed", {});
    }
</script>

<div>
    <!-- <label for={section_id}>{section_title}</label> -->
    <Select
        label={section_title}
        bind:value id={section_id}
        style="width: {width_pct}%"
        on:change
    >
        {#each section_opts as opt, i}
            <Option value={opt}>{opt}</Option>
        {/each}
    </Select>
</div>

<style>
    div {
        padding: 20px 10px;
    }
</style>
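For orientation, a hypothetical usage sketch (not part of this commit) of the `Section` dropdown; the prop names match the exports above, while `sel_metric` and `handleMetricChange` are assumed to be defined by the parent component:

```svelte
<Section
    section_id="perf_metric"
    section_title="Performance metric"
    section_opts={["Average rating difference", "Mean Absolute Error (MAE)"]}
    bind:value={sel_metric}
    width_pct={80}
    on:change={handleMetricChange}
/>
```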
indie_label_svelte/src/SelectUserDialog.svelte
ADDED
@@ -0,0 +1,66 @@
<script lang="ts">
    import Dialog, { Title, Content, Actions } from "@smui/dialog";
    import Button, { Label } from "@smui/button";
    import Textfield from "@smui/textfield";
    import Select, { Option } from "@smui/select";
    import { user } from "./stores/cur_user_store.js";
    import { users } from "./stores/all_users_store.js";

    export let open;
    export let cur_user;
    let cur_user_tf = cur_user;
    let cur_user_sel = cur_user;

    let all_users;
    users.subscribe((value) => {
        all_users = value;
    });

    function updateUserTextField() {
        user.update((value) => cur_user_tf);
        if (!all_users.includes(user)) {
            all_users = all_users.concat(cur_user_tf);
            users.update(all_users);
        }
        open = false;
    }

    function updateUserSel() {
        user.update((value) => cur_user_sel);
        open = false;
    }
</script>

<div>
    <Dialog
        bind:open
        aria-labelledby="simple-title"
        aria-describedby="simple-content"
    >
        <!-- Title cannot contain leading whitespace due to mdc-typography-baseline-top() -->
        <Title id="simple-title">Select Current User</Title>
        <Content id="simple-content">
            <Textfield bind:value={cur_user_tf} label="Enter user's name" />

            <Select bind:value={cur_user_sel} label="Select Menu">
                {#each all_users as u}
                    <Option value={u}>{u}</Option>
                {/each}
            </Select>
        </Content>
        <Actions>
            <Button on:click={updateUserTextField}>
                <Label>Update from TextField</Label>
            </Button>
            <Button on:click={updateUserSel}>
                <Label>Update from Select</Label>
            </Button>
        </Actions>
    </Dialog>
</div>

<style>
    :global(.mdc-dialog__surface) {
        height: 300px;
    }
</style>
indie_label_svelte/src/StudyLinks.svelte
ADDED
@@ -0,0 +1,59 @@
<script lang="ts">
    import { user } from "./stores/cur_user_store.js";

    let cur_user;
    user.subscribe((value) => {
        cur_user = value;
    });

</script>

<svelte:head>
    <title>Study Links</title>
</svelte:head>

<div class="panel">
    <div class="panel_contents">
        <div>
            <h3>Study Links</h3>
        </div>

        <div>
            <!-- <div class="head_5">{cur_user}</div> -->
            <div class="section_indent">
                <ul>
                    <li>Labeling pages
                        <ul>
                            <li>
                                <a href="http://localhost:5001/?user={cur_user}&tab=labeling&label_mode=3" target="_blank">Group-based model</a>
                            </li>
                            <li>
                                <a href="http://localhost:5001/?user={cur_user}&tab=labeling&label_mode=0" target="_blank">Personalized model</a>
                            </li>
                        </ul>
                    </li>
                    <li>Auditing pages
                        <ul>
                            <li>
                                <a href="http://localhost:5001/?user={cur_user}&scaffold=personal_group" target="_blank">Group-based audit - personal scaffold</a>
                            </li>
                            <li>
                                <a href="http://localhost:5001/?user={cur_user}&scaffold=personal" target="_blank">Individual audit - personal scaffold</a>
                            </li>
                            <li>
                                <a href="http://localhost:5001/?user={cur_user}&scaffold=prompts" target="_blank">Individual audit - prompt scaffold</a>
                            </li>
                        </ul>
                    </li>
                </ul>
            </div>
        </div>
    </div>
</div>

<style>
    .panel {
        width: 80%;
        padding: 50px;
    }
</style>
indie_label_svelte/src/TopicTraining.svelte
ADDED
@@ -0,0 +1,236 @@
<script lang="ts">
    import { onMount } from "svelte";
    import ModelPerf from "./ModelPerf.svelte";
    import Button, { Label } from "@smui/button";
    import DataTable, { Head, Body, Row, Cell } from "@smui/data-table";
    import LinearProgress from '@smui/linear-progress';
    import { user } from './stores/cur_user_store.js';
    import { model_chosen } from './stores/cur_model_store.js';

    export let topic;
    export let model_name = null;

    let to_label = {};
    let promise = Promise.resolve(null);

    // Get current user
    let cur_user;
    user.subscribe(value => {
        cur_user = value;
    });

    // Get current model
    if (model_name == null) {
        model_chosen.subscribe(value => {
            model_name = value;
        });
    }

    function getCommentsToLabel() {
        let req_params = {
            topic: topic,
        };
        let params = new URLSearchParams(req_params).toString();
        fetch("./get_comments_to_label_topic?" + params)
            .then((r) => r.text())
            .then(function (r_orig) {
                let r = JSON.parse(r_orig);
                // Append comment rows to to_label object
                r["to_label"].forEach((key) => (to_label[key] = null));
            });
    }

    onMount(async () => {
        getCommentsToLabel();
    });

    function handleLoadCommentsButton() {
        getCommentsToLabel();
    }

    function handleTrainModelButton() {
        promise = trainModel();
    }

    function getRatings() {
        // Get rating for each comment from HTML elems
        let ratings = {};
        Object.entries(to_label).forEach(function ([comment, orig_rating], i) {
            var radio_btns = document.getElementsByName(
                "comment_" + i.toString()
            );
            let length = radio_btns.length;
            for (var i = 0; i < length; i++) {
                if (radio_btns[i].checked) {
                    ratings[comment] = radio_btns[i].value;
                    break;
                }
            }
        });
        return ratings;
    }

    async function trainModel() {
        let ratings = getRatings();
        ratings = JSON.stringify(ratings);

        let req_params = {
            model_name: model_name,
            ratings: ratings,
            user: cur_user,
            topic: topic,
        };

        console.log("topic training model name", model_name);
        let params = new URLSearchParams(req_params).toString();
        const response = await fetch("./get_personalized_model_topic?" + params); // TODO
        const text = await response.text();
        const data = JSON.parse(text);
        // to_label = data["ratings_prev"];
        model_name = data["new_model_name"];
        model_chosen.update((value) => model_name);

        console.log("topicTraining", data);
        return data;
    }
</script>

<div>
    {#if topic}
        <div class="label_table_expandable spacing_vert">
            <DataTable
                table$aria-label="Comments to label"
                style="width: 100%;"
                stickyHeader
            >
                <Head>
                    <Row>
                        <Cell style="width: 50%">Comment</Cell>
                        <Cell style="background-color: #c3ecdb">
                            0: <br>Not-at-all toxic<br>(Keep)<br>
                        </Cell>
                        <Cell style="background-color: white">
                            1: <br>Slightly toxic<br>(Keep)<br>
                        </Cell>
                        <Cell style="background-color: #ffa894">
                            2: <br>Moderately toxic<br>(Delete)<br>
                        </Cell>
                        <Cell style="background-color: #ff7a5c">
                            3: <br>Very toxic<br>(Delete)<br>
                        </Cell>
                        <Cell style="background-color: #d62728">
                            4: <br>Extremely toxic<br>(Delete)<br>
                        </Cell>
                        <Cell style="background-color: #808080">
                            <br>Unsure<br>(Skip)<br>
                        </Cell>
                    </Row>
                </Head>
                <Body>
                    {#if to_label}
                        {#each Object.keys(to_label) as comment, i}
                            <Row>
                                <Cell>
                                    <div class="spacing_vert">{comment}</div>
                                </Cell>
                                <Cell>
                                    <label>
                                        <input
                                            name="comment_{i}"
                                            type="radio"
                                            value="0"
                                            checked={to_label[comment] == "0"}
                                        />
                                        <span />
                                    </label>
                                </Cell>
                                <Cell>
                                    <label>
                                        <input
                                            name="comment_{i}"
                                            type="radio"
                                            value="1"
                                            checked={to_label[comment] == "1"}
                                        />
                                        <span />
                                    </label>
                                </Cell>
                                <Cell>
                                    <label>
                                        <input
                                            name="comment_{i}"
                                            type="radio"
                                            value="2"
                                            checked={to_label[comment] == "2"}
                                        />
                                        <span />
                                    </label>
                                </Cell>
                                <Cell>
                                    <label>
                                        <input
                                            name="comment_{i}"
                                            type="radio"
                                            value="3"
                                            checked={to_label[comment] == "3"}
                                        />
                                        <span />
                                    </label>
                                </Cell>
                                <Cell>
                                    <label>
                                        <input
                                            name="comment_{i}"
                                            type="radio"
                                            value="4"
                                            checked={to_label[comment] == "4"}
                                        />
                                        <span />
                                    </label>
                                </Cell>
                                <Cell>
                                    <label>
                                        <input
                                            name="comment_{i}"
                                            type="radio"
                                            value="-1"
                                            checked={to_label[comment] == "-1"}
                                        />
                                        <span />
                                    </label>
                                </Cell>
                            </Row>
                        {/each}
                    {/if}
                </Body>
            </DataTable>
        </div>

        <div class="">
            <Button on:click={handleTrainModelButton} variant="outlined">
                <Label>Tune Model</Label>
            </Button>
            <Button on:click={handleLoadCommentsButton} variant="outlined">
                <Label>Fetch More Comments To Label</Label>
            </Button>
        </div>

        <!-- Performance -->
        {#await promise}
            <div class="app_loading spacing_vert_20">
                <LinearProgress indeterminate />
            </div>
        {:then perf_results}
            {#if perf_results}
                <div class="spacing_vert_20">
                    Model for the topic {topic} has been successfully tuned. You can now proceed to explore this topic.
                </div>
            {/if}
        {:catch error}
            <p style="color: red">{error.message}</p>
        {/await}
    {/if}
</div>

<style>
</style>
indie_label_svelte/src/main.ts
ADDED
@@ -0,0 +1,7 @@
import App from "./App.svelte";

const app = new App({
    target: document.body,
});

export default app;
indie_label_svelte/src/stores/all_users_store.js
ADDED
@@ -0,0 +1,6 @@
import { writable } from 'svelte/store';

// Fallback if request doesn't work
let all_users = ["DemoUser"];

export const users = writable(all_users);
indie_label_svelte/src/stores/cur_model_store.js
ADDED
@@ -0,0 +1,3 @@
import { writable } from 'svelte/store';

export const model_chosen = writable("");
indie_label_svelte/src/stores/cur_topic_store.js
ADDED
@@ -0,0 +1,3 @@
import { writable } from 'svelte/store';

export const topic_chosen = writable("0_shes_woman_lady_face");
indie_label_svelte/src/stores/cur_user_store.js
ADDED
@@ -0,0 +1,3 @@
import { writable } from 'svelte/store';

export const user = writable("DemoUser");
indie_label_svelte/src/stores/error_type_store.js
ADDED
@@ -0,0 +1,3 @@
import { writable } from 'svelte/store';

export const error_type = writable("Both");
indie_label_svelte/src/stores/new_evidence_store.js
ADDED
@@ -0,0 +1,3 @@
import { writable } from 'svelte/store';

export const new_evidence = writable([]);
indie_label_svelte/src/stores/open_evidence_store.js
ADDED
@@ -0,0 +1,3 @@
import { writable } from 'svelte/store';

export const open_evidence = writable([]);
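For orientation, a minimal sketch (not part of this commit) of the subscribe/update pattern the components above use with these writable stores:

```js
import { user } from "./stores/cur_user_store.js";

// Read: keep a local variable in sync with the store
let cur_user;
const unsubscribe = user.subscribe((value) => {
    cur_user = value;
});

// Write: replace the current value
user.update((value) => "NewUser");

// Clean up when the component is destroyed
unsubscribe();
```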
indie_label_svelte/tsconfig.json
ADDED
@@ -0,0 +1,5 @@
{
    "extends": "@tsconfig/svelte/tsconfig.json",
    "include": ["src/**/*"],
    "exclude": ["node_modules/*", "__sapper__/*", "public/*"]
}
requirements.txt
ADDED
@@ -0,0 +1,15 @@
altair==4.2.0
altair_saver==0.5.0
altair_transform==0.2.0
bertopic==0.11.0
Flask==2.2.2
matplotlib==3.5.3
mne==1.1.1
numpy==1.22.4
pandas==1.4.3
scikit_learn==1.1.2
scikit_surprise==1.1.1
scipy==1.9.0
sentence_transformers==2.2.2
surprise==0.1
torch==1.12.1
server.py
ADDED
@@ -0,0 +1,797 @@
1 |
+
from flask import Flask, send_from_directory
|
2 |
+
from flask import request
|
3 |
+
|
4 |
+
import random
|
5 |
+
import json
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
import matplotlib.pyplot as plt
|
9 |
+
import pandas as pd
|
10 |
+
import pickle
|
11 |
+
import os
|
12 |
+
|
13 |
+
from sklearn.metrics import mean_absolute_error
|
14 |
+
from sklearn.metrics import mean_squared_error
|
15 |
+
from sklearn.metrics import confusion_matrix
|
16 |
+
import math
|
17 |
+
import altair as alt
|
18 |
+
import matplotlib.pyplot as plt
|
19 |
+
import time
|
20 |
+
|
21 |
+
import audit_utils as utils
|
22 |
+
|
23 |
+
app = Flask(__name__)
|
24 |
+
|
25 |
+
# Path for our main Svelte page
|
26 |
+
@app.route("/")
|
27 |
+
def base():
|
28 |
+
return send_from_directory('indie_label_svelte/public', 'index.html')
|
29 |
+
|
30 |
+
# Path for all the static files (compiled JS/CSS, etc.)
|
31 |
+
@app.route("/<path:path>")
|
32 |
+
def home(path):
|
33 |
+
return send_from_directory('indie_label_svelte/public', path)
|
34 |
+
|
35 |
+
|
36 |
+
########################################
|
37 |
+
# ROUTE: /AUDIT_SETTINGS
|
38 |
+
comments_grouped_full_topic_cat = pd.read_pickle("data/comments_grouped_full_topic_cat2_persp.pkl")
|
39 |
+
|
40 |
+
@app.route("/audit_settings")
|
41 |
+
def audit_settings():
|
42 |
+
# Fetch page content
|
43 |
+
user = request.args.get("user")
|
44 |
+
scaffold_method = request.args.get("scaffold_method")
|
45 |
+
|
46 |
+
user_models = utils.get_all_model_names(user)
|
47 |
+
grp_models = [m for m in user_models if m.startswith(f"model_{user}_group_")]
|
48 |
+
|
49 |
+
clusters = utils.get_unique_topics()
|
50 |
+
if len(user_models) > 2 and scaffold_method != "tutorial" and user != "DemoUser":
|
51 |
+
# Highlight topics that have been tuned
|
52 |
+
tuned_clusters = [m.lstrip(f"model_{user}_") for m in user_models if (m != f"model_{user}" and not m.startswith(f"model_{user}_group_"))]
|
53 |
+
other_clusters = [c for c in clusters if c not in tuned_clusters]
|
54 |
+
tuned_options = {
|
55 |
+
"label": "Topics with tuned models",
|
56 |
+
"options": [{"value": i, "text": cluster} for i, cluster in enumerate(tuned_clusters)],
|
57 |
+
}
|
58 |
+
other_options = {
|
59 |
+
"label": "All other topics",
|
60 |
+
"options": [{"value": i, "text": cluster} for i, cluster in enumerate(other_clusters)],
|
61 |
+
}
|
62 |
+
clusters_options = [tuned_options, other_options]
|
63 |
+
else:
|
64 |
+
clusters_options = [{
|
65 |
+
"label": "All auto-generated topics",
|
66 |
+
"options": [{"value": i, "text": cluster} for i, cluster in enumerate(clusters)],
|
67 |
+
},]
|
68 |
+
|
69 |
+
if scaffold_method == "personal_cluster":
|
70 |
+
cluster_model = user_models[0]
|
71 |
+
personal_cluster_file = f"./data/personal_cluster_dfs/{cluster_model}.pkl"
|
72 |
+
if os.path.isfile(personal_cluster_file) and cluster_model != "":
|
73 |
+
print("audit_settings", personal_cluster_file, cluster_model)
|
74 |
+
topics_under_top, topics_over_top = utils.get_personal_clusters(cluster_model)
|
75 |
+
pers_cluster = topics_under_top + topics_over_top
|
76 |
+
pers_cluster_options = {
|
77 |
+
"label": "Personalized clusters",
|
78 |
+
"options": [{"value": i, "text": cluster} for i, cluster in enumerate(pers_cluster)],
|
79 |
+
}
|
80 |
+
clusters_options.insert(0, pers_cluster_options)
|
81 |
+
|
82 |
+
clusters_for_tuning = utils.get_large_clusters(min_n=150)
|
83 |
+
clusters_for_tuning_options = [{"value": i, "text": cluster} for i, cluster in enumerate(clusters_for_tuning)] # Format for Svelecte UI element
|
84 |
+
|
85 |
+
context = {
|
86 |
+
"personalized_models": user_models,
|
87 |
+
"personalized_model_grp": grp_models,
|
88 |
+
"perf_metrics": ["Average rating difference", "Mean Absolute Error (MAE)", "Root Mean Squared Error (RMSE)", "Mean Squared Error (MSE)"],
|
89 |
+
"breakdown_categories": ['Topic', 'Toxicity Category', 'Toxicity Severity'],
|
90 |
+
"clusters": clusters_options,
|
91 |
+
"clusters_for_tuning": clusters_for_tuning_options,
|
92 |
+
}
|
93 |
+
return json.dumps(context)
|
94 |
+
|
95 |
+
########################################
|
96 |
+
# ROUTE: /GET_USERS
|
97 |
+
@app.route("/get_users")
|
98 |
+
def get_users():
|
99 |
+
# Fetch page content
|
100 |
+
with open(f"./data/users_to_models.pkl", "rb") as f:
|
101 |
+
users_to_models = pickle.load(f)
|
102 |
+
users = list(users_to_models.keys())
|
103 |
+
context = {
|
104 |
+
"users": users,
|
105 |
+
}
|
106 |
+
return json.dumps(context)
|
107 |
+
|
108 |
+
########################################
|
109 |
+
# ROUTE: /GET_AUDIT
|
110 |
+
@app.route("/get_audit")
|
111 |
+
def get_audit():
|
112 |
+
pers_model = request.args.get("pers_model")
|
113 |
+
perf_metric = request.args.get("perf_metric")
|
114 |
+
breakdown_axis = request.args.get("breakdown_axis")
|
115 |
+
breakdown_sort = request.args.get("breakdown_sort")
|
116 |
+
n_topics = int(request.args.get("n_topics"))
|
117 |
+
error_type = request.args.get("error_type")
|
118 |
+
cur_user = request.args.get("cur_user")
|
119 |
+
topic_vis_method = request.args.get("topic_vis_method")
|
120 |
+
if topic_vis_method == "null":
|
121 |
+
topic_vis_method = "median"
|
122 |
+
|
123 |
+
if breakdown_sort == "difference":
|
124 |
+
sort_class_plot = True
|
125 |
+
elif breakdown_sort == "default":
|
126 |
+
sort_class_plot = False
|
127 |
+
else:
|
128 |
+
raise Exception("Invalid breakdown_sort value")
|
129 |
+
|
130 |
+
overall_perf = utils.show_overall_perf(
|
131 |
+
variant=pers_model,
|
132 |
+
error_type=error_type,
|
133 |
+
cur_user=cur_user,
|
134 |
+
breakdown_axis=breakdown_axis,
|
135 |
+
topic_vis_method=topic_vis_method,
|
136 |
+
)
|
137 |
+
|
138 |
+
results = {
|
139 |
+
"overall_perf": overall_perf,
|
140 |
+
}
|
141 |
+
return json.dumps(results)
|
142 |
+
|
143 |
+
########################################
|
144 |
+
# ROUTE: /GET_CLUSTER_RESULTS
|
145 |
+
@app.route("/get_cluster_results")
|
146 |
+
def get_cluster_results():
|
147 |
+
pers_model = request.args.get("pers_model")
|
148 |
+
n_examples = int(request.args.get("n_examples"))
|
149 |
+
cluster = request.args.get("cluster")
|
150 |
+
example_sort = request.args.get("example_sort")
|
151 |
+
comparison_group = request.args.get("comparison_group")
|
152 |
+
topic_df_ids = request.args.getlist("topic_df_ids")
|
153 |
+
topic_df_ids = [int(val) for val in topic_df_ids[0].split(",") if val != ""]
|
154 |
+
search_type = request.args.get("search_type")
|
155 |
+
keyword = request.args.get("keyword")
|
156 |
+
n_neighbors = request.args.get("n_neighbors")
|
157 |
+
if n_neighbors != "null":
|
158 |
+
n_neighbors = int(n_neighbors)
|
159 |
+
neighbor_threshold = 0.6
|
160 |
+
error_type = request.args.get("error_type")
|
161 |
+
use_model = request.args.get("use_model") == "true"
|
162 |
+
scaffold_method = request.args.get("scaffold_method")
|
163 |
+
|
164 |
+
|
165 |
+
# If user has a tuned model for this cluster, use that
|
166 |
+
cluster_model_file = f"./data/trained_models/{pers_model}_{cluster}.pkl"
|
167 |
+
if os.path.isfile(cluster_model_file):
|
168 |
+
pers_model = f"{pers_model}_{cluster}"
|
169 |
+
|
170 |
+
print(f"get_cluster_results using model {pers_model}")
|
171 |
+
|
172 |
+
other_ids = []
|
173 |
+
perf_metric = "avg_diff"
|
174 |
+
sort_ascending = True if example_sort == "ascending" else False
|
175 |
+
|
176 |
+
topic_df = None
|
177 |
+
|
178 |
+
personal_cluster_file = f"./data/personal_cluster_dfs/{pers_model}.pkl"
|
179 |
+
if (scaffold_method == "personal_cluster") and (os.path.isfile(personal_cluster_file)):
|
180 |
+
# Handle personal clusters
|
181 |
+
with open(personal_cluster_file, "rb") as f:
|
182 |
+
topic_df = pickle.load(f)
|
183 |
+
topic_df = topic_df[(topic_df["topic"] == cluster)]
|
184 |
+
else:
|
185 |
+
# Regular handling
|
186 |
+
with open(f"data/preds_dfs/{pers_model}.pkl", "rb") as f:
|
187 |
+
topic_df = pickle.load(f)
|
188 |
+
if search_type == "cluster":
|
189 |
+
# Display examples with comment, your pred, and other users' pred
|
190 |
+
topic_df = topic_df[(topic_df["topic"] == cluster) | (topic_df["item_id"].isin(topic_df_ids))]
|
191 |
+
|
192 |
+
elif search_type == "neighbors":
|
193 |
+
neighbor_ids = utils.get_match(topic_df_ids, K=n_neighbors, threshold=neighbor_threshold, debug=False)
|
194 |
+
topic_df = topic_df[(topic_df["item_id"].isin(neighbor_ids)) | (topic_df["item_id"].isin(topic_df_ids))]
|
195 |
+
elif search_type == "keyword":
|
196 |
+
topic_df = topic_df[(topic_df["comment"].str.contains(keyword, case=False, regex=False)) | (topic_df["item_id"].isin(topic_df_ids))]
|
197 |
+
|
198 |
+
topic_df = topic_df.drop_duplicates()
|
199 |
+
print("len topic_df", len(topic_df))
|
200 |
+
|
201 |
+
# Handle empty results
|
202 |
+
if len(topic_df) == 0:
|
203 |
+
results = {
|
204 |
+
"user_perf_rounded": None,
|
205 |
+
"user_direction": None,
|
206 |
+
"other_perf_rounded": None,
|
207 |
+
"other_direction": None,
|
208 |
+
"n_other_users": None,
|
209 |
+
"cluster_examples": None,
|
210 |
+
"odds_ratio": None,
|
211 |
+
"odds_ratio_explanation": None,
|
212 |
+
"topic_df_ids": [],
|
213 |
+
"cluster_overview_plot_json": None,
|
214 |
+
"cluster_comments": None,
|
215 |
+
}
|
216 |
+
return results
|
217 |
+
|
218 |
+
topic_df_ids = topic_df["item_id"].unique().tolist()
|
219 |
+
|
220 |
+
if (scaffold_method == "personal_cluster") and (os.path.isfile(personal_cluster_file)):
|
221 |
+
cluster_overview_plot_json, sampled_df = utils.plot_overall_vis_cluster(topic_df, error_type=error_type, n_comments=500)
|
222 |
+
else:
|
223 |
+
# Regular
|
224 |
+
cluster_overview_plot_json, sampled_df = utils.get_cluster_overview_plot(topic_df, error_type=error_type, use_model=use_model)
|
225 |
+
|
226 |
+
cluster_comments = utils.get_cluster_comments(sampled_df,error_type=error_type, num_examples=n_examples, use_model=use_model) # New version of cluster comment table
|
227 |
+
|
228 |
+
results = {
|
229 |
+
"topic_df_ids": topic_df_ids,
|
230 |
+
"cluster_overview_plot_json": json.loads(cluster_overview_plot_json),
|
231 |
+
"cluster_comments": cluster_comments,
|
232 |
+
}
|
233 |
+
return json.dumps(results)
|
234 |
+
|
235 |
+
########################################
|
236 |
+
# ROUTE: /GET_GROUP_SIZE
|
237 |
+
@app.route("/get_group_size")
|
238 |
+
def get_group_size():
|
239 |
+
# Fetch info for initial labeling component
|
240 |
+
sel_gender = request.args.get("sel_gender")
|
241 |
+
sel_pol = request.args.get("sel_pol")
|
242 |
+
sel_relig = request.args.get("sel_relig")
|
243 |
+
sel_race = request.args.get("sel_race")
|
244 |
+
sel_lgbtq = request.args.get("sel_lgbtq")
|
245 |
+
if sel_race != "":
|
246 |
+
sel_race = sel_race.split(",")
|
247 |
+
|
248 |
+
_, group_size = utils.get_workers_in_group(sel_gender, sel_race, sel_relig, sel_pol, sel_lgbtq)
|
249 |
+
|
250 |
+
context = {
|
251 |
+
"group_size": group_size,
|
252 |
+
}
|
253 |
+
return json.dumps(context)
|
254 |
+
|
255 |
+
########################################
|
256 |
+
# ROUTE: /GET_GROUP_MODEL
|
257 |
+
@app.route("/get_group_model")
|
258 |
+
def get_group_model():
|
259 |
+
# Fetch info for initial labeling component
|
260 |
+
model_name = request.args.get("model_name")
|
261 |
+
user = request.args.get("user")
|
262 |
+
sel_gender = request.args.get("sel_gender")
|
263 |
+
sel_pol = request.args.get("sel_pol")
|
264 |
+
sel_relig = request.args.get("sel_relig")
|
265 |
+
sel_lgbtq = request.args.get("sel_lgbtq")
|
266 |
+
sel_race_orig = request.args.get("sel_race")
|
267 |
+
if sel_race_orig != "":
|
268 |
+
sel_race = sel_race_orig.split(",")
|
269 |
+
else:
|
270 |
+
sel_race = ""
|
271 |
+
start = time.time()
|
272 |
+
|
273 |
+
grp_df, group_size = utils.get_workers_in_group(sel_gender, sel_race, sel_relig, sel_pol, sel_lgbtq)
|
274 |
+
|
275 |
+
grp_ids = grp_df["worker_id"].tolist()
|
276 |
+
|
277 |
+
ratings_grp = utils.get_grp_model_labels(
|
278 |
+
comments_df=comments_grouped_full_topic_cat,
|
279 |
+
n_label_per_bin=BIN_DISTRIB,
|
280 |
+
score_bins=SCORE_BINS,
|
281 |
+
grp_ids=grp_ids,
|
282 |
+
)
|
283 |
+
|
284 |
+
# print("ratings_grp", ratings_grp)
|
285 |
+
|
286 |
+
# Modify model name
|
287 |
+
model_name = f"{model_name}_group_gender{sel_gender}_relig{sel_relig}_pol{sel_pol}_race{sel_race_orig}_lgbtq_{sel_lgbtq}"
|
288 |
+
|
289 |
+
label_dir = f"./data/labels/{model_name}"
|
290 |
+
# Create directory for labels if it doesn't yet exist
|
291 |
+
if not os.path.isdir(label_dir):
|
292 |
+
os.mkdir(label_dir)
|
293 |
+
last_label_i = len([name for name in os.listdir(label_dir) if (os.path.isfile(os.path.join(label_dir, name)) and name.endswith('.pkl'))])
|
294 |
+
|
295 |
+
# Train group model
|
296 |
+
mae, mse, rmse, avg_diff, ratings_prev = utils.train_updated_model(model_name, last_label_i, ratings_grp, user)
|
297 |
+
|
298 |
+
duration = time.time() - start
|
299 |
+
print("Time to train/cache:", duration)
|
300 |
+
|
301 |
+
context = {
|
302 |
+
"group_size": group_size,
|
303 |
+
"mae": mae,
|
304 |
+
}
|
305 |
+
return json.dumps(context)
|
306 |
+
|
307 |
+
########################################
|
308 |
+
# ROUTE: /GET_LABELING
|
309 |
+
@app.route("/get_labeling")
|
310 |
+
def get_labeling():
|
311 |
+
# Fetch info for initial labeling component
|
312 |
+
user = request.args.get("user")
|
313 |
+
|
314 |
+
clusters_for_tuning = utils.get_large_clusters(min_n=150)
|
315 |
+
clusters_for_tuning_options = [{"value": i, "text": cluster} for i, cluster in enumerate(clusters_for_tuning)] # Format for Svelecte UI element
|
316 |
+
|
317 |
+
# model_name_suggestion = f"model_{int(time.time())}"
|
318 |
+
model_name_suggestion = f"model_{user}"
|
319 |
+
|
320 |
+
context = {
|
321 |
+
"personalized_models": utils.get_all_model_names(user),
|
322 |
+
"model_name_suggestion": model_name_suggestion,
|
323 |
+
"clusters_for_tuning": clusters_for_tuning_options,
|
324 |
+
}
|
325 |
+
return json.dumps(context)
|
326 |
+
|
327 |
+
########################################
|
328 |
+
# ROUTE: /GET_COMMENTS_TO_LABEL
|
329 |
+
N_LABEL_PER_BIN = 8 # 8 * 5 = 40 comments
|
330 |
+
BIN_DISTRIB = [4, 8, 16, 8, 4]
|
331 |
+
SCORE_BINS = [(0.0, 0.5), (0.5, 1.5), (1.5, 2.5), (2.5, 3.5), (3.5, 4.01)]
|
332 |
+
@app.route("/get_comments_to_label")
|
333 |
+
def get_comments_to_label():
|
334 |
+
n = int(request.args.get("n"))
|
335 |
+
# Fetch examples to label
|
336 |
+
to_label_ids = utils.create_example_sets(
|
337 |
+
comments_df=comments_grouped_full_topic_cat,
|
338 |
+
n_label_per_bin=BIN_DISTRIB,
|
339 |
+
score_bins=SCORE_BINS,
|
340 |
+
keyword=None
|
341 |
+
)
|
342 |
+
random.shuffle(to_label_ids) # randomize to not prime users
|
343 |
+
to_label_ids = to_label_ids[:n]
|
344 |
+
|
345 |
+
ids_to_comments = utils.get_ids_to_comments()
|
346 |
+
to_label = [ids_to_comments[comment_id] for comment_id in to_label_ids]
|
347 |
+
context = {
|
348 |
+
"to_label": to_label,
|
349 |
+
}
|
350 |
+
return json.dumps(context)
|
351 |
+
|
352 |
+
########################################
|
353 |
+
# ROUTE: /GET_COMMENTS_TO_LABEL_TOPIC
|
354 |
+
N_LABEL_PER_BIN_TOPIC = 2 # 2 * 5 = 10 comments
|
355 |
+
@app.route("/get_comments_to_label_topic")
|
356 |
+
def get_comments_to_label_topic():
|
357 |
+
# Fetch examples to label
|
358 |
+
topic = request.args.get("topic")
|
359 |
+
to_label_ids = utils.create_example_sets(
|
360 |
+
comments_df=comments_grouped_full_topic_cat,
|
361 |
+
# n_label_per_bin=N_LABEL_PER_BIN_TOPIC,
|
362 |
+
n_label_per_bin=BIN_DISTRIB,
|
363 |
+
score_bins=SCORE_BINS,
|
364 |
+
keyword=None,
|
365 |
+
topic=topic,
|
366 |
+
)
|
367 |
+
random.shuffle(to_label_ids) # randomize to not prime users
|
368 |
+
ids_to_comments = utils.get_ids_to_comments()
|
369 |
+
to_label = [ids_to_comments[comment_id] for comment_id in to_label_ids]
|
370 |
+
context = {
|
371 |
+
"to_label": to_label,
|
372 |
+
}
|
373 |
+
return json.dumps(context)
|
374 |
+
|
375 |
+
########################################
|
376 |
+
# ROUTE: /GET_PERSONALIZED_MODEL
|
377 |
+
@app.route("/get_personalized_model")
|
378 |
+
def get_personalized_model():
|
379 |
+
model_name = request.args.get("model_name")
|
380 |
+
ratings_json = request.args.get("ratings")
|
381 |
+
mode = request.args.get("mode")
|
382 |
+
user = request.args.get("user")
|
383 |
+
ratings = json.loads(ratings_json)
|
384 |
+
print(ratings)
|
385 |
+
start = time.time()
|
386 |
+
|
387 |
+
label_dir = f"./data/labels/{model_name}"
|
388 |
+
# Create directory for labels if it doesn't yet exist
|
389 |
+
if not os.path.isdir(label_dir):
|
390 |
+
os.mkdir(label_dir)
|
391 |
+
last_label_i = len([name for name in os.listdir(label_dir) if (os.path.isfile(os.path.join(label_dir, name)) and name.endswith('.pkl'))])
|
392 |
+
|
393 |
+
# Handle existing or new model cases
|
394 |
+
if mode == "view":
|
395 |
+
# Fetch prior model performance
|
396 |
+
if model_name not in utils.get_all_model_names():
|
397 |
+
raise Exception(f"Model {model_name} does not exist")
|
398 |
+
else:
|
399 |
+
mae, mse, rmse, avg_diff, ratings_prev = utils.fetch_existing_data(model_name, last_label_i)
|
400 |
+
|
401 |
+
elif mode == "train":
|
402 |
+
# Train model and cache predictions using new labels
|
403 |
+
print("get_personalized_model train")
|
404 |
+
mae, mse, rmse, avg_diff, ratings_prev = utils.train_updated_model(model_name, last_label_i, ratings, user)
|
405 |
+
|
406 |
+
duration = time.time() - start
|
407 |
+
print("Time to train/cache:", duration)
|
408 |
+
|
409 |
+
perf_plot, mae_status = utils.plot_train_perf_results(model_name, mae)
|
410 |
+
perf_plot_json = perf_plot.to_json()
|
411 |
+
|
412 |
+
def round_metric(x):
|
413 |
+
return np.round(abs(x), 3)
|
414 |
+
|
415 |
+
results = {
|
416 |
+
"model_name": model_name,
|
417 |
+
"mae": round_metric(mae),
|
418 |
+
"mae_status": mae_status,
|
419 |
+
"mse": round_metric(mse),
|
420 |
+
"rmse": round_metric(rmse),
|
421 |
+
"avg_diff": round_metric(avg_diff),
|
422 |
+
"duration": duration,
|
423 |
+
"ratings_prev": ratings_prev,
|
424 |
+
"perf_plot_json": json.loads(perf_plot_json),
|
425 |
+
}
|
426 |
+
return json.dumps(results)
|
427 |
+
|
428 |
+
|
429 |
+
########################################
|
430 |
+
# ROUTE: /GET_PERSONALIZED_MODEL_TOPIC
|
431 |
+
@app.route("/get_personalized_model_topic")
|
432 |
+
def get_personalized_model_topic():
|
433 |
+
model_name = request.args.get("model_name")
|
434 |
+
ratings_json = request.args.get("ratings")
|
435 |
+
user = request.args.get("user")
|
436 |
+
ratings = json.loads(ratings_json)
|
437 |
+
topic = request.args.get("topic")
|
438 |
+
print(ratings)
|
439 |
+
start = time.time()
|
440 |
+
|
441 |
+
# Modify model name
|
442 |
+
model_name = f"{model_name}_{topic}"
|
443 |
+
|
444 |
+
label_dir = f"./data/labels/{model_name}"
|
445 |
+
# Create directory for labels if it doesn't yet exist
|
446 |
+
if not os.path.isdir(label_dir):
|
447 |
+
os.mkdir(label_dir)
|
448 |
+
last_label_i = len([name for name in os.listdir(label_dir) if (os.path.isfile(os.path.join(label_dir, name)) and name.endswith('.pkl'))])
|
449 |
+
|
450 |
+
# Handle existing or new model cases
|
451 |
+
# Train model and cache predictions using new labels
|
452 |
+
print("get_personalized_model_topic train")
|
453 |
+
mae, mse, rmse, avg_diff, ratings_prev = utils.train_updated_model(model_name, last_label_i, ratings, user, topic=topic)
|
454 |
+
|
455 |
+
duration = time.time() - start
|
456 |
+
print("Time to train/cache:", duration)
|
457 |
+
|
458 |
+
def round_metric(x):
|
459 |
+
return np.round(abs(x), 3)
|
460 |
+
|
461 |
+
results = {
|
462 |
+
"success": "success",
|
463 |
+
"ratings_prev": ratings_prev,
|
464 |
+
"new_model_name": model_name,
|
465 |
+
}
|
466 |
+
return json.dumps(results)
|
467 |
+
|
468 |
+
|
469 |
+
########################################
|
470 |
+
# ROUTE: /GET_REPORTS
|
471 |
+
@app.route("/get_reports")
|
472 |
+
def get_reports():
|
473 |
+
cur_user = request.args.get("cur_user")
|
474 |
+
scaffold_method = request.args.get("scaffold_method")
|
475 |
+
model = request.args.get("model")
|
476 |
+
topic_vis_method = request.args.get("topic_vis_method")
|
477 |
+
if topic_vis_method == "null":
|
478 |
+
topic_vis_method = "fp_fn"
|
479 |
+
|
480 |
+
# Load reports for current user from stored files
|
481 |
+
report_dir = f"./data/user_reports"
|
482 |
+
user_file = os.path.join(report_dir, f"{cur_user}_{scaffold_method}.pkl")
|
483 |
+
|
484 |
+
if not os.path.isfile(user_file):
|
485 |
+
if scaffold_method == "fixed":
|
486 |
+
reports = get_fixed_scaffold()
|
487 |
+
elif (scaffold_method == "personal" or scaffold_method == "personal_group" or scaffold_method == "personal_test"):
|
488 |
+
reports = get_personal_scaffold(model, topic_vis_method)
|
489 |
+
elif (scaffold_method == "personal_cluster"):
|
490 |
+
reports = get_personal_cluster_scaffold(model)
|
491 |
+
elif scaffold_method == "prompts":
|
492 |
+
reports = get_prompts_scaffold()
|
493 |
+
elif scaffold_method == "tutorial":
|
494 |
+
reports = get_tutorial_scaffold()
|
495 |
+
else:
|
496 |
+
# Prepare empty report
|
497 |
+
reports = [
|
498 |
+
{
|
499 |
+
"title": "",
|
500 |
+
"error_type": "",
|
501 |
+
"evidence": [],
|
502 |
+
"text_entry": "",
|
503 |
+
"complete_status": False,
|
504 |
+
}
|
505 |
+
]
|
506 |
+
else:
|
507 |
+
# Load from pickle file
|
508 |
+
with open(user_file, "rb") as f:
|
509 |
+
reports = pickle.load(f)
|
510 |
+
|
511 |
+
results = {
|
512 |
+
"reports": reports,
|
513 |
+
}
|
514 |
+
return json.dumps(results)
|
515 |
+
|
516 |
+
def get_fixed_scaffold():
|
517 |
+
return [
|
518 |
+
{
|
519 |
+
"title": "Topic: 6_jews_jew_jewish_rabbi",
|
520 |
+
"error_type": "System is under-sensitive",
|
521 |
+
"evidence": [],
|
522 |
+
"text_entry": "",
|
523 |
+
"complete_status": False,
|
524 |
+
},
|
525 |
+
{
|
526 |
+
"title": "Topic: 73_troll_trolls_trolling_spammers",
|
527 |
+
"error_type": "System is over-sensitive",
|
528 |
+
"evidence": [],
|
529 |
+
"text_entry": "",
|
530 |
+
"complete_status": False,
|
531 |
+
},
|
532 |
+
{
|
533 |
+
"title": "Topic: 66_mexicans_mexico_mexican_spanish",
|
534 |
+
"error_type": "System is under-sensitive",
|
535 |
+
"evidence": [],
|
536 |
+
"text_entry": "",
|
537 |
+
"complete_status": False,
|
538 |
+
},
|
539 |
+
{
|
540 |
+
"title": "Topic: 89_cowards_coward_cowardly_brave",
|
541 |
+
"error_type": "System is over-sensitive",
|
542 |
+
"evidence": [],
|
543 |
+
"text_entry": "",
|
544 |
+
"complete_status": False,
|
545 |
+
},
|
546 |
+
{
|
547 |
+
"title": "Topic: 63_disgusting_gross_toxic_thicc",
|
548 |
+
"error_type": "System is under-sensitive",
|
549 |
+
"evidence": [],
|
550 |
+
"text_entry": "",
|
551 |
+
"complete_status": False,
|
552 |
+
},
|
553 |
+
]
|
554 |
+
|
555 |
+
def get_empty_report(title, error_type):
|
556 |
+
return {
|
557 |
+
"title": f"Topic: {title}",
|
558 |
+
"error_type": error_type,
|
559 |
+
"evidence": [],
|
560 |
+
"text_entry": "",
|
561 |
+
"complete_status": False,
|
562 |
+
}
|
563 |
+
|
564 |
+
def get_tutorial_scaffold():
|
565 |
+
return [
|
566 |
+
{
|
567 |
+
"title": "Topic: 79_idiot_dumb_stupid_dumber",
|
568 |
+
"error_type": "System is over-sensitive",
|
569 |
+
"evidence": [],
|
570 |
+
"text_entry": "",
|
571 |
+
"complete_status": False,
|
572 |
+
},
|
573 |
+
]
|
574 |
+
|
575 |
+
def get_personal_cluster_scaffold(model):
|
576 |
+
topics_under_top, topics_over_top = utils.get_personal_clusters(model)
|
577 |
+
|
578 |
+
report_under = [get_empty_report(topic, "System is under-sensitive") for topic in topics_under_top]
|
579 |
+
|
580 |
+
report_over = [get_empty_report(topic, "System is over-sensitive") for topic in topics_over_top]
|
581 |
+
reports = (report_under + report_over)
|
582 |
+
random.shuffle(reports)
|
583 |
+
return reports
|
584 |
+
|
585 |
+
def get_topic_errors(df, topic_vis_method, threshold=2):
|
586 |
+
topics = df["topic_"].unique().tolist()
|
587 |
+
topic_errors = {}
|
588 |
+
for topic in topics:
|
589 |
+
t_df = df[df["topic_"] == topic]
|
590 |
+
y_true = t_df["pred"].to_numpy()
|
591 |
+
y_pred = t_df["rating"].to_numpy()
|
592 |
+
if topic_vis_method == "mae":
|
593 |
+
t_err = mean_absolute_error(y_true, y_pred)
|
594 |
+
elif topic_vis_method == "mse":
|
595 |
+
t_err = mean_squared_error(y_true, y_pred)
|
596 |
+
elif topic_vis_method == "avg_diff":
|
597 |
+
t_err = np.mean(y_true - y_pred)
|
598 |
+
elif topic_vis_method == "fp_proportion":
|
599 |
+
y_true = [0 if rating < threshold else 1 for rating in t_df["pred"].tolist()]
|
600 |
+
y_pred = [0 if rating < threshold else 1 for rating in t_df["rating"].tolist()]
|
601 |
+
try:
|
602 |
+
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
|
603 |
+
except:
|
604 |
+
tn, fp, fn, tp = [0, 0, 0, 0] # ignore; set error to 0
|
605 |
+
total = float(len(y_true))
|
606 |
+
t_err = fp / total
|
607 |
+
elif topic_vis_method == "fn_proportion":
|
608 |
+
y_true = [0 if rating < threshold else 1 for rating in t_df["pred"].tolist()]
|
609 |
+
y_pred = [0 if rating < threshold else 1 for rating in t_df["rating"].tolist()]
|
610 |
+
try:
|
611 |
+
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
|
612 |
+
except:
|
613 |
+
tn, fp, fn, tp = [0, 0, 0, 0] # ignore; set error to 0
|
614 |
+
total = float(len(y_true))
|
615 |
+
t_err = fn / total
|
616 |
+
topic_errors[topic] = t_err
|
617 |
+
|
618 |
+
return topic_errors
|
619 |
+
|
620 |
+
def get_personal_scaffold(model, topic_vis_method, n_topics=200, n=5):
    threshold = utils.get_toxic_threshold()

    # Get topics with greatest amount of error
    with open(f"./data/preds_dfs/{model}.pkl", "rb") as f:
        preds_df = pickle.load(f)
    preds_df_mod = preds_df.merge(utils.get_comments_grouped_full_topic_cat(), on="item_id", how="left", suffixes=('_', '_avg'))
    preds_df_mod = preds_df_mod[preds_df_mod["user_id"] == "A"].sort_values(by=["item_id"]).reset_index()
    preds_df_mod = preds_df_mod[preds_df_mod["topic_id_"] < n_topics]

    if topic_vis_method == "median":
        df = preds_df_mod.groupby(["topic_", "user_id"]).median().reset_index()
    elif topic_vis_method == "mean":
        df = preds_df_mod.groupby(["topic_", "user_id"]).mean().reset_index()
    elif topic_vis_method == "fp_fn":
        for error_type in ["fn_proportion", "fp_proportion"]:
            topic_errors = get_topic_errors(preds_df_mod, error_type)
            preds_df_mod[error_type] = [topic_errors[topic] for topic in preds_df_mod["topic_"].tolist()]
        df = preds_df_mod.groupby(["topic_", "user_id"]).mean().reset_index()
    else:
        # Get error for each topic
        topic_errors = get_topic_errors(preds_df_mod, topic_vis_method)
        preds_df_mod[topic_vis_method] = [topic_errors[topic] for topic in preds_df_mod["topic_"].tolist()]
        df = preds_df_mod.groupby(["topic_", "user_id"]).mean().reset_index()

    # Get system error
    df = df[(df["topic_"] != "53_maiareficco_kallystas_dyisisitmanila_tractorsazi") & (df["topic_"] != "79_idiot_dumb_stupid_dumber")]

    if topic_vis_method == "median" or topic_vis_method == "mean":
        df["error_magnitude"] = [utils.get_error_magnitude(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]
        df["error_type"] = [utils.get_error_type_radio(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]

        df_under = df[df["error_type"] == "System is under-sensitive"]
        df_under = df_under.sort_values(by=["error_magnitude"], ascending=False).head(n)  # surface largest errors first
        report_under = [get_empty_report(row["topic_"], row["error_type"]) for _, row in df_under.iterrows()]

        df_over = df[df["error_type"] == "System is over-sensitive"]
        df_over = df_over.sort_values(by=["error_magnitude"], ascending=False).head(n)  # surface largest errors first
        report_over = [get_empty_report(row["topic_"], row["error_type"]) for _, row in df_over.iterrows()]

        # Set up reports
        # return [get_empty_report(row["topic_"], row["error_type"]) for index, row in df.iterrows()]
        reports = (report_under + report_over)
        random.shuffle(reports)
    elif topic_vis_method == "fp_fn":
        df_under = df.sort_values(by=["fn_proportion"], ascending=False).head(n)
        df_under = df_under[df_under["fn_proportion"] > 0]
        report_under = [get_empty_report(row["topic_"], "System is under-sensitive") for _, row in df_under.iterrows()]

        df_over = df.sort_values(by=["fp_proportion"], ascending=False).head(n)
        df_over = df_over[df_over["fp_proportion"] > 0]
        report_over = [get_empty_report(row["topic_"], "System is over-sensitive") for _, row in df_over.iterrows()]

        reports = (report_under + report_over)
        random.shuffle(reports)
    else:
        df = df.sort_values(by=[topic_vis_method], ascending=False).head(n * 2)
        df["error_type"] = [utils.get_error_type_radio(sys, user, threshold) for sys, user in zip(df["rating"].tolist(), df["pred"].tolist())]
        reports = [get_empty_report(row["topic_"], row["error_type"]) for _, row in df.iterrows()]

    return reports
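
# Illustrative usage (a sketch; assumes a serialized predictions DataFrame exists at
# ./data/preds_dfs/<model>.pkl for the given model name, e.g. "model_DemoUser"):
#
#     reports = get_personal_scaffold("model_DemoUser", "fp_fn", n_topics=200, n=5)
#
# This surfaces up to n topics each where the personalized model most disagrees with
# the system in the under-sensitive and over-sensitive directions, as empty reports.
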
def get_prompts_scaffold():
    return [
        {
            "title": "Are there terms that are used in your identity group or community that tend to be flagged incorrectly as toxic?",
            "error_type": "System is over-sensitive",
            "evidence": [],
            "text_entry": "",
            "complete_status": False,
        },
        {
            "title": "Are there terms that are used in your identity group or community that tend to be flagged incorrectly as non-toxic?",
            "error_type": "System is under-sensitive",
            "evidence": [],
            "text_entry": "",
            "complete_status": False,
        },
        {
            "title": "Are there certain ways that your community tends to be targeted by outsiders?",
            "error_type": "",
            "evidence": [],
            "text_entry": "",
            "complete_status": False,
        },
        {
            "title": "Are there other communities whose content should be very similar to your community's? Verify that this content is treated similarly by the system.",
            "error_type": "",
            "evidence": [],
            "text_entry": "",
            "complete_status": False,
        },
        {
            "title": "Are there ways that you've seen individuals in your community actively try to thwart the rules of automated content moderation systems? Check whether these strategies work here.",
            "error_type": "",
            "evidence": [],
            "text_entry": "",
            "complete_status": False,
        },
    ]
########################################
# ROUTE: /SAVE_REPORTS
@app.route("/save_reports")
def save_reports():
    cur_user = request.args.get("cur_user")
    reports_json = request.args.get("reports")
    reports = json.loads(reports_json)
    scaffold_method = request.args.get("scaffold_method")

    # Save reports for current user to stored files
    report_dir = f"./data/user_reports"
    # Save to pickle file
    with open(os.path.join(report_dir, f"{cur_user}_{scaffold_method}.pkl"), "wb") as f:
        pickle.dump(reports, f)

    results = {
        "status": "success",
    }
    return json.dumps(results)
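
# Example request (a sketch, assuming the Flask server from this file is running on
# port 5001; the JSON reports payload must be URL-encoded):
#
#     curl "http://localhost:5001/save_reports?cur_user=DemoUser&scaffold_method=personal&reports=%5B%5D"
#
# This would persist an empty report list to ./data/user_reports/DemoUser_personal.pkl.
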
########################################
# ROUTE: /GET_EXPLORE_EXAMPLES
@app.route("/get_explore_examples")
def get_explore_examples():
    threshold = utils.get_toxic_threshold()
    n_examples = int(request.args.get("n_examples"))

    # Get sample of examples
    df = utils.get_comments_grouped_full_topic_cat().sample(n=n_examples)

    df["system_decision"] = [utils.get_decision(rating, threshold) for rating in df["rating"].tolist()]
    df["system_color"] = [utils.get_user_color(sys, threshold) for sys in df["rating"].tolist()]  # get cell colors

    ex_json = df.to_json(orient="records")

    results = {
        "examples": ex_json,
    }
    return json.dumps(results)
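
# Example request (a sketch, assuming the server is running on port 5001):
#
#     curl "http://localhost:5001/get_explore_examples?n_examples=10"
#
# The response is a JSON object whose "examples" field holds a record-oriented JSON
# string of sampled comments annotated with "system_decision" and "system_color".
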
########################################
# ROUTE: /GET_RESULTS
@app.route("/get_results")
def get_results():
    users = request.args.get("users")
    if users != "":
        users = users.split(",")
    # print("users", users)

    IGNORE_LIST = ["DemoUser"]
    report_dir = f"./data/user_reports"

    # For each user, get personal and prompt results
    # Get links to label pages and audit pages
    results = []
    for user in users:
        if user not in IGNORE_LIST:
            user_results = {}
            user_results["user"] = user
            for scaffold_method in ["personal", "personal_group", "prompts"]:
                # Get results
                user_file = os.path.join(report_dir, f"{user}_{scaffold_method}.pkl")
                if os.path.isfile(user_file):
                    with open(user_file, "rb") as f:
                        user_results[scaffold_method] = pickle.load(f)
            results.append(user_results)

    # print("results", results)

    results = {
        "results": results,
    }
    return json.dumps(results)
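
# Example request (a sketch, assuming the server is running on port 5001 and that
# reports were previously saved via /save_reports for the listed users; "UserA" and
# "UserB" are placeholder names):
#
#     curl "http://localhost:5001/get_results?users=UserA,UserB"
#
# Users on the ignore list (e.g., DemoUser) are skipped; each remaining user's saved
# personal, personal_group, and prompts reports are returned under "results".
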
if __name__ == "__main__":
    app.run(debug=True, port=5001)