"""Streamlit app for detecting and masking Hebrew PII with the GolemPII-v1
token-classification model (XLM-RoBERTa base)."""

import time
from typing import Dict, List, Tuple

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification


MODEL_PATH = "CordwainerSmith/GolemPII-v1"

ENTITY_COLORS = {
    "PHONE_NUM": "#FF9999",
    "ID_NUM": "#99FF99",
    "CC_NUM": "#9999FF",
    "BANK_ACCOUNT_NUM": "#FFFF99",
    "FIRST_NAME": "#FF99FF",
    "LAST_NAME": "#99FFFF",
    "CITY": "#FFB366",
    "STREET": "#B366FF",
    "POSTAL_CODE": "#66FFB3",
    "EMAIL": "#66B3FF",
    "DATE": "#FFB3B3",
    "CC_PROVIDER": "#B3FFB3",
}

EXAMPLE_SENTENCES = [
    "שם מלא: תלמה אריאלי מספר תעודת זהות: 61453324-8 תאריך לידה: 15/09/1983 כתובת:  ארלוזורוב 22  פתח תקווה מיקוד 2731711 אימייל: [email protected] טלפון: 054-8884771  בפגישה זו נדונו פתרונות טכנולוגיים חדשניים לשיפור תהליכי עבודה. המשתתף יתבקש להציג מצגת בנושא בפגישה הבאה אשר שילם ב 5326-1003-5299-5478 מסטרקארד עם הוראת קבע ל 11-77-352300",
]
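
# Rough English gloss of the example above, for readers who don't read Hebrew:
# "Full name: Talma Arieli. ID number: 61453324-8. Date of birth: 15/09/1983.
# Address: Arlozorov 22, Petah Tikva, postal code 2731711. Email: [email protected].
# Phone: 054-8884771. This meeting discussed innovative technological solutions
# for improving work processes; the participant will be asked to present on the
# topic at the next meeting. Paid with Mastercard 5326-1003-5299-5478, with a
# standing order to 11-77-352300."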

MODEL_DETAILS = {
    "name": "GolemPII-v1: Hebrew PII Detection Model",
    "description": 'The <a href="https://huggingface.co/CordwainerSmith/GolemPII-v1" target="_blank">GolemPII model</a> was specifically designed to identify and categorize various types of personally identifiable information (PII) present in Hebrew text. Its core intended usage revolves around enhancing privacy protection and facilitating the process of data anonymization. This makes it a good candidate for applications and systems that handle sensitive data, such as legal documents, medical records, or any text data containing PII, where the automatic redaction or removal of such information is essential for ensuring compliance with data privacy regulations and safeguarding individuals\' personal information. The model can be deployed on-premise with a relatively small hardware footprint, making it suitable for organizations with limited computing resources or those prioritizing local data processing.\n\nThe model was trained on the <a href="https://huggingface.co/datasets/CordwainerSmith/GolemGuard" target="_blank">GolemGuard</a> dataset, a Hebrew language dataset comprising over 115,000 examples of PII entities and containing both real and synthetically generated text examples. This data represents various document types and communication formats commonly found in Israeli professional and administrative contexts. GolemGuard covers a wide range of document types and encompasses a diverse array of PII entities, making it ideal for training and evaluating PII detection models.',
    "base_model": "xlm-roberta-base",
    "training_data": "Custom Hebrew PII dataset",
    "detected_pii_entities": [
        "FIRST_NAME",
        "LAST_NAME",
        "STREET",
        "CITY",
        "PHONE_NUM",
        "EMAIL",
        "ID_NUM",
        "BANK_ACCOUNT_NUM",
        "CC_NUM",
        "CC_PROVIDER",
        "DATE",
        "POSTAL_CODE",
    ],
}


class PIIMaskingModel:
    def __init__(self, model_name: str):
        self.model_name = model_name
        # Expects an `hf_token` entry in the app's Streamlit secrets
        # (.streamlit/secrets.toml); needed for gated or private checkpoints.
        hf_token = st.secrets["hf_token"]
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
        self.model = AutoModelForTokenClassification.from_pretrained(
            model_name, token=hf_token
        )
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()

    def process_text(
        self, text: str
    ) -> Tuple[str, float, str, List[str], List[str], List[Dict]]:
        start_time = time.time()

        tokenized_inputs = self.tokenizer(
            text,
            truncation=True,
            padding=False,
            return_tensors="pt",
            return_offsets_mapping=True,
            add_special_tokens=True,
        )

        input_ids = tokenized_inputs.input_ids.to(self.device)
        attention_mask = tokenized_inputs.attention_mask.to(self.device)
        offset_mapping = tokenized_inputs["offset_mapping"][0].tolist()

        # XLM-RoBERTa wraps the sequence in <s> ... </s>; mark their offsets as
        # None so the special tokens are skipped when building entity spans.
        offset_mapping[0] = None  # <s> token
        offset_mapping[-1] = None  # </s> token

        with torch.no_grad():
            outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)

        predictions = outputs.logits.argmax(dim=-1).cpu().numpy()
        predicted_labels = [
            self.model.config.id2label[label_id] for label_id in predictions[0]
        ]
        tokens = self.tokenizer.convert_ids_to_tokens(input_ids[0])

        masked_text, colored_text, privacy_masks = self.mask_pii_in_sentence(
            tokens, predicted_labels, text, offset_mapping
        )
        processing_time = time.time() - start_time

        return (
            masked_text,
            processing_time,
            colored_text,
            tokens,
            predicted_labels,
            privacy_masks,
        )

    def _find_entity_span(
        self,
        i: int,
        labels: List[str],
        tokens: List[str],
        offset_mapping: List[Tuple[int, int]],
    ) -> Tuple[int, str, int]:
        # "B-XXX" and "I-XXX" both carry the entity type after the two-character prefix.
        current_entity = labels[i][2:]
        j = i + 1
        last_valid_end = offset_mapping[i][1] if offset_mapping[i] else None

        while j < len(tokens):
            if offset_mapping[j] is None:
                j += 1
                continue

            next_label = labels[j]

            if next_label.startswith("B-") and tokens[j].startswith("▁"):
                break

            if next_label.startswith("I-") and next_label[2:] != current_entity:
                break

            if next_label.startswith("I-") and next_label[2:] == current_entity:
                last_valid_end = offset_mapping[j][1]
                j += 1
            elif next_label.startswith("B-") and not tokens[j].startswith("▁"):
                last_valid_end = offset_mapping[j][1]
                j += 1
            else:
                break

        return j, current_entity, last_valid_end
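
    # Illustrative walk (hypothetical labels, not from a real run): if the
    # tokens of "תלמה אריאלי" were tagged ["B-FIRST_NAME", "B-LAST_NAME"], the
    # span search would stop at the second B- tag because it sits on a "▁"
    # word boundary, yielding separate FIRST_NAME and LAST_NAME spans.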

    def mask_pii_in_sentence(
        self,
        tokens: List[str],
        labels: List[str],
        original_text: str,
        offset_mapping: List[Tuple[int, int]],
    ) -> Tuple[str, str, List[Dict]]:
        privacy_masks = []
        current_pos = 0
        masked_text_parts = []
        colored_text_parts = []

        i = 0
        while i < len(tokens):
            if offset_mapping[i] is None:
                i += 1
                continue

            current_label = labels[i]

            if current_label.startswith(("B-", "I-")):
                start_char = offset_mapping[i][0]
                next_pos, entity_type, last_valid_end = self._find_entity_span(
                    i, labels, tokens, offset_mapping
                )

                if current_pos < start_char:
                    text_before = original_text[current_pos:start_char]
                    masked_text_parts.append(text_before)
                    colored_text_parts.append(text_before)

                entity_value = original_text[start_char:last_valid_end]
                mask = self._get_mask_for_entity(entity_type)

                privacy_masks.append(
                    {
                        "label": entity_type,
                        "start": start_char,
                        "end": last_valid_end,
                        "value": entity_value,
                        "label_index": len(privacy_masks) + 1,
                    }
                )

                masked_text_parts.append(mask)
                color = ENTITY_COLORS.get(entity_type, "#CCCCCC")
                colored_text_parts.append(
                    f'<span style="background-color: {color}; color: black; padding: 2px; border-radius: 3px;">{mask}</span>'
                )

                current_pos = last_valid_end
                i = next_pos
            else:
                if offset_mapping[i] is not None:
                    start_char = offset_mapping[i][0]
                    end_char = offset_mapping[i][1]

                    if current_pos < end_char:
                        text_chunk = original_text[current_pos:end_char]
                        masked_text_parts.append(text_chunk)
                        colored_text_parts.append(text_chunk)
                        current_pos = end_char
                i += 1

        if current_pos < len(original_text):
            remaining_text = original_text[current_pos:]
            masked_text_parts.append(remaining_text)
            colored_text_parts.append(remaining_text)

        return ("".join(masked_text_parts), "".join(colored_text_parts), privacy_masks)

    def _get_mask_for_entity(self, entity_type: str) -> str:
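        # Hebrew placeholder labels; e.g. "[טלפון]" is "[phone]" and "[ת.ז]" is
        # "[ID]". Unknown entity types fall back to "[ENTITY_TYPE]".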
        return {
            "PHONE_NUM": "[טלפון]",
            "ID_NUM": "[ת.ז]",
            "CC_NUM": "[כרטיס אשראי]",
            "BANK_ACCOUNT_NUM": "[חשבון בנק]",
            "FIRST_NAME": "[שם פרטי]",
            "LAST_NAME": "[שם משפחה]",
            "CITY": "[עיר]",
            "STREET": "[רחוב]",
            "POSTAL_CODE": "[מיקוד]",
            "EMAIL": "[אימייל]",
            "DATE": "[תאריך]",
            "CC_PROVIDER": "[ספק כרטיס אשראי]",
            "BANK": "[בנק]",
        }.get(entity_type, f"[{entity_type}]")
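

# Optional: cache the model across Streamlit reruns. A minimal sketch;
# main() below constructs PIIMaskingModel directly on each button press, but
# swapping in this loader would load the model weights only once per process.
@st.cache_resource
def load_model(model_name: str) -> PIIMaskingModel:
    return PIIMaskingModel(model_name)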


def main():
    st.set_page_config(layout="wide")
    st.title("🗿 GolemPII: Hebrew PII Masking Application 🗿")

    st.markdown(
        """
    <style>
        .rtl { direction: rtl; text-align: right; }
        .entity-legend { padding: 5px; margin: 2px; border-radius: 3px; display: inline-block; }
        .masked-text { 
            direction: rtl; 
            text-align: right; 
            line-height: 2; 
            padding: 10px; 
            background-color: #f6f8fa; 
            border-radius: 5px; 
            color: black;             
            white-space: pre-wrap;
        }
        .main h3 {            
            margin-bottom: 10px;
        }
        textarea {
            direction: rtl !important;
            text-align: right !important;
        }
        .stTextArea label {
            direction: ltr !important;
            text-align: left !important;
        }
    </style>
    """,
        unsafe_allow_html=True,
    )

    # Sidebar with model details
    st.sidebar.markdown(
        f"""
        <div>
            <h2>{MODEL_DETAILS['name']}</h2>
            <p>{MODEL_DETAILS['description']}</p>
            <h3>Supported PII Entities</h3>
            <ul>
                {" ".join([f'<li><span style="background-color: {ENTITY_COLORS.get(entity, "#CCCCCC")}; color: black; padding: 3px 5px; border-radius: 3px; margin-right: 5px;">{entity}</span></li>' for entity in MODEL_DETAILS['detected_pii_entities']])}
            </ul>
        </div>
    """,
        unsafe_allow_html=True,
    )

    text_input = st.text_area(
        "Enter text to mask (separate multiple texts with commas):",
        value="\n".join(EXAMPLE_SENTENCES),
        height=200,
    )

    show_json = st.checkbox("Show JSON Output", value=True)

    if st.button("Process Text"):
        # One input per line, matching the text area's default value and label.
        texts = [text.strip() for text in text_input.split("\n") if text.strip()]
        model = PIIMaskingModel(MODEL_PATH)

        for text in texts:
            st.markdown(
                '<h3 style="text-align: center;">Original Text</h3>',
                unsafe_allow_html=True,
            )
            st.markdown(f'<div class="rtl">{text}</div>', unsafe_allow_html=True)

            (
                masked_text,
                processing_time,
                colored_text,
                tokens,
                predicted_labels,
                privacy_masks,
            ) = model.process_text(text)

            st.markdown(
                '<h3 style="text-align: center;">Masked Text</h3>',
                unsafe_allow_html=True,
            )
            st.markdown(
                f'<div class="masked-text">{colored_text}</div>', unsafe_allow_html=True
            )

            st.markdown(f"Processing Time: {processing_time:.3f} seconds")

            if show_json:
                st.json(
                    {
                        "original": text,
                        "masked": masked_text,
                        "processing_time": processing_time,
                        "tokens": tokens,
                        "token_classes": predicted_labels,
                        "privacy_mask": privacy_masks,
                        "span_labels": [
                            [m["start"], m["end"], m["label"]] for m in privacy_masks
                        ],
                    }
                )

            st.markdown("---")


if __name__ == "__main__":
    main()
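

# To run locally (sketch; assumes this file is saved as app.py):
#   streamlit run app.py
# with an `hf_token` entry in .streamlit/secrets.toml, e.g.:
#   hf_token = "hf_xxx"  # hypothetical placeholder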