OneZero-Y committed
Commit 49020ea · verified · Parent: 61f41b0

Upload lora_pii_detector_bert-base-uncased_model LoRA model

README.md ADDED
@@ -0,0 +1,96 @@
+ ---
+ license: apache-2.0
+ base_model: bert-base-uncased
+ tags:
+ - lora
+ - semantic-router
+ - pii-classification
+ - token-classification
+ - candle
+ - rust
+ language:
+ - en
+ pipeline_tag: token-classification
+ library_name: candle
+ ---
+
+ # lora_pii_detector_bert-base-uncased_model
+
+ ## Model Description
+
+ This is a LoRA (Low-Rank Adaptation) fine-tune of **bert-base-uncased** for PII detection: it flags personally identifiable information in text via token classification.
+
+ This model is part of the [semantic-router](https://github.com/vllm-project/semantic-router) project and is optimized for use with the Candle framework in Rust.
+
+ ## Model Details
+
+ - **Base Model**: bert-base-uncased
+ - **Task**: PII detection (token classification)
+ - **Framework**: Candle (Rust)
+ - **Model Size**: ~416 MB
+ - **LoRA Rank**: 16
+ - **LoRA Alpha**: 32
+ - **Target Modules**: attention.self.query, attention.self.value, attention.output.dense, intermediate.dense, output.dense
+
+ ## Usage
+
+ ### With semantic-router (Recommended)
+
+ ```python
+ from semantic_router import SemanticRouter
+
+ # The model will be automatically downloaded and used
+ router = SemanticRouter()
+ results = router.classify_batch(["Your text here"])
+ ```
+
+ ### With Candle (Rust)
+
+ ```rust
+ use candle_core::{DType, Device};
+ use candle_nn::VarBuilder;
+ use candle_transformers::models::bert::{BertModel, Config};
+
+ // Load the config and safetensors weights, then build the model
+ let device = Device::Cpu;
+ let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
+ let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
+ let model = BertModel::load(vb, &config)?;
+ ```
+
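+ The snippet above only builds the model; inputs still have to be tokenized. As a minimal sketch (not from the semantic-router codebase; the `tokenizers` crate usage, error handling, and file paths are assumptions), preparing `input_ids` from the bundled `tokenizer.json` could look like:
+
+ ```rust
+ use candle_core::{Device, Tensor};
+ use tokenizers::Tokenizer;
+
+ fn main() -> anyhow::Result<()> {
+     let device = Device::Cpu;
+     // Encode a query with the WordPiece tokenizer shipped in this repo
+     let tokenizer = Tokenizer::from_file("tokenizer.json").map_err(anyhow::Error::msg)?;
+     let encoding = tokenizer
+         .encode("My email is user@example.com", true)
+         .map_err(anyhow::Error::msg)?;
+     // Shape (1, seq_len); pass to BertModel::forward together with
+     // token_type_ids (all zeros for a single-segment input)
+     let input_ids = Tensor::new(encoding.get_ids(), &device)?.unsqueeze(0)?;
+     let token_type_ids = input_ids.zeros_like()?;
+     println!("{:?} {:?}", input_ids.shape(), token_type_ids.shape());
+     Ok(())
+ }
+ ```
+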
+ ## Training Details
+
+ This model was fine-tuned with the LoRA (Low-Rank Adaptation) technique, using the following adapter configuration (a sketch of how these numbers combine follows the list):
+
+ - **Rank**: 16
+ - **Alpha**: 32
+ - **Dropout**: 0.1
+ - **Target Modules**: attention.self.query, attention.self.value, attention.output.dense, intermediate.dense, output.dense
+
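+ For illustration only (this is not the project's training code): a LoRA adapter of rank `r` with scaling `alpha` stores two low-rank factors per target module and updates the frozen weight as `W' = W + (alpha / r) * B A`. A minimal Candle sketch with hypothetical placeholder tensors:
+
+ ```rust
+ use candle_core::{DType, Device, Result, Tensor};
+
+ // Merge one LoRA adapter pair into its frozen base weight: W' = W + (alpha / rank) * B @ A
+ fn merge_lora(w: &Tensor, a: &Tensor, b: &Tensor, rank: f64, alpha: f64) -> Result<Tensor> {
+     w + (b.matmul(a)? * (alpha / rank))?
+ }
+
+ fn main() -> Result<()> {
+     let device = Device::Cpu;
+     // Hypothetical placeholders for one target module, e.g. the
+     // 768 x 768 attention.self.query projection in bert-base-uncased
+     let w = Tensor::randn(0f32, 0.02, (768, 768), &device)?; // frozen base weight
+     let a = Tensor::randn(0f32, 0.02, (16, 768), &device)?;  // LoRA A: (rank, in_features)
+     let b = Tensor::zeros((768, 16), DType::F32, &device)?;  // LoRA B: (out_features, rank)
+     let merged = merge_lora(&w, &a, &b, 16.0, 32.0)?;        // rank 16, alpha 32
+     println!("merged weight shape: {:?}", merged.shape());
+     Ok(())
+ }
+ ```
+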
+ ## Performance
+
+ The model detects personally identifiable information via per-token BIO tags; the covered entity types (PERSON, EMAIL_ADDRESS, PHONE_NUMBER, US_SSN, and others) are listed in `config.json` and `label_mapping.json`. A hypothetical decoder below shows how the BIO tags fold into entity spans.
+
+ For detailed performance metrics, see the [training results](https://github.com/vllm-project/semantic-router/blob/main/training-result.md).
+
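+ To make the label scheme concrete (a sketch, not part of this repository's code): consumers typically group runs of `B-X`/`I-X` tags from `id2label` into contiguous entity spans.
+
+ ```rust
+ /// Fold per-token BIO tags into (entity_type, start, end) half-open token spans.
+ fn bio_to_spans(tags: &[&str]) -> Vec<(String, usize, usize)> {
+     let mut spans = Vec::new();
+     let mut current: Option<(String, usize)> = None; // open span: (type, start)
+     for (i, tag) in tags.iter().enumerate() {
+         match tag.split_once('-') {
+             // B- always opens a fresh span, closing any open one
+             Some(("B", ty)) => {
+                 if let Some((t, s)) = current.take() {
+                     spans.push((t, s, i));
+                 }
+                 current = Some((ty.to_string(), i));
+             }
+             // I- continues an open span of the same type
+             Some(("I", ty)) if current.as_ref().map_or(false, |(t, _)| t == ty) => {}
+             // "O", or an I- tag that doesn't continue the open span
+             _ => {
+                 if let Some((t, s)) = current.take() {
+                     spans.push((t, s, i));
+                 }
+             }
+         }
+     }
+     if let Some((t, s)) = current {
+         spans.push((t, s, tags.len()));
+     }
+     spans
+ }
+
+ fn main() {
+     let tags = ["B-PERSON", "I-PERSON", "O", "B-EMAIL_ADDRESS"];
+     // prints [("PERSON", 0, 2), ("EMAIL_ADDRESS", 3, 4)]
+     println!("{:?}", bio_to_spans(&tags));
+ }
+ ```
+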
+ ## Files
+
+ - `model.safetensors`: LoRA adapter weights
+ - `config.json`: Model configuration
+ - `lora_config.json`: LoRA-specific configuration
+ - `tokenizer.json`: Tokenizer definition
+ - `tokenizer_config.json`: Tokenizer configuration
+ - `special_tokens_map.json`: Special token mapping
+ - `vocab.txt`: WordPiece vocabulary
+ - `label_mapping.json`: Label mappings for classification
+
+ ## Citation
+
+ If you use this model, please cite:
+
+ ```bibtex
+ @misc{semantic-router-lora,
+   title={LoRA Fine-tuned Models for Semantic Router},
+   author={Semantic Router Team},
+   year={2025},
+   url={https://github.com/vllm-project/semantic-router}
+ }
+ ```
+
+ ## License
+
+ Apache 2.0
config.json ADDED
@@ -0,0 +1,99 @@
+ {
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "dtype": "float32",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-AGE",
+     "2": "I-AGE",
+     "3": "B-CREDIT_CARD",
+     "4": "I-CREDIT_CARD",
+     "5": "B-DATE_TIME",
+     "6": "I-DATE_TIME",
+     "7": "B-DOMAIN_NAME",
+     "8": "I-DOMAIN_NAME",
+     "9": "B-EMAIL_ADDRESS",
+     "10": "I-EMAIL_ADDRESS",
+     "11": "B-GPE",
+     "12": "I-GPE",
+     "13": "B-IBAN_CODE",
+     "14": "I-IBAN_CODE",
+     "15": "B-IP_ADDRESS",
+     "16": "I-IP_ADDRESS",
+     "17": "B-NRP",
+     "18": "I-NRP",
+     "19": "B-ORGANIZATION",
+     "20": "I-ORGANIZATION",
+     "21": "B-PERSON",
+     "22": "I-PERSON",
+     "23": "B-PHONE_NUMBER",
+     "24": "I-PHONE_NUMBER",
+     "25": "B-STREET_ADDRESS",
+     "26": "I-STREET_ADDRESS",
+     "27": "B-TITLE",
+     "28": "I-TITLE",
+     "29": "B-US_DRIVER_LICENSE",
+     "30": "I-US_DRIVER_LICENSE",
+     "31": "B-US_SSN",
+     "32": "I-US_SSN",
+     "33": "B-ZIP_CODE",
+     "34": "I-ZIP_CODE"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "O": 0,
+     "B-AGE": 1,
+     "I-AGE": 2,
+     "B-CREDIT_CARD": 3,
+     "I-CREDIT_CARD": 4,
+     "B-DATE_TIME": 5,
+     "I-DATE_TIME": 6,
+     "B-DOMAIN_NAME": 7,
+     "I-DOMAIN_NAME": 8,
+     "B-EMAIL_ADDRESS": 9,
+     "I-EMAIL_ADDRESS": 10,
+     "B-GPE": 11,
+     "I-GPE": 12,
+     "B-IBAN_CODE": 13,
+     "I-IBAN_CODE": 14,
+     "B-IP_ADDRESS": 15,
+     "I-IP_ADDRESS": 16,
+     "B-NRP": 17,
+     "I-NRP": 18,
+     "B-ORGANIZATION": 19,
+     "I-ORGANIZATION": 20,
+     "B-PERSON": 21,
+     "I-PERSON": 22,
+     "B-PHONE_NUMBER": 23,
+     "I-PHONE_NUMBER": 24,
+     "B-STREET_ADDRESS": 25,
+     "I-STREET_ADDRESS": 26,
+     "B-TITLE": 27,
+     "I-TITLE": 28,
+     "B-US_DRIVER_LICENSE": 29,
+     "I-US_DRIVER_LICENSE": 30,
+     "B-US_SSN": 31,
+     "I-US_SSN": 32,
+     "B-ZIP_CODE": 33,
+     "I-ZIP_CODE": 34
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.56.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
label_mapping.json ADDED
@@ -0,0 +1 @@
+ {"label_to_id": {"O": 0, "B-AGE": 1, "I-AGE": 2, "B-CREDIT_CARD": 3, "I-CREDIT_CARD": 4, "B-DATE_TIME": 5, "I-DATE_TIME": 6, "B-DOMAIN_NAME": 7, "I-DOMAIN_NAME": 8, "B-EMAIL_ADDRESS": 9, "I-EMAIL_ADDRESS": 10, "B-GPE": 11, "I-GPE": 12, "B-IBAN_CODE": 13, "I-IBAN_CODE": 14, "B-IP_ADDRESS": 15, "I-IP_ADDRESS": 16, "B-NRP": 17, "I-NRP": 18, "B-ORGANIZATION": 19, "I-ORGANIZATION": 20, "B-PERSON": 21, "I-PERSON": 22, "B-PHONE_NUMBER": 23, "I-PHONE_NUMBER": 24, "B-STREET_ADDRESS": 25, "I-STREET_ADDRESS": 26, "B-TITLE": 27, "I-TITLE": 28, "B-US_DRIVER_LICENSE": 29, "I-US_DRIVER_LICENSE": 30, "B-US_SSN": 31, "I-US_SSN": 32, "B-ZIP_CODE": 33, "I-ZIP_CODE": 34}, "id_to_label": {"0": "O", "1": "B-AGE", "2": "I-AGE", "3": "B-CREDIT_CARD", "4": "I-CREDIT_CARD", "5": "B-DATE_TIME", "6": "I-DATE_TIME", "7": "B-DOMAIN_NAME", "8": "I-DOMAIN_NAME", "9": "B-EMAIL_ADDRESS", "10": "I-EMAIL_ADDRESS", "11": "B-GPE", "12": "I-GPE", "13": "B-IBAN_CODE", "14": "I-IBAN_CODE", "15": "B-IP_ADDRESS", "16": "I-IP_ADDRESS", "17": "B-NRP", "18": "I-NRP", "19": "B-ORGANIZATION", "20": "I-ORGANIZATION", "21": "B-PERSON", "22": "I-PERSON", "23": "B-PHONE_NUMBER", "24": "I-PHONE_NUMBER", "25": "B-STREET_ADDRESS", "26": "I-STREET_ADDRESS", "27": "B-TITLE", "28": "I-TITLE", "29": "B-US_DRIVER_LICENSE", "30": "I-US_DRIVER_LICENSE", "31": "B-US_SSN", "32": "I-US_SSN", "33": "B-ZIP_CODE", "34": "I-ZIP_CODE"}}
lora_config.json ADDED
@@ -0,0 +1 @@
+ {"rank": 16, "alpha": 32, "dropout": 0.1, "target_modules": ["attention.self.query", "attention.self.value", "attention.output.dense", "intermediate.dense", "output.dense"]}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68536cf27db976e89cdf1dbb5b36f409932be494dd180020580b9d008d907605
+ size 435697596
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff