TechWolf/JobBERT-v3

Commit 59f1449 (verified) · 1 Parent(s): 0235ab0
jensjorisdecorte committed
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "word_embedding_dimension": 768,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": true,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": false,
+ "include_prompt": true
+ }
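The pooling configuration above selects plain mean pooling over the transformer's token embeddings (CLS, max, and last-token modes are all disabled). A minimal sketch of what that computes, assuming a `token_embeddings` tensor and an `attention_mask` straight from the transformer; the names are illustrative, not taken from this repository:

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 768), attention_mask: (batch, seq_len)
    mask = attention_mask.unsqueeze(-1).float()     # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)   # sum only over real (non-padding) tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)        # number of real tokens per text
    return summed / counts                          # (batch, 768)

# Example shapes matching word_embedding_dimension = 768
emb = torch.randn(2, 64, 768)
attn = torch.ones(2, 64, dtype=torch.long)
print(mean_pool(emb, attn).shape)  # torch.Size([2, 768])
```

Masking the padding positions means short job titles and long skill lists are each averaged only over their own tokens.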
2_Asym/6235903824_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 1024, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/6235903824_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8caaef08b8292a16478db1d41b423527ce8eec94d62a16fbfbb0eeb4a8ff868f
+ size 3149984
2_Asym/6235904160_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 768, "out_features": 1024, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/6235904160_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b3fa8b97f9410fcd8cb59b8f3f438c4f9e691d87a1fe8c0a7b8fa4bb7fe6c8c
+ size 3149984
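Each of the two `Dense` modules above is a 768 → 1024 affine projection with a bias and a Tanh activation; the ~3.1 MB safetensors files hold essentially those weights (768×1024 floats plus the bias). A plain PyTorch equivalent of one head, as a sketch rather than the sentence-transformers implementation itself:

```python
import torch
import torch.nn as nn

# One Dense head as configured: in_features=768, out_features=1024, bias=True, Tanh activation.
dense_head = nn.Sequential(nn.Linear(768, 1024, bias=True), nn.Tanh())

pooled = torch.randn(4, 768)      # four mean-pooled 768-d sentence vectors
projected = dense_head(pooled)    # (4, 1024); Tanh squashes values into (-1, 1)
print(projected.shape)            # torch.Size([4, 1024])
```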
2_Asym/config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "types": {
+ "6235903824_Dense": "sentence_transformers.models.Dense",
+ "6235904160_Dense": "sentence_transformers.models.Dense"
+ },
+ "structure": {
+ "anchor": [
+ "6235903824_Dense"
+ ],
+ "positive": [
+ "6235904160_Dense"
+ ]
+ },
+ "parameters": {
+ "allow_empty_key": true
+ }
+ }
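This `Asym` config routes inputs by key: texts passed under `anchor` (job titles) go through one Dense head and texts passed under `positive` (skill lists) through the other, while `allow_empty_key: true` also lets plain, un-keyed strings through, which is what the README's generic usage snippet relies on. A hedged sketch of the explicit keyed form, following the usual sentence-transformers `Asym` pattern of encoding one-key dicts (verify against the model card before relying on it; the example texts are illustrative):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("TechWolf/JobBERT-v3")

# One-key dicts select the Dense head: 'anchor' for job titles, 'positive' for skill lists.
title_emb = model.encode([{"anchor": "Associate Infrastructure Engineer"}])
skills_emb = model.encode([{"positive": "cloud technologies, use databases, Agile development, JavaScript"}])

print(title_emb.shape, skills_emb.shape)        # (1, 1024) (1, 1024)
print(model.similarity(title_emb, skills_emb))  # cosine similarity of the pair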
README.md ADDED
@@ -0,0 +1,499 @@
+ ---
+ tags:
+ - sentence-transformers
+ - sentence-similarity
+ - feature-extraction
+ - generated_from_trainer
+ - dataset_size:21123868
+ - loss:CachedMultipleNegativesRankingLoss
+ base_model: sentence-transformers/paraphrase-multilingual-mpnet-base-v2
+ widget:
+ - source_sentence: 系统管理员技术员——TS/SCI级别并拥有多项式验证
+ sentences:
+ - support development of annual budget, create a financial report, report analysis
+ results, Microsoft Access, accounting, use presentation software, interpret financial
+ statements, synthesise financial information, develop vaccines, handle financial
+ overviews of the store, produce statistical financial records, develop financial
+ statistics reports, explain accounting records, financial analysis, SAP R3, represent
+ the company, examine budgets, prepare presentation material, use spreadsheets
+ software, forecast account metrics, meet deadlines, prepare financial projections,
+ manage budgets, exercise self-control, financial statements
+ - ensure cross-department cooperation, establish customer rapport, improve business
+ processes, manage technical security systems, handle incidents, maintain ICT system,
+ explain characteristics of computer peripheral equipment, gather technical information,
+ collaborate in company's daily operations , apply change management, maintain
+ technical equipment, communicate with customers, solve technical problems, perform
+ ICT troubleshooting, use ICT equipment in maintenance activities, manage major
+ incidents, build business relationships, computer engineering, perform software
+ recovery testing, identify process improvements, maintain relationship with customers,
+ carry out project activities, collaborate in the development of marketing strategies,
+ computer technology, technical terminology
+ - utilise machine learning, cloud technologies, develop predictive models, assess
+ sportive performance, formulate findings , principles of artificial intelligence,
+ perform business research, communicate with stakeholders, computer engineering,
+ build predictive models, computer science, develop automated software tests, analyse
+ business objectives, Agile development, cloud monitoring and reporting, provide
+ written content, obtain relevant licenses, design prototypes, machine learning,
+ e-learning software infrastructure, analyse education system, disseminate results
+ to the scientific community, learning technologies, ML (computer programming),
+ task algorithmisation
+ - source_sentence: 安全运营官
+ sentences:
+ - deliver outstanding service, manage carriers, direct customers to merchandise,
+ improve customer interaction, manage time, support managers, assist customers,
+ process customer orders, manage customer service, satisfy customers, guarantee
+ customer satisfaction, respond to customers' inquiries
+ - manage several projects, implement operational business plans, identify improvement
+ actions, develop strategy to solve problems, manage website, carry out project
+ activities, follow reporting procedures, supervise site maintenance, adjust priorities,
+ schedule shifts, conduct public presentations, motivate others, manage operational
+ budgets, report to the team leader, encourage teams for continuous improvement,
+ lead the sustainability reporting process, implement sustainable procurement,
+ show an exemplary leading role in an organisation, manage manufacturing facilities,
+ develop training programmes, develop production line, supply chain management,
+ leadership principles, lead a team, coaching techniques
+ - provide emergency supplies, provide first aid, liaise with security authorities,
+ apply medical first aid in case of emergency, regulate traffic, train security
+ officers, maintain physical fitness, provide protective escort, ensure public
+ safety and security, ensure inspections of facilities, work in inclement conditions,
+ follow procedures in the event of an alarm, set safety and security standards,
+ comply with the principles of self-defence, present reports, maintain facility
+ security systems, conduct security screenings, types of evaluation , monitor security
+ measures, office equipment, escort pedestrians across streets, advise on security
+ staff selection, wear appropriate protective gear, work in outdoor conditions,
+ assist emergency services
+ - source_sentence: Empleado de control de COVID
+ sentences:
+ - maintain records of clients' prescriptions, assist people in contaminated areas,
+ label samples, maintain museum records, apply social distancing protocols, collect
+ biological samples from patients, infection control, label medical laboratory
+ samples, disinfect surfaces, maintain customer records, ensure health and safety
+ of staff, personal protective equipment, remove contaminated materials, store
+ contaminated materials, prepare prescription labels, use personal protection equipment
+ - promote organisational communication, provide legal advice, human resource management,
+ company policies, perform customer management, business processes, ensure compliance
+ with legal requirements, develop communications strategies, enforce company values,
+ develop outreach training plans, use consulting techniques, develop employment
+ policies, human resources department processes, personnel management, identify
+ training needs, participate in health personnel training, health and safety in
+ the workplace, lead police investigations, ensure compliance with policies, prepare
+ compliance documents, perform internal investigations, develop employee retention
+ programs, develop corporate training programmes, customer relationship management,
+ manage localisation
+ - perform escalation procedure, imprint visionary aspirations into the business
+ management, observe confidentiality, impart business plans to collaborators, lead
+ a team, human resources department processes, respect confidentiality obligations,
+ hire human resources, manage commercial risks, develop business plans, communicate
+ with stakeholders, maintain relationship with customers, manage several projects,
+ provide improvement strategies, manage technical security systems, knowledge management,
+ risk management, develop program ideas, perform project management, project management,
+ cope with uncertainty, address identified risks, provide performance feedback,
+ information confidentiality, track key performance indicators
+ - source_sentence: Aerie - Brand Ambassador (Sales Associate) - US
+ sentences:
+ - lay bricks, provide first aid, enforce park rules, conflict management, give swimming
+ lessons, assist in performing physical exercises, perform park safety inspections,
+ assist in the movement of heavy loads, lead a team, first aid, supervise pool
+ activities, swim, coach staff for running the performance, show an exemplary leading
+ role in an organisation, teach public speaking principles, collaborate with coaching
+ team, supervise work, calculate stairs rise and run, calculate compensation payments,
+ manage a team, information confidentiality
+ - react to events in time-critical environments, operate in a specific field of
+ nursing care, clinical science, promote healthy fitness environment, lead others,
+ comply with legislation related to health care, maintain a safe, hygienic and
+ secure working environment, provide healthcare services to patients in specialised
+ medicine, write English, conduct physical examinations, leadership principles,
+ use clinical assessment techniques, apply context specific clinical competences,
+ conduct health related research, conceptualise healthcare user’s needs, assessment
+ processes, communicate in healthcare, provide professional care in nursing, nursing
+ science, promote health and safety, implement policy in healthcare practices,
+ engage with stakeholders, identify problems, respond to changing situations in
+ health care, perform resource planning
+ - ensure the privacy of guests, provide customised products, company policies, exude
+ enthusiasm during the action sessions, provide customer guidance on product selection,
+ collect briefing regarding products, perform multiple tasks at the same time,
+ create solutions to problems, respond to visitor complaints
+ - source_sentence: 医师——危重症护理——重症监护专家——项目医务总监
+ sentences:
+ - handle incidents, provide technical documentation, coordinate operational activities,
+ ensure information security, work in teams, manage manufacturing documentation,
+ project configuration management, operate call distribution system, maintain computer
+ hardware, apply change management, manage aircraft support systems, perform escalation
+ procedure, manage production changeovers, maintenance operations, call-centre
+ technologies, manage service contracts in the drilling industry, encourage teambuilding,
+ manage major incidents, resolve equipment malfunctions, work independently, think
+ analytically, manage maintenance operations, maintain plan for continuity of operations
+ - develop recycling programs, receive actors' resumes, work in cold environments,
+ perform cleaning duties, operate floor cleaning equipment, operate forklift
+ - perform technical tasks with great care, supervise medical residents, manage a
+ multidisciplinary team involved in patient care, administrative tasks in a medical
+ environment, demonstrate technical skills during neurological surgery, apply problem
+ solving in social service, intensive care medicine, provide comprehensive care
+ for patients with surgical conditions, work in teams, solve problems
+ pipeline_tag: sentence-similarity
+ library_name: sentence-transformers
+ co2_eq_emissions:
+ emissions: 717.3535184611766
+ energy_consumed: 1.9440474755045436
+ source: codecarbon
+ training_type: fine-tuning
+ on_cloud: true
+ cpu_model: Intel(R) Xeon(R) CPU @ 2.20GHz
+ ram_total_size: 83.47684860229492
+ hours_used: 5.34
+ hardware_used: 1 x NVIDIA A100-SXM4-40GB
+ ---
+
+ # SentenceTransformer based on sentence-transformers/paraphrase-multilingual-mpnet-base-v2
+
+ This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2). It maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+ ## Model Details
+
+ ### Model Description
+ - **Model Type:** Sentence Transformer
+ - **Base model:** [sentence-transformers/paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2) <!-- at revision 84fccfe766bcfd679e39efefe4ebf45af190ad2d -->
+ - **Maximum Sequence Length:** 64 tokens
+ - **Output Dimensionality:** 1024 dimensions
+ - **Similarity Function:** Cosine Similarity
+ <!-- - **Training Dataset:** Unknown -->
+ <!-- - **Language:** Unknown -->
+ <!-- - **License:** Unknown -->
+
+ ### Model Sources
+
+ - **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+ - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+ - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+ ### Full Model Architecture
+
+ ```
+ SentenceTransformer(
+ (0): Transformer({'max_seq_length': 64, 'do_lower_case': False}) with Transformer model: XLMRobertaModel
+ (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+ (2): Asym(
+ (anchor-0): Dense({'in_features': 768, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+ (positive-0): Dense({'in_features': 768, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+ )
+ )
+ ```
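Reading the stack top to bottom: input text is tokenized and truncated to 64 tokens, XLM-R produces 768-d token states, mean pooling collapses them to one 768-d vector per text, and the Asym Dense head (Tanh) projects that to the final 1024-d embedding. A quick way to confirm the module chain locally, relying only on the fact that `SentenceTransformer` subclasses `torch.nn.Sequential` (a sketch):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("TechWolf/JobBERT-v3")

# Expected chain: Transformer (XLM-R, <=64 tokens, 768-d) -> Pooling (mean, 768-d) -> Asym (Dense+Tanh, 1024-d)
for idx, module in enumerate(model):
    print(idx, module.__class__.__name__)

print(model.max_seq_length)  # 64
```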
+
+ ## Usage
+
+ ### Direct Usage (Sentence Transformers)
+
+ First install the Sentence Transformers library:
+
+ ```bash
+ pip install -U sentence-transformers
+ ```
+
+ Then you can load this model and run inference.
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ # Download from the 🤗 Hub
+ model = SentenceTransformer("TechWolf/JobBERT-v3")
+ # Run inference
+ sentences = [
+ '医师——危重症护理——重症监护专家——项目医务总监',
+ 'perform technical tasks with great care, supervise medical residents, manage a multidisciplinary team involved in patient care, administrative tasks in a medical environment, demonstrate technical skills during neurological surgery, apply problem solving in social service, intensive care medicine, provide comprehensive care for patients with surgical conditions, work in teams, solve problems',
+ 'handle incidents, provide technical documentation, coordinate operational activities, ensure information security, work in teams, manage manufacturing documentation, project configuration management, operate call distribution system, maintain computer hardware, apply change management, manage aircraft support systems, perform escalation procedure, manage production changeovers, maintenance operations, call-centre technologies, manage service contracts in the drilling industry, encourage teambuilding, manage major incidents, resolve equipment malfunctions, work independently, think analytically, manage maintenance operations, maintain plan for continuity of operations',
+ ]
+ embeddings = model.encode(sentences)
+ print(embeddings.shape)
+ # [3, 1024]
+
+ # Get the similarity scores for the embeddings
+ similarities = model.similarity(embeddings, embeddings)
+ print(similarities.shape)
+ # [3, 3]
+ ```
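Beyond the generic snippet above, the typical pattern for this model is ranking candidate skill profiles against a job title by cosine similarity. A small sketch, with skill strings adapted from the training samples shown further down (illustrations only, not part of the model's API):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("TechWolf/JobBERT-v3")

job_title = "Customer Advisor / Teller"
skill_profiles = [
    "customer service, handle financial transactions, manage cash desk, identify customer's needs",
    "develop predictive models, machine learning, cloud technologies, task algorithmisation",
]

title_emb = model.encode([job_title])
profile_embs = model.encode(skill_profiles)

# Rank the skill profiles by cosine similarity to the job title.
scores = model.similarity(title_emb, profile_embs)[0]
best = int(scores.argmax())
print(scores.tolist())
print("Best match:", skill_profiles[best])
```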
+
+ <!--
+ ### Direct Usage (Transformers)
+
+ <details><summary>Click to see the direct usage in Transformers</summary>
+
+ </details>
+ -->
+
+ <!--
+ ### Downstream Usage (Sentence Transformers)
+
+ You can finetune this model on your own dataset.
+
+ <details><summary>Click to expand</summary>
+
+ </details>
+ -->
+
+ <!--
+ ### Out-of-Scope Use
+
+ *List how the model may foreseeably be misused and address what users ought not to do with the model.*
+ -->
+
+ <!--
+ ## Bias, Risks and Limitations
+
+ *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+ -->
+
+ <!--
+ ### Recommendations
+
+ *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+ -->
+
+ ## Training Details
+
+ ### Training Dataset
+
+ #### Unnamed Dataset
+
+ * Size: 21,123,868 training samples
+ * Columns: <code>anchor</code> and <code>positive</code>
+ * Approximate statistics based on the first 1000 samples:
+ | | anchor | positive |
+ |:--------|:------------------------------------------------------------------------------|:--------------------------------------------------------------------------------|
+ | type | string | string |
+ | details | <ul><li>min: 4 tokens</li><li>mean: 10.56 tokens</li><li>max: 38 tokens</li></ul> | <ul><li>min: 19 tokens</li><li>mean: 61.08 tokens</li><li>max: 64 tokens</li></ul> |
+ * Samples:
+ | anchor | positive |
+ |:-----------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------|
+ | <code>通信与培训专员</code> | <code>deliver online training, liaise with educational support staff, interact with an audience, construct individual learning plans, lead a team, develop corporate training programmes, learning technologies, communication, identify with the company's goals, address an audience, learning management systems, use presentation software, motivate others, provide learning support, engage with stakeholders, identify skills gaps, meet expectations of target audience, develop training programmes</code> |
+ | <code>Associate Infrastructure Engineer</code> | <code>create solutions to problems, design user interface, cloud technologies, use databases, automate cloud tasks, keep up-to-date to computer trends, work in teams, use object-oriented programming, keep updated on innovations in various business fields, design principles, Angular, adapt to changing situations, JavaScript, Agile development, manage stable, Swift (computer programming), keep up-to-date to design industry trends, monitor technology trends, web programming, provide mentorship, advise on efficiency improvements, adapt to change, JavaScript Framework, database management systems, stimulate creative processes</code> |
+ | <code>客户顾问/出纳</code> | <code>customer service, handle financial transactions, adapt to changing situations, have computer literacy, manage cash desk, attend to detail, provide customer guidance on product selection, perform multiple tasks at the same time, carry out financial transactions, provide membership service, manage accounts, adapt to change, identify customer's needs, solve problems</code> |
+ * Loss: [<code>CachedMultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cachedmultiplenegativesrankingloss) with these parameters:
+ ```json
+ {
+ "scale": 20.0,
+ "similarity_fct": "cos_sim",
+ "mini_batch_size": 512
+ }
+ ```
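For context, this is roughly how a `CachedMultipleNegativesRankingLoss` with these parameters is wired up in sentence-transformers when fine-tuning on (anchor, positive) pairs. A sketch under the assumption that the pairs live in a `datasets.Dataset` with `anchor` and `positive` columns — not the authors' actual training script:

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.losses import CachedMultipleNegativesRankingLoss

model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")

# (anchor, positive) pairs: job title -> skill list, mirroring the samples above.
train_dataset = Dataset.from_dict({
    "anchor": ["Associate Infrastructure Engineer", "客户顾问/出纳"],
    "positive": [
        "cloud technologies, use databases, Agile development, JavaScript",
        "customer service, handle financial transactions, manage cash desk",
    ],
})

# scale=20.0 and the default cosine similarity match the parameters above; other in-batch
# positives act as negatives, and mini_batch_size only controls how the cached loss chunks
# its forward passes (trading memory for speed).
loss = CachedMultipleNegativesRankingLoss(model, scale=20.0, mini_batch_size=512)

trainer = SentenceTransformerTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
```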
+
+ ### Training Hyperparameters
+ #### Non-Default Hyperparameters
+
+ - `overwrite_output_dir`: True
+ - `per_device_train_batch_size`: 2048
+ - `per_device_eval_batch_size`: 2048
+ - `num_train_epochs`: 1
+ - `fp16`: True
+
+ #### All Hyperparameters
+ <details><summary>Click to expand</summary>
+
+ - `overwrite_output_dir`: True
+ - `do_predict`: False
+ - `eval_strategy`: no
+ - `prediction_loss_only`: True
+ - `per_device_train_batch_size`: 2048
+ - `per_device_eval_batch_size`: 2048
+ - `per_gpu_train_batch_size`: None
+ - `per_gpu_eval_batch_size`: None
+ - `gradient_accumulation_steps`: 1
+ - `eval_accumulation_steps`: None
+ - `torch_empty_cache_steps`: None
+ - `learning_rate`: 5e-05
+ - `weight_decay`: 0.0
+ - `adam_beta1`: 0.9
+ - `adam_beta2`: 0.999
+ - `adam_epsilon`: 1e-08
+ - `max_grad_norm`: 1.0
+ - `num_train_epochs`: 1
+ - `max_steps`: -1
+ - `lr_scheduler_type`: linear
+ - `lr_scheduler_kwargs`: {}
+ - `warmup_ratio`: 0.0
+ - `warmup_steps`: 0
+ - `log_level`: passive
+ - `log_level_replica`: warning
+ - `log_on_each_node`: True
+ - `logging_nan_inf_filter`: True
+ - `save_safetensors`: True
+ - `save_on_each_node`: False
+ - `save_only_model`: False
+ - `restore_callback_states_from_checkpoint`: False
+ - `no_cuda`: False
+ - `use_cpu`: False
+ - `use_mps_device`: False
+ - `seed`: 42
+ - `data_seed`: None
+ - `jit_mode_eval`: False
+ - `use_ipex`: False
+ - `bf16`: False
+ - `fp16`: True
+ - `fp16_opt_level`: O1
+ - `half_precision_backend`: auto
+ - `bf16_full_eval`: False
+ - `fp16_full_eval`: False
+ - `tf32`: None
+ - `local_rank`: 0
+ - `ddp_backend`: None
+ - `tpu_num_cores`: None
+ - `tpu_metrics_debug`: False
+ - `debug`: []
+ - `dataloader_drop_last`: False
+ - `dataloader_num_workers`: 0
+ - `dataloader_prefetch_factor`: None
+ - `past_index`: -1
+ - `disable_tqdm`: False
+ - `remove_unused_columns`: True
+ - `label_names`: None
+ - `load_best_model_at_end`: False
+ - `ignore_data_skip`: False
+ - `fsdp`: []
+ - `fsdp_min_num_params`: 0
+ - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+ - `fsdp_transformer_layer_cls_to_wrap`: None
+ - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+ - `deepspeed`: None
+ - `label_smoothing_factor`: 0.0
+ - `optim`: adamw_torch
+ - `optim_args`: None
+ - `adafactor`: False
+ - `group_by_length`: False
+ - `length_column_name`: length
+ - `ddp_find_unused_parameters`: None
+ - `ddp_bucket_cap_mb`: None
+ - `ddp_broadcast_buffers`: False
+ - `dataloader_pin_memory`: True
+ - `dataloader_persistent_workers`: False
+ - `skip_memory_metrics`: True
+ - `use_legacy_prediction_loop`: False
+ - `push_to_hub`: False
+ - `resume_from_checkpoint`: None
+ - `hub_model_id`: None
+ - `hub_strategy`: every_save
+ - `hub_private_repo`: None
+ - `hub_always_push`: False
+ - `gradient_checkpointing`: False
+ - `gradient_checkpointing_kwargs`: None
+ - `include_inputs_for_metrics`: False
+ - `include_for_metrics`: []
+ - `eval_do_concat_batches`: True
+ - `fp16_backend`: auto
+ - `push_to_hub_model_id`: None
+ - `push_to_hub_organization`: None
+ - `mp_parameters`:
+ - `auto_find_batch_size`: False
+ - `full_determinism`: False
+ - `torchdynamo`: None
+ - `ray_scope`: last
+ - `ddp_timeout`: 1800
+ - `torch_compile`: False
+ - `torch_compile_backend`: None
+ - `torch_compile_mode`: None
+ - `dispatch_batches`: None
+ - `split_batches`: None
+ - `include_tokens_per_second`: False
+ - `include_num_input_tokens_seen`: False
+ - `neftune_noise_alpha`: None
+ - `optim_target_modules`: None
+ - `batch_eval_metrics`: False
+ - `eval_on_start`: False
+ - `use_liger_kernel`: False
+ - `eval_use_gather_object`: False
+ - `average_tokens_across_devices`: False
+ - `prompts`: None
+ - `batch_sampler`: batch_sampler
+ - `multi_dataset_batch_sampler`: proportional
+
+ </details>
+
+ ### Training Logs
+ | Epoch | Step | Training Loss |
+ |:------:|:-----:|:-------------:|
+ | 0.0485 | 500 | 3.89 |
+ | 0.0969 | 1000 | 3.373 |
+ | 0.1454 | 1500 | 3.1715 |
+ | 0.1939 | 2000 | 3.0414 |
+ | 0.2424 | 2500 | 2.9462 |
+ | 0.2908 | 3000 | 2.8691 |
+ | 0.3393 | 3500 | 2.8048 |
+ | 0.3878 | 4000 | 2.7501 |
+ | 0.4363 | 4500 | 2.7026 |
+ | 0.4847 | 5000 | 2.6601 |
+ | 0.5332 | 5500 | 2.6247 |
+ | 0.5817 | 6000 | 2.5951 |
+ | 0.6302 | 6500 | 2.5692 |
+ | 0.6786 | 7000 | 2.5447 |
+ | 0.7271 | 7500 | 2.5221 |
+ | 0.7756 | 8000 | 2.5026 |
+ | 0.8240 | 8500 | 2.4912 |
+ | 0.8725 | 9000 | 2.4732 |
+ | 0.9210 | 9500 | 2.4608 |
+ | 0.9695 | 10000 | 2.4548 |
+
+
+ ### Environmental Impact
+ Carbon emissions were measured using [CodeCarbon](https://github.com/mlco2/codecarbon).
+ - **Energy Consumed**: 1.944 kWh
+ - **Carbon Emitted**: 0.717 kg of CO2
+ - **Hours Used**: 5.34 hours
+
+ ### Training Hardware
+ - **On Cloud**: Yes
+ - **GPU Model**: 1 x NVIDIA A100-SXM4-40GB
+ - **CPU Model**: Intel(R) Xeon(R) CPU @ 2.20GHz
+ - **RAM Size**: 83.48 GB
+
+ ### Framework Versions
+ - Python: 3.10.16
+ - Sentence Transformers: 4.1.0
+ - Transformers: 4.48.3
+ - PyTorch: 2.6.0+cu126
+ - Accelerate: 1.3.0
+ - Datasets: 3.5.1
+ - Tokenizers: 0.21.0
+
+ ## Citation
+
+ ### BibTeX
+
+ #### Sentence Transformers
+ ```bibtex
+ @inproceedings{reimers-2019-sentence-bert,
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ author = "Reimers, Nils and Gurevych, Iryna",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+ month = "11",
+ year = "2019",
+ publisher = "Association for Computational Linguistics",
+ url = "https://arxiv.org/abs/1908.10084",
+ }
+ ```
+
+ #### CachedMultipleNegativesRankingLoss
+ ```bibtex
+ @misc{gao2021scaling,
+ title={Scaling Deep Contrastive Learning Batch Size under Memory Limited Setup},
+ author={Luyu Gao and Yunyi Zhang and Jiawei Han and Jamie Callan},
+ year={2021},
+ eprint={2101.06983},
+ archivePrefix={arXiv},
+ primaryClass={cs.LG}
+ }
+ ```
+
+ <!--
+ ## Glossary
+
+ *Clearly define terms in order to be accessible across audiences.*
+ -->
+
+ <!--
+ ## Model Card Authors
+
+ *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+ -->
+
+ <!--
+ ## Model Card Contact
+
+ *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+ -->
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "architectures": [
+ "XLMRobertaModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.52.4",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+ }
config_sentence_transformers.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "__version__": {
+ "sentence_transformers": "4.1.0",
+ "transformers": "4.52.4",
+ "pytorch": "2.7.1"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb8bd7f0d3560899953315fe8e4a05cf3d6729eb14b32fea760a5b5e090c5c7f
+ size 1112197096
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ },
+ {
+ "idx": 2,
+ "name": "2",
+ "path": "2_Asym",
+ "type": "sentence_transformers.models.Asym"
+ }
+ ]
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "max_seq_length": 64,
+ "do_lower_case": false
+ }
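Because `max_seq_length` is 64 and the skill-list positives in the training data average roughly 61 tokens (see the dataset statistics in the README above), long skill strings are silently truncated at encode time. A quick check using the model's own tokenizer (a sketch; the example string is illustrative):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("TechWolf/JobBERT-v3")

skills = "customer service, handle financial transactions, manage cash desk, identify customer's needs, solve problems"
n_tokens = len(model.tokenizer(skills)["input_ids"])
print(f"{n_tokens} tokens; anything beyond {model.max_seq_length} is truncated before pooling")
```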
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc5c1151948923156f20bcafd54fd796705d693f8d7b56c83aec49d651f6d602
+ size 17082986
tokenizer_config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "extra_special_tokens": {},
+ "mask_token": "<mask>",
+ "max_length": 128,
+ "model_max_length": 64,
+ "pad_to_multiple_of": null,
+ "pad_token": "<pad>",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "sep_token": "</s>",
+ "stride": 0,
+ "tokenizer_class": "XLMRobertaTokenizerFast",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": "<unk>"
+ }