import re
import gradio as gr
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
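
# Assumed runtime dependencies (not listed in this file): gradio, transformers, and a
# torch backend for the KoELECTRA model.
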
school_name_candidates = []

def mask_school_names(text):
    """Replace school names (초/중/고) with their initial consonants (chosung)."""
    global school_name_candidates
    school_name_candidates = []

    def replacer(match):
        name = match.group(1)
        full = match.group(0)
        if 2 <= len(name) <= 20:
            school_name_candidates.append(name)
            return to_chosung(name) + match.group(2)
        else:
            return full

    # School name written directly in front of the suffix, e.g. "서울초등학교"
    text = re.sub(r"(\b[가-힣]{2,20})(초등학교|중학교|고등학교)", replacer, text)
    # The same school names written with a space before the suffix
    for name in school_name_candidates:
        pattern = rf"{re.escape(name)}\s?(초등학교|중학교|고등학교)"
        text = re.sub(pattern, to_chosung(name) + " " + r"\1", text)
    return text
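
# Illustrative behaviour (a rough sketch, not an exhaustive test):
#   mask_school_names("한빛중학교 2학년입니다")
# should return the sentence with "한빛" reduced to its leading consonants while the
# "중학교" suffix and the rest of the text stay unchanged.
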
model_name = "Leo97/KoELECTRA-small-v3-modu-ner"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name)
ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
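
# With aggregation_strategy="simple" the pipeline returns one dict per merged entity,
# roughly of the form {"entity_group": "PS", "word": "홍길동", "score": ..., "start": ..., "end": ...};
# extract_names() below keeps only the "PS" (person) groups.
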
def extract_names(text):
    """Collect candidate person names via NER plus title/honorific heuristics."""
    try:
        results = ner_pipeline(text)
    except Exception as e:
        print("NER 오류 발생:", e)
        return []
    names = []
    for entity in results:
        if entity.get("entity_group") == "PS":  # PS = person name
            name = entity["word"].replace("##", "").strip()
            if len(name) >= 2 and name not in names:
                names.append(name)

    title_suffixes = [
        # Job titles / positions
        '대표', '이사', '전무', '상무', '부장', '차장', '과장', '대리', '사원',
        '실장', '팀장', '소장', '국장', '본부장', '주임', '총무', '회장', '부회장', '사무장',
        '직원', '매니저', '지점장',
        # Education
        '선생님', '선생', '교사', '교장', '교감', '부교장', '조교수', '교수', '연구원', '강사',
        # Degrees / professions
        '박사', '석사', '학사', '의사', '간호사', '간병인',
        # Students
        '학생', '수험생', '초등학생', '중학생', '고등학생', '학부모',
        # Family / relatives
        '어머니', '아버지', '엄마', '아빠', '형', '누나', '언니', '오빠', '동생',
        '아들', '딸', '할머니', '할아버지', '외할머니', '외할아버지',
        '이모', '고모', '삼촌', '숙모', '외삼촌', '고모부', '이모부', '조카', '사촌',
        '남편', '아내', '부인', '와이프', '신랑', '장모', '장인', '사위', '며느리',
        '올케', '형수', '제수씨', '매형', '처제', '시누이',
        # Other ways of referring to a person
        '보호자', '피해자', '당사자', '대상자', '주민', '어르신', '기사님'
    ]

    # Name written directly in front of a title, e.g. "김철수과장"
    pattern = r'\b([가-힣]{2,4})(' + '|'.join(title_suffixes) + r')\b'
    matches = re.findall(pattern, text)
    for match in matches:
        name = match[0]
        if name not in names:
            names.append(name)

    # The spaced form ("김철수 과장") reuses the same suffix list
    spaced_pattern = r'\b([가-힣]{2,4})\s+(' + '|'.join(title_suffixes) + r')\b'
    spaced_matches = re.findall(spaced_pattern, text)
    for match in spaced_matches:
        name = match[0]
        if name not in names:
            names.append(name)
    return names
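
# A minimal illustration (assumed output; actual NER results depend on the model):
#   extract_names("홍길동 선생님이 김영희 학생과 상담했다")
# would be expected to return something like ["홍길동", "김영희"],
# combining NER hits with the title-based heuristics above.
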
def refactored_mask_names(original_text, names, start_counter=100):
    """Replace each detected name with a numbered tag (N100, N101, ...) and return the mapping."""
    korean_josa = ['이가','를','은','는','을','도','만','과','와','에게','에서','으로',
                   '까지','조차','마저','이며','이다','이나','이나마','밖에','이든','이라도',
                   '이','가','의']
    masked = original_text
    mapping = {}
    counter = start_counter
    used_names = set()
    # First pass: name followed by a particle (josa); the particle is kept after the tag
    for name in names:
        for josa in korean_josa:
            full = name + josa
            pattern = rf'(?<![\w가-힣]){re.escape(full)}(?![\w가-힣])'
            if re.search(pattern, masked):
                tag = f"N{counter:03d}"
                mapping[tag] = name
                masked = re.sub(pattern, tag + josa, masked)
                counter += 1
                used_names.add(name)
                break
    # Second pass: bare names that never appeared with a particle
    for name in names:
        if name in used_names:
            continue
        pattern = rf'(?<![\w가-힣]){re.escape(name)}(?![\w가-힣])'
        if re.search(pattern, masked):
            tag = f"N{counter:03d}"
            mapping[tag] = name
            masked = re.sub(pattern, tag, masked)
            counter += 1
    return masked, mapping
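
# Sketch of the expected behaviour (assumed, not a test fixture):
#   refactored_mask_names("홍길동이 왔다", ["홍길동"])
# should return something like ("N100이 왔다", {"N100": "홍길동"}):
# the particle "이" is kept after the tag so the sentence still reads naturally.
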
def to_chosung(text):
    """Reduce each Hangul syllable to its leading consonant (choseong); other characters pass through."""
    # Leading consonants in the Hangul Jamo block, U+1100..U+1112
    CHOSUNG_LIST = [chr(i) for i in range(0x1100, 0x1113)]
    result = ""
    for ch in text:
        if '가' <= ch <= '힣':
            code = ord(ch) - ord('가')
            cho = code // 588  # 588 = 21 medial vowels * 28 final consonants
            result += CHOSUNG_LIST[cho]
        else:
            result += ch
    return result
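
# For example, to_chosung("서울") keeps only the leading consonants of the two syllables
# (ㅅ and ㅇ, emitted as U+1100-block jamo rather than compatibility jamo).
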
def mask_department(text):
    # "컴퓨터공학과" -> chosung of everything before "학과", with the "학과" suffix kept
    text = re.sub(r"([가-힣]{2,20}학과)", lambda m: to_chosung(m.group(1)[:-2]) + "학과", text)
    return text

def sanitize_sensitive_info(text, keyword_string, replace_word):
    """Apply the rule-based masks: schools, departments, grades, keywords, and number patterns."""
    text = mask_school_names(text)
    text = mask_department(text)
    # Grade / class numbers, e.g. "3학년 2반" -> "*학년 *반"
    text = re.sub(r"(\d)학년(\s?(\d)반)?", lambda m: "*학년" + (" *반" if m.group(3) else ""), text)
    text = re.sub(r"(\d)학년\s?(\d)반", r"*학년 *반", text)
    # User-supplied organisation keywords, replaced with the chosen placeholder
    keywords = [k.strip() for k in keyword_string.split(",") if k.strip()]
    for kw in keywords:
        pattern = rf"\b{re.escape(kw)}\b"
        text = re.sub(pattern, replace_word, text, flags=re.IGNORECASE)
    # Mobile phone numbers: keep the prefix and the last four digits
    text = re.sub(r"(\d{3})-(\d{4})-(\d{4})", r"\1-****-\3", text)
    # Dates: the year is always rewritten as "19**", the day is masked
    text = re.sub(r"(\d{4})년 (\d{1,2})월 (\d{1,2})일", r"19**년 \2월 *일", text)
    # Lot / building / unit numbers in addresses
    text = re.sub(r"(\d{1,3})번지", r"***번지", text)
    text = re.sub(r"(\d{1,3})동", r"***동", text)
    text = re.sub(r"(\d{1,4})호", r"****호", text)
    # E-mail addresses
    text = re.sub(r"[\w\.-]+@[\w\.-]+", r"******@****", text)
    # Resident registration numbers: keep only the first digit after the hyphen
    text = re.sub(r"(\d{6})[-](\d)\d{6}", r"*******-\2*****", text)
    # Road-name addresses: drop the building number
    text = re.sub(r"([가-힣]+(대로|로|길))\s?(\d+)(호|번길|가)?", r"\1 ***", text)
    # Other long numbers (e.g. account numbers): keep the leading two digits and the tail of the last group
    text = re.sub(r"(\d{2,6})[-]?(\d{2,6})[-]?(\d{2,6})",
                  lambda m: f"{m.group(1)[:2]}{'*'*(len(m.group(1))-2)}{'*'*len(m.group(2))}{m.group(3)[-4:]}", text)
    # Card numbers: keep the first and last four-digit groups
    text = re.sub(r"(\d{4})[- ]?(\d{4})[- ]?(\d{4})[- ]?(\d{4})",
                  lambda m: f"{m.group(1)}-****-****-{m.group(4)}", text)
    # IPv4 addresses: mask the last two octets
    text = re.sub(r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})",
                  lambda m: f"{m.group(1)}.{m.group(2)}.*.*", text)
    # Bank / district names followed by digits
    text = re.sub(r"([가-힣]{1,10})(은행|동|로|길)\s?([\d\-]{4,})",
                  lambda m: m.group(1) + m.group(2) + " " + re.sub(r"\d", "*", m.group(3)), text)
    return text
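
# Rough sketch of the combined effect (assumed formatting of the output):
#   sanitize_sensitive_info("연락처 010-1234-5678, 이메일 test@example.com", "", "우리기관")
# should mask the phone number to "010-****-5678" and the e-mail to "******@****".
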
def final_name_remask_exact_only(text, mapping_dict):
    # Re-apply the name tags in case a masked name reappeared after the other filters ran
    for tag, name in mapping_dict.items():
        pattern = rf'(?<![\w가-힣]){re.escape(name)}(?![\w가-힣])'
        text = re.sub(pattern, tag, text)
    return text

def apply_masking(text, keywords, replace_word):
    """Full pipeline: NER name tagging, rule-based masking, then a final name re-mask."""
    names = extract_names(text)
    masked, mapping = refactored_mask_names(text, names)
    sanitized = sanitize_sensitive_info(masked, keywords, replace_word)
    sanitized = final_name_remask_exact_only(sanitized, mapping)
    mapping_table = "\n".join([f"{k} → {v}" for k, v in mapping.items()])
    return sanitized, mapping_table
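
# apply_masking() is wired to the Gradio button below; it returns the masked text plus a
# human-readable "tag → original name" table so the operator can review the replacements.
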
def remask_with_mapping(text, mapping_string):
    """Re-apply a previously produced "tag → name" table to a new piece of text (not wired into the UI below)."""
    mapping = {}
    for line in mapping_string.strip().split("\n"):
        if "→" in line:
            tag, name = line.split("→", 1)
            mapping[tag.strip()] = name.strip()
    for tag, name in mapping.items():
        pattern = rf'(?<![\w가-힣]){re.escape(name)}(?![\w가-힣])'
        text = re.sub(pattern, tag, text)
    return text
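
# Illustrative use (assumed input format, matching the table produced by apply_masking):
#   remask_with_mapping("담당자: 홍길동 (재확인 필요)", "N100 → 홍길동")
# would replace the bare name with its tag again, yielding "담당자: N100 (재확인 필요)".
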
with gr.Blocks() as demo:
    gr.Markdown("""
    🛡️ **민감정보 마스킹 [땡땡이 마스킹]**
    이름 + 민감정보 + 초/중/고 마스킹기 (초성 기반)
    ⚠️ *완벽하지 않을 수 있습니다. 반드시 직접 최종 점검하세요.*
    """)
    input_text = gr.Textbox(lines=15, label="📥 원본 텍스트 입력")
    keyword_input = gr.Textbox(lines=1, label="기관 키워드 (쉼표로 구분)", value="굿네이버스, good neighbors, gn, 사회복지법인 굿네이버스")
    replace_input = gr.Textbox(lines=1, label="치환할 텍스트", value="우리기관")
    run_button = gr.Button("🚀 마스킹 실행")
    masked_output = gr.Textbox(lines=15, label="🔐 마스킹된 텍스트")
    mapping_output = gr.Textbox(lines=10, label="🏷️ 이름 태그 매핑", interactive=False)
    run_button.click(fn=apply_masking, inputs=[input_text, keyword_input, replace_input], outputs=[masked_output, mapping_output])

demo.launch()