import display_gloss as dg
import synonyms_preprocess as sp
from NLP_Spacy_base_translator import NlpSpacyBaseTranslator
from flask import Flask, render_template, Response, request, send_file
import io
import cv2
import numpy as np
import os
import requests
from urllib.parse import quote, unquote
import tempfile
import re

app = Flask(__name__, static_folder='static')
app.config['TITLE'] = 'Sign Language Translate'

nlp, dict_docs_spacy = sp.load_spacy_values()
dataset, list_2000_tokens = dg.load_data()


def clean_quotes(text):
    """Clean up quote characters."""
    # Collapse runs of consecutive single quotes into one
    text = re.sub(r"'+", "'", text)
    # Remove redundant whitespace
    text = re.sub(r'\s+', ' ', text).strip()
    return text


def is_korean(text):
    """Return True if the text contains any Hangul characters."""
    return bool(re.search('[가-힣]', text))


def normalize_quotes(text):
    """Normalize the quoting format of the input text."""
    # Clean up all quotes first
    text = clean_quotes(text)

    if is_korean(text):
        # Korean sentence: wrap only the first noun phrase in quotes
        words = text.split()
        first_word = words[0]
        if not (first_word.startswith("'") and first_word.endswith("'")):
            words[0] = f"'{first_word}'"
        return ' '.join(words)
    else:
        # English sentence: wrap only the first word in quotes
        words = text.split()
        if words:
            if not (words[0].startswith("'") and words[0].endswith("'")):
                words[0] = f"'{words[0]}'"
        return ' '.join(words)


def find_quoted_words(text):
    """Find all words wrapped in single quotes."""
    return re.findall(r"'([^']*)'", text)


def spell_out_word(word):
    """Split a word into individual letters for fingerspelling."""
    return ' '.join(list(word.lower()))


def is_english(text):
    """Return True if the text is English."""
    # Strip quotes and basic punctuation, keeping only Latin letters and whitespace
    cleaned_text = re.sub(r'[^A-Za-z\s]', '', text)
    # The text must contain at least one letter
    has_letters = bool(re.search('[A-Za-z]', cleaned_text))
    # ...and nothing but letters and whitespace after cleaning
    is_only_english = bool(re.match(r'^[A-Za-z\s]*$', cleaned_text))
    return has_letters and is_only_english


def translate_korean_to_english(text):
    """Translate the full input text from Korean to English."""
    try:
        # Normalize the input text
        text = normalize_quotes(text)

        # English input: only ensure the first word is quoted
        if is_english(text):
            words = text.split()
            if words:
                # Keep existing quotes, add them if missing
                if not (words[0].startswith("'") and words[0].endswith("'")):
                    words[0] = f"'{words[0]}'"
                return ' '.join(words)
            return text

        # Korean input
        # Find the word wrapped in single quotes
        quoted_words = re.findall(r"'([^']*)'", text)
        if not quoted_words:
            # No quoted word found: wrap the first word in quotes
            words = text.split()
            if words:
                text = f"'{words[0]}'" + text[len(words[0]):]
                quoted_words = [words[0]]

        # Translate the first quoted word on its own
        url = "https://translate.googleapis.com/translate_a/single"
        params = {
            "client": "gtx",
            "sl": "ko",
            "tl": "en",
            "dt": "t",
            "q": quoted_words[0]
        }
        response = requests.get(url, params=params)
        if response.status_code == 200:
            translated_word = response.json()[0][0][0].upper()

            # Replace the quoted word with a temporary marker
            text = text.replace(f"'{quoted_words[0]}'", "QUOTED_WORD_MARKER")

            # Translate the whole sentence
            params["q"] = text
            response = requests.get(url, params=params)
            if response.status_code == 200:
                translated_text = ' '.join(item[0] for item in response.json()[0] if item[0])
                # Swap the marker back in as the translated, quoted word
                translated_text = translated_text.replace("QUOTED_WORD_MARKER", f"'{translated_word}'")
                return translated_text

        return text
    except Exception as e:
        print(f"Translation error: {e}")
        return text


@app.route('/')
def index():
    return render_template('index.html', title=app.config['TITLE'])


@app.route('/translate/', methods=['POST'])
def result():
    if request.method == 'POST':
        input_text = request.form['inputSentence'].strip()
        if not input_text:
            return render_template('error.html', error="Please enter text to translate")
        try:
            # Normalize the input text
            input_text = normalize_quotes(input_text)

            # Perform the translation
            english_text = translate_korean_to_english(input_text)
            if not english_text:
                raise Exception("Translation failed")

            # Extract the quoted word (only the first one is used)
            quoted_words = re.findall(r"'([^']*)'", english_text)
            first_quoted_word = quoted_words[0] if quoted_words else None

            # Strip the quotes before converting to ASL gloss
            clean_english = re.sub(r"'([^']*)'", r"\1", english_text)
            eng_to_asl_translator = NlpSpacyBaseTranslator(sentence=clean_english)
            generated_gloss = eng_to_asl_translator.translate_to_gloss()

            # Process each gloss word
            processed_gloss = []
            words = generated_gloss.split()

            for word in words:
                word_upper = word.upper()
                if first_quoted_word and word_upper == first_quoted_word.upper():
                    # Proper noun: split into individual letters for fingerspelling
                    spelled_word = spell_out_word(word)
                    processed_gloss.extend(['FINGERSPELL-START'] + spelled_word.split() + ['FINGERSPELL-END'])
                else:
                    # Regular word: keep the existing handling
                    word_lower = word.lower()
                    if word_lower.isalnum():
                        processed_gloss.append(word_lower)

            gloss_sentence_before_synonym = " ".join(processed_gloss)

            # Apply synonym matching only to words that are not fingerspelled
            final_gloss = []
            i = 0
            while i < len(processed_gloss):
                if processed_gloss[i] == 'FINGERSPELL-START':
                    final_gloss.extend(processed_gloss[i:i+2])
                    i += 2

                    while i < len(processed_gloss) and processed_gloss[i] != 'FINGERSPELL-END':
                        final_gloss.append(processed_gloss[i])
                        i += 1

                    if i < len(processed_gloss):
                        final_gloss.append(processed_gloss[i])
                        i += 1
                else:
                    word = processed_gloss[i]
                    final_gloss.append(sp.find_synonyms(word, nlp, dict_docs_spacy, list_2000_tokens))
                    i += 1

            gloss_sentence_after_synonym = " ".join(final_gloss)

            return render_template('result.html',
                                   title=app.config['TITLE'],
                                   original_sentence=input_text,
                                   english_translation=english_text,
                                   gloss_sentence_before_synonym=gloss_sentence_before_synonym,
                                   gloss_sentence_after_synonym=gloss_sentence_after_synonym)
        except Exception as e:
            return render_template('error.html', error=f"Translation error: {str(e)}")


def generate_complete_video(gloss_list, dataset, list_2000_tokens):
    try:
        frames = []
        is_spelling = False

        for gloss in gloss_list:
            if gloss == 'FINGERSPELL-START':
                is_spelling = True
                continue
            elif gloss == 'FINGERSPELL-END':
                is_spelling = False
                continue

            # Decode each streamed JPEG frame back into an image
            for frame in dg.generate_video([gloss], dataset, list_2000_tokens):
                frame_data = frame.split(b'\r\n\r\n')[1]
                nparr = np.frombuffer(frame_data, np.uint8)
                img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                frames.append(img)

        if not frames:
            raise Exception("No frames generated")

        height, width = frames[0].shape[:2]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')

        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
            temp_path = temp_file.name

        out = cv2.VideoWriter(temp_path, fourcc, 25, (width, height))
        for frame in frames:
            out.write(frame)
        out.release()

        with open(temp_path, 'rb') as f:
            video_bytes = f.read()
        os.remove(temp_path)

        return video_bytes
    except Exception as e:
        print(f"Error generating video: {str(e)}")
        raise


@app.route('/video_feed')
def video_feed():
    sentence = request.args.get('gloss_sentence_to_display', '')
    gloss_list = sentence.split()
    return Response(dg.generate_video(gloss_list, dataset, list_2000_tokens),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/download_video/<path:gloss_sentence>')
def download_video(gloss_sentence):
    try:
        decoded_sentence = unquote(gloss_sentence)
        gloss_list = decoded_sentence.split()

        if not gloss_list:
            return "No gloss provided", 400

        video_bytes = generate_complete_video(gloss_list, dataset, list_2000_tokens)

        if not video_bytes:
            return "Failed to generate video", 500

        return send_file(
            io.BytesIO(video_bytes),
            mimetype='video/mp4',
            as_attachment=True,
            download_name='sign_language.mp4'
        )
    except Exception as e:
        print(f"Download error: {str(e)}")
        return f"Error downloading video: {str(e)}", 500


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860, debug=True)
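
# A minimal sketch of how a client might exercise these endpoints once the
# server is running on port 7860 (the port configured above). The sample
# sentence and gloss string are illustrative assumptions, not values defined
# in this module; only the route paths and the 'inputSentence' form field
# come from the code.
#
#   curl -X POST -d "inputSentence='John' is my friend" http://localhost:7860/translate/
#   curl -o sign_language.mp4 "http://localhost:7860/download_video/friend%20good"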