import gradio as gr
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import re
from konlpy.tag import Okt
from tensorflow.keras.preprocessing.text import tokenizer_from_json
import pickle
import os
import logging

# Logging setup
logging.basicConfig(filename='app.log', level=logging.DEBUG,
                    format='%(asctime)s:%(levelname)s:%(message)s')

# Environment variables (KoNLPy's Okt tagger needs a JVM on the PATH)
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'
os.environ['PATH'] = os.environ['JAVA_HOME'] + '/bin:' + os.environ['PATH']

# Load the model, tokenizer, and scaler files
try:
    model = load_model('deep_learning_model(okt_drop).h5', compile=False)
    logging.info("Model loaded successfully.")

    with open('tokenizer(okt_drop).json', 'r', encoding='utf-8') as f:
        tokenizer_data = f.read()
    tokenizer = tokenizer_from_json(tokenizer_data)
    logging.info("Tokenizer loaded successfully.")

    with open('scaler.pkl', 'rb') as f:
        scaler = pickle.load(f)
    logging.info("Scaler loaded successfully.")
except Exception as e:
    logging.error("Error loading model, tokenizer, or scaler: %s", str(e))
    raise

# Reuse a single Okt instance; constructing one per request re-initializes
# the Java bridge and is slow.
okt = Okt()


def calculate_sentence_stats(paragraph):
    """Count sentences and compute the average sentence length in characters."""
    paragraph = re.sub(r'\.{2,}', '.', paragraph)  # collapse ellipses into one period
    sentences = re.split(r'[.!?]', paragraph)
    sentence_lengths = [len(s.strip()) for s in sentences if s.strip()]
    sentence_count = len(sentence_lengths)
    average_length = sum(sentence_lengths) / len(sentence_lengths) if sentence_lengths else 0
    return sentence_count, average_length


def process_text(text):
    """Extract nouns with Okt, then tokenize and pad to the training sequence length."""
    try:
        texts = ' '.join(okt.nouns(text))
        sequences = tokenizer.texts_to_sequences([texts])
        max_len = 301  # must match the sequence length used during training
        X = pad_sequences(sequences, maxlen=max_len)
        return X
    except Exception as e:
        logging.error("Error processing text: %s", str(e))
        raise


def predict_text(text, grade):
    """Predict whether a book report was written by a student or generated by AI."""
    try:
        X = process_text(text)
        sentence_count, sentence_average = calculate_sentence_stats(text)
        length = len(text)
        emoticon = 0  # emoticon count is a training-time feature, fixed to 0 at inference
        # `grade` arrives as a string from the Gradio Textbox
        numeric_features = np.array([[int(grade), length, emoticon,
                                      sentence_count, sentence_average]])
        numeric_features = scaler.transform(numeric_features)
        prediction = model.predict([X, numeric_features])
        # Labels (Korean): "This book report was generated by AI." /
        # "This book report was written by a person."
        predicted_label = ('인공지능이 생성한 독서감상문입니다.'
                           if prediction[0][0] > 0.5
                           else '사람이 작성한 독서감상문입니다.')
        return predicted_label
    except Exception as e:
        logging.error("Error predicting text: %s", str(e))
        raise


# Title (Korean): "Book Report Analyzer"; description: "Analyzes whether this
# book report was written by a student or generated by AI."
iface = gr.Interface(
    fn=predict_text,
    inputs=[gr.Textbox(lines=10, placeholder="Enter Text Here..."),
            gr.Textbox(label="Grade")],
    outputs="text",
    title="독서감상문 분석기",
    description="이 독서감상문이 학생에 의해 작성되었는지, 인공지능에 의해 생성되었는지 분석합니다."
)

iface.launch(debug=True)
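
# A minimal sketch of a local smoke test (assumes the model, tokenizer, and
# scaler files above are present; the sample text and grade value below are
# hypothetical). Run this in place of iface.launch() to exercise the full
# pipeline without starting the web UI:
#
#     sample = "이 책을 읽고 주인공의 용기에 깊이 감동받았다."  # hypothetical report text
#     print(predict_text(sample, "3"))  # grade is passed as a string, as Gradio does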