# -*- coding: utf-8 -*-
# Commented out IPython magic to ensure Python compatibility.
import os
import shutil
import json
import math
import uuid
import subprocess
import threading
import concurrent.futures
import tempfile  # used by mosaic_face below; missing from the original imports
from datetime import datetime
from io import BytesIO  # used by read_image below
from pathlib import Path
from types import SimpleNamespace
from typing import Tuple

import cv2
import numpy as np
import torch
import insightface
import supervision as sv
import pycocotools.mask as mask_util
import matplotlib.pyplot as plt
import nest_asyncio
import uvicorn
from PIL import Image, ImageFilter
from pyngrok import ngrok
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, HTMLResponse
from diffusers import StableDiffusionInpaintPipeline
from simple_lama_inpainting import SimpleLama
from sklearn.cluster import (
    KMeans, AgglomerativeClustering, DBSCAN, MiniBatchKMeans, Birch,
    SpectralClustering, MeanShift, OPTICS
)
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from sklearn.neighbors import KNeighborsClassifier
from torchvision import transforms
from ultralytics import YOLO  # YOLOv8 library

# The code below is the variant that does not mask a specific, user-selected region.
app = FastAPI()

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # this can be restricted to specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

HOME = "./"
dangerarray = [10, 30, 90, 50, 80, 20, 40, 70, 100, 60]  # danger score assigned to each cluster

# The detection sensitivity per class can be raised or lowered here
thresholds = {
    'text': 0.1,
    'Name tag': 0.1,
    'License plate': 0.1,
    'Digital screens': 0.1,
    'signboard': 0.1,
    'documents': 0.1,
    'information board': 0.1,
    'poster': 0.1,
    'sign': 0.1,
    'Mail or envelope': 0.1,
    'logo': 0.1,
    'cardboard': 0.4,
    'manhole': 0.6,
    'electricity pole': 0.7
}
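# Minimal sketch of how a per-class threshold gates a detection (illustrative
# values only; note that special_process_image_yolo below receives this dict
# but derives its own risk-based thresholds instead of consulting it):
#   confidence, object_type = 0.35, 'cardboard'
#   keep = confidence >= thresholds.get(object_type, 0.5)  # False here: 0.35 < 0.4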
# Define paths
CONFIG_PATH = os.path.join(HOME, "GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py")
WEIGHTS_NAME = "groundingdino_swint_ogc.pth"
WEIGHTS_PATH = os.path.join(HOME, "weights", WEIGHTS_NAME)
def is_bright(pixel):
    # Judge brightness from the pixel's luminance
    r, g, b = pixel
    brightness = (0.299 * r + 0.587 * g + 0.114 * b)  # ITU-R BT.601 luma
    return brightness > 127  # threshold fixed at 127

def analyze_mask_brightness(original_image_path, mask_image_path):
    # Open the images
    original_img = Image.open(original_image_path).convert('RGB')
    mask_img = Image.open(mask_image_path).convert('L')  # convert to grayscale
    width, height = original_img.size
    if mask_img.size != (width, height):
        print("Error: the mask image and the original image differ in size.")
        return
    # Count bright and dark pixels inside the mask
    bright_count = 0
    dark_count = 0
    for y in range(height):
        for x in range(width):
            mask_value = mask_img.getpixel((x, y))
            if mask_value > 127:  # white mask pixel = target region
                pixel = original_img.getpixel((x, y))
                if is_bright(pixel):
                    bright_count += 1
                else:
                    dark_count += 1
    # 1 = mostly bright, 2 = mostly dark
    brightness_result = 1 if bright_count > dark_count else 2
    return brightness_result
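# A minimal vectorized sketch of the same brightness test (an alternative added
# here for illustration, not part of the original app): the per-pixel Python
# loop above is slow for large images; NumPy does the same work in a few array ops.
def analyze_mask_brightness_np(original_image_path, mask_image_path):
    original = np.asarray(Image.open(original_image_path).convert('RGB'), dtype=np.float32)
    mask = np.asarray(Image.open(mask_image_path).convert('L'))
    if mask.shape != original.shape[:2]:
        return None  # size mismatch, same as the error path above
    # Same BT.601 luma as is_bright(), computed for every pixel at once
    luminance = 0.299 * original[..., 0] + 0.587 * original[..., 1] + 0.114 * original[..., 2]
    selected = luminance[mask > 127]  # only pixels inside the white mask region
    bright_count = int((selected > 127).sum())
    dark_count = selected.size - bright_count
    return 1 if bright_count > dark_count else 2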
def classify_mask_size(mask_image_path, small_threshold, medium_threshold, large_threshold):
    # Open the mask image
    mask_img = Image.open(mask_image_path).convert('L')  # convert to grayscale
    width, height = mask_img.size
    total_pixels = width * height
    white_pixel_count = 0
    # Count the white pixels in the mask
    for y in range(height):
        for x in range(width):
            mask_value = mask_img.getpixel((x, y))
            if mask_value > 127:  # treat as a white pixel
                white_pixel_count += 1
    # Percentage of the image covered by white pixels
    mask_area_ratio = (white_pixel_count / total_pixels) * 100
    # Classify the mask size
    if mask_area_ratio <= small_threshold:
        size_category = 1  # very small
    elif mask_area_ratio <= medium_threshold:
        size_category = 2  # small
    elif mask_area_ratio <= large_threshold:
        size_category = 3  # large
    else:
        size_category = 4  # very large
    return size_category
def analyze_mask_combined(original_image_path, mask_image_path, small_threshold, medium_threshold, large_threshold):
    # Classify the mask size
    size_category = classify_mask_size(mask_image_path, small_threshold, medium_threshold, large_threshold)
    # Judge the brightness of the masked region
    brightness_result = analyze_mask_brightness(original_image_path, mask_image_path)
    # Report the results
    size_text = {1: "very small", 2: "small", 3: "large", 4: "very large"}
    print(f"Mask size: {size_text[size_category]} ({size_category})")
    print(f"Mask brightness: {brightness_result}")
    result = {
        'size': size_category,
        'brightness': brightness_result
    }
    return result
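# Usage sketch (hypothetical file names; the thresholds are percentages of the
# image area, so 1/5/20 split the range into the four size categories):
#   info = analyze_mask_combined("input.jpg", "mask.jpg",
#                                small_threshold=1, medium_threshold=5, large_threshold=20)
#   # -> e.g. {'size': 2, 'brightness': 1}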
def decide_to_object(risk_level):
    # Labels ordered by priority, matching the keys of `thresholds`
    tex = [
        'text', 'poster', 'Name tag', 'License plate', 'Digital screens',
        'signboard', 'sign', 'logo', 'manhole', 'electricity pole', 'cardboard'
    ]
    # Number of target classes grows stepwise with the risk level
    num_objects = int(risk_level / 20) * (len(tex) // 5)
    return tex[:int(num_objects) + 1]
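# Worked example of the arithmetic above (len(tex) == 11, so len(tex) // 5 == 2):
#   risk_level = 50 -> int(50 / 20) * 2 = 4 -> tex[:5], the five highest-priority labels
#   risk_level = 10 -> int(10 / 20) * 2 = 0 -> tex[:1], only 'text'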
# Function that builds a rectangular mask
def create_mask(image, x1, y1, x2, y2):
    mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
    cv2.rectangle(mask, (int(x1), int(y1)), (int(x2), int(y2)), 255, -1)
    return mask

# Special-case processing: detect objects with YOLO and mask them,
# excluding a protected region given by two normalized points
def special_process_image_yolo(risk_level, image_path, point1, point2, thresholds=None):
    # Check which device is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")
    # Path to the model file
    model_path = './1113.pt'
    # Make sure the model file exists
    if not os.path.isfile(model_path):
        raise FileNotFoundError(f"Model file '{model_path}' not found. Check the path.")
    # Load the YOLOv8 model and move it to the selected device
    model = YOLO(model_path).to(device)
    print("Model loaded successfully and moved to the device.")
    # Timestamp for the output file names
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    # Labels in priority order; each label's threshold decay is derived from this order
    tex = [
        'text', 'poster', 'Name tag', 'License plate', 'Digital screens',
        'signboard', 'sign', 'logo', 'manhole', 'electricity pole', 'cardboard'
    ]

    def logistic_decay_for_label(risk_level, label_index, k=0.1, r0=50):
        base_decay = 1 / (1 + np.exp(-k * (risk_level - r0)))
        # Stagger the decay according to the label's position in the priority list
        return max(base_decay + 0.05 * label_index, 0.01)

    adjusted_thresholds = {}
    for i, label in enumerate(tex):
        decay_factor = logistic_decay_for_label(risk_level, i)
        adjusted_thresholds[label] = max(0.01, decay_factor / 2)
    # Load the image and convert it to RGB
    image = cv2.imread(str(image_path))
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Run inference
    results = model(image_rgb)
    # Mask image, initially all black
    mask = np.zeros(image.shape[:2], dtype=np.uint8)
    # Build the mask from every detected object that clears its threshold
    for box in results[0].boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        confidence = box.conf[0]
        class_id = box.cls[0]
        object_type = model.names[int(class_id)]
        # Look up the object's threshold and, if cleared, add its box to the mask
        threshold = adjusted_thresholds.get(object_type, 0.5)
        if confidence >= threshold:
            # Accumulate boxes instead of overwriting: the original assigned
            # `mask = create_mask(...)` here, which kept only the last box
            mask = cv2.bitwise_or(mask, create_mask(image, x1, y1, x2, y2))
    # Convert the protected points to absolute coordinates and blank out that range
    p1_x, p1_y = int(point1[0] * image.shape[1]), int(point1[1] * image.shape[0])
    p2_x, p2_y = int(point2[0] * image.shape[1]), int(point2[1] * image.shape[0])
    x_min, y_min = max(0, min(p1_x, p2_x)), max(0, min(p1_y, p2_y))
    x_max, y_max = min(image.shape[1], max(p1_x, p2_x)), min(image.shape[0], max(p1_y, p2_y))
    mask[y_min:y_max, x_min:x_max] = 0  # set the protected range to black
    # Draw a white rectangle for debugging
    debug_image = image_rgb.copy()
    cv2.rectangle(debug_image, (x_min, y_min), (x_max, y_max), (255, 255, 255), 2)
    # Save the debug image and the mask image
    save_dir = "./saved_images"
    os.makedirs(save_dir, exist_ok=True)
    debug_image_pil = Image.fromarray(debug_image)
    debug_image_path = os.path.join(save_dir, f"debug_image_with_rectangle_{timestamp}.jpg")
    debug_image_pil.save(debug_image_path)
    mask_image_pil = Image.fromarray(mask)
    mask_image_path = os.path.join(save_dir, f"final_mask_{timestamp}.jpg")
    mask_image_pil.save(mask_image_path)
    print(f"Debug image saved to {debug_image_path}.")
    print(f"Mask image saved to {mask_image_path}.")
    return mask_image_path
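# Usage sketch (commented out so the module stays import-safe; the points are
# normalized [0, 1] coordinates of the region to protect from masking):
#   mask_path = special_process_image_yolo(
#       risk_level=50, image_path="input.jpg",
#       point1=(0.2, 0.2), point2=(0.4, 0.4), thresholds=thresholds)
# Note that the per-label cutoffs actually used are adjusted_thresholds, derived
# from risk_level via the logistic curve above; the `thresholds` argument is
# accepted but not consulted.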
def convert_image_format(input_path, output_format="png"):
    """
    Convert an image from JPG to PNG or from PNG to JPG.
    Parameters:
    - input_path: path of the source image to convert
    - output_format: output format ("png" or "jpg"; defaults to "png")
    Returns:
    - output_path: path of the converted image
    """
    # Check that the requested format is supported
    if output_format not in ["png", "jpg", "jpeg"]:
        raise ValueError("Supported output formats are 'png' and 'jpg'.")
    # Load the image
    image = cv2.imread(input_path)
    if image is None:
        raise ValueError(f"Image not found: {input_path}")
    # Build the output path
    base_name = os.path.splitext(os.path.basename(input_path))[0]
    output_path = f"{base_name}.{output_format}"
    # Save the image
    if output_format == "png":
        cv2.imwrite(output_path, image, [cv2.IMWRITE_PNG_COMPRESSION, 9])  # PNG at maximum compression
    else:
        cv2.imwrite(output_path, image, [cv2.IMWRITE_JPEG_QUALITY, 90])  # JPG at high quality
    return output_path
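# Usage sketch (hypothetical file name; the output lands in the working directory):
#   png_path = convert_image_format("photo.jpg", output_format="png")  # -> "photo.png"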
def mosaic_image_with_mask(image_path, mask_path, output_path, mosaic_level=15):
    """
    Pixelate the regions of an image selected by a mask.
    Parameters:
    - image_path: path of the source image
    - mask_path: path of a mask image, white where the mosaic should go and black elsewhere
    - output_path: path for the mosaic result
    - mosaic_level: mosaic strength (larger values give a coarser mosaic)
    Returns:
    - output_path: path of the pixelated image
    """
    # Load the image and the mask
    image = cv2.imread(str(image_path))
    mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
    # Check that both loaded correctly
    if image is None:
        raise ValueError(f"Source image not found: {image_path}")
    if mask is None:
        raise ValueError(f"Mask image not found: {mask_path}")
    # Resize the mask if it does not match the image
    if image.shape[:2] != mask.shape[:2]:
        print(f"Resizing the mask to match the source image: {mask.shape} -> {image.shape[:2]}")
        mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
    # Extract the region to pixelate
    mosaic_area = cv2.bitwise_and(image, image, mask=mask)
    # Pixelate by downscaling, then upscaling with nearest-neighbour interpolation
    small = cv2.resize(mosaic_area, (image.shape[1] // mosaic_level, image.shape[0] // mosaic_level), interpolation=cv2.INTER_LINEAR)
    mosaic = cv2.resize(small, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
    # Composite the mosaic back into the source image through the mask
    mosaic_result = cv2.bitwise_and(mosaic, mosaic, mask=mask)
    image_no_mosaic = cv2.bitwise_and(image, image, mask=cv2.bitwise_not(mask))
    result_image = cv2.add(image_no_mosaic, mosaic_result)
    # Save the result
    cv2.imwrite(str(output_path), result_image)
    return output_path
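# Usage sketch (hypothetical file names; mosaic_level=15 shrinks each dimension
# 15x before upscaling, so larger values give coarser blocks):
#   mosaic_image_with_mask("input.jpg", "mask.jpg", "out_mosaic.jpg", mosaic_level=20)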
# The following uses OpenCV inpainting
def inpaint_image_with_mask(image_path, mask_path, output_path, inpaint_radius=5, inpaint_method=cv2.INPAINT_TELEA):
    """
    Inpaint an image using a mask.
    Parameters:
    - image_path: path of the source image
    - mask_path: path of the mask image (white where repair is wanted, black elsewhere)
    - output_path: path for the inpainted result
    - inpaint_radius: inpainting radius (default 5)
    - inpaint_method: inpainting algorithm (default cv2.INPAINT_TELEA)
    Returns:
    - output_path: path of the inpainted image
    """
    # Load the image and the mask
    image = cv2.imread(str(image_path))
    mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)  # load the mask in grayscale
    # Check that both loaded correctly
    if image is None:
        raise ValueError(f"Source image not found: {image_path}")
    if mask is None:
        raise ValueError(f"Mask image not found: {mask_path}")
    # Resize the mask if it does not match the image
    if image.shape[:2] != mask.shape[:2]:
        print(f"Resizing the mask to match the source image: {mask.shape} -> {image.shape[:2]}")
        mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
    # Inpaint
    inpainted_image = cv2.inpaint(image, mask, inpaint_radius, inpaint_method)
    # Save the result
    cv2.imwrite(str(output_path), inpainted_image)
    return output_path
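# OpenCV ships two classical inpainting algorithms: cv2.INPAINT_TELEA (fast
# marching) and cv2.INPAINT_NS (Navier-Stokes). Either can be passed in, e.g.:
#   inpaint_image_with_mask("input.jpg", "mask.jpg", "out.jpg",
#                           inpaint_radius=3, inpaint_method=cv2.INPAINT_NS)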
def stamp_image_with_mask(base_image_path, mask_path, output_path, stamp_image_path='./main.png'):
    """
    Paste another image into the masked region of a base image.
    Parameters:
    - base_image_path: path of the base image
    - mask_path: path of the mask image (white where the stamp goes, black elsewhere)
    - stamp_image_path: path of the image to paste in
    - output_path: path for the result
    Returns:
    - output_path: path of the stamped image
    """
    # Load the images and the mask
    base_image = cv2.imread(str(base_image_path))
    mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)
    embed_image = cv2.imread(str(stamp_image_path))
    # Check that everything loaded correctly
    if base_image is None:
        raise ValueError(f"Base image not found: {base_image_path}")
    if mask is None:
        raise ValueError(f"Mask image not found: {mask_path}")
    if embed_image is None:
        raise ValueError(f"Stamp image not found: {stamp_image_path}")
    # Resize the mask and the stamp image to match the base image
    if base_image.shape[:2] != mask.shape[:2]:
        print(f"Resizing the mask to match the base image: {mask.shape} -> {base_image.shape[:2]}")
        mask = cv2.resize(mask, (base_image.shape[1], base_image.shape[0]))
    if base_image.shape[:2] != embed_image.shape[:2]:
        print(f"Resizing the stamp image to match the base image: {embed_image.shape[:2]} -> {base_image.shape[:2]}")
        embed_image = cv2.resize(embed_image, (base_image.shape[1], base_image.shape[0]))
    # Place the stamp image inside the mask region
    embedded_image = base_image.copy()
    embedded_image[mask == 255] = embed_image[mask == 255]
    # Save the result
    cv2.imwrite(str(output_path), embedded_image)
    return output_path
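# Usage sketch (hypothetical file names; './main.png' is the default stamp
# image shipped alongside the app):
#   stamp_image_with_mask("input.jpg", "mask.jpg", "out_stamp.jpg")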
def inpaint_image_with_mask1(img_path, mask_path, output_path, resize_factor=0.5):
    print('lama')
    # Check whether a GPU is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Load the image and the mask
    image = Image.open(img_path).convert("RGB")  # convert the image to RGB
    mask = Image.open(mask_path).convert('L')  # convert the mask to grayscale
    # Match the mask size to the image
    mask = mask.resize(image.size, Image.NEAREST)
    # Soften the mask edges with a Gaussian blur
    blurred_mask = mask.filter(ImageFilter.GaussianBlur(radius=3))  # 3-pixel blur radius
    # Create a SimpleLama instance
    simple_lama = SimpleLama()
    # Convert the image and the mask to NumPy arrays
    image_np = np.array(image)
    mask_np = np.array(blurred_mask) / 255.0  # scale the mask into the 0-1 range
    # Hand the image and the mask to SimpleLama for inpainting
    inpainted_np = simple_lama(image_np, mask_np)  # accepts NumPy arrays
    # Convert the result back to a PIL image
    result_image = Image.fromarray(np.uint8(inpainted_np))  # NumPy array -> PIL Image
    # Resize the output image
    new_size = (int(result_image.width * resize_factor), int(result_image.height * resize_factor))
    result_image = result_image.resize(new_size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
    # Save the result
    result_image.save(output_path)
    print(f"Inpainted image saved at {output_path}")
    return output_path
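# Usage sketch (hypothetical file names; resize_factor=1 keeps the original size):
#   inpaint_image_with_mask1("input.jpg", "mask.jpg", "out_lama.jpg", resize_factor=1)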
# Directory where uploaded and processed images are stored
SAVE_DIR = Path("./saved_images")
SAVE_DIR.mkdir(parents=True, exist_ok=True)

def save_image(file, filename):
    """Save an uploaded image file into the save directory."""
    filepath = SAVE_DIR / filename
    with open(filepath, "wb") as buffer:
        shutil.copyfileobj(file, buffer)
    return filepath
# NOTE: the first three endpoint functions below were all named
# create_mask_and_inpaint_opencv in the original, so the later definitions
# shadowed the earlier ones. They are renamed here, and the route decorators
# are restored from the endpoint paths referenced in the embedded HTML
# (an assumption, since no decorators appear in this copy). Path objects are
# converted with str() because cv2.imread/imwrite expect plain strings.
@app.post("/create-mask-and-inpaint-opencv")
async def create_mask_and_inpaint_opencv(image: UploadFile = File(...), risk_level: int = Form(...)):
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    input_path = str(save_image(image.file, "input.jpg"))
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = str(SAVE_DIR / "output_opencv.jpg")
    # Inpaint with OpenCV
    inpaint_image_with_mask(input_path, mask_path, output_path)
    return FileResponse(output_path)

@app.post("/create-mask-and-inpaint-mosaic")
async def create_mask_and_inpaint_mosaic(image: UploadFile = File(...), risk_level: int = Form(...)):
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    input_path = str(save_image(image.file, "input.jpg"))
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = str(SAVE_DIR / "output_opencv.jpg")
    # Pixelate through the mask
    mosaic_image_with_mask(input_path, mask_path, output_path)
    return FileResponse(output_path)

@app.post("/create-mask-and-inpaint-stamp")
async def create_mask_and_inpaint_stamp(image: UploadFile = File(...), risk_level: int = Form(...)):
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    input_path = str(save_image(image.file, "input.jpg"))
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = str(SAVE_DIR / "output_opencv.jpg")
    # Stamp over the mask
    stamp_image_with_mask(input_path, mask_path, output_path)
    return FileResponse(output_path)

@app.post("/create-mask-and-inpaint-simple-lama")
async def create_mask_and_inpaint_simple_lama(image: UploadFile = File(...), risk_level: int = Form(...)):
    input_path = str(save_image(image.file, "input.jpg"))
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = str(SAVE_DIR / "output_simple_lama.jpg")
    # Inpaint with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path, resize_factor=1)
    return FileResponse(output_path)
# The endpoints below are the variant that does not mask a user-specified region
# (a stale note here referred to DeepFillv2)
# List of objects used for image vectorization
TEXT_PROMPTS = [
    'text', 'Name tag', 'License plate', 'Mail', 'Documents', 'QR codes',
    'barcodes', 'Map', 'Digital screens', 'information board',
    'signboard', 'poster', 'sign', 'logo', 'card', 'window', 'mirror',
    'Famous landmark', 'cardboard', 'manhole', 'utility pole'
]
BOX_THRESHOLD = 0.3
TEXT_THRESHOLD = 0.3
# Load clustering results from a JSON file
def load_sums_from_json(filepath):
    with open(filepath, 'r') as json_file:
        sums = json.load(json_file)
    return sums

# Load vector data from a JSON file
def load_vectors_from_json(filepath):
    with open(filepath, 'r') as json_file:
        data = json.load(json_file)
    return data

# Assign a new image to an existing cluster
def classify_new_image(new_image_vector, sums_data, loaded_vectors, loaded_object_names, k=1):
    cluster_centers = []
    for cluster in sums_data:
        cluster_vectors = np.array([loaded_vectors[obj_name] for obj_name in cluster])
        cluster_center = np.mean(cluster_vectors, axis=0)
        cluster_centers.append(cluster_center)
    # A 1-NN classifier over the cluster centres is nearest-centroid assignment
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(cluster_centers, range(len(cluster_centers)))
    new_image_label = knn.predict([new_image_vector])
    return new_image_label[0]
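# Data shapes assumed by the loaders above (inferred from how they are used):
#   output_vectors.json : {"object_or_image_name": [v0, v1, ...], ...}
#   sums_data.json      : [["name_a", "name_b"], ["name_c"], ...]  # one list per cluster
# classify_new_image averages each cluster's vectors into a centre and returns
# the index of the centre nearest to new_image_vector (e.g. 0 for the first cluster).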
def process_image_vec(image_path):
    # Check whether a GPU is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    # Load the YOLOv8 model and move it to the device
    model = YOLO('./1026.pt')  # path to the model
    model.to(device)
    # Initialize the score vector
    object_vector = np.zeros(len(TEXT_PROMPTS))
    # Load the image
    image = cv2.imread(str(image_path))
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Run YOLO inference
    results = model(image_rgb)
    # Accumulate confidence per prompt
    for i, text_prompt in enumerate(TEXT_PROMPTS):
        prompt_sum = 0  # summed confidence for this prompt
        for box in results[0].boxes:
            class_id = int(box.cls[0])
            confidence = box.conf[0]
            detected_class = model.names[class_id]
            # Match the detected class name against the text prompt
            if text_prompt.lower() == detected_class.lower():
                prompt_sum += confidence  # add the confidence when the class matches
        # Store the score in object_vector
        object_vector[i] = prompt_sum
    print(object_vector)
    return object_vector.tolist()
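# Usage sketch (hypothetical file name): the result holds one float per
# TEXT_PROMPTS entry, the summed YOLO confidence of detections of that class:
#   vec = process_image_vec("street.jpg")
#   assert len(vec) == len(TEXT_PROMPTS)  # 21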
# API endpoint: vectorize an uploaded image and report its cluster's danger
# score (no route for it is referenced in the embedded HTML, so it is left
# undecorated in this copy)
async def classify_image(file: UploadFile = File(...)):
    image_path = "./temp_image.jpg"
    # Save the uploaded image
    with open(image_path, "wb") as buffer:
        buffer.write(await file.read())
    # Vectorize the image
    new_image_vector = process_image_vec(image_path)
    # Load the reference data from JSON
    json_filepath = "./output_vectors.json"
    loaded_data = load_vectors_from_json(json_filepath)
    loaded_vectors = {obj_name: np.array(vector) for obj_name, vector in loaded_data.items()}
    loaded_object_names = list(loaded_vectors.keys())
    # Load the existing clustering result
    sums_data = load_sums_from_json("./sums_data.json")
    # Decide which cluster the new image belongs to
    new_image_cluster = classify_new_image(new_image_vector, sums_data, loaded_vectors, loaded_object_names)
    # Clusters are 0-indexed; the original indexed with cluster + 1 (its comment
    # read "switch this to + if it breaks"), which overruns for the last cluster
    return {"danger": dangerarray[int(new_image_cluster)]}
# Variant of the SimpleLama endpoint that accepts a protected region as
# normalized coordinates. Renamed from create_mask_and_inpaint_simple_lama to
# avoid shadowing the routed endpoint above; no route for this variant is
# referenced in the embedded HTML, so it is left undecorated.
async def create_mask_and_inpaint_simple_lama_points(
    image: UploadFile = File(...),
    risk_level: int = Form(...),
    x1: float = Form(...),
    y1: float = Form(...),
    x2: float = Form(...),
    y2: float = Form(...),
):
    # Extract points from the form data
    point1 = [x1, y1]
    point2 = [x2, y2]
    # Save the input image
    input_path = str(save_image(image.file, "input.jpg"))
    print('points:', point1, point2)
    # Create a mask image (using the new process_image function)
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds=thresholds)
    # Define the output path for the inpainted image
    output_path = "./output_simple_lama.jpg"
    # Perform inpainting with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path, resize_factor=1)
    # Return the resulting image as a response
    return FileResponse(output_path, media_type="image/jpeg", filename="output_simple_lama.jpg")
async def create_mask_sum(image: UploadFile = File(...), risk_level: int = Form(...),
                          x1: float = Form(...),
                          y1: float = Form(...),
                          x2: float = Form(...),
                          y2: float = Form(...),):
    default_x = 0.001
    default_y = 0.001
    point1 = [default_x if math.isnan(x1) else x1, default_y if math.isnan(y1) else y1]
    point2 = [default_x if math.isnan(x2) else x2, default_y if math.isnan(y2) else y2]
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Generate a unique identifier
    unique_id = uuid.uuid4().hex
    input_path = str(save_image(image.file, f"input_{timestamp}_{unique_id}.jpg"))
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds=thresholds)
    output_path = f"./output_simple_lama_{timestamp}_{unique_id}.jpg"
    # Inpaint with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path)
    return FileResponse(output_path)
# Load the Haar cascade file (face detection)
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def apply_mosaic_rect(image, x, y, w, h, mosaic_level=15):
    """Pixelate the given rectangle. Renamed from apply_mosaic: a second
    apply_mosaic with a different signature is defined further below and
    would otherwise shadow this one at call time."""
    face = image[y:y+h, x:x+w]
    face = cv2.resize(face, (w // mosaic_level, h // mosaic_level))
    face = cv2.resize(face, (w, h), interpolation=cv2.INTER_NEAREST)
    image[y:y+h, x:x+w] = face
    return image
async def mosaic_face(file: UploadFile = File(...)):
    # Read the uploaded image
    image_data = await file.read()
    np_array = np.frombuffer(image_data, np.uint8)
    img = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
    # Convert to grayscale and detect faces
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(30, 30))
    # Pixelate every detected face
    for (x, y, w, h) in faces:
        img = apply_mosaic_rect(img, x, y, w, h)
    # Save to a temporary file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        temp_file_path = Path(temp_file.name)
        cv2.imwrite(str(temp_file_path), img)
    # Return the temporary file as the response
    return FileResponse(path=temp_file_path, media_type="image/jpeg", filename="mosaic_image.jpg")
# Helper function to read image file
def read_image(file: UploadFile):
    image = Image.open(BytesIO(file.file.read()))
    image = np.array(image)
    if image.shape[2] == 4:  # Remove alpha channel if present
        image = image[:, :, :3]
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

# Function to extract face embeddings and bounding boxes from an image
def get_face_data(image):
    # Load InsightFace model
    model = insightface.app.FaceAnalysis()
    model.prepare(ctx_id=-1)  # Use -1 for CPU, or 0 for GPU
    faces = model.get(image)
    if faces:
        embeddings = [face.embedding for face in faces]
        bboxes = [face.bbox for face in faces]
        return embeddings, bboxes, image
    else:
        return None, None, image
# Function to apply mosaic to a specific region in an image
def apply_mosaic(image, bbox, mosaic_size=10):
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2] - bbox[0]), int(bbox[3] - bbox[1])
    face_region = image[y:y+h, x:x+w]
    if face_region.size == 0:
        return image
    face_region = cv2.resize(face_region, (mosaic_size, mosaic_size), interpolation=cv2.INTER_LINEAR)
    face_region = cv2.resize(face_region, (w, h), interpolation=cv2.INTER_NEAREST)
    image[y:y+h, x:x+w] = face_region
    return image
# Route restored from the fetch URL in the embedded HTML (an assumption)
@app.post("/mosaic_faces")
async def mosaic_faces(reference_image: UploadFile = File(...), test_image: UploadFile = File(...)):
    try:
        # Load images
        ref_image = read_image(reference_image)
        test_image_data = read_image(test_image)
        # Extract face data from reference image
        reference_embeddings, _, _ = get_face_data(ref_image)
        if reference_embeddings is None:
            raise HTTPException(status_code=400, detail="No face detected in reference image.")
        # Extract face data from test image
        test_embeddings, test_bboxes, test_image_processed = get_face_data(test_image_data)
        if test_embeddings is None:
            raise HTTPException(status_code=400, detail="No face detected in test image.")
        # Process each detected face in the test image
        for test_embedding, bbox in zip(test_embeddings, test_bboxes):
            # Cosine similarity between the reference face and this face
            similarity = np.dot(reference_embeddings[0], test_embedding) / (
                np.linalg.norm(reference_embeddings[0]) * np.linalg.norm(test_embedding))
            # Apply mosaic to unrecognized faces only
            if similarity <= 0.4:
                test_image_processed = apply_mosaic(test_image_processed, bbox)
        # Save processed image to a temporary file
        temp_file = "/tmp/processed_image.jpg"
        cv2.imwrite(temp_file, test_image_processed)
        return FileResponse(temp_file, media_type="image/jpeg", filename="processed_image.jpg")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
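# Client-side sketch of calling this endpoint (hypothetical host and file names,
# using the requests library, which this app itself does not import):
#   import requests
#   with open("me.jpg", "rb") as ref, open("group.jpg", "rb") as test:
#       r = requests.post("http://localhost:8000/mosaic_faces",
#                         files={"reference_image": ref, "test_image": test})
#   open("processed.jpg", "wb").write(r.content)
# Faces whose embedding has cosine similarity <= 0.4 with the reference face
# are pixelated; the matching (reference) face is left untouched.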
# Root route restored by assumption: this function serves the app's UI
@app.get("/")
async def read_root():
    html_content = """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>Image Processing App</title>
        <link href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css" rel="stylesheet">
        <link rel="stylesheet" href="https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">
        <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.4/css/all.min.css" rel="stylesheet">
        <style>
            body {
                background-color: #f0f0f5;
                color: #333;
                padding: 40px 20px;
            }
            h1 {
                color: #555;
                margin-bottom: 30px;
                font-weight: bold;
                text-align: center;
            }
            .image-preview, .processed-preview {
                max-width: 100%;
                height: auto;
                border-radius: 10px;
                box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
                margin-top: 20px;
            }
            #result {
                margin-top: 40px;
                display: none;
            }
            .slider-container {
                text-align: left;
                margin-top: 20px;
            }
            .slider-label {
                font-size: 1.2rem;
                color: #333;
            }
            .btn-primary {
                background-color: #007bff;
                border-color: #007bff;
                font-size: 1.2rem;
                padding: 10px 20px;
                border-radius: 50px;
            }
            .btn-primary:hover {
                background-color: #0056b3;
                border-color: #004085;
            }
            .form-control, .custom-select {
                border-radius: 20px;
                box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
            }
            .nav-tabs {
                margin-bottom: 30px;
                border-bottom: 2px solid #007bff;
            }
            .nav-tabs .nav-link {
                border: none;
                color: #555;
                font-size: 1.1rem;
                padding: 12px 25px;
                border-radius: 10px 10px 0 0;
            }
            .nav-tabs .nav-link.active {
                background-color: #007bff;
                color: white;
            }
            .tab-content {
                padding: 20px;
                background: white;
                border-radius: 0 0 10px 10px;
                box-shadow: 0 2px 10px rgba(0,0,0,0.1);
            }
            #loadingSpinner {
                position: fixed;
                top: 50%;
                left: 50%;
                transform: translate(-50%, -50%);
                background: rgba(255,255,255,0.9);
                padding: 20px;
                border-radius: 10px;
                box-shadow: 0 0 15px rgba(0,0,0,0.2);
                z-index: 9999;
            }
        </style>
    </head>
    <body>
        <div id="loadingSpinner" style="display: none;">
            <i class="fas fa-spinner fa-spin fa-3x"></i>
            <p>Processing the image. Please wait...</p>
        </div>
        <div class="container">
            <h1><i class="fas fa-image"></i> Image Processing App</h1>
            <ul class="nav nav-tabs" role="tablist">
                <li class="nav-item">
                    <a class="nav-link active" id="general-tab" data-toggle="tab" href="#general" role="tab">
                        <i class="fas fa-magic"></i> General processing mode
                    </a>
                </li>
                <li class="nav-item">
                    <a class="nav-link" id="face-tab" data-toggle="tab" href="#face" role="tab">
                        <i class="fas fa-user-circle"></i> Face matching mode
                    </a>
                </li>
            </ul>
            <div class="tab-content">
                <!-- General processing mode -->
                <div class="tab-pane fade show active" id="general" role="tabpanel">
                    <div class="form-group">
                        <label for="uploadImage1">Upload an image:</label>
                        <input type="file" id="uploadImage1" class="form-control-file" accept="image/*" onchange="previewAndResizeImage('uploadImage1', 'uploadedImage1')">
                    </div>
                    <img id="uploadedImage1" class="image-preview" src="#" alt="Uploaded image" style="display: none;">
                    <div class="form-group mt-4">
                        <label for="processingType">Select a processing method:</label>
                        <select id="processingType" class="custom-select">
                            <option value="opencv">OpenCV inpainting</option>
                            <option value="simple_lama">Simple Lama inpainting</option>
                            <option value="stamp">Stamp inpainting</option>
                            <option value="mosaic">Mosaic inpainting</option>
                        </select>
                    </div>
                    <div class="slider-container">
                        <label for="riskLevel1" class="slider-label">Risk level (0-100): <span id="riskLevelLabel1">50</span></label>
                        <div id="slider1"></div>
                    </div>
                    <button class="btn btn-primary mt-4" onclick="processGeneralImage()">Start processing</button>
                </div>
                <!-- Face matching mode -->
                <div class="tab-pane fade" id="face" role="tabpanel">
                    <div class="form-group">
                        <label for="uploadImage2">Upload the image to process:</label>
                        <input type="file" id="uploadImage2" class="form-control-file" accept="image/*" onchange="previewAndResizeImage('uploadImage2', 'uploadedImage2')">
                    </div>
                    <img id="uploadedImage2" class="image-preview" src="#" alt="Uploaded image" style="display: none;">
                    <div class="form-group mt-4">
                        <label for="faceOption">Choose how to provide your face:</label>
                        <select id="faceOption" class="custom-select" onchange="toggleFaceInput()">
                            <option value="upload">Upload from a file</option>
                            <option value="camera">Capture with the camera</option>
                        </select>
                    </div>
                    <div class="form-group" id="uploadFaceGroup">
                        <label for="uploadFace">Upload a face image:</label>
                        <input type="file" id="uploadFace" class="form-control-file" accept="image/*" onchange="previewFaceImage()">
                        <img id="facePreview" class="image-preview" src="#" alt="Face image preview" style="display: none;">
                    </div>
                    <div class="form-group" id="cameraFaceGroup" style="display: none;">
                        <video id="cameraStream" width="100%" autoplay></video>
                        <button class="btn btn-secondary mt-2" onclick="captureFaceImage()">Capture face</button>
                        <canvas id="cameraCanvas" style="display: none;"></canvas>
                    </div>
                    <div class="slider-container">
                        <label for="riskLevel2" class="slider-label">Risk level (0-100): <span id="riskLevelLabel2">50</span></label>
                        <div id="slider2"></div>
                    </div>
                    <button class="btn btn-primary mt-4" onclick="processFaceImage()">Start processing</button>
                </div>
                <!-- Result (shared) -->
                <div id="result" class="mt-5">
                    <h2>Result:</h2>
                    <img id="processedImage" class="processed-preview" src="" alt="">
                    <a id="downloadLink" class="btn btn-success mt-3" href="#" download="processed_image.jpg">Download the processed image</a>
                </div>
            </div>
        </div>
        <script src="https://code.jquery.com/jquery-3.5.1.min.js"></script>
        <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js"></script>
        <script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
        <script>
            // Initialize the sliders
            $(function() {
                $("#slider1, #slider2").slider({
                    range: "min",
                    value: 50,
                    min: 0,
                    max: 100,
                    slide: function(event, ui) {
                        const labelId = $(this).attr('id') === 'slider1' ? 'riskLevelLabel1' : 'riskLevelLabel2';
                        $("#" + labelId).text(ui.value);
                    }
                });
            });
            let resizedImageBlob1 = null;
            let resizedImageBlob2 = null;
            let faceImageBlob = null;
            function previewAndResizeImage(inputId, imageId) {
                const fileInput = document.getElementById(inputId);
                const uploadedImage = document.getElementById(imageId);
                if (fileInput.files && fileInput.files[0]) {
                    const reader = new FileReader();
                    reader.onload = function(e) {
                        const img = new Image();
                        img.onload = function() {
                            const maxWidth = 1200;
                            const maxHeight = 1200;
                            let width = img.width;
                            let height = img.height;
                            if (width > maxWidth || height > maxHeight) {
                                const ratio = Math.min(maxWidth / width, maxHeight / height);
                                width *= ratio;
                                height *= ratio;
                            }
                            const canvas = document.createElement('canvas');
                            canvas.width = width;
                            canvas.height = height;
                            const ctx = canvas.getContext('2d');
                            ctx.drawImage(img, 0, 0, width, height);
                            uploadedImage.src = canvas.toDataURL('image/jpeg');
                            uploadedImage.style.display = 'block';
                            canvas.toBlob((blob) => {
                                if (inputId === 'uploadImage1') {
                                    resizedImageBlob1 = blob;
                                } else {
                                    resizedImageBlob2 = blob;
                                }
                            }, 'image/jpeg');
                        };
                        img.src = e.target.result;
                    };
                    reader.readAsDataURL(fileInput.files[0]);
                }
            }
            function previewFaceImage() {
                const fileInput = document.getElementById('uploadFace');
                if (fileInput.files && fileInput.files[0]) {
                    const reader = new FileReader();
                    reader.onload = function(e) {
                        document.getElementById('facePreview').src = e.target.result;
                        document.getElementById('facePreview').style.display = 'block';
                    };
                    reader.readAsDataURL(fileInput.files[0]);
                    faceImageBlob = fileInput.files[0];
                }
            }
            function toggleFaceInput() {
                const faceOption = document.getElementById('faceOption').value;
                document.getElementById('uploadFaceGroup').style.display = faceOption === 'upload' ? 'block' : 'none';
                document.getElementById('cameraFaceGroup').style.display = faceOption === 'camera' ? 'block' : 'none';
                if (faceOption === 'camera') {
                    startCamera();
                } else {
                    stopCamera();
                }
            }
            function startCamera() {
                const video = document.getElementById('cameraStream');
                navigator.mediaDevices.getUserMedia({ video: true })
                    .then(stream => video.srcObject = stream)
                    .catch(error => console.error('Failed to start the camera:', error));
            }
            function stopCamera() {
                const video = document.getElementById('cameraStream');
                const stream = video.srcObject;
                if (stream) {
                    stream.getTracks().forEach(track => track.stop());
                    video.srcObject = null;
                }
            }
            function captureFaceImage() {
                const video = document.getElementById('cameraStream');
                const canvas = document.getElementById('cameraCanvas');
                const context = canvas.getContext('2d');
                canvas.width = video.videoWidth;
                canvas.height = video.videoHeight;
                context.drawImage(video, 0, 0, canvas.width, canvas.height);
                canvas.toBlob(blob => faceImageBlob = blob, 'image/jpeg');
                stopCamera();
            }
            function processGeneralImage() {
                if (!resizedImageBlob1) {
                    alert("Please select an image.");
                    return;
                }
                const processingType = document.getElementById('processingType').value;
                const riskLevel = $("#slider1").slider("value");
                showLoadingSpinner();
                const formData = new FormData();
                formData.append('image', resizedImageBlob1, 'resized_image.jpg');
                formData.append('risk_level', riskLevel);
                let apiEndpoint;
                if (processingType === "opencv") {
                    apiEndpoint = "/create-mask-and-inpaint-opencv";
                } else if (processingType === "simple_lama") {
                    apiEndpoint = "/create-mask-and-inpaint-simple-lama";
                } else if (processingType === "stamp") {
                    apiEndpoint = "/create-mask-and-inpaint-stamp";
                } else if (processingType === "mosaic") {
                    apiEndpoint = "/create-mask-and-inpaint-mosaic";
                }
                processImageRequest(formData, "https://rein0421-aidentify.hf.space" + apiEndpoint);
            }
            function processFaceImage() {
                if (!resizedImageBlob2 || !faceImageBlob) {
                    alert("Please provide both the image to process and a face image.");
                    return;
                }
                const riskLevel = $("#slider2").slider("value");
                showLoadingSpinner();
                const formData = new FormData();
                formData.append('reference_image', faceImageBlob, 'reference_image.jpg');
                formData.append('test_image', resizedImageBlob2, 'test_image.jpg');
                formData.append('risk_level', riskLevel);
                processImageRequest(formData, "https://rein0421-aidentify.hf.space/mosaic_faces");
            }
            function processImageRequest(formData, url) {
                fetch(url, {
                    method: 'POST',
                    body: formData
                })
                .then(response => {
                    if (!response.ok) throw new Error("Network response was not ok");
                    return response.blob();
                })
                .then(blob => {
                    const objectURL = URL.createObjectURL(blob);
                    document.getElementById('processedImage').src = objectURL;
                    document.getElementById('downloadLink').href = objectURL;
                    document.getElementById('result').style.display = "block";
                })
                .catch(error => {
                    console.error("Image processing failed.", error);
                    alert("Image processing failed.");
                })
                .finally(() => {
                    hideLoadingSpinner();
                });
            }
            function showLoadingSpinner() {
                document.getElementById('loadingSpinner').style.display = 'block';
            }
            function hideLoadingSpinner() {
                document.getElementById('loadingSpinner').style.display = 'none';
            }
            // Handle tab switches
            $('.nav-tabs a').on('shown.bs.tab', function (e) {
                // Reset the result display
                document.getElementById('result').style.display = 'none';
                document.getElementById('processedImage').src = '';
                document.getElementById('downloadLink').href = '#';
            });
        </script>
    </body>
    </html>
    """
    return HTMLResponse(content=html_content)