# -*- coding: utf-8 -*-
# Commented out IPython magic to ensure Python compatibility.
import tempfile
import os
import shutil
import json
import math
import random
import subprocess
import threading
import concurrent.futures
import traceback
import uuid
from datetime import datetime
from io import BytesIO
from pathlib import Path
from types import SimpleNamespace
from typing import Tuple

import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import supervision as sv
import pycocotools.mask as mask_util
#import insightface
import nest_asyncio
import uvicorn
from PIL import Image, ImageFilter
from pyngrok import ngrok
from dotenv import load_dotenv
from diffusers import StableDiffusionInpaintPipeline
from simple_lama_inpainting import SimpleLama
from torchvision import transforms
from ultralytics import YOLO  # YOLOv8 library
from sklearn.cluster import (
    KMeans, AgglomerativeClustering, DBSCAN, MiniBatchKMeans, Birch,
    SpectralClustering, MeanShift, OPTICS
)
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from sklearn.neighbors import KNeighborsClassifier

from fastapi import FastAPI, File, UploadFile, Form, Request, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, HTMLResponse
from fastapi.templating import Jinja2Templates

from LLM_package import ObjectDetector, GeminiInference
from search import WebScraper

# The code below is the variant that does not mask a specific region.

app = FastAPI()

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # This can be restricted to specific origins if needed
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Dummy-response settings for debugging
USE_DUMMY_RESPONSE = False  # Set to True to enable dummy responses
DUMMY_RESPONSE_TYPE = 'random'  # One of 'random', 'fixed', or 'error'

# Generate a dummy image
def create_dummy_image(width=512, height=512, color=(255, 0, 0)):
    """Create a solid-color dummy image and save it to a temporary file."""
    image = np.full((height, width, 3), color, dtype=np.uint8)
    temp_path = SAVE_DIR / f"dummy_image_{uuid.uuid4().hex}.jpg"
    cv2.imwrite(str(temp_path), image)
    return temp_path

load_dotenv(dotenv_path='../.env')
HOME = "./"
templates = Jinja2Templates(directory="templates")

dangerarray = [10, 30, 90, 50, 80, 20, 40, 70, 100, 60]  # Danger level assigned to each cluster

# Raise or lower these thresholds to tune detection sensitivity
thresholds = {
    'text': 0.1,
    'Name tag': 0.1,
    'License plate': 0.1,
    'Digital screens': 0.1,
    'signboard': 0.1,
    'documents': 0.1,
    'information board': 0.1,
    'poster': 0.1,
    'sign': 0.1,
    'Mail or envelope': 0.1,
    'logo': 0.1,
    'cardboard': 0.4,
    'manhole': 0.6,
    'electricity pole': 0.7
}

# Define paths
CONFIG_PATH = os.path.join(HOME, "GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py")
WEIGHTS_NAME = "groundingdino_swint_ogc.pth"
WEIGHTS_PATH = os.path.join(HOME, "weights", WEIGHTS_NAME)

def is_bright(pixel):
    # Judge brightness from the pixel's luminance
    r, g, b = pixel
    brightness = (0.299 * r + 0.587 * g + 0.114 * b)  # Luminance calculation
    return brightness > 127  # Threshold set to 127

def analyze_mask_brightness(original_image_path, mask_image_path):
    # Open the images
    original_img = Image.open(original_image_path).convert('RGB')
    mask_img = Image.open(mask_image_path).convert('L')  # Convert to grayscale
    width, height = original_img.size
    if mask_img.size != (width, height):
        print("Error: the mask image and the original image are not the same size.")
        return
    # Count bright and dark pixels inside the mask
    bright_count = 0
    dark_count = 0
    for y in range(height):
        for x in range(width):
            mask_value = mask_img.getpixel((x, y))
            if mask_value > 127:  # Mask is white here (target region)
                pixel = original_img.getpixel((x, y))
                if is_bright(pixel):
                    bright_count += 1
                else:
                    dark_count += 1
    # Decide the overall brightness
    brightness_result = 1 if bright_count > dark_count else 2
    return brightness_result

def classify_mask_size(mask_image_path, small_threshold, medium_threshold, large_threshold):
    # Open the mask image
    mask_img = Image.open(mask_image_path).convert('L')  # Convert to grayscale
    width, height = mask_img.size
    total_pixels = width * height
    white_pixel_count = 0
    # Count the white pixels in the mask image
    for y in range(height):
        for x in range(width):
            mask_value = mask_img.getpixel((x, y))
            if mask_value > 127:  # Treated as a white pixel
                white_pixel_count += 1
    # Compute the percentage of white pixels
    mask_area_ratio = (white_pixel_count / total_pixels) * 100
    # Classify the mask size
    if mask_area_ratio <= small_threshold:
        size_category = 1  # Very small
    elif mask_area_ratio <= medium_threshold:
        size_category = 2  # Small
    elif mask_area_ratio <= large_threshold:
        size_category = 3  # Large
    else:
        size_category = 4  # Very large
    return size_category

def analyze_mask_combined(original_image_path, mask_image_path, small_threshold, medium_threshold, large_threshold):
    # Classify the mask size
    size_category = classify_mask_size(mask_image_path, small_threshold, medium_threshold, large_threshold)
    # Judge the brightness of the masked region
    brightness_result = analyze_mask_brightness(original_image_path, mask_image_path)
    # Print the results
    size_text = {1: "very small", 2: "small", 3: "large", 4: "very large"}
    print(f"Mask size: {size_text[size_category]} ({size_category})")
    print(f"Mask brightness: {brightness_result}")
    result = {
        'size': size_category,
        'brightness': brightness_result
    }
    return result

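# A minimal usage sketch for the mask-analysis helpers above. The file names and
# the percentage thresholds here are illustrative assumptions, not values used
# elsewhere in this script.
#
#   result = analyze_mask_combined(
#       original_image_path="./saved_images/input.jpg",
#       mask_image_path="./saved_images/final_mask.jpg",
#       small_threshold=5,    # mask covers <= 5% of the image -> "very small"
#       medium_threshold=20,  # <= 20% -> "small"
#       large_threshold=50,   # <= 50% -> "large", otherwise "very large"
#   )
#   # result -> {'size': 2, 'brightness': 1}, for example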
def decide_to_object(risk_level):
    # Labels listed in priority order, matching the keys of `thresholds`
    tex = [
        'text', 'poster', 'Name tag', 'License plate', 'Digital screens',
        'signboard', 'sign', 'logo', 'manhole', 'electricity pole', 'cardboard'
    ]
    # Decide how many objects to target based on the risk level
    num_objects = int(risk_level / 20) * (len(tex) // 5)  # Roughly two more labels per 20 risk points
    return tex[:int(num_objects) + 1]

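# Illustrative values (assumed inputs): with the list above, len(tex) == 11, so
# decide_to_object(60) -> int(60 / 20) * (11 // 5) + 1 = 7 labels, i.e. the
# first seven entries ('text' ... 'sign'), while decide_to_object(0) returns
# just ['text'].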
# Create a rectangular mask
def create_mask(image, x1, y1, x2, y2):
    mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
    cv2.rectangle(mask, (int(x1), int(y1)), (int(x2), int(y2)), 255, -1)
    return mask

async def search_llm():
    scraper = WebScraper(headless=True)  # Run without a visible browser UI
    # Search for personal-information leak cases and fetch the cleaned content of the top results
    personal_breach_docs = await scraper.get_processed_documents(
        search_query="個人情報流出 事例 SNS",
        num_search_results=10
    )
    # get_processed_documents returns a list of dicts, so join the cleaned content of each document
    return "\n\n---\n\n".join(doc['cleaned_html_content'] for doc in personal_breach_docs)

def llm_to_process_image_simple(risk_level, image_path, point1, point2, thresholds=None):
    print(risk_level, image_path, point1, point2, thresholds)
    print('point1,point2', point1, point2)
    GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
    # Image-processing logic
    Objectdetector = ObjectDetector(API_KEY=GEMINI_API_KEY)
    debug_image_path = '/test_llm.jpg'
    Objectdetector.prompt_objects = {'text', 'poster', 'Name tag', 'License plate', 'Digital screens',
                                     'signboard', 'sign', 'logo', 'manhole', 'electricity pole', 'cardboard'}
    # Load the image and convert to RGB
    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    mask_llm = np.zeros(image.shape[:2], dtype=np.uint8)
    llm_results = Objectdetector.detect_objects(image_path)
    print(f"LLM Results: {llm_results}")
    for result in llm_results:
        bbox = result['box_2d']
        x1, y1 = int(bbox[1] * image.shape[1]), int(bbox[0] * image.shape[0])
        x2, y2 = int(bbox[3] * image.shape[1]), int(bbox[2] * image.shape[0])
        mask_llm[y1:y2, x1:x2] = 255  # Mask the detected region
    p1_x, p1_y = int(point1[0] * image.shape[1]), int(point1[1] * image.shape[0])
    p2_x, p2_y = int(point2[0] * image.shape[1]), int(point2[1] * image.shape[0])
    x_min, y_min = max(0, min(p1_x, p2_x)), max(0, min(p1_y, p2_y))
    x_max, y_max = min(image.shape[1], max(p1_x, p2_x)), min(image.shape[0], max(p1_y, p2_y))
    mask_llm[y_min:y_max, x_min:x_max] = 0  # Set the protected region to black
    save_dir = "./saved_images"
    os.makedirs(save_dir, exist_ok=True)
    debug_image_pil = Image.fromarray(mask_llm)
    debug_image_pil.save(save_dir + debug_image_path)
    return save_dir + debug_image_path

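# Usage sketch (assumed paths): point1 and point2 are normalized (0-1) image
# coordinates that define a rectangle to *protect* from masking.
#
#   mask_path = llm_to_process_image_simple(
#       risk_level=50,
#       image_path="./saved_images/input.jpg",
#       point1=(0.1, 0.1),
#       point2=(0.4, 0.4),
#       thresholds=thresholds,
#   )
#   # mask_path -> "./saved_images/test_llm.jpg"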
async def llm_to_process_image_simple_auto(risk_level, image_path, point1, point2, thresholds=None, scene=''):
    print(f"Risk level: {risk_level}, image path: {image_path}, point1: {point1}, point2: {point2}, thresholds: {thresholds}")
    print(f"point1, point2: {point1}, {point2}")
    GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
    # Image-processing logic
    Objectdetector = ObjectDetector(API_KEY=GEMINI_API_KEY)
    Objectdetector.scene = scene  # Set the scene information
    # Debug image path; the file name is used when saving.
    debug_image_name = "masked_image.jpg"
    debug_image_path = os.path.join("./saved_images", debug_image_name)
    # Search for personal-information leak cases and fetch the cleaned content
    scraper = WebScraper(headless=True)
    personal_breach_docs = await scraper.get_processed_documents(
        search_query="個人情報流出 事例 SNS",
        num_search_results=10
    )
    # Join the fetched documents and set them as context text on the detector
    if personal_breach_docs:
        # personal_breach_docs is a list of dicts, so pull the content out of each one and join them
        all_content = "\n\n---\n\n".join([doc['cleaned_html_content'] for doc in personal_breach_docs])
        Objectdetector.text = all_content
        print(f"Set the web-scraping results as context. Character count: {len(all_content)}")
    # Run the risk analysis with the scraped material as reference
    response = Objectdetector.detect_auto(image_path)
    print(f"Objects to remove: {response['objects_to_remove']}")
    Objectdetector.prompt_objects = response["objects_to_remove"]
    # Load the image and convert to RGB
    print(f"Objectdetector.prompt_objects: {Objectdetector.prompt_objects}")
    image = cv2.imread(image_path)
    # Handle failure to load the image
    if image is None:
        print(f"Error: could not read an image from {image_path}.")
        return None, {"error": "Could not read the image"}
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    mask_llm = np.zeros(image.shape[:2], dtype=np.uint8)
    # Run LLM-based object detection
    llm_results = Objectdetector.detect_objects(image_path)
    print(f"LLM detection results: {llm_results}")
    for result in llm_results:
        bbox = result['box_2d']
        # Convert the bounding box to pixel coordinates and clamp it to the image
        x1, y1 = int(bbox[1] * image.shape[1]), int(bbox[0] * image.shape[0])
        x2, y2 = int(bbox[3] * image.shape[1]), int(bbox[2] * image.shape[0])
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(image.shape[1], x2), min(image.shape[0], y2)
        mask_llm[y1:y2, x1:x2] = 255  # Mask the detected region
    # Blank out the rectangle defined by the two given points
    p1_x, p1_y = int(point1[0] * image.shape[1]), int(point1[1] * image.shape[0])
    p2_x, p2_y = int(point2[0] * image.shape[1]), int(point2[1] * image.shape[0])
    x_min, y_min = max(0, min(p1_x, p2_x)), max(0, min(p1_y, p2_y))
    x_max, y_max = min(image.shape[1], max(p1_x, p2_x)), min(image.shape[0], max(p1_y, p2_y))
    mask_llm[y_min:y_max, x_min:x_max] = 0  # Set the protected region to black
    save_dir = "./saved_images"
    os.makedirs(save_dir, exist_ok=True)  # Create the save directory if it does not exist
    debug_image_pil = Image.fromarray(mask_llm)
    mask_save_path = os.path.join(save_dir, debug_image_name)  # Build the save path
    debug_image_pil.save(mask_save_path)
    return mask_save_path, response

# Special-case processing: YOLO-based mask generation
def special_process_image_yolo(risk_level, image_path, point1, point2, thresholds=None):
    # Check the device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")
    # Path to the model file
    model_path = './1113.pt'
    # Make sure the model file exists
    if not os.path.isfile(model_path):
        raise FileNotFoundError(f"Model file '{model_path}' was not found. Check the path.")
    # Load the YOLOv8 model and move it to the selected device
    model = YOLO(model_path).to(device)
    print("Model loaded successfully and moved to the device.")
    # OCR model initialization
    # Create a timestamp
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    # Text labels in priority order; per-label thresholds decay according to that order
    tex = [
        'text', 'poster', 'Name tag', 'License plate', 'Digital screens',
        'signboard', 'sign', 'logo', 'manhole', 'electricity pole', 'cardboard'
    ]
    def logistic_decay_for_label(risk_level, label_index, k=0.1, r0=50):
        base_decay = 1 / (1 + np.exp(-k * (risk_level - r0)))
        return max(base_decay + 0.05 * label_index, 0.01)
    adjusted_thresholds = {}
    for i, label in enumerate(tex):
        decay_factor = logistic_decay_for_label(risk_level, i)
        adjusted_thresholds[label] = max(0.01, decay_factor / 2)
    # Load the image and convert to RGB
    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Run YOLO inference
    results = model(image_rgb)
    # Build the detection mask from the YOLO results
    mask_yolo = np.zeros(image.shape[:2], dtype=np.uint8)
    for box in results[0].boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        confidence = box.conf[0]
        class_id = box.cls[0]
        object_type = model.names[int(class_id)]
        # Check the per-object threshold and, if it passes, add the box to the mask
        threshold = adjusted_thresholds.get(object_type, 0.5)
        if confidence >= threshold:
            mask_yolo = cv2.bitwise_or(mask_yolo, create_mask(image, x1, y1, x2, y2))  # Accumulate boxes instead of overwriting the mask
    # Combine the OCR mask and the YOLO mask (currently YOLO only)
    final_mask = mask_yolo
    # Convert the normalized points to absolute coordinates and blank out that region
    p1_x, p1_y = int(point1[0] * image.shape[1]), int(point1[1] * image.shape[0])
    p2_x, p2_y = int(point2[0] * image.shape[1]), int(point2[1] * image.shape[0])
    x_min, y_min = max(0, min(p1_x, p2_x)), max(0, min(p1_y, p2_y))
    x_max, y_max = min(image.shape[1], max(p1_x, p2_x)), min(image.shape[0], max(p1_y, p2_y))
    final_mask[y_min:y_max, x_min:x_max] = 0  # Set the protected region to black
    # Draw a white rectangle for debugging
    debug_image = image_rgb.copy()
    cv2.rectangle(debug_image, (x_min, y_min), (x_max, y_max), (255, 255, 255), 2)
    # Save the debug image and the mask image
    save_dir = "./saved_images"
    os.makedirs(save_dir, exist_ok=True)
    debug_image_pil = Image.fromarray(debug_image)
    debug_image_path = os.path.join(save_dir, f"debug_image_with_rectangle_{timestamp}.jpg")
    debug_image_pil.save(debug_image_path)
    mask_image_pil = Image.fromarray(final_mask)
    mask_image_path = os.path.join(save_dir, f"final_mask_{timestamp}.jpg")
    mask_image_pil.save(mask_image_path)
    print(f"Debug image saved to {debug_image_path}.")
    print(f"Mask image saved to {mask_image_path}.")
    return mask_image_path

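# Worked example of the threshold decay above (assumed inputs): with k=0.1 and
# r0=50, a risk level of 50 gives base_decay = 1 / (1 + exp(0)) = 0.5, so the
# first label ('text', index 0) gets an adjusted threshold of
# max(0.01, 0.5 / 2) = 0.25, while 'signboard' (index 5) gets
# max(0.01, (0.5 + 0.05 * 5) / 2) = 0.375. Higher risk levels push base_decay
# toward 1, which raises these confidence thresholds under the current formula.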
def convert_image_format(input_path, output_format="png"):
    """
    Convert an image from JPG to PNG or from PNG to JPG.
    Parameters:
    - input_path: path of the source image to convert
    - output_format: output format ("png" or "jpg"; defaults to "png")
    Returns:
    - output_path: path of the converted image
    """
    # Check that the requested format is supported
    if output_format not in ["png", "jpg", "jpeg"]:
        raise ValueError("Supported output formats are 'png' and 'jpg'.")
    # Load the image
    image = cv2.imread(input_path)
    if image is None:
        raise ValueError(f"Image not found: {input_path}")
    # Build the output path
    base_name = os.path.splitext(os.path.basename(input_path))[0]
    output_path = f"{base_name}.{output_format}"
    # Save the image
    if output_format == "png":
        cv2.imwrite(output_path, image, [cv2.IMWRITE_PNG_COMPRESSION, 9])  # PNG with maximum compression
    else:
        cv2.imwrite(output_path, image, [cv2.IMWRITE_JPEG_QUALITY, 90])  # JPG with high quality
    return output_path

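# Usage sketch (assumed file name): convert_image_format("./saved_images/input.jpg", "png")
# writes "input.png" into the current working directory and returns that path.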
def mosaic_image_with_mask(image_path, mask_path, output_path, mosaic_level=15):
    """
    Apply a mosaic to the regions of the source image selected by a mask.
    Parameters:
    - image_path: path of the source image
    - mask_path: path of a mask image where the regions to pixelate are white and everything else is black
    - output_path: output path for the mosaic result
    - mosaic_level: mosaic strength (larger values give a coarser mosaic)
    Returns:
    - output_path: output path of the pixelated image
    """
    # Load the image and the mask
    image = cv2.imread(image_path)
    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    # Check that both loaded correctly
    if image is None:
        raise ValueError(f"Source image not found: {image_path}")
    if mask is None:
        raise ValueError(f"Mask image not found: {mask_path}")
    # Resize the mask if it does not match the source image size
    if image.shape[:2] != mask.shape[:2]:
        print(f"Resizing the mask to match the source image: {mask.shape} -> {image.shape[:2]}")
        mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
    # Extract the region to pixelate
    mosaic_area = cv2.bitwise_and(image, image, mask=mask)
    # Mosaic processing: shrink, then enlarge with nearest-neighbor interpolation
    small = cv2.resize(mosaic_area, (image.shape[1] // mosaic_level, image.shape[0] // mosaic_level), interpolation=cv2.INTER_LINEAR)
    mosaic = cv2.resize(small, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
    # Composite the mosaic back into the source image using the mask
    mosaic_result = cv2.bitwise_and(mosaic, mosaic, mask=mask)
    image_no_mosaic = cv2.bitwise_and(image, image, mask=cv2.bitwise_not(mask))
    result_image = cv2.add(image_no_mosaic, mosaic_result)
    # Save the result
    cv2.imwrite(output_path, result_image)
    return output_path

# The function below uses OpenCV inpainting
def inpaint_image_with_mask(image_path, mask_path, output_path, inpaint_radius=5, inpaint_method=cv2.INPAINT_TELEA):
    """
    Inpaint the source image using a mask.
    Parameters:
    - image_path: path of the source image
    - mask_path: path of the mask image (regions to repair are white, everything else is black)
    - output_path: output path for the inpainted result
    - inpaint_radius: inpainting radius (default 5)
    - inpaint_method: inpainting algorithm (default cv2.INPAINT_TELEA)
    Returns:
    - output_path: path of the inpainted image
    """
    # Load the image and the mask
    image = cv2.imread(image_path)
    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)  # Load the mask as grayscale
    # Check that both loaded correctly
    if image is None:
        raise ValueError(f"Source image not found: {image_path}")
    if mask is None:
        raise ValueError(f"Mask image not found: {mask_path}")
    # Resize the mask if it does not match the source image size
    if image.shape[:2] != mask.shape[:2]:
        print(f"Resizing the mask to match the source image: {mask.shape} -> {image.shape[:2]}")
        mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
    # Inpaint
    inpainted_image = cv2.inpaint(image, mask, inpaint_radius, inpaint_method)
    # Save the result
    cv2.imwrite(output_path, inpainted_image)
    return output_path

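# Usage sketch (assumed paths): both masking back-ends take the same positional
# arguments, so they can be swapped freely:
#
#   inpaint_image_with_mask("./saved_images/input.jpg", "./saved_images/final_mask.jpg", "./saved_images/out_inpaint.jpg")
#   mosaic_image_with_mask("./saved_images/input.jpg", "./saved_images/final_mask.jpg", "./saved_images/out_mosaic.jpg", mosaic_level=20)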
def stamp_image_with_mask(base_image_path, mask_path, output_path, stamp_image_path='./main.png'):
    """
    Paste another image into the source image wherever the mask is white.
    Parameters:
    - base_image_path: path of the source image
    - mask_path: path of the mask image (regions to fill are white, everything else is black)
    - output_path: output path for the result
    - stamp_image_path: path of the image to paste in
    Returns:
    - output_path: output path of the composited image
    """
    # Load the images and the mask
    base_image = cv2.imread(base_image_path)
    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    embed_image = cv2.imread(stamp_image_path)
    # Check that everything loaded correctly
    if base_image is None:
        raise ValueError(f"Source image not found: {base_image_path}")
    if mask is None:
        raise ValueError(f"Mask image not found: {mask_path}")
    if embed_image is None:
        raise ValueError(f"Stamp image not found: {stamp_image_path}")
    # Resize the mask and the stamp image to the source image size
    if base_image.shape[:2] != mask.shape[:2]:
        print(f"Resizing the mask to match the source image: {mask.shape} -> {base_image.shape[:2]}")
        mask = cv2.resize(mask, (base_image.shape[1], base_image.shape[0]))
    if base_image.shape[:2] != embed_image.shape[:2]:
        print(f"Resizing the stamp image to match the source image: {embed_image.shape[:2]} -> {base_image.shape[:2]}")
        embed_image = cv2.resize(embed_image, (base_image.shape[1], base_image.shape[0]))
    # Place the stamp image into the masked region
    embedded_image = base_image.copy()
    embedded_image[mask == 255] = embed_image[mask == 255]
    # Save the result
    cv2.imwrite(output_path, embedded_image)
    return output_path

def inpaint_image_with_mask1(img_path, mask_path, output_path, resize_factor=0.5):
    print('lama')
    # Check whether a GPU is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Load the image and the mask
    image = Image.open(img_path).convert("RGB")  # Convert the image to RGB
    mask = Image.open(mask_path).convert('L')  # Convert the mask to grayscale
    # Match the mask size to the image size
    mask = mask.resize(image.size, Image.NEAREST)
    # Blur the mask edges (Gaussian blur)
    blurred_mask = mask.filter(ImageFilter.GaussianBlur(radius=3))  # 3-pixel blur radius
    # Create a SimpleLama instance
    simple_lama = SimpleLama()
    # Convert the image and the mask to NumPy arrays
    image_np = np.array(image)
    mask_np = np.array(blurred_mask) / 255.0  # Scale the mask to the 0-1 range
    # Pass the image and the mask to SimpleLama for inpainting
    inpainted_np = simple_lama(image_np, mask_np)  # Takes NumPy arrays
    # Convert the result back to a PIL image
    result_image = Image.fromarray(np.uint8(inpainted_np))  # NumPy array -> PIL Image
    # Resize the output image
    new_size = (int(result_image.width * resize_factor), int(result_image.height * resize_factor))
    result_image = result_image.resize(new_size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
    # Save the result
    result_image.save(output_path)
    print(f"Inpainted image saved at {output_path}")
    return output_path

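# Usage sketch (assumed paths): resize_factor scales the saved result; 1 keeps
# the original resolution.
#
#   inpaint_image_with_mask1("./saved_images/input.jpg", "./saved_images/final_mask.jpg",
#                            "./saved_images/out_lama.jpg", resize_factor=1)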
# Directory where uploaded and generated images are saved
SAVE_DIR = Path("./saved_images")
SAVE_DIR.mkdir(parents=True, exist_ok=True)

def save_image(file, filename):
    """Save an uploaded image file into the save directory."""
    filepath = SAVE_DIR / filename
    with open(filepath, "wb") as buffer:
        shutil.copyfileobj(file, buffer)
    return filepath

async def create_mask_and_inpaint_opencv(image: UploadFile = File(...), risk_level: int = Form(...)):
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    input_path = save_image(image.file, "input.jpg")
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = SAVE_DIR / "output_opencv.jpg"
    # Inpaint with OpenCV
    inpaint_image_with_mask(input_path, mask_path, output_path)
    return FileResponse(output_path)

# Renamed from create_mask_and_inpaint_opencv so it no longer shadows the function above
async def create_mask_and_mosaic_opencv(image: UploadFile = File(...), risk_level: int = Form(...)):
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    input_path = save_image(image.file, "input.jpg")
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = SAVE_DIR / "output_opencv.jpg"
    # Pixelate with OpenCV
    mosaic_image_with_mask(input_path, mask_path, output_path)
    return FileResponse(output_path)

# Renamed from create_mask_and_inpaint_opencv so it no longer shadows the functions above
async def create_mask_and_stamp_opencv(image: UploadFile = File(...), risk_level: int = Form(...)):
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    input_path = save_image(image.file, "input.jpg")
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = SAVE_DIR / "output_opencv.jpg"
    # Stamp over the masked region with OpenCV
    stamp_image_with_mask(input_path, mask_path, output_path)
    return FileResponse(output_path)

# Renamed so it no longer collides with the point-aware create_mask_and_inpaint_simple_lama defined later
async def create_mask_and_inpaint_simple_lama_default(image: UploadFile = File(...), risk_level: int = Form(...)):
    input_path = save_image(image.file, "input.jpg")
    point1 = (0.00000000000002, 0.00000000000002)
    point2 = (0.00000000000001, 0.00000000000001)
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds)
    output_path = SAVE_DIR / "output_simple_lama.jpg"
    # Inpaint with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path, resize_factor=1)
    return FileResponse(output_path)

# Object list used for vectorization
TEXT_PROMPTS = [
    'text', 'Name tag', 'License plate', 'Mail', 'Documents', 'QR codes',
    'barcodes', 'Map', 'Digital screens', 'information board',
    'signboard', 'poster', 'sign', 'logo', 'card', 'window', 'mirror',
    'Famous landmark', 'cardboard', 'manhole', 'utility pole'
]
BOX_THRESHOLD = 0.3
TEXT_THRESHOLD = 0.3

# Load clustering results from a JSON file
def load_sums_from_json(filepath):
    with open(filepath, 'r') as json_file:
        sums = json.load(json_file)
    return sums

# Load vector data from a JSON file
def load_vectors_from_json(filepath):
    with open(filepath, 'r') as json_file:
        data = json.load(json_file)
    return data

# Classify a new image into one of the existing clusters
def classify_new_image(new_image_vector, sums_data, loaded_vectors, loaded_object_names, k=1):
    cluster_centers = []
    for cluster in sums_data:
        indices = [loaded_object_names.index(obj_name) for obj_name in cluster]  # Currently unused
        cluster_vectors = np.array([loaded_vectors[obj_name] for obj_name in cluster])
        cluster_center = np.mean(cluster_vectors, axis=0)
        cluster_centers.append(cluster_center)
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(cluster_centers, range(len(cluster_centers)))
    new_image_label = knn.predict([new_image_vector])
    return new_image_label[0]

def process_image_vec(image_path):
    # Check whether a GPU can be used
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    # Load the YOLOv8 model and move it to the device
    model = YOLO('./1026.pt')  # Path to the model weights
    model.to(device)
    # Initialize the feature vector
    object_vector = np.zeros(len(TEXT_PROMPTS))
    # Load the image
    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Run YOLO inference
    results = model(image_rgb)
    # Check each prompt
    for i, text_prompt in enumerate(TEXT_PROMPTS):
        prompt_sum = 0  # Sum of confidences for this prompt
        for box in results[0].boxes:
            class_id = int(box.cls[0])
            confidence = box.conf[0]
            detected_class = model.names[class_id]
            # Check whether the detected class name matches the text prompt
            if text_prompt.lower() == detected_class.lower():
                prompt_sum += confidence  # Add the confidence when the class matches
        # Store the score in object_vector
        object_vector[i] = prompt_sum
    print(object_vector)
    return object_vector.tolist()

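# Classification sketch (assumed data files): the vectors JSON maps names to
# feature vectors and sums_data groups those names into clusters, so a new
# image is assigned to the cluster whose mean vector is nearest (k=1).
#
#   vectors = {name: np.array(v) for name, v in load_vectors_from_json("./output_vectors.json").items()}
#   clusters = load_sums_from_json("./sums_data.json")
#   label = classify_new_image(process_image_vec("./saved_images/input.jpg"),
#                              clusters, vectors, list(vectors.keys()))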
async def classify_image2(file: UploadFile = File(...)):
    image_path = "./temp_image.jpg"
    # Save the uploaded image
    with open(image_path, "wb") as buffer:
        buffer.write(await file.read())
    # Vectorize the image
    new_image_vector = process_image_vec(image_path)
    # Load the reference data from the JSON file
    json_filepath = "./output_vectors.json"
    loaded_data = load_vectors_from_json(json_filepath)
    loaded_vectors = {obj_name: np.array(vector) for obj_name, vector in loaded_data.items()}
    loaded_object_names = list(loaded_vectors.keys())
    # Load the existing clustering results
    sums_data = load_sums_from_json("./sums_data.json")
    # Decide which cluster the new image belongs to
    new_image_cluster = classify_new_image(new_image_vector, sums_data, loaded_vectors, loaded_object_names)
    return {"danger": dangerarray[int(new_image_cluster + 1)]}  # If this misbehaves, adjust the +1 offset here

# API endpoint version of the classifier above
async def classify_image(file: UploadFile = File(...)):
    image_path = "./temp_image.jpg"
    # Save the uploaded image
    with open(image_path, "wb") as buffer:
        buffer.write(await file.read())
    # Vectorize the image
    new_image_vector = process_image_vec(image_path)
    # Load the reference data from the JSON file
    json_filepath = "./output_vectors.json"
    loaded_data = load_vectors_from_json(json_filepath)
    loaded_vectors = {obj_name: np.array(vector) for obj_name, vector in loaded_data.items()}
    loaded_object_names = list(loaded_vectors.keys())
    # Load the existing clustering results
    sums_data = load_sums_from_json("./sums_data.json")
    # Decide which cluster the new image belongs to
    new_image_cluster = classify_new_image(new_image_vector, sums_data, loaded_vectors, loaded_object_names)
    return {"danger": dangerarray[int(new_image_cluster + 1)]}  # If this misbehaves, adjust the +1 offset here

async def create_mask_and_inpaint_simple_lama(
    image: UploadFile = File(...),
    risk_level: int = Form(...),
    x1: float = Form(...),
    y1: float = Form(...),
    x2: float = Form(...),
    y2: float = Form(...),
):
    # Extract points from the form data
    point1 = [x1, y1]
    point2 = [x2, y2]
    # Save the input image
    input_path = save_image(image.file, "input.jpg")
    print('point1,point2', point1, point2)
    # Create a mask image (using the new process_image function)
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds=thresholds)
    # Define the output path for the inpainted image
    output_path = "./output_simple_lama.jpg"
    # Perform inpainting with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path, resize_factor=1)
    # Return the resulting image as a response
    return FileResponse(output_path, media_type="image/jpeg", filename="output_simple_lama.jpg")

async def create_mask_sum(image: UploadFile = File(...), risk_level: int = Form(...),
                          x1: float = Form(...),
                          y1: float = Form(...),
                          x2: float = Form(...),
                          y2: float = Form(...),):
    default_x = 0.001
    default_y = 0.001
    point1 = [default_x if math.isnan(x1) else x1, default_y if math.isnan(y1) else y1]
    point2 = [default_x if math.isnan(x2) else x2, default_y if math.isnan(y2) else y2]
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Generate a unique identifier
    unique_id = uuid.uuid4().hex
    input_path = save_image(image.file, f"./input_{timestamp}_{unique_id}.jpg")
    mask_path = special_process_image_yolo(risk_level, input_path, point1, point2, thresholds=thresholds)
    output_path = f"./output_simple_lama_{timestamp}_{unique_id}.jpg"
    print('point1,point2', point1, point2)  # The region that should NOT be erased
    # Inpaint with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path)
    return FileResponse(output_path)

# Load the Haar cascade for face detection
#face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
def apply_mosaic(image, x, y, w, h, mosaic_level=15):
    """Apply a mosaic to the specified region."""
    face = image[y:y+h, x:x+w]
    face = cv2.resize(face, (w // mosaic_level, h // mosaic_level))
    face = cv2.resize(face, (w, h), interpolation=cv2.INTER_NEAREST)
    image[y:y+h, x:x+w] = face
    return image

'''
@app.post("/mosaic_face")
async def mosaic_face(file: UploadFile = File(...)):
    # Read the image file
    image_data = await file.read()
    np_array = np.frombuffer(image_data, np.uint8)
    img = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
    # Convert to grayscale and detect faces
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(30, 30))
    # Apply a mosaic to each detected face
    for (x, y, w, h) in faces:
        img = apply_mosaic(img, x, y, w, h)
    # Save to a temporary file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        temp_file_path = Path(temp_file.name)
        cv2.imwrite(str(temp_file_path), img)
    # Return the temporary file as the response
    return FileResponse(path=temp_file_path, media_type="image/jpeg", filename="mosaic_image.jpg")
'''

async def classify_image_llm2(file: UploadFile = File(...), api_key: str = None):
    image_path = "./temp_image.jpg"
    # Save the uploaded image
    with open(image_path, "wb") as buffer:
        buffer.write(await file.read())
    if not api_key:
        api_key = os.getenv('GEMINI_API_KEY')
    danger_level = ObjectDetector(API_KEY=api_key).detect_danger_level(image_path)
    return {"danger": danger_level}

# From here on: LLM-based classification
# Endpoint that classifies an image using the LLM
async def classify_image_llm(file: UploadFile = File(...), api_key: str = None):
    image_path = "./temp_image.jpg"
    # Save the uploaded image
    with open(image_path, "wb") as buffer:
        buffer.write(await file.read())
    if not api_key:
        api_key = os.getenv('GEMINI_API_KEY')
    danger_level = ObjectDetector(API_KEY=api_key).detect_danger_level(image_path)
    return {"danger": danger_level}

'''
@app.post("/analyze")
async def create_mask_sum3(file: UploadFile = File(...),  # Matches `file` on the Next.js side
                           scene: str = Form(...),
                           mode: str = Form(...),
                           risk_level: str = Form(None),  # Renamed from `value` to `risk_level`
                           rect: str = Form(None),):
    default_x = 0.001
    default_y = 0.001
    value = int(value) if value.isdigit() else 0  # Convert the risk level to an integer
    # Parse the rect JSON string and extract the coordinates
    try:
        # Handle the cases where rect is empty or "null"
        if rect and rect.strip() and rect.lower() != 'null':
            rect_data = json.loads(rect)
            x1 = float(rect_data.get('x1', default_x))
            y1 = float(rect_data.get('y1', default_y))
            x2 = float(rect_data.get('x2', default_x))
            y2 = float(rect_data.get('y2', default_y))
        else:
            # Default values when rect is not provided
            x1, y1, x2, y2 = default_x, default_y, default_x, default_y
    except (json.JSONDecodeError, TypeError, ValueError):
        # Fall back to the defaults on JSON parse or type-conversion errors
        x1, y1, x2, y2 = default_x, default_y, default_x, default_y
    point1 = [x1, y1]
    point2 = [x2, y2]
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Generate a unique identifier
    unique_id = uuid.uuid4().hex
    input_path = save_image(image.file, f"./input_{timestamp}_{unique_id}.jpg")
    mask_path = llm_to_process_image_simple(value, input_path, point1, point2, thresholds=thresholds)
    output_path = f"./output_simple_lama_{timestamp}_{unique_id}.jpg"
    print('point1,point2', point1, point2)  # The region that should NOT be erased
    # Inpaint with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path)
    return FileResponse(output_path)
'''

async def analyze(
    file: UploadFile = File(...),  # Matches `file` on the Next.js side
    scene: str = Form(...),
    mode: str = Form(...),
    risk_level: str = Form(None),  # Renamed from `value` to `risk_level`
    rect: str = Form(None),
):
    try:
        # Validate the required fields
        if not file or not scene or not mode:
            raise HTTPException(status_code=400, detail="Missing required field: file, scene, or mode")
        # If debug mode (dummy responses) is enabled
        if USE_DUMMY_RESPONSE:
            print(f"Debug mode: using a dummy response (type: {DUMMY_RESPONSE_TYPE})")
            # Dummy-response logic for testing
            match DUMMY_RESPONSE_TYPE:
                case 'fixed':
                    # Fixed dummy response (danger level 50)
                    dummy_image_path = create_dummy_image(color=(0, 255, 0))  # Green dummy image
                    return FileResponse(
                        dummy_image_path,
                        media_type="image/jpeg",
                        headers={"x-danger": "50"}
                    )
                case 'error':
                    # Dummy error scenario (400 error)
                    raise HTTPException(status_code=400, detail="Test error: invalid request")
                case _:
                    # Random danger level (default; covers 'random' and anything else)
                    danger = random.randint(0, 100)
                    dummy_image_path = create_dummy_image(color=(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
                    return FileResponse(
                        dummy_image_path,
                        media_type="image/jpeg",
                        headers={"x-danger": str(danger)}
                    )
        # Normal processing (dummy responses disabled)
        default_x = 0.001
        default_y = 0.001
        risk_level = int(risk_level) if risk_level and risk_level.isdigit() else 0  # Convert the risk level to an integer
        # Parse the rect JSON string and extract the coordinates
        try:
            if rect and rect.strip() and rect.lower() != 'null':
                rect_data = json.loads(rect)
                x1 = float(rect_data.get('x1', default_x))
                y1 = float(rect_data.get('y1', default_y))
                x2 = float(rect_data.get('x2', default_x))
                y2 = float(rect_data.get('y2', default_y))
            else:
                x1, y1, x2, y2 = default_x, default_y, default_x, default_y
        except (json.JSONDecodeError, TypeError, ValueError):
            x1, y1, x2, y2 = default_x, default_y, default_x, default_y
        point1 = [x1, y1]
        point2 = [x2, y2]
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = uuid.uuid4().hex
        input_path = save_image(file.file, f"input_{timestamp}_{unique_id}.jpg")
        if mode == 'auto':
            mask_path, response = await llm_to_process_image_simple_auto(risk_level, input_path, point1, point2, thresholds=thresholds, scene=scene)
            response = response['risk_level']  # Extract the needed field from the response
        elif mode == 'fast':
            mask_path = llm_to_process_image_simple(risk_level, input_path, point1, point2, thresholds=thresholds)
            response = await classify_image2(file)
            response = response['danger']  # Extract the needed field from the response
        else:
            mask_path = llm_to_process_image_simple(risk_level, input_path, point1, point2, thresholds=thresholds)
            response = await classify_image_llm2(file)
            response = response['danger']  # Extract the needed field from the response
        output_path = f"./output_simple_lama_{timestamp}_{unique_id}.jpg"
        print(f'point1,point2: {point1},{point2}')  # The region that should NOT be erased
        # Inpaint with SimpleLama
        inpaint_image_with_mask1(input_path, mask_path, output_path)
        return FileResponse(
            output_path,
            media_type="image/jpeg",
            headers={"x-danger": str(response)}  # In real processing, set the danger level appropriately
        )
    except Exception as e:
        # Log the error in detail
        error_type = type(e).__name__
        error_details = str(e)
        stack_trace = traceback.format_exc()
        print("=" * 50)
        print(f"Error while handling the request: {error_type}")
        print(f"Error details: {error_details}")
        print(f"--- Stack trace ---\n{stack_trace}")
        print("=" * 50)
        raise HTTPException(status_code=500, detail=f"Internal server error: {error_type}")

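# Client-side sketch (assumed route name and host) using `requests`; rect holds
# normalized 0-1 coordinates of the region to protect from masking:
#
#   import requests
#   with open("input.jpg", "rb") as f:
#       r = requests.post(
#           "http://localhost:8000/analyze",
#           files={"file": f},
#           data={"scene": "street", "mode": "auto", "risk_level": "60",
#                 "rect": '{"x1": 0.1, "y1": 0.1, "x2": 0.4, "y2": 0.4}'},
#       )
#   print(r.headers.get("x-danger"))  # Estimated danger level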
async def create_mask_sum2(image: UploadFile = File(...), risk_level: int = Form(...),
                           x1: float = Form(...),
                           y1: float = Form(...),
                           x2: float = Form(...),
                           y2: float = Form(...),):
    default_x = 0.001
    default_y = 0.001
    point1 = [default_x if math.isnan(x1) else x1, default_y if math.isnan(y1) else y1]
    point2 = [default_x if math.isnan(x2) else x2, default_y if math.isnan(y2) else y2]
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Generate a unique identifier
    unique_id = uuid.uuid4().hex
    input_path = save_image(image.file, f"./input_{timestamp}_{unique_id}.jpg")
    mask_path = llm_to_process_image_simple(risk_level, input_path, point1, point2, thresholds=thresholds)
    output_path = f"./output_simple_lama_{timestamp}_{unique_id}.jpg"
    print('point1,point2', point1, point2)  # The region that should NOT be erased
    # Inpaint with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path)
    return FileResponse(
        output_path,
        media_type="image/jpeg",
        headers={"x-danger": str(random.random() * 100)}  # In real processing, set the danger level appropriately
    )

# To be discussed with Miura
async def create_mask_sum_auto(image: UploadFile = File(...), risk_level: int = Form(...), x1: float = Form(...),
                               y1: float = Form(...),
                               x2: float = Form(...),
                               y2: float = Form(...)):
    default_x = 0.001
    default_y = 0.001
    point1 = [default_x if math.isnan(x1) else x1, default_y if math.isnan(y1) else y1]
    point2 = [default_x if math.isnan(x2) else x2, default_y if math.isnan(y2) else y2]
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Generate a unique identifier
    unique_id = uuid.uuid4().hex
    input_path = save_image(image.file, f"./input_{timestamp}_{unique_id}.jpg")
    mask_path, response = await llm_to_process_image_simple_auto(risk_level, input_path, point1, point2, thresholds=thresholds)
    output_path = f"./output_simple_lama_{timestamp}_{unique_id}.jpg"
    print('point1,point2', point1, point2)  # The region that should NOT be erased
    # Inpaint with SimpleLama
    inpaint_image_with_mask1(input_path, mask_path, output_path)
    return FileResponse(output_path)

# Helper function to read an uploaded image file into an OpenCV BGR array
def read_image(file: UploadFile):
    image = Image.open(BytesIO(file.file.read()))
    image = np.array(image)
    if image.shape[2] == 4:  # Remove alpha channel if present
        image = image[:, :, :3]
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

'''
# Function to extract face embeddings and bounding boxes from an image
def get_face_data(image):
    # Load InsightFace model
    model = insightface.app.FaceAnalysis()
    model.prepare(ctx_id=-1)  # Use -1 for CPU, or 0 for GPU
    faces = model.get(image)
    if faces:
        embeddings = [face.embedding for face in faces]
        bboxes = [face.bbox for face in faces]
        return embeddings, bboxes, image
    else:
        return None, None, image
'''

# Function to apply a mosaic to a specific bounding-box region in an image
# Note: this overrides the earlier (x, y, w, h) version of apply_mosaic defined above
def apply_mosaic(image, bbox, mosaic_size=10):
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2] - bbox[0]), int(bbox[3] - bbox[1])
    face_region = image[y:y+h, x:x+w]
    if face_region.size == 0:
        return image
    face_region = cv2.resize(face_region, (mosaic_size, mosaic_size), interpolation=cv2.INTER_LINEAR)
    face_region = cv2.resize(face_region, (w, h), interpolation=cv2.INTER_NEAREST)
    image[y:y+h, x:x+w] = face_region
    return image

'''
@app.post("/mosaic_faces")
async def mosaic_faces(reference_image: UploadFile = File(...), test_image: UploadFile = File(...)):
    try:
        # Load images
        ref_image = read_image(reference_image)
        test_image_data = read_image(test_image)
        # Extract face data from reference image
        reference_embeddings, _, _ = get_face_data(ref_image)
        if reference_embeddings is None:
            raise HTTPException(status_code=400, detail="No face detected in reference image.")
        # Extract face data from test image
        test_embeddings, test_bboxes, test_image_processed = get_face_data(test_image_data)
        if test_embeddings is None:
            raise HTTPException(status_code=400, detail="No face detected in test image.")
        # Process each detected face in the test image
        for test_embedding, bbox in zip(test_embeddings, test_bboxes):
            similarity = np.dot(reference_embeddings[0], test_embedding) / (
                np.linalg.norm(reference_embeddings[0]) * np.linalg.norm(test_embedding))
            # Apply mosaic to unrecognized faces only
            if similarity <= 0.4:
                test_image_processed = apply_mosaic(test_image_processed, bbox)
        # Save processed image to a temporary file
        temp_file = "/tmp/processed_image.jpg"
        cv2.imwrite(temp_file, test_image_processed)
        return FileResponse(temp_file, media_type="image/jpeg", filename="processed_image.jpg")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
'''

async def read_root(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})
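
# Route-registration and server-startup sketch. The path names below are
# assumptions: most handlers in this file are defined without @app decorators,
# so they are not reachable until they are registered, for example like this:
#
#   app.get("/", response_class=HTMLResponse)(read_root)
#   app.post("/analyze")(analyze)
#   app.post("/classify")(classify_image)
#   app.post("/classify_llm")(classify_image_llm)
#
# A typical notebook-style launch with pyngrok (matching the imports at the top)
# might then look like:
#
#   if __name__ == "__main__":
#       nest_asyncio.apply()
#       public_url = ngrok.connect(8000)
#       print(f"Public URL: {public_url}")
#       uvicorn.run(app, host="0.0.0.0", port=8000)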