import os
import io
import base64
import subprocess
import tempfile
from urllib.parse import quote_plus

import gradio as gr
import piexif
import torch
from PIL import Image


# Optional API keys for programmatic reverse-search services (not used by the
# link-building code below, but kept so they can be wired in later).
TINEYE_API_KEY = os.environ.get("TINEYE_API_KEY", "")
BING_API_KEY = os.environ.get("BING_API_KEY", "")

# Hugging Face checkpoint used for AI-image detection.
HF_AI_MODEL = "Dafilab/ai-image-detector"
IMG_SIZE = 380  # currently unused; the HF image processor handles resizing itself


def save_bytes_to_file(b, path):
    with open(path, "wb") as f:
        f.write(b)


def extract_exif(image_bytes):
    """Return EXIF metadata from raw image bytes as {ifd: {tag_name: value}}, or {'error': ...}."""
    try:
        exif_dict = piexif.load(image_bytes)
        res = {}
        for ifd in exif_dict:
            # The "thumbnail" entry holds raw JPEG bytes rather than a tag dict, so skip it.
            if ifd == "thumbnail" or not exif_dict[ifd]:
                continue
            res[ifd] = {}
            for tag, val in exif_dict[ifd].items():
                name = piexif.TAGS[ifd].get(tag, {"name": str(tag)})["name"]
                # Decode byte values so the result stays JSON-serializable for the UI.
                if isinstance(val, bytes):
                    val = val.decode("utf-8", errors="replace")
                res[ifd][name] = val
        return res
    except Exception as e:
        return {"error": str(e)}


def load_ai_model():
    """Load the Hugging Face classifier once at startup; returns (processor, model) or (None, None)."""
    try:
        # Auto* classes pick the correct processor/model class for whatever
        # architecture the checkpoint actually uses.
        from transformers import AutoImageProcessor, AutoModelForImageClassification
        processor = AutoImageProcessor.from_pretrained(HF_AI_MODEL)
        model = AutoModelForImageClassification.from_pretrained(HF_AI_MODEL)
        model.eval()
        return processor, model
    except Exception as e:
        print("Could not load HF model:", e)
        return None, None


processor, hf_model = load_ai_model()


def detect_ai_image(pil_image):
    """Classify a PIL image with the loaded model; returns the top label and its confidence."""
    if processor is None or hf_model is None:
        return {"status": "model_not_loaded"}
    inputs = processor(images=pil_image.convert("RGB"), return_tensors="pt")
    with torch.no_grad():
        outputs = hf_model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0].tolist()
    # Label names come from the model config; fall back to a generic mapping.
    labels = hf_model.config.id2label if hasattr(hf_model.config, "id2label") else {0: "REAL", 1: "FAKE"}
    top_idx = max(range(len(probs)), key=lambda i: probs[i])
    return {"label": labels.get(top_idx, str(top_idx)), "confidence": float(probs[top_idx])}


def extract_keyframes_from_video(video_path, max_frames=5):
    """Grab up to max_frames stills from a video using ffprobe/ffmpeg (both must be on PATH)."""
    out_dir = tempfile.mkdtemp()
    # Ask ffprobe for the container duration so frames can be spread across the clip.
    try:
        cmd = [
            "ffprobe", "-v", "error", "-select_streams", "v:0",
            "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1",
            video_path
        ]
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
        duration = float(proc.stdout.strip() or 0.0)
    except Exception:
        duration = 0
    frames = []
    if duration <= 0:
        # Unknown duration: fall back to the first few seconds of the clip.
        timestamps = [0, 1, 2, 3, 4][:max_frames]
    else:
        step = max(1, duration / max_frames)
        timestamps = [i * step for i in range(max_frames)]
    for i, t in enumerate(timestamps):
        out_path = os.path.join(out_dir, f"frame_{i}.jpg")
        # -ss before -i seeks quickly; -frames:v 1 writes a single high-quality JPEG.
        cmd = ["ffmpeg", "-ss", str(t), "-i", video_path, "-frames:v", "1", "-q:v", "2", out_path, "-y"]
        try:
            subprocess.run(cmd, capture_output=True, timeout=15)
            # Seeks past the end of the clip simply produce no file and are skipped.
            if os.path.exists(out_path):
                frames.append(out_path)
        except Exception:
            continue
    return frames
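
# Example usage (a sketch, assuming ffmpeg/ffprobe are installed and "clip.mp4" exists):
#     paths = extract_keyframes_from_video("clip.mp4", max_frames=3)
#     # paths -> ["/tmp/tmpXXXX/frame_0.jpg", ...]; each frame can be fed to detect_ai_image()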


def build_reverse_search_links_for_file(file_url=None, local_file_path=None):
    """
    If file_url is a publicly reachable URL, build direct links that open each
    reverse-image-search engine with that URL pre-filled. Otherwise return the
    engines' search-by-upload pages, where the journalist uploads the file manually.
    """
    links = {}
    if file_url:
        encoded = quote_plus(file_url)
        links['Google'] = f"https://www.google.com/searchbyimage?image_url={encoded}"
        links['Yandex'] = f"https://yandex.com/images/search?rpt=imageview&url={encoded}"
        links['TinEye'] = f"https://tineye.com/search?url={encoded}"
        links['Bing'] = f"https://www.bing.com/images/search?q=imgurl:{encoded}&view=detailv2"
    else:
        links['Google_upload'] = "https://images.google.com/ (use camera icon → upload image)"
        links['TinEye_upload'] = "https://tineye.com/ (upload image)"
        links['Yandex_upload'] = "https://yandex.com/images/ (upload image)"
        links['Bing_upload'] = "https://www.bing.com/images (click camera)"
    return links
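
# Example usage (a sketch; the URL is hypothetical and only illustrates the output shape):
#     links = build_reverse_search_links_for_file("https://example.com/photo.jpg")
#     # links["TinEye"] -> "https://tineye.com/search?url=https%3A%2F%2Fexample.com%2Fphoto.jpg"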


def detect_and_crop_faces(pil_image):
    """Detect faces with face_recognition (if installed) and return a JPEG crop of each face."""
    try:
        import face_recognition
        import numpy as np
    except Exception:
        return {"error": "face_recognition_not_installed_or_failed"}
    img = pil_image.convert("RGB")
    np_img = np.array(img)
    locs = face_recognition.face_locations(np_img)
    faces = []
    for i, (top, right, bottom, left) in enumerate(locs):
        crop = img.crop((left, top, right, bottom))
        b = io.BytesIO()
        crop.save(b, format="JPEG")
        faces.append({'index': i, 'image_bytes': b.getvalue()})
    return faces
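
# Example usage (a sketch; each crop could then be reverse-searched separately):
#     faces = detect_and_crop_faces(Image.open("group_photo.jpg"))
#     for face in faces:
#         save_bytes_to_file(face['image_bytes'], f"face_{face['index']}.jpg")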


def process_upload(file):
    """Run the verification pipeline on an uploaded file; returns (report_dict, preview_image)."""
    if file is None:
        return {"error": "no file uploaded"}, None
    # Gradio may pass a path string or a tempfile-like object exposing .name.
    fname = getattr(file, "name", file)
    with open(fname, "rb") as fh:
        b = fh.read()
    out = {"filename": os.path.basename(fname)}
    try:
        pil = Image.open(io.BytesIO(b))
        out['type'] = "image"
        try:
            out['exif'] = extract_exif(b)
        except Exception as e:
            out['exif'] = {"error": str(e)}
        try:
            out['ai_detection'] = detect_ai_image(pil)
        except Exception as e:
            out['ai_detection'] = {"error": str(e)}
        out['reverse_links'] = build_reverse_search_links_for_file()
        # Build a small preview; convert first because JPEG cannot store an alpha channel.
        preview = pil.convert("RGB")
        preview.thumbnail((800, 800))
        buf = io.BytesIO()
        preview.save(buf, format="JPEG")
        out['preview_base64'] = base64.b64encode(buf.getvalue()).decode()
        return out, preview
    except Exception:
        # Not a readable image: treat the upload as a video and pull keyframes instead.
        tv = tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(fname)[1])
        tv.write(b)
        tv.flush()
        tv.close()
        frames = extract_keyframes_from_video(tv.name, max_frames=5)
        out['type'] = "video"
        out['keyframes'] = []
        for path in frames:
            with open(path, "rb") as f:
                out['keyframes'].append(base64.b64encode(f.read()).decode())
        out['reverse_links'] = build_reverse_search_links_for_file()
        # Show the first extracted keyframe as the preview, if any were produced.
        preview = Image.open(frames[0]) if frames else None
        return out, preview
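
# Example usage (a sketch for running the pipeline outside the UI, assuming "photo.jpg" exists):
#     report, preview = process_upload("photo.jpg")
#     print(report['ai_detection'], report['reverse_links'])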


css = """
.gradio-container { max-width: 1100px; margin: auto; }
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown(
        "## Simple image/video verification tool (for journalists)\n"
        "- Upload an image or a video\n"
        "- The tool shows EXIF metadata, flags whether an image looks AI-generated "
        "(via the HF model, if it loaded), and extracts keyframes from videos\n"
        "- It also offers quick reverse-image-search links (open them to check the earliest appearance on the web)\n"
    )
    with gr.Row():
        inp = gr.File(label="Upload an image or video (JPEG/PNG/MP4...)")
        btn = gr.Button("Verify")
    out_json = gr.JSON(label="Verification result (JSON)")
    preview = gr.Image(label="Preview / keyframes", interactive=False)
    # process_upload returns (report, preview_image), so wire both output components.
    btn.click(process_upload, inputs=inp, outputs=[out_json, preview])


if __name__ == "__main__":
    demo.launch()