# srsly bruh... checkin the code??
import gradio as gr
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor
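
# Load one shared ViT image processor and two binary (NSFW vs. SFW) classifiers:
# one fine-tuned on human photos, one on anime/cartoon art. The .pth checkpoints
# are expected alongside this file in the Space's root.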
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")

human_model = ViTForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k", num_labels=2
)
human_model.load_state_dict(torch.load("humanNsfw_Swf.pth", map_location="cpu"))
human_model.eval()

anime_model = ViTForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k", num_labels=2
)
anime_model.load_state_dict(torch.load("animeCartoonNsfw_Sfw.pth", map_location="cpu"))
anime_model.eval()
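
# preprocess() runs the checkpoint's default pipeline (resize to 224x224 plus
# normalization) and returns a "pixel_values" tensor of shape (1, 3, 224, 224).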
def preprocess(image: Image.Image):
    inputs = processor(images=image, return_tensors="pt")
    return inputs["pixel_values"]

def predict(image, model_type):
    if image is None:
        return "<div class='result-box'>Please upload an image...</div>"
    inputs = preprocess(image)
    model = human_model if model_type == "Human" else anime_model
    with torch.no_grad():
        outputs = model(pixel_values=inputs)
        logits = outputs.logits
        probs = F.softmax(logits, dim=1)
        pred_class = torch.argmax(probs, dim=1).item()
        confidence = probs[0][pred_class].item()
    # In these checkpoints class 0 is NSFW and class 1 is SFW.
    label = "NSFW" if pred_class == 0 else "SFW"
    return f"""
    <div class='result-box'>
        <strong>Model:</strong> {model_type}<br>
        <strong>Prediction:</strong> {label}<br>
        <strong>Confidence:</strong> {confidence:.2%}
    </div>
    """

custom_css = """
.result-box {
    position: relative;
    background-color: black;
    padding: 20px;
    border-radius: 12px;
    color: white;
    font-size: 1.2rem;
    text-align: center;
    font-weight: bold;
    width: 100%;
    z-index: 1;
    overflow: hidden;
    box-shadow: 0 0 15px oklch(0.718 0.202 349.761);
}
.result-box::before {
    content: "";
    position: absolute;
    top: -4px;
    left: -4px;
    right: -4px;
    bottom: -4px;
    background: conic-gradient(from 0deg, oklch(0.718 0.202 349.761), transparent 40%, oklch(0.718 0.202 349.761));
    border-radius: 16px;
    animation: spin 3s linear infinite;
    z-index: -1;
    filter: blur(8px);
}
@keyframes spin {
    0% { transform: rotate(0deg); }
    100% { transform: rotate(360deg); }
}
.disclaimer {
    color: white;
    font-size: 0.9rem;
    text-align: center;
    margin-top: 40px;
    text-shadow: 0 0 10px oklch(0.718 0.202 349.761);
}
.gradio-container {
    max-width: 900px;
    margin: auto;
}
"""

# ui
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("## NSFW Detector (Human and Anime/Cartoon)")
    gr.Markdown(
        "Upload an image and select the appropriate model. The system will detect whether the content is NSFW or SFW."
    )
    with gr.Row():
        with gr.Column(scale=1):
            model_choice = gr.Radio(["Human", "Anime"], label="Select Model Type", value="Human")
            image_input = gr.Image(type="pil", label="Upload Image")
        with gr.Column(scale=1):
            output_box = gr.HTML("<div class='result-box'>Awaiting input...</div>")

    # Re-run the prediction whenever the image or the selected model changes.
    image_input.change(fn=predict, inputs=[image_input, model_choice], outputs=output_box)
    model_choice.change(fn=predict, inputs=[image_input, model_choice], outputs=output_box)

    # Disclaimer with glow
    gr.Markdown(
        "<div class='disclaimer'>This is a side project. Results are not guaranteed. No images are stored. For more info, please check the README file.</div>"
    )

if __name__ == "__main__":
    demo.launch()
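
# On Hugging Face Spaces this file is the entrypoint; running `python app.py`
# locally starts the same Gradio server.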