Rerandaka committed (verified)
Commit aaf4f58 · 1 Parent(s): 62bc821

initial commit

Files changed (2)
  1. app.py +43 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,43 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+
+ # Load model and tokenizer
+ model_id = "Rerandaka/child-safety-01"
+ tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
+ model = AutoModelForSequenceClassification.from_pretrained(model_id)
+
+ # Class mapping (edit labels as needed)
+ label_map = {
+     0: "Safe / Normal",
+     1: "Inappropriate / Unsafe"
+ }
+
+ # Inference function
+ def classify_text(text: str):
+     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=256)
+     with torch.no_grad():
+         outputs = model(**inputs)
+     probs = torch.nn.functional.softmax(outputs.logits, dim=1)
+     predicted = torch.argmax(probs, dim=1).item()
+     confidence = probs[0][predicted].item()
+     # Gradio expects one return value per output component
+     label = label_map.get(predicted, str(predicted))
+     confidence = round(confidence, 4)
+     return label, confidence
+
+ # Define Gradio Interface
+ demo = gr.Interface(
+     fn=classify_text,
+     inputs=gr.Textbox(label="Enter text to classify"),
+     outputs=[
+         gr.Textbox(label="Predicted Label"),
+         gr.Textbox(label="Confidence")
+     ],
+     title="Child-Safety Text Classifier",
+     description="This model detects whether text content is unsafe or inappropriate for children.",
+     allow_flagging="never"
+ )
+
+ # Launch the app; gr.Interface exposes its prediction API at /predict by default
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ transformers
+ torch
+ gradio
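
As a usage note (not part of this commit): a minimal sketch of calling the running app from Python with gradio_client. The Space id below is a placeholder, and the /predict endpoint name assumes gr.Interface's default API name.

from gradio_client import Client

# Placeholder Space id -- replace with the Space that actually hosts this app
client = Client("user/space-name")

# Default Interface endpoint; returns one value per output component: (label, confidence)
label, confidence = client.predict("example text to check", api_name="/predict")
print(label, confidence)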