import gradio as gr
import onnxruntime as ort
import numpy as np
from tokenizers import Tokenizer
from huggingface_hub import hf_hub_download

# Maximum token sequence length; inputs are truncated or zero-padded to this size.
MAX_LEN = 256

# Fixed description text used for every prediction (not exposed in the UI).
DESCRIPTION_TEXT = (
    "I am raising this case to report a severe and ongoing issue of vermin infestation, "
    "specifically rats and mice, in my residential area. The problem appears to be directly linked "
)

# Define possible choices for each field
status_choices = ["Assess & Assign", "Generate Letter", "Site Inspection"]
category_choices = ["Litter and Nuisance"]
request_reason_choices = ["Nuisance"]
request_sub_reason_choices = ["Animals"]
additional_reason_choices = ["Vermin, Rats and Mice", "Dog", "Cat", "Horse"]
notification_method_choices = ["No Notification", "Email", "Phone"]
inspection_performed_choices = ["Yes", "No"]
letter_sent_choices = ["Yes", "No"]

# Download the ONNX model ("nba.onnx") and the BPE tokenizer from the Hugging Face Hub.
onnx_model_path = hf_hub_download(
    repo_id="iimran/Case-Next-Best-Action-Classifier", filename="nba.onnx"
)
tokenizer_path = hf_hub_download(
    repo_id="iimran/Case-Next-Best-Action-Classifier", filename="train_bpe_tokenizer.json"
)

# Load the tokenizer and ONNX model once outside the function for efficiency.
tokenizer = Tokenizer.from_file(tokenizer_path)
session = ort.InferenceSession(onnx_model_path)
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name
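# Optional: inspect the model's input/output signature when debugging shape issues
# (a diagnostic sketch only; not required for inference):
# for inp in session.get_inputs():
#     print(inp.name, inp.shape, inp.type)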

def predict_action(status, category, request_reason, request_sub_reason,
                   additional_reason, notification_method, inspection_performed, letter_sent):
    # Combine fields into one input string.
    fields = [
        status,
        category,
        request_reason,
        request_sub_reason,
        additional_reason,
        notification_method,
        DESCRIPTION_TEXT,
        inspection_performed,
        letter_sent
    ]
    # Dropdowns left unselected arrive as None; treat those as empty strings before joining.
    sample_text = " ".join(f if f else "" for f in fields)
    
    # Tokenize and pad the input
    encoding = tokenizer.encode(sample_text)
    ids = encoding.ids[:MAX_LEN]
    padding = [0] * (MAX_LEN - len(ids))
    input_ids = np.array([ids + padding], dtype=np.int64)
    
    # Run inference
    outputs = session.run([output_name], {input_name: input_ids})
    predicted_class = np.argmax(outputs[0], axis=1)[0]
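    # If per-class confidences are needed, a softmax over the raw scores is one option
    # (a sketch; assumes outputs[0] has shape (1, num_classes) and holds unnormalised logits):
    # logits = outputs[0][0]
    # exp = np.exp(logits - np.max(logits))
    # probs = exp / exp.sum()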
    
    # Map predicted index to the actual action labels
    label_names = [
        "Assign Case Officer",
        "Generate Letter and Send By Post",
        "Generate Letter and Send Email",
        "Generate Letter and Send SMS",
        "Schedule Inspection",
        "Send Feedback Survey"
    ]
    predicted_label = label_names[predicted_class]
    return predicted_label
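
# Optional command-line sanity check (a minimal sketch; the argument values below are
# illustrative picks from the dropdown choices above, not values from any real case):
# if __name__ == "__main__":
#     print(predict_action("Site Inspection", "Litter and Nuisance", "Nuisance", "Animals",
#                          "Vermin, Rats and Mice", "Email", "No", "No"))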

# Build the Gradio interface that wraps predict_action.
demo = gr.Interface(
    fn=predict_action,
    inputs=[
        gr.Dropdown(choices=status_choices, label="Status"),
        gr.Dropdown(choices=category_choices, label="Category"),
        gr.Dropdown(choices=request_reason_choices, label="Request Reason"),
        gr.Dropdown(choices=request_sub_reason_choices, label="Request Sub Reason"),
        gr.Dropdown(choices=additional_reason_choices, label="Additional Reason"),
        gr.Dropdown(choices=notification_method_choices, label="Notification Method"),
        gr.Dropdown(choices=inspection_performed_choices, label="Inspection Performed", value="No"),
        gr.Dropdown(choices=letter_sent_choices, label="Letter Sent", value="No")
    ],
    outputs=gr.Textbox(label="Predicted Action"),
    title="Council - Case Next Best Action Predictor",
    description="Select values from the dropdowns. The description field is fixed."
)

demo.launch()