import gradio
from transformers import pipeline

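# The zero-shot pipeline scores the input text against arbitrary candidate
# labels with an NLI model, so no task-specific fine-tuning is needed.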
classifier = pipeline("zero-shot-classification",
                      model="facebook/bart-large-mnli")

# sequence_to_classify = "one day I will see the world"
# candidate_labels = ['travel', 'cooking', 'dancing']
# CATEGORIES = ['doc_type.jur', 'doc_type.Spec', 'doc_type.ZDF', 'doc_type.Publ',
#        'doc_type.Scheme', 'content_type.Alt', 'content_type.Krypto',
#        'content_type.Karte', 'content_type.Banking', 'content_type.Reg',
#        'content_type.Konto']
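# Human-readable category names (presumably the display names for the internal
# doc_type.* / content_type.* codes listed above).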
categories = [
    "Legal", "Specification", "Facts and Figures",
    "Publication", "Payment Scheme",
    "Alternative Payment Systems", "Crypto Payments",
    "Card Payments", "Banking", "Regulations", "Account Payments"
]

def clf_text(txt: str):
    """Classify `txt` against the candidate categories and return a label -> score mapping."""
    res = classifier(txt, categories, multi_label=True)
    # Sort so the highest-scoring labels come first.
    items = sorted(zip(res["labels"], res["scores"]), key=lambda tpl: tpl[1], reverse=True)
    # Alternative plain-text output:
    # output = [f"{lbl}:\t{score}" for lbl, score in items]
    # return "\n".join(output)
    return dict(items)
# classifier(sequence_to_classify, candidate_labels)
#{'labels': ['travel', 'dancing', 'cooking'],
# 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289],
# 'sequence': 'one day I will see the world'}


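# Hello-world demo handler; not wired to the interface below (fn is set to clf_text instead).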
def my_inference_function(name):
    return "Hello " + name + "!"

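# Text box in, JSON view of the label -> score dict out.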
gradio_interface = gradio.Interface(
    # fn=my_inference_function,
    fn=clf_text,
    inputs="text",
    outputs="json",
)
gradio_interface.launch()