File size: 2,880 Bytes
3d730c5
 
0b476b9
 
 
 
3d730c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b476b9
 
 
 
 
 
 
 
 
3d730c5
 
 
 
 
 
 
e8e470a
 
3d730c5
 
 
 
e8e470a
 
 
 
 
 
 
 
 
 
 
 
 
 
3d730c5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import gradio as gr
import os
# HACK: installing a dependency at runtime via pip; fragile — prefer listing
# huggingface_hub in requirements.txt so the Space resolves it at build time.
os.system("pip install huggingface_hub")

from huggingface_hub import space_info

# Project-local module; expected to provide `Obj` (see usage below).
from predict import *

from transformers import T5ForConditionalGeneration
from transformers import T5TokenizerFast as T5Tokenizer
import pandas as pd
# Hugging Face Hub repo id for the English COMET-ATOMIC T5 checkpoint.
model = "svjack/comet-atomic-en"
device = "cpu"
#device = "cuda:0"
# NOTE: `model` is rebound from repo-id string to the loaded model object below.
tokenizer = T5Tokenizer.from_pretrained(model)
model = T5ForConditionalGeneration.from_pretrained(model).to(device).eval()

# COMET-ATOMIC relation prompts: each prefix is prepended to the user's event
# text to ask the model one inference question (need / effect / intent / reaction).
NEED_PREFIX = 'What are the necessary preconditions for the next event?'
EFFECT_PREFIX = 'What could happen after the next event?'
INTENT_PREFIX = 'What is the motivation for the next event?'
REACT_PREFIX = 'What are your feelings after the following event?'

# Predictor wrapper from the project-local `predict` module; presumably bundles
# model/tokenizer/device and exposes `.predict(text, do_sample=...)` — see demo_func.
obj = Obj(model, tokenizer, device)

# Original Chinese examples kept for reference (this Space is the English variant):
'''
text0 = "X吃到了一顿大餐。"
text1 = "X和Y一起搭了个积木。"
'''
text0 = "X had a big meal."
text1 = "X invites Y to a party."

# Gradio examples: each row is [event_text, do_sample] matching the Interface inputs.
example_sample = [
    [text0, False],
    [text1, False],
]

def demo_func(event, do_sample):
    """Run all four COMET-ATOMIC relation prompts against one event.

    Parameters
    ----------
    event : str
        The event sentence (e.g. "X had a big meal.").
    do_sample : bool
        Forwarded to the predictor; enables sampling instead of greedy decoding.

    Returns
    -------
    dict
        {"Output": [{"PREFIX": <question>, "PRED": <model answer>}, ...]}
        with one entry per relation prefix, in fixed order.
    """
    # The original built a list of `times` predictions (times == 1) and took
    # element [0], then round-tripped through a pandas DataFrame just to get a
    # list of dicts; a direct loop produces the identical structure.
    rows = []
    for prefix in (NEED_PREFIX, EFFECT_PREFIX, INTENT_PREFIX, REACT_PREFIX):
        # obj.predict returns a sequence; the first candidate is the answer.
        pred = obj.predict("{}{}".format(prefix, event), do_sample=do_sample)[0]
        rows.append({"PREFIX": prefix, "PRED": pred})
    return {
        "Output": rows
    }

# Markdown heading size used when composing the (currently unused) rich description.
markdown_exp_size = "##"
lora_repo = "svjack/chatglm3-few-shot"
# Repo path plus query string that pre-selects input example #6 in the other Space.
lora_repo_link = "svjack/chatglm3-few-shot/?input_list_index=6"
# NOTE(review): network call at import time — fetches the Space card to read its
# emoji; will raise if the Hub is unreachable. Only used by `description` below,
# which is itself commented out at the gr.Interface call site.
emoji_info = space_info(lora_repo).__dict__["cardData"]["emoji"]
space_cnt = 1
task_name = "[---English Comet Atomic---]"
description = f"{markdown_exp_size} {task_name} few shot prompt in ChatGLM3 Few Shot space repo (click submit to activate) : [{lora_repo_link}](https://huggingface.co/spaces/{lora_repo_link}) {emoji_info}"


# Gradio UI: one text box for the event, one checkbox for sampling; the four
# relation predictions are rendered as JSON.
demo = gr.Interface(
        fn=demo_func,
        inputs=[gr.Text(label = "Event"),
                gr.Checkbox(label="do sample"),
        ],
        outputs="json",
        title=f"English Comet Atomic 🦅 demonstration",
        # Fixed user-facing typo: "was **drive** from" -> "was **derived** from".
        description = 'This _example_ was **derived** from <br/><b><h4>[https://github.com/svjack/COMET-ATOMIC-En-Zh](https://github.com/svjack/COMET-ATOMIC-En-Zh)</h4></b>\n',
        #description = description,
        examples=example_sample if example_sample else None,
        # Examples are not pre-computed: each prediction runs the model on CPU.
        cache_examples = False
    )

# Embed the companion ChatGLM3 few-shot Space (example #6 pre-selected) below
# the main interface via an iframe.
with demo:
    gr.HTML(
    '''
                <div style="justify-content: center; display: flex;">
                <iframe
                src="https://svjack-chatglm3-few-shot-demo.hf.space/?input_list_index=6"
                frameborder="0"
                width="1400"
                height="768"
                ></iframe>
                </div>
    '''
    )

# None for server_name/server_port lets Gradio fall back to its defaults
# (or the GRADIO_SERVER_NAME / GRADIO_SERVER_PORT environment variables).
demo.launch(server_name=None, server_port=None)