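"""Gradio demo for the akoksal/LongForm-OPT-2.7B instruction-following model."""
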
import gradio as gr
from transformers import AutoTokenizer, pipeline

# Example prompts for the demo; clicking one fills the input box below.
examples = [
    "Write an essay about meditation.",
    "Give me 5 steps to clean my room.",
    "How are the continents formed?",
    "Prompt: A man draws a gun in a dark alley and asks for your wallet. You begrudgingly obey. He throws it on the ground, shoots it till it screeches, and turns to you; 'you are safe now'. Write a story about given prompt.",
    "Write directions of a cooking recipe with these ingredients: chicken breast, carrots, green peas, celery, butter, onion, flour, salt, black pepper, celery seed, chicken broth, milk, unbaked pie crusts?",
    "Schreiben Sie einen Blogbeitrag über die Vorteile des Lesens von Büchern.",
]


# Load the LongForm-OPT-2.7B tokenizer and generation pipeline once at startup.
tokenizer = AutoTokenizer.from_pretrained("akoksal/LongForm-OPT-2.7B")
generate = pipeline("text-generation", model="akoksal/LongForm-OPT-2.7B", tokenizer=tokenizer)

def predict(instruction, topp, max_length, temperature):
    # LongForm models mark the end of the instruction with an [EOI] token;
    # append it if the user did not include one.
    if "[EOI]" not in instruction:
        instruction = instruction + " [EOI]"
    x = generate(
        instruction,
        do_sample=True,
        top_p=topp,
        num_return_sequences=1,
        max_length=max_length,
        temperature=temperature,
    )[0]["generated_text"]
    # The pipeline echoes the prompt, so return only the generated continuation.
    return x[len(instruction):]

def process_example(instruction):
    # Used by gr.Examples: generate with the default slider settings
    # (top-p 0.90, max length 64, temperature 1.0).
    return predict(instruction, topp=0.90, max_length=64, temperature=1.0)


with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown(
            """Hello"""
        )
        with gr.Row():
            with gr.Column(scale=3):
                instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
                with gr.Box():
                    gr.Markdown("**Answer**")
                    output = gr.Markdown(elem_id="q-output")
                submit = gr.Button("Generate", variant="primary")
                gr.Examples(
                    examples=examples,
                    inputs=[instruction],
                    cache_examples=False,
                    fn=process_example,
                    outputs=[output],
                )

            with gr.Column(scale=1):
                top_p = gr.Slider(
                    label="Top-p (nucleus sampling)",
                    value=0.90,
                    minimum=0.0,
                    maximum=1,
                    step=0.05,
                    interactive=True,
                    info="Higher values sample low-probability tokens",
                )
                max_length = gr.Slider(
                    label="Max length",
                    value=64,
                    minimum=1,
                    maximum=512,
                    step=4,
                    interactive=True,
                    info="The maximum length of the output",
                )
                temperature = gr.Slider(
                    label="Temperature",
                    value=1.0,
                    minimum=0.0,
                    maximum=2.0,
                    step=0.1,
                    interactive=True,
                    info="Higher values sample more diverse outputs",
                )

    submit.click(predict, inputs=[instruction, top_p, max_length, temperature], outputs=[output])


demo.queue(concurrency_count=4)
demo.launch()
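
# A minimal usage sketch, assuming the model weights download successfully:
#   predict("Give me 5 steps to clean my room.", topp=0.90, max_length=64, temperature=1.0)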