import gradio as gr
from gpt4all import GPT4All
from urllib.request import urlopen
import json

# Fetch the published GPT4All model metadata (a JSON list of model entries)
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
response = urlopen(url)
data_json = json.loads(response.read())

def model_choices():
    # Build the dropdown options: one entry per model filename in the metadata
    model_list = [model['filename'] for model in data_json]
    return model_list

# Map each model filename to its human-readable description
model_description = {model['filename']: model['description'] for model in data_json}

def llm_intro(selected_model):
    # Look up the description for the chosen model filename
    return model_description.get(selected_model, "No description available for this model selection.")

def generate_text(input_text, selected_model):
    # Load the selected model (GPT4All downloads the weights on first use)
    model = GPT4All(selected_model)
    output = model.generate(input_text, max_tokens=100)
    return output
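
# Optional sketch (not wired into the UI below): generate_text() re-creates the
# GPT4All model on every call, so each generation pays the full model-load cost.
# A simple module-level cache, with illustrative names, would reuse loaded models:
_model_cache = {}

def generate_text_cached(input_text, selected_model):
    # Load the model only the first time it is requested, then reuse it
    if selected_model not in _model_cache:
        _model_cache[selected_model] = GPT4All(selected_model)
    return _model_cache[selected_model].generate(input_text, max_tokens=100)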

# Earlier single-call gr.Interface prototype, kept for reference; the gr.Blocks
# layout below supersedes it.
# model_dropdown = gr.Dropdown(choices=model_choices(),
#                              multiselect=False,
#                              label="LLMs to choose from",
#                              type="value",
#                              value="orca-mini-3b-gguf2-q4_0.gguf")

# explanation = gr.Textbox(label="Model Description", interactive=True, lines=10)

# model_dropdown.change(fn=llm_intro, inputs=model_dropdown, outputs=explanation)

# input_text = gr.Textbox(lines=5, label="Input Text")
# output_text = gr.Textbox(lines=5, label="Generated Text")

# gr.Interface(fn=generate_text,
#              inputs=[input_text, model_dropdown],
#              outputs=output_text,
#              theme=gr.themes.Soft(),
#              analytics_enabled=True,
#              title="GPT4All Text Generation Experiment").launch()

with gr.Blocks() as demo:
    gr.Markdown("## GPT4All Text Generation Experiment")
    
    # Dropdown to select model
    model_selection = gr.Dropdown(choices=model_choices(),
                                  multiselect=False,
                                  label="LLMs to choose from",
                                  type="value",
                                  value="orca-mini-3b-gguf2-q4_0.gguf")
    
    # Textbox to show model description
    explanation = gr.Textbox(label="Model Description", lines=10, interactive=False, value=llm_intro("orca-mini-3b-gguf2-q4_0.gguf"))
    
    # Link the dropdown with the textbox to update the description based on the selected model
    model_selection.change(fn=llm_intro, inputs=model_selection, outputs=explanation)
    
    # Input text for generating text
    input_text = gr.Textbox(lines=10, label="Input Text")
    
    # Textbox to display generated text
    output_text = gr.Textbox(lines=10, label="Generated Text")
    
    # Button to generate text
    generate_btn = gr.Button("Generate")
    
    generate_btn.click(fn=generate_text, inputs=[input_text, model_selection], outputs=output_text)

demo.launch()
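
# Note: demo.launch(share=True) would also create a temporary public Gradio link;
# the plain launch() above serves the app locally.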