Update app.py
app.py CHANGED
@@ -11,6 +11,11 @@ def model_choices():
     model_list = [data_json[i]['filename'] for i in range(len(data_json))]
     return model_list
 
+model_description = {model['filename']: model['description'] for model in data_json}
+
+def llm_intro(selected_model):
+    return llm_explanations.get(selected_model, "No description available for this model selection.")
+
 def generate_text(input_text, selected_model):
     model = GPT4All(selected_model)
     output = model.generate(input_text, max_tokens=100)
@@ -22,6 +27,8 @@ model_dropdown = gr.Dropdown(choices=model_choices(),
                              type="value",
                              value="orca-mini-3b-gguf2-q4_0.gguf")
 
+explanation = gr.Textbox(label="Model Description", interactive=True, lines=10).change(fn=llm_intro, inputs=[model_dropdown], outputs=[explanation])
+
 input_text = gr.Textbox(lines=5, label="Input Text")
 output_text = gr.Textbox(lines=5, label="Generated Text")
 
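For context, here is a minimal sketch of how the pieces added in this commit could fit together, filling in parts of app.py the diff does not show (how data_json is loaded and what container the components live in). Two details of the committed hunks are adjusted on the assumption that they are slips: llm_intro looks up llm_explanations while the new dict is named model_description, and the added Textbox line assigns the return value of .change(), which is an event dependency rather than the component, while also listing explanation in its own outputs before it exists. The GPT4All.list_models() call, the gr.Blocks layout, and the "Generate" button are assumptions for illustration, not part of the commit.

import gradio as gr
from gpt4all import GPT4All

# Assumption: data_json is the GPT4All model registry (a list of dicts with
# 'filename' and 'description' keys); the diff does not show how it is loaded.
data_json = GPT4All.list_models()


def model_choices():
    return [entry['filename'] for entry in data_json]


# The commit names this dict model_description but reads llm_explanations
# inside llm_intro; the sketch uses a single name for both sides.
model_description = {entry['filename']: entry['description'] for entry in data_json}


def llm_intro(selected_model):
    return model_description.get(selected_model, "No description available for this model selection.")


def generate_text(input_text, selected_model):
    model = GPT4All(selected_model)
    return model.generate(input_text, max_tokens=100)


# Assumption: the surrounding layout uses gr.Blocks; the diff only shows
# individual components, not their container.
with gr.Blocks() as demo:
    model_dropdown = gr.Dropdown(choices=model_choices(),
                                 type="value",
                                 value="orca-mini-3b-gguf2-q4_0.gguf")

    # Create the Textbox first, then attach the event: Dropdown.change()
    # returns an event dependency, not the Textbox, so chaining it onto the
    # constructor (as the committed line does) leaves `explanation` unusable.
    explanation = gr.Textbox(label="Model Description", interactive=True, lines=10)
    model_dropdown.change(fn=llm_intro, inputs=model_dropdown, outputs=explanation)

    input_text = gr.Textbox(lines=5, label="Input Text")
    output_text = gr.Textbox(lines=5, label="Generated Text")
    # Hypothetical trigger; the diff does not show how generate_text is hooked up.
    gr.Button("Generate").click(generate_text,
                                inputs=[input_text, model_dropdown],
                                outputs=output_text)

demo.launch()

If the original app builds its UI with gr.Interface rather than gr.Blocks, the dropdown's change event would need to be wired differently, so treat the layout above as illustrative only.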