import gradio as gr
from gpt4all import GPT4All
from urllib.request import urlopen
import json
import time
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"
response = urlopen(url)
data_json = json.loads(response.read())
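# data_json is a list of model-metadata dicts; the helpers below rely on
# the 'filename' and 'description' keys of each entry.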
def model_choices():
    model_list = [model['filename'] for model in data_json]
    return model_list
model_description = {model['filename']: model['description'] for model in data_json}
def llm_intro(selected_model):
    return model_description.get(selected_model, "No description available for this model selection.")
model_cache = {}  # Global cache of loaded models, keyed by model filename

def load_model(model_name):
    """
    Check the cache before loading a model.
    If the model is cached, return the cached instance;
    otherwise load it, cache it, and return it.
    """
    if model_name not in model_cache:
        model = GPT4All(model_name)
        model_cache[model_name] = model
    return model_cache[model_name]
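# Note: constructing GPT4All(model_name) loads the weights into memory and,
# by default, downloads the model file on first use, so the first request
# for a given model can be slow; later requests are served from model_cache.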
def generate_text(input_text, selected_model):
    """
    Generate text using the selected model.
    Models are loaded through the caching mechanism above.
    """
    model = load_model(selected_model)
    output = model.generate(input_text, max_tokens=100)
    return output
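# Sketch (not wired into the UI): the gpt4all bindings also support streaming,
# where generate(..., streaming=True) returns an iterator of tokens as they
# are produced; assuming the documented streaming API:
#     for token in load_model(selected_model).generate(prompt, max_tokens=100, streaming=True):
#         print(token, end="", flush=True)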
# model_dropdown = gr.Dropdown(choices=model_choices(),
#                              multiselect=False,
#                              label="LLMs to choose from",
#                              type="value",
#                              value="orca-mini-3b-gguf2-q4_0.gguf")
# explanation = gr.Textbox(label="Model Description", interactive=True, lines=10)
# model_dropdown.change(fn=llm_intro, inputs=model_dropdown, outputs=explanation)
# input_text = gr.Textbox(lines=5, label="Input Text")
# output_text = gr.Textbox(lines=5, label="Generated Text")
# gr.Interface(fn=generate_text,
#              inputs=[input_text, model_dropdown],
#              outputs=output_text,
#              theme=gr.themes.Soft(),
#              analytics_enabled=True,
#              title="GPT4All Text Generation Experiment").launch()
# with gr.Blocks() as demo:
#     gr.Markdown("## GPT4All Text Generation Experiment")
#     with gr.Row():
#         model_selection = gr.Dropdown(choices=model_choices(),
#                                       multiselect=False,
#                                       label="LLMs to choose from",
#                                       type="value",
#                                       value="orca-mini-3b-gguf2-q4_0.gguf")
#         explanation = gr.Textbox(label="Model Description", lines=3, interactive=False,
#                                  value=llm_intro("orca-mini-3b-gguf2-q4_0.gguf"))
#     # Link the dropdown with the textbox to update the description based on the selected model
#     model_selection.change(fn=llm_intro, inputs=model_selection, outputs=explanation)
#     chatbot = gr.Chatbot()
#     input_text = gr.Textbox(lines=3, label="Press Shift+Enter to submit")
#     # output_text = gr.Textbox(lines=10, label="Generated Text")
#     clear = gr.ClearButton([input_text, chatbot])
#     def respond(message, chat_history, selected_model):
#         bot_message = generate_text(message, selected_model)
#         chat_history.append((message, bot_message))
#         time.sleep(2)  # Simulate processing delay if necessary
#         return bot_message, chat_history
#     input_text.submit(respond, [input_text, chatbot, model_selection], [chatbot])
#     # # Button to generate text
#     # generate_btn = gr.Button("Generate")
#     # generate_btn.click(fn=generate_text, inputs=[input_text, model_selection], outputs=output_text)
# demo.launch()
# Define the chatbot function
def chatbot(model_name, message, chat_history):
    chat_history = chat_history or []  # gr.State() starts out as None on the first turn
    model = load_model(model_name)
    # generate() expects a prompt string; its second positional argument is
    # max_tokens, so the history cannot be passed there.
    response = model.generate(message, max_tokens=200)
    chat_history.append((message, response))
    # Return the history twice: once for the Chatbot display, once for the State
    return chat_history, chat_history
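# Note: generate() above only sees the latest message. For genuine multi-turn
# context, the gpt4all bindings offer a chat_session() context manager; a
# minimal sketch, assuming the documented API:
#     with model.chat_session():
#         response = model.generate(message, max_tokens=200)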
# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# GPT4All Chatbot")
    with gr.Row():
        with gr.Column(scale=1):
            model_dropdown = gr.Dropdown(
                choices=model_choices(),
                multiselect=False,
                type="value",
                value="orca-mini-3b-gguf2-q4_0.gguf",
                label="LLMs to choose from"
            )
        with gr.Column(scale=4):
            # Named chatbot_ui so the component does not shadow the chatbot() function above
            chatbot_ui = gr.Chatbot(label="Conversation")
            message = gr.Textbox(label="Message")
            state = gr.State()
    message.submit(chatbot, inputs=[model_dropdown, message, state], outputs=[chatbot_ui, state])
# Launch the Gradio app
demo.launch()
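# To share the app outside localhost, Gradio also supports
# demo.launch(share=True), which creates a temporary public link.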