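"""Gradio chat app: talk to a local Ollama model (selected from a dropdown
populated from models.txt) through LangChain."""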
from langchain.schema import AIMessage, HumanMessage
import gradio as gr
from langchain_community.llms import Ollama
import time
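# models.txt is expected to contain one model per line in the form
#   <ollama-model-name> # <human-readable description>
# e.g. "llama3 # Meta Llama 3" (the model name here is only illustrative).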
def parse_model_names(path):
    """Parses the model file to extract (label, value) pairs for the dropdown."""
    choices = []
    with open(path, 'r') as file:
        lines = file.readlines()
        for line in lines:
            if '#' in line:
                value, description = line.split('#', 1)
                value = value.strip()
                description = description.strip()
                choices.append((description, value))
    return choices
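# List of (description, model_name) tuples; the dropdown shows the description
# and, with type="index", hands the selected index to the callback.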
models = parse_model_names("models.txt")
def predict(history, message, model_index):
    """Generates a reply with the selected Ollama model and fills in the newest history entry."""
    print("Predicting", history, message, model_index)
    llm = Ollama(model=models[model_index][1], timeout=1000)  # Instantiate Ollama with the selected model
    history_langchain_format = []
    for user_msg, ai_msg in history:
        # File uploads arrive as a tuple (filepath,), so stringify the user turn
        history_langchain_format.append(HumanMessage(content=str(user_msg)))
        if ai_msg is not None:
            history_langchain_format.append(AIMessage(content=ai_msg))
    try:
        chat_response = llm.invoke(history_langchain_format)
    except Exception as e:  # Surface any backend error directly in the chat window
        chat_response = "Error: " + str(e)
    # add_message left the newest entry as (user_text, None); fill in the response
    history[-1] = (history[-1][0], chat_response)
    return history
# with gr.Blocks(fill_height=True) as demo:
#     with gr.Row():
#         def update_model(selected_model):
#             print("Model selected", selected_model)
#             model_state.value = selected_model
#             return selected_model
#         chat = gr.ChatInterface(predict,
#                                 additional_inputs=[ model_dropdown ],
#                                 )
def print_like_dislike(x: gr.LikeData):
    print(x.index, x.value, x.liked)
def add_message(history, message):
    """Appends uploaded files and typed text to the chat history, then locks the input."""
    for x in message["files"]:
        history.append(((x,), None))
    if message["text"] is not None:
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)
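# UI wiring: submitting the textbox appends the message and disables the input (add_message),
# then predict generates the bot reply, then the textbox is re-enabled.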
with gr.Blocks() as demo:
    model_dropdown = gr.Dropdown(label="Select LLM Model", choices=models, info="Select the model you want to chat with", type="index")
    model_state = gr.State(value=model_dropdown.value)

    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False
    )
    chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)

    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(predict, [chatbot, chat_input, model_dropdown], chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    chatbot.like(print_like_dislike, None, None)
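# Enable Gradio's request queue so long-running generation calls are handled properly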
demo.queue()
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)