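"""Gradio chat UI for local Ollama models via LangChain.

Model tags are read from models.txt, the user picks one from a dropdown,
and the conversation is relayed to the selected model.
"""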
import gradio as gr
from langchain.schema import AIMessage, HumanMessage
from langchain_community.llms import Ollama
def parse_model_names(path):
    """Parse the model file into (label, value) pairs for the dropdown.

    Each line is expected to look like "<model tag> # <description>";
    lines without a '#' separator are skipped.
    """
    choices = []
    with open(path, "r") as file:
        for line in file:
            if "#" in line:
                value, description = line.split("#", 1)
                value = value.strip()
                description = description.strip()
                if value:  # skip comment-only lines that have no model tag
                    choices.append((description, value))
    return choices
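# Example models.txt contents (hypothetical; any tags already pulled into
# your local Ollama instance will work):
#   llama3:8b  # Llama 3 8B - general chat
#   mistral:7b # Mistral 7B - fast responses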
models = parse_model_names("models.txt")
def predict(message, history, model):
    """Chat handler: forwards the conversation to the model picked in the dropdown."""
    print("Predicting", message, history, models[model][1])
    # The dropdown uses type="index", so `model` is an index into `models`.
    llm = Ollama(model=models[model][1], timeout=1000)  # Instantiate Ollama with the selected model
    # Convert Gradio's [user, assistant] history pairs into LangChain messages.
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    try:
        chat_response = llm.invoke(history_langchain_format)
    except Exception as e:  # Surface connection/timeout errors in the chat window
        chat_response = "Error: " + str(e)
    return chat_response
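# For reference, gr.ChatInterface passes history as [user, assistant] pairs
# (tuples format), e.g. (illustrative):
#   [["Hi", "Hello! How can I help?"], ["Thanks", "Anytime."]]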
with gr.Blocks(fill_height=True) as demo:
    with gr.Row():
        model_dropdown = gr.Dropdown(
            label="Select LLM Model",
            choices=models,
            info="Select the model you want to chat with",
            type="index",  # predict() receives the selected index, not the label
        )
    # The dropdown is passed straight to predict() via additional_inputs,
    # so no separate gr.State is needed to track the current model.
    chat = gr.ChatInterface(
        predict,
        additional_inputs=[model_dropdown],
    )
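# Bind to all interfaces on port 7860 so the app is reachable from outside
# the container, e.g. when deployed as a Hugging Face Space.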
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)