import gradio as gr
from langchain.schema import AIMessage, HumanMessage
from langchain_community.llms import Ollama
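# Assumed environment: the gradio, langchain, and langchain-community packages
# are installed (e.g. pip install gradio langchain langchain-community).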

def parse_model_names(path):
    """Parse the model file into (label, value) choices for the dropdown."""
    choices = []
    with open(path, 'r') as file:
        for line in file:
            if '#' in line:
                value, description = line.split('#', 1)
                value = value.strip()
                description = description.strip()
                if value:  # skip comment-only lines with no model name
                    choices.append((description, value))
    return choices
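
# models.txt is assumed to hold one model per line in the form "value # label";
# hypothetical example contents:
#
#   llama2     # Llama 2 (7B)
#   mistral    # Mistral 7B Instruct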

models = parse_model_names("models.txt")


def predict(message, history, model):
    # `model` is the index of the selected dropdown choice (type="index").
    model_name = models[model][1]
    print("Predicting", message, history, model_name)
    llm = Ollama(model=model_name, timeout=1000)  # Instantiate Ollama with the selected model
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
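    # Worked example: history = [("Hi", "Hello!")] with message = "How are you?"
    # yields [HumanMessage("Hi"), AIMessage("Hello!"), HumanMessage("How are you?")].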
    try:
        chat_response = llm.invoke(history_langchain_format)
    except Exception as e:  # Report connection/model errors in the chat instead of crashing the UI
        chat_response = "Error: " + str(e)
        
    return chat_response



with gr.Blocks(fill_height=True) as demo:
    with gr.Row():
        # type="index" makes Gradio pass the selected choice's index to predict.
        model_dropdown = gr.Dropdown(
            label="Select LLM Model",
            choices=models,
            info="Select the model you want to chat with",
            type="index",
        )

    # ChatInterface re-reads the dropdown on every submit via additional_inputs,
    # so no separate gr.State is needed to track the current model.
    chat = gr.ChatInterface(predict, additional_inputs=[model_dropdown])
        

if __name__ == "__main__":
    # Bind to 0.0.0.0 so the app is reachable from other hosts/containers.
    demo.launch(server_name="0.0.0.0", server_port=7860)
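
# Assumed invocation (the script name app.py is hypothetical):
#   python app.py
# A local Ollama server must be running (default base URL: http://localhost:11434).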