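"""Streamlit chat interface with switchable LLM backends via LangChain.

Supported providers: OpenAI, Anthropic, Gemini, DeepSeek (through its
OpenAI-compatible API), and a local Ollama server. API keys, base URLs,
and model names are configured in the sidebar; chat history is kept in
st.session_state so it survives Streamlit's script reruns.
"""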
import streamlit as st
from langchain_core.messages import HumanMessage, AIMessage

def get_llm(provider, config):
    """Initialize the selected LLM with configuration"""
    try:
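        # Import lazily so only the selected provider's integration package needs to be installed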
        if provider == "OpenAI":
            from langchain_openai import ChatOpenAI
            return ChatOpenAI(
                api_key=config.get("api_key"),
                model=config.get("model_name", "gpt-3.5-turbo")
            )
        elif provider == "Anthropic":
            from langchain_anthropic import ChatAnthropic
            return ChatAnthropic(
                api_key=config.get("api_key"),
                model=config.get("model_name", "claude-3-sonnet-20240229")
            )
        elif provider == "Gemini":
            from langchain_google_genai import ChatGoogleGenerativeAI
            return ChatGoogleGenerativeAI(
                google_api_key=config.get("api_key"),
                model=config.get("model_name", "gemini-pro")
            )
        elif provider == "DeepSeek":
            from langchain_openai import ChatOpenAI
            return ChatOpenAI(
                api_key=config.get("api_key"),
                base_url=config.get("base_url", "https://api.deepseek.com/v1"),
                model=config.get("model_name", "deepseek-chat")
            )
        elif provider == "Ollama":
            from langchain_community.chat_models import ChatOllama
            return ChatOllama(
                base_url=config.get("base_url", "http://localhost:11434"),
                model=config.get("model_name", "llama2")
            )
        else:
            raise ValueError("Selected provider is not supported")
    except ImportError as e:
        st.error(f"Missing required package: {e}")
        return None

# Initialize chat history
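# st.session_state persists across Streamlit's script reruns, so the conversation survives each interaction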
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar configuration
with st.sidebar:
    st.title("⚙️ LLM Configuration")
    provider = st.selectbox(
        "Select Provider",
        ["OpenAI", "Anthropic", "Gemini", "DeepSeek", "Ollama"]
    )

    config = {}
    if provider in ["OpenAI", "Anthropic", "Gemini", "DeepSeek"]:
        config["api_key"] = st.text_input(
            f"{provider} API Key", 
            type="password",
            help=f"Get your API key from {provider}'s platform"
        )
        if provider == "DeepSeek":
            config["base_url"] = st.text_input(
                "API Base URL",
                "https://api.deepseek.com/v1"
            )
        
        # Model name input with provider-specific defaults
        default_models = {
            "OpenAI": "gpt-3.5-turbo",
            "Anthropic": "claude-3-sonnet-20240229",
            "Gemini": "gemini-pro",
            "DeepSeek": "deepseek-chat"
        }
        config["model_name"] = st.text_input(
            "Model Name", 
            value=default_models.get(provider, "")
        )
    elif provider == "Ollama":
        config["model_name"] = st.text_input(
            "Model Name", 
            value="llama2",
            help="Make sure the model is available in your Ollama instance"
        )
        config["base_url"] = st.text_input(
            "Ollama Base URL",
            "http://localhost:11434",
            help="URL where your Ollama server is running"
        )

# Main chat interface
st.title("💬 LLM Chat Interface")

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle user input
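# st.chat_input returns None until the user submits text, so the walrus assignment gates the whole turn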
if prompt := st.chat_input("Type your message..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    
    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Initialize the LLM before entering the try/except below: st.stop()
    # works by raising an exception, so calling it inside a block guarded by
    # `except Exception` would be caught and reported as a spurious error
    llm = get_llm(provider, config)
    if llm is None:
        st.error("Failed to initialize LLM. Check configuration.")
        st.stop()

    # Generate response
    with st.spinner("Thinking..."):
        try:
            # Convert messages to LangChain format
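            # (dict roles map to HumanMessage/AIMessage; the just-submitted prompt is already in session_state)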
            lc_messages = [
                HumanMessage(content=msg["content"]) if msg["role"] == "user"
                else AIMessage(content=msg["content"])
                for msg in st.session_state.messages
            ]

            # Get LLM response
            response = llm.invoke(lc_messages)
            
            # Display and store assistant response
            with st.chat_message("assistant"):
                st.markdown(response.content)
            st.session_state.messages.append(
                {"role": "assistant", "content": response.content}
            )
        except Exception as e:
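            # The failed user turn stays in history without a reply; some providers
            # reject consecutive same-role messages, so popping the last entry from
            # st.session_state.messages here would be a reasonable hardening step.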
            st.error(f"Error generating response: {str(e)}")