Sebbe33 committed on
Commit 7d423f2 · verified · 1 Parent(s): d4d4335

Update app.py

Files changed (1)
  1. app.py +130 -137
app.py CHANGED
@@ -1,141 +1,134 @@
  import streamlit as st
- from langchain.chat_models import ChatOpenAI
- from langchain.schema import AIMessage, HumanMessage
-
- # (Optional) If you're using Anthropic
- # from langchain.chat_models import ChatAnthropic
-
- # Placeholder functions for other LLMs (DeepSeek, Gemini, Ollama, etc.)
- # Implement or import your own logic here.
- def get_deepseek_llm(api_key: str):
-     """
-     TODO: Implement your DeepSeek integration.
-     """
-     # return your DeepSeek LLM client
-     pass
-
- def get_gemini_llm(api_key: str):
-     """
-     TODO: Implement your Gemini integration.
-     """
-     # return your Gemini LLM client
-     pass
-
- def get_ollama_llm():
-     """
-     TODO: Implement your local Ollama integration.
-     Possibly specify a port, endpoint, etc.
-     """
-     # return your Ollama LLM client
-     pass
-
- def get_claude_llm(api_key: str):
-     """
-     Example for Anthropic's Claude
-     """
-     # If you installed anthropic: pip install anthropic
-     # from langchain.chat_models import ChatAnthropic
-     # llm = ChatAnthropic(anthropic_api_key=api_key)
-     # return llm
-     pass
-
- def load_llm(selected_model: str, api_key: str):
-     """
-     Returns the LLM object depending on user selection.
-     """
-     if selected_model == "OpenAI":
-         # Use OpenAI ChatModel
-         # By default uses GPT-3.5. You can pass model_name="gpt-4" if you have access.
-         llm = ChatOpenAI(temperature=0.7, openai_api_key=api_key)
-
-     elif selected_model == "Claude":
-         # llm = get_claude_llm(api_key) # Uncomment once implemented
-         llm = None # Placeholder
-         st.warning("Claude is not implemented. Implement the get_claude_llm function.")
-
-     elif selected_model == "Gemini":
-         # llm = get_gemini_llm(api_key) # Uncomment once implemented
-         llm = None
-         st.warning("Gemini is not implemented. Implement the get_gemini_llm function.")
-
-     elif selected_model == "DeepSeek":
-         # llm = get_deepseek_llm(api_key) # Uncomment once implemented
-         llm = None
-         st.warning("DeepSeek is not implemented. Implement the get_deepseek_llm function.")
-
-     elif selected_model == "Ollama (local)":
-         # llm = get_ollama_llm() # Uncomment once implemented
-         llm = None
-         st.warning("Ollama is not implemented. Implement the get_ollama_llm function.")
-
-     else:
-         llm = None
-
-     return llm
-
- def initialize_session_state():
-     """
-     Initialize the session state for storing conversation history.
-     """
-     if "messages" not in st.session_state:
-         st.session_state["messages"] = []
-
- def main():
-     st.title("Multi-LLM Chat App")
-
-     # Sidebar for model selection and API key
-     st.sidebar.header("Configuration")
-     selected_model = st.sidebar.selectbox(
-         "Select an LLM",
-         ["OpenAI", "Claude", "Gemini", "DeepSeek", "Ollama (local)"]
-     )
-     api_key = st.sidebar.text_input("API Key (if needed)", type="password")
-
-     st.sidebar.write("---")
-     if st.sidebar.button("Clear Chat"):
-         st.session_state["messages"] = []
-
-     # Initialize conversation in session state
-     initialize_session_state()
-
-     # Load the chosen LLM
-     llm = load_llm(selected_model, api_key)
-
-     # Display existing conversation
-     for msg in st.session_state["messages"]:
-         if msg["role"] == "user":
-             st.markdown(f"**You:** {msg['content']}")
-         else:
-             st.markdown(f"**LLM:** {msg['content']}")
-
-     # User input
-     user_input = st.text_input("Type your message here...", "")
-
-     # On submit
-     if st.button("Send"):
-         if user_input.strip() == "":
-             st.warning("Please enter a message before sending.")
          else:
-             # Add user message to conversation history
-             st.session_state["messages"].append({"role": "user", "content": user_input})
-
              if llm is None:
-                 st.error("LLM is not configured or implemented for this choice.")
-             else:
-                 # Prepare messages in a LangChain format
-                 lc_messages = []
-                 for msg in st.session_state["messages"]:
-                     if msg["role"] == "user":
-                         lc_messages.append(HumanMessage(content=msg["content"]))
-                     else:
-                         lc_messages.append(AIMessage(content=msg["content"]))
-
-                 # Call the LLM
-                 response = llm(lc_messages)
-                 # Add LLM response to conversation
-                 st.session_state["messages"].append({"role": "assistant", "content": response.content})
-
- # End
-
- if __name__ == "__main__":
-     main()
  import streamlit as st
+ from langchain_core.messages import HumanMessage, AIMessage
+
+ def get_llm(provider, config):
+     """Initialize the selected LLM with configuration"""
+     try:
+         if provider == "OpenAI":
+             from langchain_openai import ChatOpenAI
+             return ChatOpenAI(
+                 api_key=config.get("api_key"),
+                 model=config.get("model_name", "gpt-3.5-turbo")
+             )
+         elif provider == "Anthropic":
+             from langchain_anthropic import ChatAnthropic
+             return ChatAnthropic(
+                 api_key=config.get("api_key"),
+                 model=config.get("model_name", "claude-3-sonnet-20240229")
+             )
+         elif provider == "Gemini":
+             from langchain_google_genai import ChatGoogleGenerativeAI
+             return ChatGoogleGenerativeAI(
+                 google_api_key=config.get("api_key"),
+                 model=config.get("model_name", "gemini-pro")
+             )
+         elif provider == "DeepSeek":
+             from langchain_openai import ChatOpenAI
+             return ChatOpenAI(
+                 api_key=config.get("api_key"),
+                 base_url=config.get("base_url", "https://api.deepseek.com/v1"),
+                 model=config.get("model_name", "deepseek-chat")
+             )
+         elif provider == "Ollama":
+             from langchain_community.chat_models import ChatOllama
+             return ChatOllama(
+                 base_url=config.get("base_url", "http://localhost:11434"),
+                 model=config.get("model_name", "llama2")
+             )
          else:
+             raise ValueError("Selected provider is not supported")
+     except ImportError as e:
+         st.error(f"Missing required package: {e}")
+         return None
+
+ # Initialize chat history
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Sidebar configuration
+ with st.sidebar:
+     st.title("⚙️ LLM Configuration")
+     provider = st.selectbox(
+         "Select Provider",
+         ["OpenAI", "Anthropic", "Gemini", "DeepSeek", "Ollama"]
+     )

+     config = {}
+     if provider in ["OpenAI", "Anthropic", "Gemini", "DeepSeek"]:
+         config["api_key"] = st.text_input(
+             f"{provider} API Key",
+             type="password",
+             help=f"Get your API key from {provider}'s platform"
+         )
+         if provider == "DeepSeek":
+             config["base_url"] = st.text_input(
+                 "API Base URL",
+                 "https://api.deepseek.com/v1"
+             )
+
+         # Model name input with provider-specific defaults
+         default_models = {
+             "OpenAI": "gpt-3.5-turbo",
+             "Anthropic": "claude-3-sonnet-20240229",
+             "Gemini": "gemini-pro",
+             "DeepSeek": "deepseek-chat"
+         }
+         config["model_name"] = st.text_input(
+             "Model Name",
+             value=default_models.get(provider, "")
+         )
+     elif provider == "Ollama":
+         config["model_name"] = st.text_input(
+             "Model Name",
+             value="llama2",
+             help="Make sure the model is available in your Ollama instance"
+         )
+         config["base_url"] = st.text_input(
+             "Ollama Base URL",
+             "http://localhost:11434",
+             help="URL where your Ollama server is running"
+         )
+
+ # Main chat interface
+ st.title("💬 LLM Chat Interface")
+
+ # Display chat messages
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Handle user input
+ if prompt := st.chat_input("Type your message..."):
+     # Add user message to chat history
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+     # Display user message
+     with st.chat_message("user"):
+         st.markdown(prompt)
+
+     # Generate response
+     with st.spinner("Thinking..."):
+         try:
+             llm = get_llm(provider, config)
              if llm is None:
+                 st.error("Failed to initialize LLM. Check configuration.")
+                 st.stop()
+
+             # Convert messages to LangChain format
+             lc_messages = [
+                 HumanMessage(content=msg["content"]) if msg["role"] == "user"
+                 else AIMessage(content=msg["content"])
+                 for msg in st.session_state.messages
+             ]
+
+             # Get LLM response
+             response = llm.invoke(lc_messages)
+
+             # Display and store assistant response
+             with st.chat_message("assistant"):
+                 st.markdown(response.content)
+             st.session_state.messages.append(
+                 {"role": "assistant", "content": response.content}
+             )
+         except Exception as e:
+             st.error(f"Error generating response: {str(e)}")
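
The rewrite imports each provider's package lazily inside get_llm, so only the package for the selected provider (langchain-openai, langchain-anthropic, langchain-google-genai, or langchain-community for Ollama) needs to be installed; missing ones surface through the ImportError handler. A minimal sketch of the same history-conversion-plus-invoke flow outside Streamlit (not part of the commit; the sample history and model choice are illustrative, and OPENAI_API_KEY is assumed to be set):

    import os
    from langchain_core.messages import AIMessage, HumanMessage
    from langchain_openai import ChatOpenAI

    # Role-tagged history in the same shape as st.session_state.messages
    history = [
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi! How can I help?"},
        {"role": "user", "content": "Name one Streamlit feature."},
    ]

    # Same dict-to-LangChain conversion the app performs before calling invoke()
    lc_messages = [
        HumanMessage(content=m["content"]) if m["role"] == "user"
        else AIMessage(content=m["content"])
        for m in history
    ]

    llm = ChatOpenAI(api_key=os.environ["OPENAI_API_KEY"], model="gpt-3.5-turbo")
    print(llm.invoke(lc_messages).content)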
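
The Ollama branch is the one path that needs no API key, only a reachable server with the model already pulled; a hypothetical smoke test for that branch, reusing the defaults from the code above:

    from langchain_community.chat_models import ChatOllama
    from langchain_core.messages import HumanMessage

    # Assumes `ollama serve` is running locally and the llama2 model has been pulled
    llm = ChatOllama(base_url="http://localhost:11434", model="llama2")
    print(llm.invoke([HumanMessage(content="Say hi in five words.")]).content)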