melissalau committed
Commit 97ccb08 · 1 Parent(s): 748bb20

project restructuring so there'll be no submodule
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.streamlit/config.toml ADDED
@@ -0,0 +1,2 @@
+ [browser]
+ gatherUsageStats = false
Dockerfile ADDED
@@ -0,0 +1,20 @@
+ FROM python:3.13.5-slim
+
+ WORKDIR /app
+
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     curl \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ COPY requirements.txt ./
+ RUN pip3 install -r requirements.txt
+
+ COPY src/ ./src/
+
+ EXPOSE 8501
+
+ HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
+
+ ENTRYPOINT ["streamlit", "run", "src/streamlit-ollama-chatbot.py", "--server.port=8501", "--server.address=0.0.0.0"]
README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ title: My Local Chatbot
+ emoji:
+ colorFrom: red
+ colorTo: blue
+ sdk: streamlit
+ sdk_version: 1.25.0
+ app_file: src/streamlit-ollama-chatbot.py
+ ---
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ altair
+ pandas
+ streamlit
+ langchain
+ langchain-ollama
src/streamlit-ollama-chatbot.py ADDED
@@ -0,0 +1,164 @@
+ import streamlit as st
+ from langchain_ollama import ChatOllama
+ from langchain.memory import ConversationBufferMemory
+ from langchain.memory.chat_message_histories import ChatMessageHistory
+ from langchain.prompts import PromptTemplate
+
+ # Streamlit Setup
+ st.set_page_config(layout="wide")
+ st.title("My Local Chatbot")
+
+ # Sidebar Inputs
+ st.sidebar.header("Settings")
+
+ # Dropdown for model selection
+ model_options = ["llama3:8b", "deepseek-r1:1.5b"]
+ MODEL = st.sidebar.selectbox("Choose a Model", model_options, index=0)
+
+ # Inputs for history + context size
+ MAX_HISTORY = st.sidebar.number_input("Max History", min_value=1, max_value=10, value=2, step=1)
+ CONTEXT_SIZE = st.sidebar.number_input("Context Size", min_value=1024, max_value=16384, value=8192, step=1024)
+
+ # Advanced Parameters
+ st.sidebar.subheader("Model Parameters")
+ TEMPERATURE = st.sidebar.slider("Temperature", 0.0, 1.5, 0.7, 0.1)
+ TOP_P = st.sidebar.slider("Top-p (nucleus sampling)", 0.0, 1.0, 0.9, 0.05)
+ TOP_K = st.sidebar.slider("Top-k", 0, 100, 40, 5)
+ MAX_TOKENS = st.sidebar.number_input("Max Tokens", min_value=256, max_value=16384, value=2048, step=256)
+
+ # Memory Controls
+ def clear_memory():
+     chat_history = ChatMessageHistory()
+     st.session_state.memory = ConversationBufferMemory(chat_memory=chat_history)
+     st.session_state.chat_history = []
+     st.session_state.summary = ""
+
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+ if "memory" not in st.session_state:
+     st.session_state.memory = ConversationBufferMemory(return_messages=True)
+ # Initialize the running conversation summary
+ if "summary" not in st.session_state:
+     st.session_state.summary = ""
+
+ # Button to clear memory manually
+ if st.sidebar.button("Clear Conversation History"):
+     clear_memory()
+
+ # LangChain LLM Setup
+ llm = ChatOllama(
+     model=MODEL,
+     streaming=True,
+     temperature=TEMPERATURE,
+     top_p=TOP_P,
+     top_k=TOP_K,
+     num_ctx=CONTEXT_SIZE,  # wire the sidebar context-size control to the model
+     num_predict=MAX_TOKENS
+ )
+
+ # ---
+ # Summary Chain and Functions
+
+ # Prompt Template for summarization
+ summary_prompt_template = PromptTemplate(
+     input_variables=["chat_history"],
+     template="You are a summarizer. Summarize the following conversation to preserve key information and context.\n\n{chat_history}"
+ )
+
+ # Chain for summarization
+ summary_chain = summary_prompt_template | llm
+
+
+ def get_summary(chat_history_str):
+     """Generates a summary of the conversation history."""
+     # .content pulls the text out of the AIMessage the chat model returns,
+     # so callers can concatenate the summary with plain strings
+     return summary_chain.invoke({"chat_history": chat_history_str}).content
+
+ def summarize_chat():
+     if not st.session_state.chat_history:
+         return "No chat history to summarize."
+
+     # Format the chat history into a single string for the summary prompt
+     history_str = "\n".join(f"{msg['role']}: {msg['content']}" for msg in st.session_state.chat_history)
+     return get_summary(history_str)
+
+
+ if st.sidebar.button("Summarize Chat"):
+     with st.sidebar:
+         st.markdown("**Chat Summary:**")
+         summary = summarize_chat()
+         st.success(summary)
+ # ---
+
+ # Main Prompt Template
+ # Includes the running summary alongside the recent history
+ prompt_template = PromptTemplate(
+     input_variables=["summary", "history", "human_input"],
+     template="""You are a helpful assistant.
+ Current conversation summary:
+ {summary}
+
+ Conversation history:
+ {history}
+
+ User: {human_input}
+ Assistant:"""
+ )
+
+ chain = prompt_template | llm
+
+ # Display Chat History
+ for msg in st.session_state.chat_history:
+     with st.chat_message(msg["role"]):
+         st.markdown(msg["content"])
+
+ # Trim Function: summarize messages that fall outside the history window
+ def trim_memory():
+     # Each exchange is two entries (user + assistant), hence MAX_HISTORY * 2
+     if len(st.session_state.chat_history) > MAX_HISTORY * 2:
+         # Messages beyond the window get summarized instead of kept verbatim
+         history_to_summarize = st.session_state.chat_history[:-MAX_HISTORY * 2]
+
+         # Format the history string for the summary prompt
+         history_str = ""
+         for msg in history_to_summarize:
+             history_str += f"{msg['role']}: {msg['content']}\n"
+
+         # Fold the old messages into the running summary
+         new_summary = get_summary(history_str)
+         st.session_state.summary += "\n" + new_summary
+
+         # Drop the summarized messages from the live history
+         st.session_state.chat_history = st.session_state.chat_history[-MAX_HISTORY * 2:]
+
+
+ # Handle User Input
+ if prompt := st.chat_input("Say something"):
+     with st.chat_message("user"):
+         st.markdown(prompt)
+
+     st.session_state.chat_history.append({"role": "user", "content": prompt})
+
+     # Summarize and trim before building the prompt
+     trim_memory()
+
+     # Format the current, non-summarized history for the prompt template
+     formatted_history = ""
+     for msg in st.session_state.chat_history:
+         formatted_history += f"{msg['role']}: {msg['content']}\n"
+
+     with st.chat_message("assistant"):
+         response_container = st.empty()
+         full_response = ""
+
+         # Pass 'human_input', 'history', and 'summary' to the chain
+         for chunk in chain.stream({
+             "human_input": prompt,
+             "history": formatted_history,
+             "summary": st.session_state.summary
+         }):
+             full_response += chunk.content
+             response_container.markdown(full_response)
+
+     st.session_state.chat_history.append({"role": "assistant", "content": full_response})
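The window arithmetic in trim_memory (keep the last MAX_HISTORY user/assistant pairs verbatim, fold anything older into the running summary) can be exercised without Streamlit or Ollama. A minimal sketch with the LLM stubbed out; fake_summarize is a hypothetical stand-in for the summary chain, not part of the app:

MAX_HISTORY = 2  # keep the last 2 user/assistant exchanges

def fake_summarize(text):
    # stand-in for get_summary(); a real call would go through the LLM
    return f"<summary of {len(text.splitlines())} messages>"

def trim(history, summary):
    # each exchange is two entries (user + assistant), hence MAX_HISTORY * 2
    if len(history) > MAX_HISTORY * 2:
        old = history[:-MAX_HISTORY * 2]
        old_str = "\n".join(f"{m['role']}: {m['content']}" for m in old)
        summary += "\n" + fake_summarize(old_str)
        history = history[-MAX_HISTORY * 2:]
    return history, summary

history = [{"role": role, "content": f"msg{i}"}
           for i, role in enumerate(["user", "assistant"] * 3)]
history, summary = trim(history, "")
print(len(history))  # 4 -> only the last two exchanges survive verbatim
print(summary)       # the oldest exchange was folded into the summary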
streamlit-ollama-chatbot DELETED
@@ -1 +0,0 @@
- Subproject commit 7370ae6148dd64375b6b86564cd1b5cd51ef46c3