Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files
- src/bot_specs.py +5 -0
- src/helper_functions.py +176 -0
- src/parameters.py +21 -0
src/bot_specs.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os


def _env_or(name, fallback):
    """Return env var *name* when set and non-empty, else *fallback*."""
    return os.getenv(name) or fallback


# Chat UI identity; each value may be overridden through environment variables.
bot_name = _env_or("BOT_NAME", "jimmy")
bot_icon = _env_or("BOT_ICON", "🧐")
user_icon = _env_or("USER_ICON", "🥷🏻")
|
src/helper_functions.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from ibm_watsonx_ai import APIClient, Credentials
|
3 |
+
from ibm_watsonx_ai.foundation_models import ModelInference
|
4 |
+
from io import BytesIO
|
5 |
+
from reportlab.lib.pagesizes import letter
|
6 |
+
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
|
7 |
+
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
|
8 |
+
from reportlab.lib import colors
|
9 |
+
from reportlab.lib.enums import TA_LEFT, TA_RIGHT
|
10 |
+
from datetime import datetime
|
11 |
+
import regex
|
12 |
+
import os
|
13 |
+
|
14 |
+
|
15 |
+
def check_password():
    """Gate the Streamlit app behind the APP_PASSWORD environment variable.

    Returns:
        bool: True once the correct password has been entered; otherwise the
        password form is rendered (with an error message after a failed
        attempt) and False is returned.
    """

    def password_entered():
        # NOTE(review): plain == comparison against the env var; a
        # timing-safe hmac.compare_digest would be stronger — confirm
        # whether that matters for this deployment.
        if st.session_state["password"] == os.getenv("APP_PASSWORD"):
            st.session_state["password_correct"] = True
            # Drop the plaintext password from session state once validated.
            del st.session_state["password"]
        else:
            st.session_state["password_correct"] = False

    def show_password_form():
        # Shared form, used both on first visit and after a failed attempt
        # (the original duplicated this widget block in both branches).
        st.markdown("\n\n")
        st.text_input(
            "Enter the password",
            type="password",
            on_change=password_entered,
            key="password",
        )
        st.divider()
        st.info("Designed and developed by Milan Mrdenovic © IBM Norway 2025")

    if "password_correct" not in st.session_state:
        # First visit: no attempt has been made yet.
        show_password_form()
        return False
    elif not st.session_state["password_correct"]:
        # A wrong password was entered; show the form again with an error.
        show_password_form()
        st.error("😕 Password incorrect")
        return False
    else:
        return True
|
48 |
+
|
49 |
+
|
50 |
+
def initialize_session_state():
    """Create an empty chat history in Streamlit session state on first run."""
    if "chat_history" in st.session_state:
        return
    st.session_state.chat_history = []
|
53 |
+
|
54 |
+
|
55 |
+
def setup_watsonxai_client(
    api_key: str, project_id: str, url: str = "https://eu-de.ml.cloud.ibm.com"
):
    """Set up a watsonx.ai python SDK client using an apikey and project_id.

    Args:
        api_key: IBM Cloud API key used to authenticate.
        project_id: watsonx.ai project the client is scoped to.
        url: Regional service endpoint (defaults to eu-de).

    Returns:
        APIClient: An authenticated watsonx.ai API client.
    """
    # APIClient and Credentials are imported at module level; the previous
    # redundant function-local re-import has been removed.
    wx_credentials = Credentials(url=url, api_key=api_key)
    wxai_client = APIClient(wx_credentials, project_id=project_id)

    return wxai_client
|
65 |
+
|
66 |
+
|
67 |
+
# Pre-compiled once at import time: matches any character carrying the
# Unicode "Emoji" property (requires the third-party `regex` module).
emoji_pattern = regex.compile(r"\p{Emoji}", flags=regex.UNICODE)


def remove_emojis(text):
    """Return *text* with every emoji character stripped out."""
    return emoji_pattern.sub("", text)
|
72 |
+
|
73 |
+
|
74 |
+
def create_pdf_from_chat(chat_history):
    """Render the chat history into an in-memory PDF.

    Args:
        chat_history: List of message dicts with 'role' and 'content' keys.

    Returns:
        BytesIO: Buffer positioned at the start of the generated PDF.
    """
    pdf_buffer = BytesIO()
    document = SimpleDocTemplate(
        pdf_buffer, pagesize=letter, topMargin=30, bottomMargin=30
    )
    base_styles = getSampleStyleSheet()

    heading = ParagraphStyle(
        "Title", parent=base_styles["Heading1"], fontSize=18, spaceAfter=20
    )
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    story = [Paragraph(f"Chat History - Generated on {timestamp}", heading)]

    # Right-aligned light-blue bubbles for the user...
    user_bubble = ParagraphStyle(
        "UserStyle",
        parent=base_styles["Normal"],
        backColor=colors.lightblue,
        borderPadding=10,
        alignment=TA_RIGHT,
    )
    # ...and left-aligned lavender bubbles for the assistant.
    assistant_bubble = ParagraphStyle(
        "JimmyStyle",
        parent=base_styles["Normal"],
        backColor=colors.lavender,
        borderPadding=10,
    )

    for entry in chat_history:
        speaker = entry["role"]
        # NOTE(review): emojis are stripped before rendering — presumably
        # because the default PDF fonts cannot draw them; confirm.
        body = remove_emojis(entry["content"])
        bubble = user_bubble if speaker == "user" else assistant_bubble
        story.append(Paragraph(f"<b>{speaker.capitalize()}:</b> {body}", bubble))
        story.append(Spacer(1, 12))

    document.build(story)
    pdf_buffer.seek(0)
    return pdf_buffer
|
114 |
+
|
115 |
+
|
116 |
+
def watsonx_chat_prompt(
    messages,
    stream=False,
    client=None,
    wx_url=None,
    wx_apikey=None,
    project_id=None,
    model_id=None,
    params=None,
):
    """
    Dynamic chat function for Watson AI.

    Args:
        messages (list): List of message objects following watsonx schema.
            Each message should have 'role' and 'content' keys.
            Supports system, user, assistant, and tool messages.
        stream (bool): If True, return streaming generator; if False, return complete response
        client (APIClient): Pre-configured Watson client (optional)
        wx_url (str): Watson URL (required if no client)
        wx_apikey (str): Watson API key (required if no client)
        project_id (str): Watson project ID (required if no client)
        model_id (str): Model identifier
        params (dict): Model parameters (optional)

    Returns:
        str or generator: Complete response text or streaming generator based on stream parameter
    """
    # Conservative defaults; the stop list covers common end-of-text tokens.
    # (Dead commented-out imports/params from the original were removed.)
    if params is None:
        params = {
            "temperature": 0.7,
            "max_tokens": 4096,
            "top_p": 1.0,
            "stop": ["</s>", "<|end_of_text|>", "<|endoftext|>"],
        }

    # Use the provided client, or build one from the raw credentials.
    if client is None:
        wx_credentials = Credentials(url=wx_url, api_key=wx_apikey)
        client = APIClient(credentials=wx_credentials, project_id=project_id)

    chat_model = ModelInference(api_client=client, model_id=model_id, params=params)

    if stream:
        return chat_model.chat_stream(messages=messages)
    return chat_model.chat(messages=messages)
|
168 |
+
|
169 |
+
|
170 |
+
def generate_response(watsonx_chat_prompt, stream):
    """Extract assistant text from a watsonx chat response.

    Args:
        watsonx_chat_prompt: Either an iterable of streaming chat chunks
            (when stream=True) or a complete chat-response dict
            (when stream=False).
        stream (bool): Whether the input is a streaming response.

    Returns:
        generator: yields content deltas when stream=True.
        str: the full message content when stream=False.

    Bug fixed: the original body mixed `yield` with a value-carrying
    `return`, which made the whole function a generator even for
    stream=False — the returned string was swallowed into StopIteration and
    callers never saw it. The yielding loop now lives in an inner generator
    so the non-streaming branch actually returns the content.
    """

    def _stream_chunks(response):
        # Chunks with an empty "choices" list carry no content; skip them.
        for chunk in response:
            if chunk["choices"]:
                yield chunk["choices"][0]["delta"].get("content", "")

    if stream:
        return _stream_chunks(watsonx_chat_prompt)
    return watsonx_chat_prompt["choices"][0]["message"]["content"]
|
src/parameters.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
model_id = os.getenv("MODEL_ID") or "meta-llama/llama-3-2-90b-vision-instruct"
|
4 |
+
wx_api_key = os.getenv("WX_API_KEY")
|
5 |
+
wx_project_id = os.getenv("WX_PROJECT_ID")
|
6 |
+
wx_url = os.getenv("WX_URL") or "https://eu-de.ml.cloud.ibm.com"
|
7 |
+
system_prompt = os.getenv("SYSTEM_PROMPT") or ""
|
8 |
+
params = {
|
9 |
+
"temperature": os.getenv("TEMPERATURE") or 0.7,
|
10 |
+
"max_tokens": os.getenv("AX_TOKENS") or 4096,
|
11 |
+
"top_p": os.getenv("TOP_P") or 1.0,
|
12 |
+
"stop": (
|
13 |
+
os.getenv("STOP_SEQUENCES", "").split(",")
|
14 |
+
if os.getenv("STOP_SEQUENCES")
|
15 |
+
else ["</s>", "<|end_of_text|>", "<|endoftext|>"]
|
16 |
+
),
|
17 |
+
# "frequency_penalty": os.getenv("FREQUENCY_PENALTY") or 0.5,
|
18 |
+
# "presence_penalty": os.getenv("PRESENCE_PENALTY") or 0.3,
|
19 |
+
}
|
20 |
+
display_chat_history = os.getenv("DISPLAY_CHAT_HISTORY") or True
|
21 |
+
stream_outputs = os.getenv("STREAM_OUTPUTS") or True
|