MilanM commited on
Commit
1695c47
·
verified ·
1 Parent(s): eb913d3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +163 -0
app.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from io import BytesIO
3
+ import ibm_watsonx_ai
4
+ import secretsload
5
+ import anton_ego_jimmy
6
+ import requests
7
+ import time
8
+ import re
9
+
10
+ from ibm_watsonx_ai.foundation_models import ModelInference
11
+ from ibm_watsonx_ai import Credentials, APIClient
12
+ from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams
13
+ from ibm_watsonx_ai.metanames import GenTextReturnOptMetaNames as RetParams
14
+ from secretsload import load_stsecrets # script to load credentials from HuggingFace secrets section
15
+
16
# Load watsonx.ai credentials from the HuggingFace Spaces secrets store.
credentials = load_stsecrets()

# Must run before any other Streamlit call: configure the browser tab.
st.set_page_config(
    page_title="Jimmy le critique",
    page_icon="🍷",
    initial_sidebar_state="collapsed"
)
23
+
24
# Password protection
def check_password():
    """Gate the app behind the password stored in `st.secrets["app_password"]`.

    Returns:
        bool: True once the correct password has been entered this session;
        False otherwise (a login form is rendered in that case).
    """
    def password_entered():
        # on_change callback: record the comparison result in session state.
        if st.session_state["password"] == st.secrets["app_password"]:
            st.session_state["password_correct"] = True
            # Drop the raw password so it is not retained in session state.
            del st.session_state["password"]
        else:
            st.session_state["password_correct"] = False

    def render_login_form():
        # Shared login UI — identical for first visit and failed attempts.
        st.markdown("\n\n")
        st.text_input("Enter the password", type="password", on_change=password_entered, key="password")
        st.divider()
        st.info("Template of Jimmy developed by Milan Mrdenovic © IBM Norway 2024")

    if "password_correct" not in st.session_state:
        # First visit: show the form, no error yet.
        render_login_form()
        return False
    if not st.session_state["password_correct"]:
        # Previous attempt failed: show the form plus an error banner.
        render_login_form()
        st.error("😕 Password incorrect")
        return False
    return True
48
+
49
# Halt every rerun of the script until the correct password is entered.
if not check_password():
    st.stop()
51
+
52
+
53
+ # Initialize session state
54
# Initialize session state
# NOTE(review): current_page is initialized here but never read in this
# file — confirm it is used elsewhere, otherwise it is dead state.
if 'current_page' not in st.session_state:
    st.session_state.current_page = 0
56
+
57
def initialize_session_state():
    """Ensure the per-session chat history list exists."""
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = []
60
+
61
def setup_client():
    """Build an authenticated watsonx.ai API client from Streamlit secrets.

    Returns:
        APIClient: client scoped to the project id in `st.secrets["project_id"]`.
    """
    credentials = Credentials(
        url=st.secrets["url"],
        api_key=st.secrets["api_key"]
    )
    # Fixed: removed dead local `apo = st.secrets["api_key"]` — it copied a
    # secret into an unused variable.
    client = APIClient(credentials, project_id=st.secrets["project_id"])
    return client
69
+
70
# Module-level watsonx.ai client, shared by all generations in this process.
client = setup_client()
71
+
72
def prepare_prompt(prompt, chat_history):
    """Prefix *prompt* with the serialized conversation when in chat mode.

    In non-chat mode, or with an empty history, the prompt passes through
    unchanged.
    """
    if anton_ego_jimmy.TYPE != "chat" or not chat_history:
        return prompt
    turns = [f"{turn['role']}: \"{turn['content']}\"" for turn in chat_history]
    chats = "\n".join(turns)
    return f"Conversation History:\n{chats}\n\nNew Message: {prompt}"
77
+
78
def apply_prompt_syntax(prompt, system_prompt, prompt_template, bake_in_prompt_syntax):
    """Wrap *prompt* in the chat-template syntax of the chosen model family.

    The template is applied only when *bake_in_prompt_syntax* is truthy AND
    *system_prompt* is non-empty; otherwise the raw prompt is returned.
    Raises KeyError when *prompt_template* names an unknown family.
    """
    model_family_syntax = {
        "llama3-instruct (llama-3 & 3.1) - system": """\n<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        "llama3-instruct (llama-3 & 3.1) - user": """\n<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        "granite-13b-chat & instruct - system": """\n<|system|>\n{system_prompt}\n<|user|>\n{prompt}\n<|assistant|>\n\n""",
        "granite-13b-chat & instruct - user": """\n<|user|>\n{prompt}\n<|assistant|>\n\n""",
        "llama2-chat - system": """\n[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{prompt} [/INST]\n""",
        "llama2-chat - user": """\n[INST] {prompt} [/INST] """,
        "mistral & mixtral v2 tokenizer - system": """\n<s>[INST]System Prompt:[{system_prompt}]\n\n{prompt} [/INST]\n""",
        "mistral & mixtral v2 tokenizer - system segmented": """\n<s>[INST]System Prompt:{system_prompt}[/INST][INST]{prompt} [/INST]\n""",
        "mistral & mixtral v2 tokenizer - user": """\n<s>[INST]{prompt} [/INST]\n"""
    }

    if not bake_in_prompt_syntax:
        return prompt
    # Lookup happens before the system_prompt check so an unknown template
    # name fails loudly regardless of the system prompt (same as before).
    template = model_family_syntax[prompt_template]
    if not system_prompt:
        # NOTE(review): "- user" templates are also skipped when
        # system_prompt is empty — confirm that is intended.
        return prompt
    return template.format(system_prompt=system_prompt, prompt=prompt)
96
+
97
def generate_response(watsonx_llm, prompt_data, params):
    """Yield text chunks streamed from the model for *prompt_data*."""
    yield from watsonx_llm.generate_text_stream(prompt=prompt_data, params=params)
101
+
102
def get_response(user_input):
    """Generate, stream, and record the assistant's reply to *user_input*.

    Builds the full prompt (conversation history + model-family syntax),
    streams tokens from watsonx.ai into the chat UI, appends the finished
    reply to the session chat history, and returns the response text.
    """
    # Fold the conversation history into the prompt when in chat mode.
    prompt = prepare_prompt(user_input, st.session_state.chat_history)

    # Wrap the prompt in the selected model family's chat syntax.
    prompt_data = apply_prompt_syntax(
        prompt,
        anton_ego_jimmy.SYSTEM_PROMPT,
        anton_ego_jimmy.PROMPT_TEMPLATE,
        anton_ego_jimmy.BAKE_IN_PROMPT_SYNTAX
    )

    watsonx_llm = ModelInference(
        api_client=client,
        model_id=anton_ego_jimmy.SELECTED_MODEL,
        verify=anton_ego_jimmy.VERIFY
    )

    # Generation parameters, all sourced from the persona module.
    params = {
        GenParams.DECODING_METHOD: anton_ego_jimmy.DECODING_METHOD,
        GenParams.MAX_NEW_TOKENS: anton_ego_jimmy.MAX_NEW_TOKENS,
        GenParams.MIN_NEW_TOKENS: anton_ego_jimmy.MIN_NEW_TOKENS,
        # GenParams.LENGTH_PENALTY: anton_ego_jimmy.LENGTH_PENALTY,
        GenParams.REPETITION_PENALTY: anton_ego_jimmy.REPETITION_PENALTY,
        GenParams.STOP_SEQUENCES: anton_ego_jimmy.STOP_SEQUENCES
    }

    # Fixed: removed the debug `print(prompt_data, params)` — it wrote the
    # full (potentially sensitive) prompt and settings to server stdout.

    # Stream the response into the chat UI as tokens arrive.
    with st.chat_message("assistant", avatar="🧐"):
        stream = generate_response(watsonx_llm, prompt_data, params)
        response = st.write_stream(stream)

    # Add AI response to chat history
    st.session_state.chat_history.append({"role": "assistant", "content": response})

    return response
141
+
142
def main():
    """Render the chat page: history, input box, and model responses."""
    initialize_session_state()
    st.title("Jimmy le critique 🧐")

    # Replay prior turns when the persona module asks for visible history.
    if anton_ego_jimmy.DISPLAY_CHAT_HISTORY == 1:
        for entry in st.session_state.chat_history:
            with st.chat_message(entry["role"]):
                st.markdown(entry["content"])

    user_input = st.chat_input("You:", key="user_input")
    if user_input:
        # Record and echo the user's message, then stream the reply.
        st.session_state.chat_history.append({"role": "user", "content": user_input})
        with st.chat_message("user", avatar="🥷🏻"):
            st.markdown(user_input)
        get_response(user_input)
161
+
162
+ if __name__ == "__main__":
163
+ main()