import streamlit as st
from io import BytesIO
import ibm_watsonx_ai
import secretsload
import genparam
import requests
import time
import re
from ibm_watsonx_ai.foundation_models import ModelInference
from ibm_watsonx_ai import Credentials, APIClient
from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams
from ibm_watsonx_ai.metanames import GenTextReturnOptMetaNames as RetParams
from secretsload import load_stsecrets

credentials = load_stsecrets()
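# The Streamlit secrets this app reads (key names taken from their usage
# below; values are deployment-specific): app_password, url, api_key,
# project_id. A matching .streamlit/secrets.toml would look roughly like:
#
#   app_password = "..."
#   url = "https://us-south.ml.cloud.ibm.com"   # example region endpoint
#   api_key = "..."
#   project_id = "..."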
st.set_page_config(
    page_title="Jimmy",
    page_icon="π",
    initial_sidebar_state="collapsed"
)

# Password protection
def check_password():
    def password_entered():
        if st.session_state["password"] == st.secrets["app_password"]:
            st.session_state["password_correct"] = True
            del st.session_state["password"]
        else:
            st.session_state["password_correct"] = False

    if "password_correct" not in st.session_state:
        st.markdown("\n\n")
        st.text_input("Enter the password", type="password", on_change=password_entered, key="password")
        st.divider()
        st.info("Developed by Milan Mrdenovic © IBM Norway 2025")
        return False
    elif not st.session_state["password_correct"]:
        st.markdown("\n\n")
        st.text_input("Enter the password", type="password", on_change=password_entered, key="password")
        st.divider()
        st.info("Developed by Milan Mrdenovic © IBM Norway 2025")
        st.error("π Password incorrect")
        return False
    else:
        return True

if not check_password():
    st.stop()
# Initialize session state
if 'current_page' not in st.session_state:
    st.session_state.current_page = 0

def initialize_session_state():
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
        st.session_state.chat_history.append({"role": "system", "content": genparam.SYSTEM_PROMPT})

def setup_client():
    credentials = Credentials(
        url=st.secrets["url"],
        api_key=st.secrets["api_key"]
    )
    return APIClient(credentials, project_id=st.secrets["project_id"])

def prepare_prompt(prompt, chat_history):
    if genparam.TYPE == "chat" and chat_history:
        chats = "\n".join([f"{message['role']}: \"{message['content']}\"" for message in chat_history])
        return f"Conversation History:\n{chats}\n\nNew Message: {prompt}"
    return prompt
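# Illustration (not part of the app): with genparam.TYPE == "chat",
# prepare_prompt("How are you?", [{"role": "user", "content": "Hi"}]) returns:
#
#   Conversation History:
#   user: "Hi"
#
#   New Message: How are you?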
def apply_prompt_syntax(prompt, system_prompt, prompt_template, bake_in_prompt_syntax):
    model_family_syntax = {
        ### Llama Models
        "llama3_instruct - system": """<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        "llama3_instruct - user": """<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""",
        ### Granite Models
        "granite_3 - system": """<|start_of_role|>system<|end_of_role|>{system_prompt}<|end_of_text|>\n<|start_of_role|>user<|end_of_role|>{prompt}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>""",
        "granite_3 - user": """<|start_of_role|>user<|end_of_role|>{prompt}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>""",
        ### Granite Code Only
        "granite_code - with system": """System:\n{system_prompt}\n\nQuestion:\n{prompt}\n\nAnswer:\n""",
        "granite_code - instruction only": """Question:\n{prompt}\n\nAnswer:\n""",
        ### Mistral Models
        "mistral_ai_models_sys": """<s>[INST] System Prompt: {system_prompt} \n\n User Prompt: {prompt} [/INST]\n\n""",
        "mistral_ai_models": """<s>[INST] {prompt} [/INST]\n\n""",
        "mistral_ai_small_sys": """<s>[SYSTEM_PROMPT]{system_prompt}[/SYSTEM_PROMPT][INST]{prompt}[/INST]\n\n""",
        "mistral_ai_small_raw": """[SYSTEM_PROMPT]{system_prompt}[/SYSTEM_PROMPT][INST]{prompt}[/INST]""",
        ### No Syntax
        "no syntax - system": """{system_prompt}\n\n{prompt}""",
        "no syntax - user": """{prompt}""",
    }
    if bake_in_prompt_syntax:
        template = model_family_syntax[prompt_template]
        if system_prompt:
            return template.format(system_prompt=system_prompt, prompt=prompt)
        # No system prompt: apply a user-only template variant (no {system_prompt} field)
        return template.format(prompt=prompt)
    return prompt
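# Illustration (not part of the app): with prompt_template "granite_3 - system",
# apply_prompt_syntax("Hi", "Be brief.", "granite_3 - system", True) returns:
#
#   <|start_of_role|>system<|end_of_role|>Be brief.<|end_of_text|>
#   <|start_of_role|>user<|end_of_role|>Hi<|end_of_text|>
#   <|start_of_role|>assistant<|end_of_role|>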
def generate_response(watsonx_llm, prompt_data, params):
    generated_response = watsonx_llm.generate_text_stream(prompt=prompt_data, params=params)
    for chunk in generated_response:
        yield chunk

def generate_chat_response(watsonx_llm, chat_history, params):
    # Streams a chat completion over the full structured message history
    generated_response = watsonx_llm.chat_stream(messages=chat_history, params=params)
    for chunk in generated_response:
        yield chunk
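# Two streaming paths: generate_response sends one fully templated prompt
# string through generate_text_stream, while generate_chat_response replays
# the structured message history through chat_stream. chat_interface below
# uses the chat path; the prompt-string path is kept as a commented-out
# alternative.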
def chat_interface():
    st.subheader("Jimmy")

    # User input
    user_input = st.chat_input("You:", key="user_input")

    if user_input:
        # Add user message to chat history
        st.session_state.chat_history.append({"role": "user", "content": user_input})

        # Prepare the prompt
        prompt = prepare_prompt(user_input, st.session_state.chat_history)

        # Apply prompt syntax
        prompt_data = apply_prompt_syntax(
            prompt,
            genparam.SYSTEM_PROMPT,
            genparam.PROMPT_TEMPLATE,
            genparam.BAKE_IN_PROMPT_SYNTAX
        )

        # Setup client and model
        client = setup_client()
        watsonx_llm = ModelInference(
            api_client=client,
            model_id=genparam.SELECTED_MODEL,
            verify=genparam.VERIFY
        )

        # Prepare parameters
        params = {
            GenParams.DECODING_METHOD: genparam.DECODING_METHOD,
            GenParams.MAX_NEW_TOKENS: genparam.MAX_NEW_TOKENS,
            GenParams.MIN_NEW_TOKENS: genparam.MIN_NEW_TOKENS,
            GenParams.REPETITION_PENALTY: genparam.REPETITION_PENALTY,
            GenParams.STOP_SEQUENCES: genparam.STOP_SEQUENCES
        }

        # Generate and stream response
        with st.chat_message("Jimmy", avatar="π"):
            # stream = generate_response(watsonx_llm, prompt_data, params)
            stream = generate_chat_response(watsonx_llm, st.session_state.chat_history, params)
            response = st.write_stream(stream)

        # Add AI response to chat history; "assistant" is a role the chat API accepts on replay
        st.session_state.chat_history.append({"role": "assistant", "content": response})
def main():
    initialize_session_state()
    chat_interface()

if __name__ == "__main__":
    main()
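# For reference, a minimal genparam.py this app could run against. Only the
# attribute names come from the code above; every value is an illustrative
# assumption, not the author's configuration.
#
#   SYSTEM_PROMPT = "You are Jimmy, a helpful assistant."
#   TYPE = "chat"                                # selects the history-flattening path in prepare_prompt
#   PROMPT_TEMPLATE = "granite_3 - system"       # must be a key of model_family_syntax
#   BAKE_IN_PROMPT_SYNTAX = True
#   SELECTED_MODEL = "ibm/granite-3-8b-instruct" # assumed model id
#   VERIFY = False
#   DECODING_METHOD = "greedy"
#   MAX_NEW_TOKENS = 500
#   MIN_NEW_TOKENS = 1
#   REPETITION_PENALTY = 1.0
#   STOP_SEQUENCES = []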