sainathBelagavi committed
Commit d4f68f1 · verified · 1 Parent(s): 8358d9a

Enable the context window by giving the prompt access to the chat history.

Files changed (1): app.py (+29, -35)
app.py CHANGED
@@ -3,46 +3,46 @@ from huggingface_hub import InferenceClient
 import os
 import sys
 
-st.title("CODEFUSSION ☄")
+st.title("CODEFUSSION ☄")
 
 base_url = "https://api-inference.huggingface.co/models/"
-
 API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
-# print(API_KEY)
-# headers = {"Authorization":"Bearer "+API_KEY}
 
 model_links = {
-    "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
-    "ModernMigrate⭐": base_url + "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct"
+    "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
+    "ModernMigrate⭐": base_url + "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct"
 }
 
-# Pull info about the model to display
 model_info = {
     "LegacyLift🚀": {
-        'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
+        'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
         'logo': './11.jpg'
     },
-
     "ModernMigrate⭐": {
-        'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nThis model excels in coding, logical reasoning, and high-speed inference. \n""",
+        'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference. \n""",
         'logo': './2.jpg'
     },
-
     "RetroRecode🔄": {
-        'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
+        'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
         'logo': './3.jpg'
     },
 }
 
-def format_promt(message, custom_instructions=None):
+def format_promt(message, conversation_history, custom_instructions=None):
     prompt = ""
     if custom_instructions:
-        prompt += f"[INST] {custom_instructions} [/INST]"
-    prompt += f"[INST] {message} [/INST]"
+        prompt += f"[INST] {custom_instructions} [/INST]"
+
+    # Add conversation history to the prompt
+    prompt += "[CONV_HISTORY]\n"
+    for role, content in conversation_history:
+        prompt += f"{role.upper()}: {content}\n"
+    prompt += "[/CONV_HISTORY]"
+
+    # Add the current message
+    prompt += f"[INST] {message} [/INST]"
+
     return prompt
 
 def reset_conversation():
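
The rewritten format_promt is the core of the change: it splices a [CONV_HISTORY] block between the optional custom instruction and the current message. A standalone sketch of the prompt it assembles (the function body is copied from the hunk above; the two-turn history is invented for illustration):

    def format_promt(message, conversation_history, custom_instructions=None):
        prompt = ""
        if custom_instructions:
            prompt += f"[INST] {custom_instructions} [/INST]"

        # Add conversation history to the prompt
        prompt += "[CONV_HISTORY]\n"
        for role, content in conversation_history:
            prompt += f"{role.upper()}: {content}\n"
        prompt += "[/CONV_HISTORY]"

        # Add the current message
        prompt += f"[INST] {message} [/INST]"

        return prompt

    # Invented two-turn history, purely to show the resulting layout
    history = [("user", "Write hello world in C."),
               ("assistant", "#include <stdio.h> ...")]
    print(format_promt("Now port it to Rust.", history, "Act like a Human in conversation"))

Note that [INST]/[/INST] are Mistral-style instruction delimiters, while [CONV_HISTORY] is an app-level convention rather than part of any of these models' chat templates; the models will read it as plain text.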
@@ -54,32 +54,27 @@ def reset_conversation():
     return None
 
 models = [key for key in model_links.keys()]
-
 selected_model = st.sidebar.selectbox("Select Model", models)
-
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
 st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
 
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
 st.sidebar.image(model_info[selected_model]['logo'])
-st.sidebar.markdown("*Generating the code might go slow if you are using low power resources *")
 
+st.sidebar.markdown("*Generating the code might go slow if you are using low power resources *")
 
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
 
 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
-    # st.write(f"Changed to {selected_model}")
     st.session_state.prev_option = selected_model
-    reset_conversation()
 
+    reset_conversation()
-repo_id = model_links[selected_model]
 
+repo_id = model_links[selected_model]
 st.subheader(f'{selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
 
 if "messages" not in st.session_state:
     st.session_state.messages = []
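
The hunk above now calls reset_conversation() on a model switch, but its body sits outside the diff; only the trailing return None is visible. Based on how st.session_state.messages is used elsewhere in the file, a plausible reconstruction (a guess, not the commit's actual code) is:

    import streamlit as st

    def reset_conversation():
        # Hypothetical body -- the real one is outside the diff context.
        # Dropping the stored messages empties the history that
        # format_promt folds into the next prompt.
        st.session_state.messages = []
        return None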
@@ -90,24 +85,23 @@ for message in st.session_state.messages:
 
 if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
     custom_instruction = "Act like a Human in conversation"
-
     with st.chat_message("user"):
         st.markdown(prompt)
-
+
+    # Append the current message to the conversation history
     st.session_state.messages.append({"role": "user", "content": prompt})
-
-    formated_text = format_promt(prompt, custom_instruction)
-
+    conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
+
+    formated_text = format_promt(prompt, conversation_history, custom_instruction)
+
     with st.chat_message("assistant"):
         client = InferenceClient(
             model=model_links[selected_model], )
-
        output = client.text_generation(
            formated_text,
            temperature=temp_values,  # 0.5
            max_new_tokens=3000,
            stream=True
        )
-
        response = st.write_stream(output)
-        st.session_state.messages.append({"role": "assistant", "content": response})
+    st.session_state.messages.append({"role": "assistant", "content": response})
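
One follow-up the commit leaves open: st.session_state.messages grows without bound, while RetroRecode🔄 maps to a 4k-context model (Phi-3-mini-4k-instruct) and max_new_tokens=3000 already reserves most of that window. A guard along these lines could be applied to conversation_history before calling format_promt (a rough sketch; the character budget is an invented stand-in for real token counting):

    def trim_history(conversation_history, max_chars=4000):
        # Keep the most recent turns that fit a crude character budget.
        # A real implementation would count tokens with the selected
        # model's tokenizer instead of counting characters.
        trimmed, total = [], 0
        for role, content in reversed(conversation_history):
            total += len(content)
            if total > max_chars:
                break
            trimmed.append((role, content))
        return list(reversed(trimmed))

    # e.g. conversation_history = trim_history(conversation_history)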
 