ginipick committed · verified
Commit c8036ec · Parent(s): ec98a33

Update app.py

Files changed (1):
  1. app.py +20 -14
app.py CHANGED
@@ -85,7 +85,7 @@ def get_messages_formatter_type(model_name):
 @spaces.GPU(duration=120)
 def respond(
     message,
-    history: list[tuple[str, str]],
+    history: list[dict],  # history items arrive as dicts rather than tuples
     system_message,
     max_tokens,
     temperature,
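Reviewer note: the annotation swap above tracks Gradio's move from (user, assistant) tuple pairs to per-message dicts. A minimal sketch of both shapes for reference; the literal strings are illustrative, not from this commit, and the exact dict keys this handler actually reads are fixed in the history loop further down.

# Tuple format (older ChatInterface default): one (user, assistant) pair per turn.
history_tuples = [
    ("Hello", "Hi! How can I help?"),
]

# Messages format (type="messages"): one {'role', 'content'} dict per message.
history_messages = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi! How can I help?"},
]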
@@ -99,17 +99,17 @@ def respond(
     chat_template = get_messages_formatter_type(MISTRAL_MODEL_NAME)
 
     # Check the model file path
-    model_path = os.path.join("./models", MISTRAL_MODEL_NAME)
+    model_path_local = os.path.join("./models", MISTRAL_MODEL_NAME)
 
-    print(f"Model path: {model_path}")
+    print(f"Model path: {model_path_local}")
 
-    if not os.path.exists(model_path):
-        print(f"Warning: Model file not found at {model_path}")
+    if not os.path.exists(model_path_local):
+        print(f"Warning: Model file not found at {model_path_local}")
         print(f"Available files in ./models: {os.listdir('./models')}")
 
     if llm is None or llm_model != MISTRAL_MODEL_NAME:
         llm = Llama(
-            model_path=model_path,
+            model_path=model_path_local,
             flash_attn=True,
             n_gpu_layers=81,
             n_batch=1024,
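Reviewer note: renaming to model_path_local only changes the lookup; it does not guarantee the file is there. A minimal sketch, assuming the GGUF is hosted on the Hub (the repo id and filename below are placeholders, not from this commit), of pre-fetching it so the existence check passes:

import os
from huggingface_hub import hf_hub_download

MODELS_DIR = "./models"
MODEL_FILE = "model-Q6_K.gguf"  # hypothetical filename

model_path_local = os.path.join(MODELS_DIR, MODEL_FILE)
if not os.path.exists(model_path_local):
    # Download into ./models so the os.path.exists() check in respond() passes.
    model_path_local = hf_hub_download(
        repo_id="your-org/your-gguf-repo",  # hypothetical repo id
        filename=MODEL_FILE,
        local_dir=MODELS_DIR,
    )
print(f"Model path: {model_path_local}")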
@@ -136,17 +136,18 @@ def respond(
 
     messages = BasicChatHistory()
 
+    # Assume each history item is a dict of the form {'user': <user_message>, 'assistant': <assistant_message>}
     for msn in history:
-        user = {
+        user_message = {
             'role': Roles.user,
-            'content': msn[0]
+            'content': msn.get('user', '')
         }
-        assistant = {
+        assistant_message = {
             'role': Roles.assistant,
-            'content': msn[1]
+            'content': msn.get('assistant', '')
         }
-        messages.add_message(user)
-        messages.add_message(assistant)
+        messages.add_message(user_message)
+        messages.add_message(assistant_message)
 
     stream = agent.get_chat_response(
         message,
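Reviewer note: the loop above expects each history item as {'user': ..., 'assistant': ...}, which is neither the old tuple format nor Gradio's role/content messages format. A minimal sketch, assuming history actually arrives in the role/content messages format, of regrouping it into the pair dicts this loop reads:

def pair_history(history: list[dict]) -> list[dict]:
    # Regroup role/content messages into {'user': ..., 'assistant': ...} pairs.
    pairs, current = [], {}
    for msg in history:
        if msg.get("role") == "user":
            current = {"user": msg.get("content", "")}
        elif msg.get("role") == "assistant":
            current["assistant"] = msg.get("content", "")
            pairs.append(current)
            current = {}
    return pairs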
@@ -192,7 +193,12 @@ demo = gr.ChatInterface(
     fn=respond,
     title="Ginigen Private AI",
     description="A privacy-focused AI solution: 6-bit quantization shrinks the model while preserving performance.",
-    theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
+    theme=gr.themes.Soft(
+        primary_hue="violet",
+        secondary_hue="violet",
+        neutral_hue="gray",
+        font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
+    ).set(
         body_background_fill_dark="#16141c",
         block_background_fill_dark="#16141c",
         block_border_width="1px",
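Reviewer note: splitting the theme call across lines is pure formatting; behavior is unchanged. For reference, a minimal sketch of the same pattern in isolation: build a Soft theme, override dark-mode tokens via .set(), and hand it to any Gradio app (the standalone demo below is illustrative, not part of this commit).

import gradio as gr

theme = gr.themes.Soft(
    primary_hue="violet",
    neutral_hue="gray",
    font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"],
).set(
    body_background_fill_dark="#16141c",
    block_background_fill_dark="#16141c",
)

with gr.Blocks(theme=theme) as theme_preview:  # hypothetical standalone demo
    gr.Markdown("Theme preview")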
@@ -227,4 +233,4 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
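Reviewer note: respond() appears to stream its output, and on older Gradio versions generator handlers require the request queue. A minimal sketch of a launch with the queue enabled explicitly (the flags shown are illustrative, not from this commit):

if __name__ == "__main__":
    demo.queue().launch(
        server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
        show_error=True,        # surface Python exceptions in the UI
    )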
 