warhawkmonk committed on
Commit 0a12705 · verified · 1 Parent(s): f5dcecf

Update app.py

Files changed (1): app.py (+24 -11)
app.py CHANGED
@@ -217,15 +217,14 @@ def consume_llm_api_updater(prompt):
 
     )
     return completion.choices[0].message.content
-def consume_llm_api(prompt):
+def consume_llm_api(prompt,messages=None):
 
     client = Groq(
         api_key="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"
     )
-
-    completion = client.chat.completions.create(
-
-        model="llama3-70b-8192",
+    if messages is not None:
+        messages= messages
+    else:
         messages=[
 
             {
@@ -233,17 +232,21 @@ def consume_llm_api(prompt):
                 "content": prompt
             },
         ],
+    completion = client.chat.completions.create(
+
+        model="llama3-70b-8192",
+        messages=messages,
 
-
+        # temperature=1,
+        # max_completion_tokens=1024,
         top_p=1,
         stream=True,
-
+        # stop=None,
     )
-
+    # return completion.choices[0].message.content
     for chunk in completion:
         if chunk.choices[0].delta.content:
             yield chunk.choices[0].delta.content
-# @st.cache_resource
 # def load_model():
 #     pipeline_ = AutoPipelineForInpainting.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16).to("cuda")
 #     return pipeline_
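
For reference, the reworked helper streams a Groq chat completion and now accepts an optional, pre-built chat history. Below is a minimal self-contained sketch of the same pattern; the stream_chat name, the GROQ_API_KEY environment variable, and the default model argument are assumptions for illustration, not part of the commit.

# Sketch of a streaming Groq chat helper with an optional conversation
# history, mirroring the pattern introduced in this commit.
# Assumes the `groq` package is installed and GROQ_API_KEY is set.
import os
from groq import Groq

def stream_chat(prompt, messages=None, model="llama3-70b-8192"):
    client = Groq(api_key=os.environ["GROQ_API_KEY"])
    # Fall back to a single-turn conversation when no history is supplied.
    if messages is None:
        messages = [{"role": "user", "content": prompt}]
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        top_p=1,
        stream=True,
    )
    for chunk in completion:
        # Each streamed chunk carries an incremental piece of the reply.
        if chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content
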
@@ -409,6 +412,16 @@ with column2:
 
 
     for index,prompts_ in enumerate(dictionary['every_prompt_with_val'][::-1]):
+        messages = []
+
+        for i_ in dictionary['every_prompt_with_val']:
+            if "@working" in i_[-1]:
+                messages.append({"role":"user","content":i_[0]})
+                # messages.append({"role":"assistant","content":dictionary['every_prompt_with_val'][i_][1]})
+            else:
+                messages.append({"role":"user","content":i_[0]})
+                messages.append({"role":"assistant","content":i_[1]})
+
         if prompts_[-1]=="@working":
             if index==0:
                 st.write(prompts_[0].split(send_prompt())[-1].upper() if send_prompt() in prompts_[0] else prompts_[0].upper())
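
The added loop replays every stored prompt/response pair from dictionary['every_prompt_with_val'] into a chat history before each LLM call. A small illustrative sketch of that transformation, using made-up sample data (the real entries in app.py may carry extra fields, e.g. a third element for image prompts):

# Illustrative only: entries are assumed to be (prompt, "@working") while a
# reply is still pending, or (prompt, reply) once the turn is complete.
every_prompt_with_val = [
    ("What is Streamlit?", "Streamlit is a Python framework for data apps."),
    ("Summarise that in one line.", "@working"),
]

messages = []
for entry in every_prompt_with_val:
    if "@working" in entry[-1]:
        # Pending turn: only the user prompt exists so far.
        messages.append({"role": "user", "content": entry[0]})
    else:
        # Completed turn: keep both the user prompt and the assistant reply.
        messages.append({"role": "user", "content": entry[0]})
        messages.append({"role": "assistant", "content": entry[1]})

print(messages)
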
@@ -416,12 +429,12 @@ with column2:
             while(len(data_need)==0):
                 if len(prompts_)==3:
                     try:
-                        data_need = st.write_stream(consume_llm_api(prompts_[1]))
+                        data_need = st.write_stream(consume_llm_api(prompts_[1],messages))
                     except:
                         data_need = st.write_stream(consume_llm_api_conditional(prompts_[1]))
                 else:
                     try:
-                        data_need=st.write_stream(consume_llm_api(prompts_[0]))
+                        data_need=st.write_stream(consume_llm_api(prompts_[0],messages))
                     except:
                         data_need=st.write_stream(consume_llm_api_conditional(prompts_[0]))
 
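
Downstream, the call sites hand the generator to st.write_stream and fall back to consume_llm_api_conditional when the history-aware call fails. A condensed sketch of that call pattern follows; render_reply and its callable parameters are illustrative wrappers, not functions from app.py.

import streamlit as st

def render_reply(prompt, messages, primary, fallback):
    # Stream the reply into the Streamlit UI; retry with the fallback helper
    # when the history-aware call raises (mirrors the try/except in app.py).
    data_need = ""
    while len(data_need) == 0:
        try:
            data_need = st.write_stream(primary(prompt, messages))
        except Exception:
            data_need = st.write_stream(fallback(prompt))
    return data_need

# e.g. render_reply(user_prompt, messages, consume_llm_api, consume_llm_api_conditional)
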