MVPilgrim committed
Commit fc8b872 · 1 Parent(s): f0cb266

8vcpu invalid instruction

Files changed (1): app.py (+13 -10)
app.py CHANGED
@@ -513,16 +513,19 @@ try:
         #with st.spinner('Generating Completion (but slowly. 40+ seconds.)...'):
         with st.markdown("<h1 style='text-align: center; color: #666666;'>LLM with RAG Prompting <br style='page-break-after: always;'>Proof of Concept</h1>",
                          unsafe_allow_html=True):
-            st.spinner('Generating Completion (but slowly. 40+ seconds.)...')
-            modelOutput = llm.create_chat_completion(
-                prompt
-                #max_tokens=max_tokens,
-                #temperature=temperature,
-                #top_p=top_p,
-                #echo=echoVal,
-                #stop=stop,
-            )
-            result = modelOutput["choices"][0]["message"]["content"]
+            with st.spinner('Generating Completion (but slowly. 40+ seconds.)...'):
+                modelOutput = llm.create_chat_completion(
+                    prompt
+                    #max_tokens=max_tokens,
+                    #temperature=temperature,
+                    #top_p=top_p,
+                    #echo=echoVal,
+                    #stop=stop,
+                )
+                if modelOutput != "":
+                    result = modelOutput["choices"][0]["message"]["content"]
+                else:
+                    result = "No result returned."
         #result = str(modelOutput)
         logger.debug(f"### llmResult: {result}")
         logger.info("### runLLM exited.")
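
The substance of the change: Streamlit's st.spinner is a context manager, so the removed bare st.spinner(...) call constructed the spinner object without ever rendering it, and the new code also guards against an empty model response before indexing into it. Below is a minimal standalone sketch of the corrected pattern, assuming a llama-cpp-python Llama handle; the model path and prompt content are hypothetical, not taken from the repo.

```python
import streamlit as st
from llama_cpp import Llama

# Hypothetical model path; app.py loads its own GGUF model elsewhere.
llm = Llama(model_path="./models/model.gguf")

# Chat-style prompt: llama-cpp-python's create_chat_completion takes a
# list of role/content messages as its first argument.
prompt = [{"role": "user", "content": "Summarize the retrieved context."}]

# st.spinner must be entered with `with`; a bare st.spinner(...) call
# creates the widget object but never displays it while the model runs.
with st.spinner('Generating Completion (but slowly. 40+ seconds.)...'):
    modelOutput = llm.create_chat_completion(prompt)

# Guard against an empty response before indexing into it. The commit
# compares against an empty string; a plain truthiness check also
# covers None and an empty dict.
if modelOutput:
    result = modelOutput["choices"][0]["message"]["content"]
else:
    result = "No result returned."

st.write(result)
```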