Spaces:
Running
Running
MVPilgrim
committed on
Commit
·
f0cb266
1
Parent(s):
22e0272
8vcpu invalid instruction
Browse files
app.py
CHANGED
@@ -51,7 +51,7 @@ try:
|
|
51 |
try:
|
52 |
subprocess.Popen(["/app/startup.sh"])
|
53 |
# Wait for text2vec-transformers and Weaviate DB to initialize.
|
54 |
-
time.sleep(
|
55 |
#subprocess.run(["/app/cmd.sh 'ps -ef'"])
|
56 |
except Exception as e:
|
57 |
emsg = str(e)
|
@@ -61,7 +61,7 @@ try:
|
|
61 |
st.session_state.runStartup = False
|
62 |
if 'runStartup' not in st.session_state:
|
63 |
logger.info("### runStartup still not in st.session_state after setting variable.")
|
64 |
-
with st.spinner('
|
65 |
runStartup()
|
66 |
try:
|
67 |
logger.info("### Displaying /app/startup.log")
|
@@ -509,10 +509,11 @@ try:
|
|
509 |
echoVal = True
|
510 |
stop = ["Q", "\n"]
|
511 |
|
512 |
-
st.markdown("<h1 style='text-align: center; color: #666666;'>LLM with RAG Prompting <br style='page-break-after: always;'>Proof of Concept</h1>",
|
513 |
-
unsafe_allow_html=True)
|
514 |
modelOutput = ""
|
515 |
-
with st.spinner('Generating Completion (but slowly)...'):
|
|
|
|
|
|
|
516 |
modelOutput = llm.create_chat_completion(
|
517 |
prompt
|
518 |
#max_tokens=max_tokens,
|
|
|
51 |
try:
|
52 |
subprocess.Popen(["/app/startup.sh"])
|
53 |
# Wait for text2vec-transformers and Weaviate DB to initialize.
|
54 |
+
time.sleep(10)
|
55 |
#subprocess.run(["/app/cmd.sh 'ps -ef'"])
|
56 |
except Exception as e:
|
57 |
emsg = str(e)
|
|
|
61 |
st.session_state.runStartup = False
|
62 |
if 'runStartup' not in st.session_state:
|
63 |
logger.info("### runStartup still not in st.session_state after setting variable.")
|
64 |
+
with st.spinner('If needed, initialize Weaviate DB and text2vec-transformer...'):
|
65 |
runStartup()
|
66 |
try:
|
67 |
logger.info("### Displaying /app/startup.log")
|
|
|
509 |
echoVal = True
|
510 |
stop = ["Q", "\n"]
|
511 |
|
|
|
|
|
512 |
modelOutput = ""
|
513 |
+
#with st.spinner('Generating Completion (but slowly. 40+ seconds.)...'):
|
514 |
+
with st.markdown("<h1 style='text-align: center; color: #666666;'>LLM with RAG Prompting <br style='page-break-after: always;'>Proof of Concept</h1>",
|
515 |
+
unsafe_allow_html=True):
|
516 |
+
st.spinner('Generating Completion (but slowly. 40+ seconds.)...')
|
517 |
modelOutput = llm.create_chat_completion(
|
518 |
prompt
|
519 |
#max_tokens=max_tokens,
|