Spaces:
Running
Running
MVPilgrim
committed on
Commit
·
016d98b
1
Parent(s):
ac1f7ec
8vcpu invalid instruction
Browse files- Dockerfile +1 -1
- app.py +2 -0
Dockerfile
CHANGED
@@ -31,7 +31,7 @@ RUN pip install https://files.pythonhosted.org/packages/13/87/e0cb08c2d4bd7d38ab
|
|
31 |
RUN pip show semantic-text-splitter
|
32 |
|
33 |
#RUN pip install llama_cpp_python
|
34 |
-
RUN FORCE_CMAKE=1 CMAKE_SYSTEM_PROCESSOR=AMD64 pip install --verbose --no-cache-dir llama-cpp-python
|
35 |
|
36 |
##############################################################################
|
37 |
# Install Weaviate
|
|
|
31 |
RUN pip show semantic-text-splitter
|
32 |
|
33 |
#RUN pip install llama_cpp_python
|
34 |
+
RUN FORCE_CMAKE=1 CMAKE_SYSTEM_PROCESSOR=AMD64 pip install --verbose --no-cache-dir llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
|
35 |
|
36 |
##############################################################################
|
37 |
# Install Weaviate
|
app.py
CHANGED
@@ -509,6 +509,8 @@ try:
|
|
509 |
echoVal = True
|
510 |
stop = ["Q", "\n"]
|
511 |
|
|
|
|
|
512 |
modelOutput = ""
|
513 |
with st.spinner('Generating Completion (but slowly)...'):
|
514 |
modelOutput = llm.create_chat_completion(
|
|
|
509 |
echoVal = True
|
510 |
stop = ["Q", "\n"]
|
511 |
|
512 |
+
st.markdown("<h1 style='text-align: center; color: #666666;'>LLM with RAG Prompting <br style='page-break-after: always;'>Proof of Concept</h1>",
|
513 |
+
unsafe_allow_html=True)
|
514 |
modelOutput = ""
|
515 |
with st.spinner('Generating Completion (but slowly)...'):
|
516 |
modelOutput = llm.create_chat_completion(
|