MVPilgrim committed
Commit 9378914 · 1 Parent(s): 51e6285
debug

Files changed:
- requirements.txt +1 -20
- semsearchDbgUI.py +33 -97
requirements.txt
CHANGED
@@ -1,21 +1,2 @@
-
-sentence-transformers
-langchain
-langchain_community
-lxml
-beautifulsoup4
+streamlit
 
-transformers==4.34.1
-fastapi==0.103.2
-uvicorn==0.23.2
-nltk==3.8.1
-torch==2.0.1
-sentencepiece==0.1.99
-sentence-transformers==2.2.2
-optimum==1.13.2
-onnxruntime==1.16.1
-onnx==1.14.1
-
-ipywidgets
-#ipykernel
-#ipython
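The change strips the retrieval and model stack (sentence-transformers, langchain, transformers, fastapi, torch, the ONNX tooling, ipywidgets) and leaves streamlit as the only declared dependency, matching the switch from an ipywidgets notebook UI to a Streamlit script below. A minimal sketch of the entry point this single dependency supports, reusing the page-header markup added in semsearchDbgUI.py (the "st" alias is an assumption; the commit imports the bare module):

    import streamlit as st  # "as st" is assumed; the commit adds a bare "import streamlit"

    # Header markup taken from the st.markdown lines added in semsearchDbgUI.py
    st.markdown("<h1 style='text-align: center; color: #666666;'>Vector Database RAG Proof of Concept</h1>",
                unsafe_allow_html=True)
    st.markdown("<h6 style='text-align: center; color: #666666;'>V1</h6>", unsafe_allow_html=True)

On a Space such a file is typically launched with "streamlit run semsearchDbgUI.py".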
semsearchDbgUI.py
CHANGED
@@ -1,25 +1,9 @@
-import
-from weaviate.connect import ConnectionParams
-from weaviate.classes.init import AdditionalConfig, Timeout
-
-from sentence_transformers import SentenceTransformer
-from langchain_community.document_loaders import BSHTMLLoader
-from pathlib import Path
-from lxml import html
+import streamlit
 import logging
-from semantic_text_splitter import HuggingFaceTextSplitter
-from tokenizers import Tokenizer
-import json
-import os
-import re
-import logging
-
-import llama_cpp
-from llama_cpp import Llama
 
-import ipywidgets as widgets
-from IPython.display import display, clear_output
 
+st.markdown("<h1 style='text-align: center; color: #666666;'>Vector Database RAG Proof of Concept</h1>", unsafe_allow_html=True)
+st.markdown("<h6 style='text-align: center; color: #666666;'>V1</h6>", unsafe_allow_html=True)
 
 logger = logging.getLogger(__name__)
 logging.basicConfig(level=logging.INFO)
@@ -31,57 +15,33 @@ logging.basicConfig(level=logging.INFO)
 #
 logger.info("#### MAINLINE ENTERED.")
 
-    [lines 34-60: 27 deleted lines, apparently the remaining ipywidgets Textarea definitions, not captured in this view]
-    layout=widgets.Layout(width='580px', height='180px')
-)
-
-responseTextArea = widgets.Textarea(
-    value='',
-    placeholder='LLM generated response.',
-    description='LLM Resp: ',
-    disabled=False,
-    layout=widgets.Layout(width='780px', height='200px')
-)
-
-selectRag = widgets.Checkbox(
-    value=False,
-    description='Use RAG',
-    disabled=False
-)
-
-submitButton = widgets.Button(
-    description='Run Model.',
-    disabled=False,
-    button_style='', # 'success', 'info', 'warning', 'danger' or ''
-    tooltip='Click',
-    icon='check' # (FontAwesome names without the `fa-` prefix)
-)
+
+
+systemTextArea = st.empty()
+userTextArea = st.empty()
+ragPromptTextArea = st.empty()
+responseTextArea = st.empty()
+selectRag = st.checkbox("Enable Query With RAG", \
+    value=False, \
+    key="selectRag", \
+    help=None, \
+    on_change=None, \
+    args=None, \
+    kwargs=None, \
+    *, \
+    disabled=False, \
+    label_visibility="visible" \
+)
+submitButton = st.button(label, \
+    key=None, \
+    help=None, \
+    on_click=None, \
+    args=None, \
+    kwargs=None, \
+    *, type="secondary", \
+    disabled=False, \
+    use_container_width=False \
+)
 
 
 # Display UI
@@ -94,35 +54,11 @@ display(selectRag)
 display(submitButton)
 
 def runLLM(prompt):
-
-    temperature = 0.3
-    top_p = 0.1
-    echo = True
-    stop = ["Q", "\n"]
-
-    modelOutput = llm(
-        prompt,
-        max_tokens=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        echo=echo,
-        stop=stop,
-    )
-    result = modelOutput["choices"][0]["text"].strip()
+    result = ""
     return(result)
 
 def setPrompt(pprompt,ragFlag):
-
-    if ragFlag:
-        ragPrompt = getRagData(pprompt)
-        userPrompt = pprompt + "\n" + ragPrompt
-        prompt = userPrompt
-        userPrompt = "Using this information: " + ragPrompt \
-            + "process the following statement or question and produce a a response" \
-            + intialPrompt
-    else:
-        userPrompt = pprompt
-    #prompt = f""" <s> [INST] <<SYS>> {systemTextArea.value} </SYS>> Q: {userPrompt} A: [/INST]"""
+    userPrompt = ""
     return userPrompt
 
 
@@ -132,7 +68,7 @@ def on_submitButton_clicked(b):
     ragPromptTextArea.value = ""
     responseTextArea.value = ""
     log.debug(f"### selectRag: {selectRag.value}")
-    prompt = setPrompt(userTextArea.value,selectRag.value)
+    prompt = setPrompt(userTextArea.value,selectRag.value)user
     log.debug("### prompt: " + prompt)
     runLLM(prompt)
 
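With runLLM and setPrompt reduced to stubs, the surrounding context lines still use ipywidgets idioms: display(...) calls, .value reads on the text areas, and an on_submitButton_clicked callback that logs through log even though the logger is bound to the name logger. Under Streamlit the script reruns top to bottom on every interaction and st.button returns True only on the rerun triggered by the click, so a callback is usually replaced by a plain if block. A hedged sketch of how the submit flow could be wired (the stubs are kept as in the commit; widget choices and names outside the diff are assumptions):

    import logging
    import streamlit as st  # assumed alias

    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)

    def runLLM(prompt):
        # stubbed in this commit: the llama_cpp call was removed while debugging
        result = ""
        return result

    def setPrompt(pprompt, ragFlag):
        # stubbed in this commit: the RAG branch was removed while debugging
        userPrompt = ""
        return userPrompt

    userText = st.text_area("User prompt", value="")            # assumed widget
    selectRag = st.checkbox("Enable Query With RAG", value=False)

    # st.button is True only on the rerun caused by the click, so the former
    # on_submitButton_clicked callback becomes a plain conditional block.
    if st.button("Run Model."):
        logger.debug(f"### selectRag: {selectRag}")
        prompt = setPrompt(userText, selectRag)
        logger.debug("### prompt: " + prompt)
        st.write(runLLM(prompt))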