Update pages/type_text.py

pages/type_text.py (changed, +26 -26)
@@ -147,6 +147,32 @@ def load_model():
 
 model = load_model()
 
+
+## Define the Reasoning models
+rs_models = {
+    '(medium speed) original model for general domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
+    '(slower speed) original model for general domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
+    '(medium speed) original model for general domain: EpistemeAI/ReasoningCore-1B-r1-0': 'EpistemeAI/ReasoningCore-1B-r1-0',
+    '(expected in future) fine-tuned model for medical domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
+    '(expected in future) fine-tuned model for medical domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
+}
+
+## Create the select Reasoning box
+selected_rs_model = st.selectbox('Reasoning model:', list(rs_models.keys())) # or 'Choose a Reasoning Model'
+#st.write("Current selection:", selected_rs_model)
+
+## Get the selected Reasoning model
+Reasoning_model = rs_models[selected_rs_model]
+
+## Load the Reasoning model as pipeline ...
+@st.cache_resource
+def load_pipe():
+    pipe = pipeline("text-generation", model=Reasoning_model, device_map=device,) # device_map="auto", torch_dtype=torch.bfloat16
+    return pipe
+
+pipe = load_pipe()
+
+
 # Semantic search, Compute cosine similarity between INTdesc_embedding and SBS descriptions
 INTdesc_embedding = model.encode(INTdesc_input)
 SBScorpus_embeddings = model.encode(SBScorpus)
@@ -175,32 +201,6 @@ if INTdesc_input and st.button(":blue[Map to SBS codes]", key="run_st_model"): #
 #st.markdown('<div id="bottom"></div>', unsafe_allow_html=True)
 #components.html(scroll_script, height=0, width=0)
 
-
-## Define the Reasoning models
-rs_models = {
-    '(medium speed) original model for general domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
-    '(slower speed) original model for general domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
-    '(medium speed) original model for general domain: EpistemeAI/ReasoningCore-1B-r1-0': 'EpistemeAI/ReasoningCore-1B-r1-0',
-    '(expected in future) fine-tuned model for medical domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
-    '(expected in future) fine-tuned model for medical domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
-}
-
-## Create the select Reasoning box
-selected_rs_model = st.selectbox('Reasoning model:', list(rs_models.keys())) # or 'Choose a Reasoning Model'
-#st.write("Current selection:", selected_rs_model)
-
-## Get the selected Reasoning model
-Reasoning_model = rs_models[selected_rs_model]
-
-## Load the Reasoning model as pipeline ...
-@st.cache_resource
-def load_pipe():
-    pipe = pipeline("text-generation", model=Reasoning_model, device_map=device,) # device_map="auto", torch_dtype=torch.bfloat16
-    return pipe
-
-pipe = load_pipe()
-
-
 display_format = "ask REASONING MODEL: Which, if any, of the following SBS descriptions corresponds best to " + INTdesc_input +"? "
 #st.write(display_format)
 question = "Which one, if any, of the following Saudi Billing System descriptions A, B, C, D, or E corresponds best to " + INTdesc_input +"? "
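For context on the unchanged lines around the insertion point: the sentence-transformers model encodes both the free-text input and the SBS corpus, and candidates are ranked by cosine similarity. A minimal, self-contained sketch of that step using sentence-transformers' util.semantic_search; the model name and corpus strings below are placeholders rather than the app's real data, and top_k=5 mirrors the five candidate descriptions A-E referred to later in the file:

from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # placeholder model

SBScorpus = [
    "placeholder SBS description 1",
    "placeholder SBS description 2",
    "placeholder SBS description 3",
    "placeholder SBS description 4",
    "placeholder SBS description 5",
]
INTdesc_input = "placeholder free-text procedure description"

INTdesc_embedding = model.encode(INTdesc_input)
SBScorpus_embeddings = model.encode(SBScorpus)

# Cosine-similarity search over the corpus; one result list per query.
hits = util.semantic_search(INTdesc_embedding, SBScorpus_embeddings, top_k=5)[0]
for hit in hits:
    print(SBScorpus[hit["corpus_id"]], round(hit["score"], 3))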
|