georad committed
Commit 60511e9 · verified · 1 Parent(s): d388b4c

Update pages/type_text.py

Files changed (1): pages/type_text.py (+26 -22)
pages/type_text.py CHANGED
@@ -139,22 +139,6 @@ selected_st_model = st.selectbox('Current selected Sentence Transformer model:',
139
  ## Get the selected SentTrans model
140
  SentTrans_model = st_models[selected_st_model]
141
 
142
- ## Define the Reasoning models
143
- rs_models = {
144
- '(medium speed) original model for general domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
145
- '(slower speed) original model for general domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
146
- '(medium speed) original model for general domain: EpistemeAI/ReasoningCore-1B-r1-0': 'EpistemeAI/ReasoningCore-1B-r1-0',
147
- '(expected in future) fine-tuned model for medical domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
148
- '(expected in future) fine-tuned model for medical domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
149
- }
150
-
151
- ## Create the select Reasoning box
152
- selected_rs_model = st.selectbox('Current selected Reasoning model:', list(rs_models.keys())) # or 'Choose a Reasoning Model'
153
- #st.write("Current selection:", selected_rs_model)
154
-
155
- ## Get the selected Reasoning model
156
- Reasoning_model = rs_models[selected_rs_model]
157
-
158
  ## Load the Sentence Transformer model ...
159
  @st.cache_resource
160
  def load_model():
@@ -163,13 +147,7 @@ def load_model():
163
 
164
  model = load_model()
165
 
166
- ## Load the Reasoning model as pipeline ...
167
- @st.cache_resource
168
- def load_pipe():
169
- pipe = pipeline("text-generation", model=Reasoning_model, device_map=device,) # device_map="auto", torch_dtype=torch.bfloat16
170
- return pipe
171
 
172
- pipe = load_pipe()
173
 
174
  # Semantic search, Compute cosine similarity between INTdesc_embedding and SBS descriptions
175
  INTdesc_embedding = model.encode(INTdesc_input)
@@ -197,6 +175,32 @@ if INTdesc_input and st.button(":blue[Map to SBS codes]", key="run_st_model"): #
197
 
198
  st.dataframe(data=dfALL, hide_index=True)
199
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
  display_format = "ask REASONING MODEL: Which, if any, of the following SBS descriptions corresponds best to " + INTdesc_input +"? "
201
  #st.write(display_format)
202
  question = "Which one, if any, of the following Saudi Billing System descriptions A, B, C, D, or E corresponds best to " + INTdesc_input +"? "
 
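For context, the semantic-search step the diff leaves in place (`INTdesc_embedding = model.encode(INTdesc_input)`) ranks SBS descriptions by cosine similarity. A minimal sketch of that pattern with sentence-transformers; the model name, the `SBS_descriptions` list, and the example input are stand-ins, not taken from this repo:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")  # stand-in model choice
SBS_descriptions = ["Excision of skin lesion", "Suture of skin laceration"]  # stand-in corpus

INTdesc_embedding = model.encode("removal of a mole")  # stand-in input text
SBS_embeddings = model.encode(SBS_descriptions)

# util.semantic_search returns one ranked hit list per query; each hit is a
# dict with 'corpus_id' and a cosine-similarity 'score'.
hits = util.semantic_search(INTdesc_embedding, SBS_embeddings, top_k=5)[0]
for hit in hits:
    print(SBS_descriptions[hit["corpus_id"]], round(hit["score"], 3))
```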