Update pages/type_text.py
pages/type_text.py  +3 -3
@@ -129,7 +129,7 @@ SentTrans_model = st_models[selected_st_model]
 rs_models = {
     '(medium speed) original model for general domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
     '(slower speed) original model for general domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
-    '(
+    '(medium speed) original model for general domain: EpistemeAI/ReasoningCore-1B-r1-0': 'EpistemeAI/ReasoningCore-1B-r1-0',
     '(expected in future) fine-tuned model for medical domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
     '(expected in future) fine-tuned model for medical domain: Qwen/Qwen2-1.5B-Instruct': 'Qwen/Qwen2-1.5B-Instruct',
 }
@@ -220,10 +220,10 @@ if INTdesc_input is not None and st.button("Map to SBS codes", key="run_st_model
     progress_bar = st.progress(0)
     status_text = st.empty()
     for pct_complete in range(1,101):
-        time.sleep(
+        time.sleep(3.0)
         progress_bar.progress(pct_complete)
         status_text.write("Progress: {}/100".format(pct_complete))
-        status_text.
+        status_text.warning("It may take several minutes for Reasoning Model to analyze above 5 options and output the results below.")
     #progress_bar.empty()

     outputs = pipe(
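For orientation, a minimal sketch of how the pieces touched by this commit could fit together in a Streamlit page. The selectbox label, the text-generation pipeline setup, the prompt, max_new_tokens, and the placement of the warning are illustrative assumptions, not taken from type_text.py.

import time
import streamlit as st
from transformers import pipeline

# Display label -> model id, as in the hunk above (abridged).
rs_models = {
    '(medium speed) original model for general domain: meta-llama/Llama-3.2-1B-Instruct': 'meta-llama/Llama-3.2-1B-Instruct',
    '(medium speed) original model for general domain: EpistemeAI/ReasoningCore-1B-r1-0': 'EpistemeAI/ReasoningCore-1B-r1-0',
}

# Hypothetical selector and pipeline construction; the real page may differ.
selected_rs_model = st.selectbox("Reasoning model", list(rs_models.keys()))
pipe = pipeline("text-generation", model=rs_models[selected_rs_model])

# Persistent notice while the user waits (placement here is illustrative).
st.warning("It may take several minutes for Reasoning Model to analyze above 5 options and output the results below.")

progress_bar = st.progress(0)
status_text = st.empty()
for pct_complete in range(1, 101):
    time.sleep(3.0)  # 100 steps x 3.0 s: roughly five minutes of simulated progress
    progress_bar.progress(pct_complete)
    status_text.write("Progress: {}/100".format(pct_complete))

# Assumed call shape; the diff only shows the opening of `outputs = pipe(`.
outputs = pipe("Choose the best matching SBS code for the given description.", max_new_tokens=256)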