georad committed on
Commit
8a5e22e
·
verified ·
1 Parent(s): fe8d957

Update pages/type_text.py

Browse files
Files changed (1) hide show
  1. pages/type_text.py +2 -6
pages/type_text.py CHANGED
@@ -5,7 +5,6 @@ import json
5
  import torch
6
  from transformers import pipeline # AutoTokenizer, AutoModelForCausalLM, AutoModelForTokenClassification
7
  from sentence_transformers import SentenceTransformer, util
8
- #from "/home/user/app/pages/chapter_index.py" import selected_chapters_list
9
 
10
  import os
11
  os.getenv("HF_TOKEN")
@@ -14,6 +13,7 @@ for k, v in st.session_state.items():
14
  st.session_state[k] = v
15
  selected_chapters_floatlist = list(st.session_state.items())[0][1]
16
  selected_chapters_list = [int(i) for i in selected_chapters_floatlist]
 
17
 
18
  def get_device_map() -> str:
19
  return 'cuda' if torch.cuda.is_available() else 'cpu'
@@ -44,8 +44,6 @@ numMAPPINGS_input = 5
44
  #numMAPPINGS_input = st.text_input("Type number of mappings and hit Enter", key="user_input_numMAPPINGS")
45
  #st.button("Clear text", on_click=on_click)
46
 
47
- st.write("SELECTED CHAPTERS: ", selected_chapters_list)
48
-
49
  @st.cache_resource
50
  def load_model():
51
  model = SentenceTransformer('all-MiniLM-L6-v2') # fastest
@@ -62,11 +60,9 @@ INTdesc_embedding = model.encode(INTdesc_input)
62
 
63
  # Semantic search, Compute cosine similarity between all pairs of SBS descriptions
64
 
65
- #df_chapters = filter_chapters_env(df_chapters, "chapter_name")
66
-
67
  #df_SBS = pd.read_csv("SBS_V2_Table.csv", index_col="SBS_Code", usecols=["Long_Description"]) # na_values=['NA']
68
  #df_SBS = pd.read_csv("SBS_V2_Table.csv", usecols=["SBS_Code_Hyphenated","Long_Description"])
69
- from_line = 7727 # Imaging services chapter start, adjust as needed
70
  to_line = 8239 # Imaging services chapter end, adjust as needed
71
  nrows = to_line - from_line + 1
72
  skiprows = list(range(1,from_line - 1))
 
5
  import torch
6
  from transformers import pipeline # AutoTokenizer, AutoModelForCausalLM, AutoModelForTokenClassification
7
  from sentence_transformers import SentenceTransformer, util
 
8
 
9
  import os
10
  os.getenv("HF_TOKEN")
 
13
  st.session_state[k] = v
14
  selected_chapters_floatlist = list(st.session_state.items())[0][1]
15
  selected_chapters_list = [int(i) for i in selected_chapters_floatlist]
16
+ st.write("SELECTED CHAPTERS: ", selected_chapters_list)
17
 
18
  def get_device_map() -> str:
19
  return 'cuda' if torch.cuda.is_available() else 'cpu'
 
44
  #numMAPPINGS_input = st.text_input("Type number of mappings and hit Enter", key="user_input_numMAPPINGS")
45
  #st.button("Clear text", on_click=on_click)
46
 
 
 
47
  @st.cache_resource
48
  def load_model():
49
  model = SentenceTransformer('all-MiniLM-L6-v2') # fastest
 
60
 
61
  # Semantic search, Compute cosine similarity between all pairs of SBS descriptions
62
 
 
 
63
  #df_SBS = pd.read_csv("SBS_V2_Table.csv", index_col="SBS_Code", usecols=["Long_Description"]) # na_values=['NA']
64
  #df_SBS = pd.read_csv("SBS_V2_Table.csv", usecols=["SBS_Code_Hyphenated","Long_Description"])
65
+ from_line = 7726 # Imaging services chapter start, adjust as needed
66
  to_line = 8239 # Imaging services chapter end, adjust as needed
67
  nrows = to_line - from_line + 1
68
  skiprows = list(range(1,from_line - 1))