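"""Streamlit app that maps internal procedure descriptions to Saudi Billing System (SBS) codes.

Candidate SBS long descriptions are ranked by sentence-embedding similarity, and a small
instruct model is then asked which shortlisted description fits the input best.
"""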
import streamlit as st
import pandas as pd
from io import StringIO
import json
import os
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM #AutoModelForTokenClassification
from sentence_transformers import SentenceTransformer, util
#import lmdeploy
#import turbomind as tm
from huggingface_hub import login

# Hugging Face Hub authentication; the token is assumed to be supplied as a
# Space secret / environment variable named "sbsmapper_token"
login(token=os.environ.get("sbsmapper_token"))
def on_click():
    # Clear the text input when the Reset button is pressed
    st.session_state.user_input = ""
#@st.cache
def convert_df(df: pd.DataFrame):
    # Serialize the results table to UTF-8 encoded CSV bytes for the download buttons
    return df.to_csv(index=False).encode('utf-8')
#@st.cache
def convert_json(df: pd.DataFrame):
    # Serialize the results table to a JSON string for the download button
    result = df.to_json(orient="index")
    parsed = json.loads(result)
    json_string = json.dumps(parsed)
    #st.json(json_string, expanded=True)
    return json_string
#st.title("SBS mapper")
INTdesc_input = st.text_input("Type internal description and hit Enter", key="user_input")
createSBScodes, right_column = st.columns(2)
createSBScodes_clicked = createSBScodes.button("Create SBS codes", key="user_createSBScodes")
right_column.button("Reset", on_click=on_click)
numMAPPINGS_input = 5
#numMAPPINGS_input = st.text_input("Type number of mappings and hit Enter", key="user_input_numMAPPINGS")
#st.button("Clear text", on_click=on_click)
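# Bi-encoder that embeds the internal description and the SBS long descriptions
# into the same vector space so candidates can be ranked by cosine similarity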
model = SentenceTransformer('all-MiniLM-L6-v2') # fastest
#model = SentenceTransformer('all-mpnet-base-v2') # best performance
#model = SentenceTransformers('all-distilroberta-v1')
#model = SentenceTransformer('sentence-transformers/msmarco-bert-base-dot-v5')
#model = SentenceTransformer('clips/mfaq')
INTdesc_embedding = model.encode(INTdesc_input)
# Semantic search, Compute cosine similarity between all pairs of SBS descriptions
#df_SBS = pd.read_csv("SBS_V2_Table.csv", index_col="SBS_Code", usecols=["Long_Description"]) # na_values=['NA']
#df_SBS = pd.read_csv("SBS_V2_Table.csv", usecols=["SBS_Code_Hyphenated","Long_Description"])
from_line = 7727 # Imaging services chapter start, adjust as needed
to_line = 8239 # Imaging services chapter end, adjust as needed
nrows = to_line - from_line + 1
skiprows = list(range(1,from_line - 1))
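# skiprows starts at 1 so the header row (row 0) is kept while earlier chapters are skipped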
df_SBS = pd.read_csv("SBS_V2_Table.csv", header=0, skip_blank_lines=False, skiprows=skiprows, nrows=nrows)
#st.write(df_SBS.head(5))
SBScorpus = df_SBS['Long_Description'].values.tolist()
SBScorpus_embeddings = model.encode(SBScorpus)
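# NOTE: the corpus is re-embedded on every Streamlit rerun; caching these embeddings would avoid repeated work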
#my_model_results = pipeline("ner", model= "checkpoint-92")
# util.semantic_search returns one list of hits per query, already sorted by score (descending)
HF_model_results = util.semantic_search(INTdesc_embedding, SBScorpus_embeddings, top_k=numMAPPINGS_input)
HF_model_results_displayed = HF_model_results  # single query, so this holds one list of top hits
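# Small instruct model used afterwards to reason over the shortlisted SBS descriptions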
model_id = "meta-llama/Llama-3.2-1B-Instruct"
pipe = pipeline(
    "text-generation",
    model=model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
col1, col2, col3 = st.columns([1,1,2.5])
col1.subheader("Score")
col2.subheader("SBS code")
col3.subheader("SBS description V2.0")
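# Accumulates one row per suggested mapping for the downloadable results table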
dictA = {"Score": [], "SBS Code": [], "SBS Description V2.0": []}
if INTdesc_input and createSBScodes_clicked:
    for result in HF_model_results_displayed:
        with st.container():
            # Show each of the top hits and collect it as a row for the results table
            for hit in result[:numMAPPINGS_input]:
                score = "%.4f" % hit["score"]
                description = SBScorpus[hit["corpus_id"]]
                code = df_SBS.loc[df_SBS["Long_Description"] == description, "SBS_Code_Hyphenated"].values[0]
                col1.write(score)
                col2.write(code)
                col3.write(description)
                dictA["Score"].append(score)
                dictA["SBS Code"].append(code)
                dictA["SBS Description V2.0"].append(description)
    dfA = pd.DataFrame.from_dict(dictA)
    display_format = "ask REASONING MODEL: Which, if any, of the above Saudi Billing System descriptions corresponds best to " + INTdesc_input + "? "
    st.write(display_format)
    question = "Which, if any, of the below Saudi Billing System descriptions corresponds best to " + INTdesc_input + "? "
    shortlist = [SBScorpus[hit["corpus_id"]] for hit in result[:numMAPPINGS_input]]
    prompt = [question + " " + " ".join(shortlist)]
    st.write(prompt)
    # Send the mapping question and the shortlisted SBS descriptions to the instruct model
    messages = [
        {"role": "user", "content": prompt[0]},
    ]
    outputs = pipe(
        messages,
        max_new_tokens=256,
    )
    # The last chat turn in generated_text is the model's reply
    st.write(outputs[0]["generated_text"][-1])
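    # Offer the mapping suggestions for download as CSV, plain text, or JSON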
    bs, b1, b2, b3, bLast = st.columns([0.75, 1.5, 1.5, 1.5, 0.75])
    with b1:
        #csvbutton = download_button(results, "results.csv", "📥 Download .csv")
        csvbutton = st.download_button(label="📥 Download .csv", data=convert_df(dfA), file_name="results.csv", mime='text/csv', key='csv_b')
    with b2:
        #textbutton = download_button(results, "results.txt", "📥 Download .txt")
        textbutton = st.download_button(label="📥 Download .txt", data=convert_df(dfA), file_name="results.txt", mime='text/plain', key='text_b')
    with b3:
        #jsonbutton = download_button(results, "results.json", "📥 Download .json")
        jsonbutton = st.download_button(label="📥 Download .json", data=convert_json(dfA), file_name="results.json", mime='application/json', key='json_b')