Upload app.py
app.py
ADDED
@@ -0,0 +1,89 @@
import io
import pickle
import sys
import pandas as pd
import numpy as np
import streamlit as st
# from sentence_transformers import SentenceTransformer
# import sentence_transformers
# import torch
#######################################

# Widen the main block and scale images via custom CSS
st.markdown(
    f"""
    <style>
    .reportview-container .main .block-container{{
        max-width: 90%;
        padding-top: 5rem;
        padding-right: 5rem;
        padding-left: 5rem;
        padding-bottom: 5rem;
    }}
    img{{
        max-width: 40%;
        margin-bottom: 40px;
    }}
    </style>
    """,
    unsafe_allow_html=True,
)

# # let's load the saved model
# loaded_model = pickle.load(open('XpathFinder1.sav', 'rb'))

# class CPU_Unpickler(pickle.Unpickler):
#     """Unpickler that remaps torch storages to CPU so a GPU-trained model loads on CPU."""
#     def find_class(self, module, name):
#         if module == 'torch.storage' and name == '_load_from_bytes':
#             return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
#         else:
#             return super().find_class(module, name)
#
# loaded_model = CPU_Unpickler(open('XpathFinder1.sav', 'rb')).load()

# Containers
header_container = st.container()
mod_container = st.container()

# Header
with header_container:
    st.title("Xpath Finder App")

# Model container
with mod_container:
    # collect input from the user
    prompt = st.text_input("Enter your description below ...")

    # Load the data and drop the stray index column
    data = pd.read_csv("SBERT_data.csv").drop(['Unnamed: 0'], axis=1)

    data['prompt'] = prompt
    data.rename(columns={'target_text': 'sentence2',
                         'prompt': 'sentence1'}, inplace=True)
    data['sentence2'] = data['sentence2'].astype('str')
    data['sentence1'] = data['sentence1'].astype('str')

    # Once the user has typed something, score the prompt against every candidate
    if prompt:
        from sentence_transformers.cross_encoder import CrossEncoder
        XpathFinder = CrossEncoder("cross-encoder/stsb-roberta-base")

        # Build (prompt, candidate) pairs for the cross-encoder
        sentence_pairs = []
        for sentence1, sentence2 in zip(data['sentence1'], data['sentence2']):
            sentence_pairs.append([sentence1, sentence2])

        # One similarity score per pair
        data['SBERT CrossEncoder_Score'] = XpathFinder.predict(sentence_pairs)
        simscore = data['SBERT CrossEncoder_Score'].max()

        # Sort so the highest scoring xpaths come first
        data = data.sort_values(by='SBERT CrossEncoder_Score', ascending=False)
        most_acc = data.head(5)

        # predictions
        st.write("Highest similarity score: ", simscore)
        st.text("Is one of these the Xpath you're looking for?")
        st.write(most_acc["input_text"])