[BETA] Add GlobalPlatform support

app.py CHANGED

@@ -6,10 +6,22 @@ from nltk.stem import WordNetLemmatizer
 from dotenv import load_dotenv
 from sklearn.preprocessing import MinMaxScaler
 
-
+from bs4 import BeautifulSoup
+import requests
+from urllib.parse import parse_qs, urlparse
+
 warnings.filterwarnings('ignore')
 nltk.download('wordnet')
 load_dotenv()
+os.environ['CURL_CA_BUNDLE'] = ""
+
+from huggingface_hub import configure_http_backend
+def backend_factory() -> requests.Session:
+    session = requests.Session()
+    session.verify = False
+    return session
+
+configure_http_backend(backend_factory=backend_factory)
 
 from datasets import load_dataset
 import bm25s
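
A note on this hunk: `configure_http_backend` routes every huggingface_hub request through the factory's Session, so `session.verify = False` disables TLS certificate checks for all hub traffic, not only the GlobalPlatform scrape; the emptied `CURL_CA_BUNDLE` has a similar effect on plain `requests` calls, since requests treats a falsy CA-bundle value as "don't verify". A minimal standalone sketch of the pattern, with the resulting urllib3 warning silenced explicitly (the `disable_warnings` call is my addition, not in the commit):

import requests
import urllib3
from huggingface_hub import configure_http_backend

# verify=False makes urllib3 warn on every request; silencing just that
# warning is narrower than the global warnings filter used above.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def backend_factory() -> requests.Session:
    session = requests.Session()
    session.verify = False  # skip TLS certificate validation for hub traffic
    return session

configure_http_backend(backend_factory=backend_factory)
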
@@ -23,9 +35,6 @@ from fastapi.staticfiles import StaticFiles
 from schemas import *
 from classes import *
 
-from bs4 import BeautifulSoup
-import requests
-
 lemmatizer = WordNetLemmatizer()
 
 spec_metadatas_3gpp = load_dataset("OrganizedProgrammers/3GPPSpecMetadata")
@@ -41,9 +50,55 @@ spec_contents_etsi = spec_contents_etsi["train"].to_list()
 spec_metadatas_etsi = spec_metadatas_etsi["train"].to_list()
 tdoc_locations = tdoc_locations_3gpp["train"].to_list()
 
-bm25_index_3gpp = BM25HF.load_from_hub("OrganizedProgrammers/3GPPBM25IndexSingle", load_corpus=True, token=os.environ["HF_TOKEN"])
+bm25_index_3gpp = BM25HF.load_from_hub("OrganizedProgrammers/3GPPBM25IndexSingle", load_corpus=True, token=os.environ["HF_TOKEN"], )
 bm25_index_etsi = BM25HF.load_from_hub("OrganizedProgrammers/ETSIBM25IndexSingle", load_corpus=True, token=os.environ["HF_TOKEN"])
 
+def extract_args_and_map(href):
+    if not href or not href.lower().startswith('javascript:'):
+        return None
+    js = href[len('javascript:'):].strip()
+    m = re.match(r'\w+\((.*)\)', js)
+    if not m:
+        return None
+    args_str = m.group(1).strip()
+    parts = [part.strip() for part in args_str.split(',', 1)]
+    if len(parts) != 2:
+        return None
+    try:
+        media_id = int(parts[0])
+    except ValueError:
+        return None
+    spec_type = parts[1].strip()
+    if (spec_type.startswith("'") and spec_type.endswith("'")) or (spec_type.startswith('"') and spec_type.endswith('"')):
+        spec_type = spec_type[1:-1]
+
+    return media_id, spec_type
+
+url = "https://globalplatform.org/wp-content/themes/globalplatform/ajax/specs-library.php"
+headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"}
+resp = requests.post(url, verify=False, headers=headers)
+soup = BeautifulSoup(resp.text, 'html.parser')
+
+panels = soup.find_all('div', class_='panel panel-default')
+gp_spec_locations = {}
+for panel in panels:
+    header = ''.join([t for t in panel.find('a').children if t.name is None]).strip()
+    try:
+        title, doc_id = header.split(' | ')
+        panel_body = panel.find('div', class_='panel-body')
+
+        download_btn_href = panel_body.find_all('a', href=lambda href: href and href.strip().lower().startswith('javascript:'))[0]
+        media_id, spec_type = extract_args_and_map(download_btn_href['href'])
+
+        changes_history = panel.find_all('div', class_="row")
+        paragraphs_ch = [version.find('p').text for version in changes_history][::-1]
+        document_commits = []
+        for version in range(len(paragraphs_ch)):
+            document_commits.append(f"Version {version + 1} : {paragraphs_ch[version]}")
+        gp_spec_locations[doc_id] = {"title": title, "file_id": media_id, "committee": spec_type, "summary": "\n".join(document_commits)}
+    except:
+        continue
+
 def get_docs_from_url(url):
     """Get list of documents/directories from a URL"""
    try:
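
For context on what `extract_args_and_map` expects: each download button on the GlobalPlatform specs-library page carries a `javascript:` href whose two call arguments are the numeric media id and the quoted spec type, and each successfully parsed panel lands in `gp_spec_locations` keyed by document ID with its `title`, `file_id`, `committee`, and version-history `summary`. The hrefs below are hypothetical (the real on-page function name may differ); the parser only relies on the generic `name(int, 'str')` shape:

# Hypothetical hrefs; extract_args_and_map only needs the name(int, 'str') shape.
assert extract_args_and_map("javascript:openDownload(4821, 'GPC')") == (4821, 'GPC')
assert extract_args_and_map("javascript:openDownload(4821)") is None   # one argument: rejected
assert extract_args_and_map("/specs/direct.pdf") is None               # not a javascript: href
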
@@ -74,6 +129,22 @@ def get_document(spec_id: str, spec_title: str, source: str):
         text.extend([section['section'], section['content']])
     return text
 
+def get_gp_spec_url(data):
+    file_id = data['file_id']
+    spec_type = data['committee']
+
+    url = "https://globalplatform.org/wp-content/themes/globalplatform/ajax/download-spec-submit.php"
+    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"}
+    resp = requests.post(url, verify=False, headers=headers, data={"first_name": "", "last_name": "", "company": "", "email": "", "media_id": file_id, "spec_type": spec_type, "agree": "true"})
+
+    r = resp.text
+    mat = re.search(r"window\.location\.href\s*=\s*'([^']+)'", r)
+    if mat:
+        full_url = mat.group(1)
+        parsed_url = urlparse(full_url)
+        query_params = parse_qs(parsed_url.query)
+        return query_params.get('f')[0]
+
 tags_metadata = [
     {
         "name": "Document Retrieval",
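
`get_gp_spec_url` works because the download endpoint replies with a small script that redirects via `window.location.href`, and the real file URL travels in the redirect's `f` query parameter. A rough usage sketch; the key is illustrative (actual keys come from the scrape above), and the function returns None when no redirect is found:

# "GPC_SPE_034" is illustrative; gp_spec_locations keys are scraped from the live page.
entry = gp_spec_locations.get("GPC_SPE_034")
if entry is not None:
    file_url = get_gp_spec_url(entry)  # None when the response has no redirect
    print(entry["title"], "->", file_url)
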
@@ -153,11 +224,21 @@ def frontend():
 def find_document(request: DocRequest):
     start_time = time.time()
     document = request.doc_id
+    if valid_3gpp_doc_format.match(document):
+        url = get_tdoc_url(document)
+    elif valid_3gpp_spec_format.match(document):
+        url = get_spec_url(document)
+    elif valid_etsi_doc_format.match(document):
+        etsi_doc_finder.search_document(document)
+    elif valid_etsi_spec_format.match(document):
+        etsi_spec_finder.search_document(document)
+    elif document.startswith("GP"):
+        for sp in gp_spec_locations:
+            if document.lower() in sp.lower():
+                url = get_gp_spec_url(gp_spec_locations[sp])
+    else:
+        url = "Document ID not supported"
 
-    url = get_tdoc_url(document) if valid_3gpp_doc_format.match(document) else \
-        get_spec_url(document) if valid_3gpp_spec_format.match(document) else \
-        etsi_doc_finder.search_document(document) if valid_etsi_doc_format.match(document) else \
-        etsi_spec_finder.search_document(document) if valid_etsi_spec_format.match(document) else "Document ID not supported"
     if "Specification" in url or "Document" in url:
         raise HTTPException(status_code=404, detail=url)
 
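
One caveat in this hunk: the two ETSI branches call `search_document` without assigning the result, so `url` is unbound when those branches run (the `if "Specification" in url` check below would raise NameError), and the GP branch likewise leaves `url` unset when no key matches. A sketch of the presumably intended assignments (my reconstruction, not part of the commit):

# Sketch (assumption): bind every branch to `url` and default it up front so
# it is always defined before the "Specification"/"Document" check.
url = "Document ID not supported"
if valid_3gpp_doc_format.match(document):
    url = get_tdoc_url(document)
elif valid_3gpp_spec_format.match(document):
    url = get_spec_url(document)
elif valid_etsi_doc_format.match(document):
    url = etsi_doc_finder.search_document(document)
elif valid_etsi_spec_format.match(document):
    url = etsi_spec_finder.search_document(document)
elif document.startswith("GP"):
    for sp in gp_spec_locations:
        if document.lower() in sp.lower():
            url = get_gp_spec_url(gp_spec_locations[sp])
            break  # first matching key wins
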
@@ -203,11 +284,20 @@ def find_document_batch(request: BatchDocRequest):
     missing = []
 
     for document in documents:
-        url = get_tdoc_url(document) if valid_3gpp_doc_format.match(document) else \
-            get_spec_url(document) if valid_3gpp_spec_format.match(document) else \
-            etsi_doc_finder.search_document(document) if valid_etsi_doc_format.match(document) else \
-            etsi_spec_finder.search_document(document) if valid_etsi_spec_format.match(document) else "Document ID not supported"
-
+        if valid_3gpp_doc_format.match(document):
+            url = get_tdoc_url(document)
+        elif valid_3gpp_spec_format.match(document):
+            url = get_spec_url(document)
+        elif valid_etsi_doc_format.match(document):
+            etsi_doc_finder.search_document(document)
+        elif valid_etsi_spec_format.match(document):
+            etsi_spec_finder.search_document(document)
+        elif document.startswith("GP"):
+            for sp in gp_spec_locations:
+                if document.lower() in sp.lower():
+                    url = get_gp_spec_url(gp_spec_locations[sp])
+        else:
+            url = "Document ID not supported"
         if "Specification" in url or "Document" in url:
             missing.append(document)
         else:
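
After this commit, `find_document` and `find_document_batch` carry two copies of the same fourteen-line dispatch. A shared helper would keep the two endpoints in sync; the function below is hypothetical (not in the commit) and folds in the `url` assignment fix noted earlier:

def resolve_document_url(document: str) -> str:
    # Hypothetical helper: one place for the ID-format dispatch used by both endpoints.
    if valid_3gpp_doc_format.match(document):
        return get_tdoc_url(document)
    if valid_3gpp_spec_format.match(document):
        return get_spec_url(document)
    if valid_etsi_doc_format.match(document):
        return etsi_doc_finder.search_document(document)
    if valid_etsi_spec_format.match(document):
        return etsi_spec_finder.search_document(document)
    if document.startswith("GP"):
        for sp in gp_spec_locations:
            if document.lower() in sp.lower():
                return get_gp_spec_url(gp_spec_locations[sp])
    return "Document ID not supported"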