Add dump compare app
Browse files- Changelog.md +4 -0
- app.py +2 -1
- apps/dump_compare.py +211 -0
Changelog.md
CHANGED
@@ -1,6 +1,10 @@
|
|
1 |
|
2 |
# CHANGELOGS
|
3 |
|
|
|
|
|
|
|
|
|
4 |
## [0.2.11] - 2025-07-04
|
5 |
|
6 |
- Add FNB parser App
|
|
|
1 |
|
2 |
# CHANGELOGS
|
3 |
|
4 |
+
## [0.2.12] - 2025-07-09
|
5 |
+
|
6 |
+
- Add dump compare App
|
7 |
+
|
8 |
## [0.2.11] - 2025-07-04
|
9 |
|
10 |
- Add FNB parser App
|
app.py
CHANGED
@@ -108,7 +108,7 @@ if check_password():
|
|
108 |
layout="wide",
|
109 |
initial_sidebar_state="expanded",
|
110 |
menu_items={
|
111 |
-
"About": "**📡 NPO DB Query v0.2.
|
112 |
},
|
113 |
)
|
114 |
|
@@ -134,6 +134,7 @@ if check_password():
|
|
134 |
title="📡 Automatic Site Clustering",
|
135 |
),
|
136 |
st.Page("apps/fnb_parser.py", title="📄 F4NB Extractor"),
|
|
|
137 |
st.Page(
|
138 |
"apps/import_physical_db.py", title="🌏Physical Database Verification"
|
139 |
),
|
|
|
108 |
layout="wide",
|
109 |
initial_sidebar_state="expanded",
|
110 |
menu_items={
|
111 |
+
"About": "**📡 NPO DB Query v0.2.12**",
|
112 |
},
|
113 |
)
|
114 |
|
|
|
134 |
title="📡 Automatic Site Clustering",
|
135 |
),
|
136 |
st.Page("apps/fnb_parser.py", title="📄 F4NB Extractor"),
|
137 |
+
st.Page("apps/dump_compare.py", title="📊 Dump Compare"),
|
138 |
st.Page(
|
139 |
"apps/import_physical_db.py", title="🌏Physical Database Verification"
|
140 |
),
|
apps/dump_compare.py
ADDED
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import shutil
|
3 |
+
import tempfile
|
4 |
+
|
5 |
+
import pandas as pd
|
6 |
+
import streamlit as st
|
7 |
+
import xlwings as xw
|
8 |
+
from pyxlsb import open_workbook
|
9 |
+
|
10 |
+
# === Core Logic ===
|
11 |
+
|
12 |
+
|
13 |
+
def find_header_row(df, keyword="Dist_Name"):
    """Return the index of the first row (within the top 20) containing *keyword*.

    The match is case-insensitive and ignores surrounding whitespace.
    Raises ValueError when no matching row is found.
    """
    needle = keyword.lower()
    for idx in range(min(20, len(df))):
        cells = df.iloc[idx].astype(str).str.strip().str.lower()
        if any(needle in str(cell) for cell in cells):
            return idx
    raise ValueError(f"No row with '{keyword}' found.")
19 |
+
|
20 |
+
|
21 |
+
def read_xlsb_with_pyxlsb(file, sheet):
    """Read *sheet* from an .xlsb workbook into a raw, header-less DataFrame."""
    with open_workbook(file) as wb:
        with wb.get_sheet(sheet) as ws:
            data = [[cell.v for cell in row] for row in ws.rows()]
    return pd.DataFrame(data)
28 |
+
|
29 |
+
|
30 |
+
def read_sheet_fallback(file, sheet):
    """Read *sheet* from *file*, preferring pyxlsb and falling back to xlwings.

    Returns a raw, header-less DataFrame. Raises RuntimeError when both
    readers fail.
    """
    try:
        return read_xlsb_with_pyxlsb(file, sheet)
    except Exception:
        app = None
        book = None
        try:
            app = xw.App(visible=False)
            book = app.books.open(file)
            sht = book.sheets[sheet]
            df = sht.used_range.options(pd.DataFrame, header=False, index=False).value
            return df
        except Exception as e2:
            raise RuntimeError(f"xlwings failed: {e2}") from e2
        finally:
            # Always release the Excel process: the original only closed the
            # book/app on success, leaking orphaned Excel instances on error.
            if book is not None:
                book.close()
            if app is not None:
                app.quit()
44 |
+
|
45 |
+
|
46 |
+
def load_clean_df(file, sheet):
    """Load *sheet* from *file* and return a cleaned DataFrame of strings.

    The header row is auto-detected (row containing 'Dist_Name'); rows above
    and including it are discarded, column names are normalized (stripped,
    non-breaking spaces replaced), and every cell is coerced to a stripped
    string.
    """
    df_raw = read_sheet_fallback(file, sheet)
    header_row = find_header_row(df_raw)
    df_raw.columns = df_raw.iloc[header_row]
    # Slice positionally: the original drop(index=list(range(...))) assumed
    # index labels equal positions, which only holds for a default RangeIndex.
    df = df_raw.iloc[header_row + 1:]
    df.columns = [str(c).strip().replace("\xa0", " ") for c in df.columns]
    # NOTE(review): astype(str) turns missing values into the string "nan"/"None",
    # so downstream notna()/isna() checks never fire — confirm this is intended.
    df = df.astype(str).apply(lambda col: col.str.strip())
    return df
54 |
+
|
55 |
+
|
56 |
+
def detect_dist_col(columns):
    """Return the first column whose name contains both 'dist' and 'name' (case-insensitive)."""
    match = next(
        (c for c in columns if "dist" in c.lower() and "name" in c.lower()),
        None,
    )
    if match is None:
        raise ValueError("Dist_Name column not found.")
    return match
61 |
+
|
62 |
+
|
63 |
+
def compare_dumps(old_file, new_file, mo_list, output_dir):
    """Compare parameter values between two dump workbooks, sheet by sheet.

    For each sheet name in *mo_list*, rows are matched on the Dist_Name
    column and cell-level differences (excluding the 'file_name' column)
    are written to '<sheet>_differences.xlsx' inside *output_dir*.

    Returns (total_changes, logs): *logs* is a list of per-sheet status
    messages; per-sheet failures are recorded in logs rather than raised.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Friendly column labels based on file names
    old_label = os.path.basename(old_file)
    new_label = os.path.basename(new_file)

    total_changes = 0
    logs = []

    # Removed: unused enumerate() index and dead commented-out
    # progress-callback plumbing.
    for sheet_name in mo_list:
        try:
            df_old = load_clean_df(old_file, sheet_name)
            df_new = load_clean_df(new_file, sheet_name)

            dist_col_old = detect_dist_col(df_old.columns)
            dist_col_new = detect_dist_col(df_new.columns)

            df_old = df_old[df_old[dist_col_old].notna()].set_index(dist_col_old)
            df_new = df_new[df_new[dist_col_new].notna()].set_index(dist_col_new)

            # Only rows present in both dumps are compared; rows added or
            # removed between dumps are intentionally ignored.
            common = df_old.index.intersection(df_new.index)
            df_old_common = df_old.loc[common]
            df_new_common = df_new.loc[common]

            # NOTE(review): duplicate Dist_Name values would make .loc below
            # return multi-row slices — assumes the key is unique; confirm.
            mask = (df_old_common != df_new_common) & ~(
                df_old_common.isna() & df_new_common.isna()
            )

            changes = []
            for dist in mask.index:
                for param in mask.columns[mask.loc[dist]]:
                    # The bookkeeping column always differs between dumps.
                    if param.strip().lower() == "file_name":
                        continue

                    changes.append(
                        {
                            "Dist_Name": dist,
                            "Parameter": param,
                            old_label: df_old_common.loc[dist, param],
                            new_label: df_new_common.loc[dist, param],
                        }
                    )

            df_changes = pd.DataFrame(changes)
            if not df_changes.empty:
                output_path = os.path.join(output_dir, f"{sheet_name}_differences.xlsx")
                df_changes.to_excel(output_path, index=False)
                logs.append(f"{len(df_changes)} changes in {sheet_name}")
                total_changes += len(df_changes)
            else:
                logs.append(f"No changes in {sheet_name}")

        except Exception as e:
            # Best-effort per sheet: record the failure and keep going.
            logs.append(f"Error in {sheet_name}: {e}")

    return total_changes, logs
129 |
+
|
130 |
+
|
131 |
+
# === Streamlit UI ===
|
132 |
+
|
133 |
+
st.title("📊 Dump Compare Tool")

old_file = st.file_uploader("Upload Old Dump (.xlsb)", type=["xlsb"], key="old")
new_file = st.file_uploader("Upload New Dump (.xlsb)", type=["xlsb"], key="new")

# Determine common sheet names available in BOTH uploaded dumps and let the user pick
common_sheets: list[str] = []
selected_sheets: list[str] = []

if old_file and new_file:
    # Fixed: removed the shadowing re-imports of tempfile and
    # pyxlsb.open_workbook — both are already imported at module level.

    def _get_sheet_names(uploaded_file) -> list[str]:
        """Return sheet names from an `st.uploaded_file` object."""
        # pyxlsb needs a real path, so spill the upload to a temp file.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".xlsb") as tmp:
            tmp.write(uploaded_file.getvalue())
            tmp_path = tmp.name
        try:
            with open_workbook(tmp_path) as wb:
                # `wb.sheets` in pyxlsb already returns a list of sheet names (str)
                return list(wb.sheets)
        finally:
            os.remove(tmp_path)

    common_sheets = sorted(
        set(_get_sheet_names(old_file)).intersection(_get_sheet_names(new_file))
    )

    if common_sheets:
        selected_sheets = st.multiselect(
            "MO Sheet Names (choose one or more)",
            common_sheets,
            default=common_sheets[:1],  # select only the first sheet by default
        )
    else:
        st.warning("No common sheet names found between the two files.")
output_dir = "comparison_output"  # fixed output folder name

if st.button("Run Comparison", type="primary", use_container_width=True):
    if not all([old_file, new_file]) or not selected_sheets:
        st.warning("Please upload both files and select at least one common sheet.")
    else:
        mo_list = selected_sheets
        # Reset file pointers because they may have been consumed while reading sheet names
        old_file.seek(0)
        new_file.seek(0)
        with st.spinner("Comparing dumps..."):
            with tempfile.TemporaryDirectory() as tmpdir:
                output_path = os.path.join(tmpdir, output_dir)
                old_path = os.path.join(tmpdir, "old.xlsb")
                new_path = os.path.join(tmpdir, "new.xlsb")

                # Persist the uploads to disk so the comparison engine can
                # read them by path.
                with open(old_path, "wb") as f:
                    f.write(old_file.read())
                with open(new_path, "wb") as f:
                    f.write(new_file.read())

                # NOTE(review): per-sheet `logs` are computed but never shown
                # to the user — consider rendering them in an st.expander.
                total, logs = compare_dumps(old_path, new_path, mo_list, output_path)

                st.success(f"✅ Comparison completed. Total changes: {total}")

                # Zip and offer download
                shutil.make_archive(output_path, "zip", output_path)
                with open(f"{output_path}.zip", "rb") as f:
                    st.download_button(
                        "Download Results (.zip)",
                        f,
                        file_name="differences.zip",
                        mime="application/zip",
                        type="primary",
                        on_click="ignore",
                    )
|