Commit 9ca7e46
Parent(s): ca3af14
Update second.py
second.py CHANGED
@@ -4,35 +4,16 @@ import csv
 import io
 import matplotlib.pyplot as plt
 import numpy as np
-from pre import
+from pre import preprocess_uploaded_file
 
 
 def double_main(uploaded_file1,uploaded_file2):
     # st.title('Single CSV Analyzer')
 
     if uploaded_file1 is not None and uploaded_file2 is not None:
-
-
-
-        processed_file_1 = io.StringIO(processed_output_1.getvalue())
-        data_1 = load_data(processed_file_1)
-
-        filet_2 = uploaded_file2.read()
-        processed_output_2 = preprocess_csv(filet_2)
-        processed_file_2 = io.StringIO(processed_output_2.getvalue())
-        data_2 = load_data(processed_file_2)
-
-        data_1 = fill_missing_data(data_1, 4, 0)
-        data_1['Start datetime'] = pd.to_datetime(data_1['Start datetime'], errors='coerce')
-        data_1['End datetime'] = pd.to_datetime(data_1['End datetime'], errors='coerce')
-        data_1['Time spent'] = (data_1['End datetime'] - data_1['Start datetime']).dt.total_seconds()
-
-        data_2 = fill_missing_data(data_2, 4, 0)
-        data_2['Start datetime'] = pd.to_datetime(data_2['Start datetime'], errors='coerce')
-        data_2['End datetime'] = pd.to_datetime(data_2['End datetime'], errors='coerce')
-        data_2['Time spent'] = (data_2['End datetime'] - data_2['Start datetime']).dt.total_seconds()
-
-        # Determine which DataFrame is older
+        # Process the csv files with header
+        data_1 = preprocess_uploaded_file(uploaded_file1)
+        data_2 = preprocess_uploaded_file(uploaded_file2)
         if data_1['Start datetime'].min() < data_2['Start datetime'].min():
             older_df = data_1
             newer_df = data_2
@@ -77,7 +58,7 @@ def double_main(uploaded_file1,uploaded_file2):
         fail_to_pass_scenarios = merged_df[(merged_df['Status_old'] == 'FAILED') & (merged_df['Status_new'] == 'PASSED')]
 
         # Display filtered dataframe in Streamlit app
-        st.markdown("### New
+        st.markdown("### New Failures(previously failing, now passing)")
         pass_count = len(fail_to_pass_scenarios)
         st.write(f"Passing scenarios Count: {pass_count}")
         # Select columns for display
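For context, below is a minimal sketch of what the new pre.preprocess_uploaded_file helper plausibly does, assuming it simply bundles the per-file steps this commit deletes from double_main: read the upload, preprocess the CSV text, load it into a DataFrame, fill missing values, parse the datetime columns, and derive 'Time spent'. The body is an assumption, not the actual contents of pre.py; preprocess_csv, load_data, and fill_missing_data are the helpers the removed code already called and are assumed to be defined in the same module.

# Hypothetical sketch of preprocess_uploaded_file (in pre.py), mirroring the
# inline steps removed from double_main in this commit.
import io

import pandas as pd

def preprocess_uploaded_file(uploaded_file):
    """Turn one uploaded CSV into a cleaned DataFrame with a 'Time spent' column."""
    raw = uploaded_file.read()                                  # Streamlit UploadedFile -> bytes
    processed_output = preprocess_csv(raw)                      # existing helper in pre.py (assumed)
    processed_file = io.StringIO(processed_output.getvalue())
    data = load_data(processed_file)                            # existing helper in pre.py (assumed)

    data = fill_missing_data(data, 4, 0)                        # same arguments as the removed code
    data['Start datetime'] = pd.to_datetime(data['Start datetime'], errors='coerce')
    data['End datetime'] = pd.to_datetime(data['End datetime'], errors='coerce')
    data['Time spent'] = (data['End datetime'] - data['Start datetime']).dt.total_seconds()
    return data

With the per-file handling centralised in one helper, double_main shrinks to the two preprocess_uploaded_file calls shown in the first hunk, and both uploads go through identical cleaning before the older/newer comparison.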