Upload 12 files

- .gitattributes +1 -0
- analyzing.py +675 -0
- final_ufoseti_dataset.h5 +3 -0
- global_power_plant_database.csv +3 -0
- magnetic.py +907 -0
- map.py +506 -0
- military_config.kgl +264 -0
- navigation.py +27 -0
- parsing.py +678 -0
- rag_search.py +438 -0
- secret_bases.csv +146 -0
- uap_analyzer.py +1010 -0
- uap_config.kgl +239 -0
    	
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+global_power_plant_database.csv filter=lfs diff=lfs merge=lfs -text
    	
analyzing.py ADDED
@@ -0,0 +1,675 @@
import streamlit as st
import cudf.pandas
cudf.pandas.install()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from uap_analyzer import UAPParser, UAPAnalyzer, UAPVisualizer
# import ChartGen
# from ChartGen import ChartGPT
from Levenshtein import distance
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from stqdm import stqdm
stqdm.pandas()
import streamlit.components.v1 as components
from dateutil import parser
from sentence_transformers import SentenceTransformer
import torch
import squarify
import matplotlib.colors as mcolors
import textwrap
import datamapplot

st.set_option('deprecation.showPyplotGlobalUse', False)

from pandas.api.types import (
    is_categorical_dtype,
    is_datetime64_any_dtype,
    is_numeric_dtype,
    is_object_dtype,
)
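# NOTE on the cudf.pandas lines above: cudf.pandas.install() patches pandas so
# that supported DataFrame operations run on the GPU via RAPIDS cuDF, silently
# falling back to CPU pandas for anything unsupported. It must be called
# before pandas is imported, which is why it sits above `import pandas as pd`.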
def load_data(file_path, key='df'):
    return pd.read_hdf(file_path, key=key)


def gemini_query(question, selected_data, gemini_key):

    if question == "":
        question = "Summarize the following data in relevant bullet points"

    import pathlib
    import textwrap

    import google.generativeai as genai

    from IPython.display import display
    from IPython.display import Markdown

    def to_markdown(text):
        text = text.replace('•', '  *')
        return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))

    # selected_data is a list; drop None and empty entries
    # (check `is not None` before str() so None values are actually filtered)
    filtered = [str(x) for x in selected_data if x is not None and str(x) != '']
    # make a string
    context = '\n'.join(filtered)

    genai.configure(api_key=gemini_key)
    query_model = genai.GenerativeModel('models/gemini-1.5-pro-latest')
    response = query_model.generate_content([f"{question}\n Answer based on this context: {context}\n\n"])
    return response.text
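# Usage sketch for gemini_query (hypothetical column and values, for
# illustration only):
#   answer = gemini_query(
#       "Which locations recur most often?",
#       df['description'].tolist(),   # assumes a 'description' column exists
#       gemini_key=GEMINI_KEY,
#   )
#   st.write(answer)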
def plot_treemap(df, column, top_n=32):
    # Get the value counts and the top N labels
    value_counts = df[column].value_counts()
    top_labels = value_counts.iloc[:top_n].index

    # Use np.where to replace all values not in the top N with 'Other'
    revised_column = f'{column}_revised'
    df[revised_column] = np.where(df[column].isin(top_labels), df[column], 'Other')

    # Get the value counts including the 'Other' category
    sizes = df[revised_column].value_counts().values
    labels = df[revised_column].value_counts().index

    # Get a gradient of colors
    # colors = list(mcolors.TABLEAU_COLORS.values())
    n_colors = len(sizes)
    colors = plt.cm.Oranges(np.linspace(0.3, 0.9, n_colors))[::-1]

    # Get % of each category
    percents = sizes / sizes.sum()

    # Prepare labels with percentages
    labels = [f'{label}\n {percent:.1%}' for label, percent in zip(labels, percents)]

    fig, ax = plt.subplots(figsize=(20, 12))

    # Plot the treemap
    squarify.plot(sizes=sizes, label=labels, alpha=0.7, pad=True, color=colors, text_kwargs={'fontsize': 10})

    ax = plt.gca()
    # Iterate over text elements and rectangles (patches) in the axes for color adjustment
    for text, rect in zip(ax.texts, ax.patches):
        background_color = rect.get_facecolor()
        r, g, b, _ = mcolors.to_rgba(background_color)
        brightness = np.average([r, g, b])
        text.set_color('white' if brightness < 0.5 else 'black')

        # Adjust font size based on rectangle's area and wrap long text
        coef = 0.8
        font_size = np.sqrt(rect.get_width() * rect.get_height()) * coef
        text.set_fontsize(font_size)
        wrapped_text = textwrap.fill(text.get_text(), width=20)
        text.set_text(wrapped_text)

    plt.axis('off')
    plt.gca().invert_yaxis()
    plt.gcf().set_size_inches(20, 12)

    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    return fig
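# Usage sketch (hypothetical column, for illustration only): with top_n=10,
# everything outside the ten most frequent values of 'shape' is folded into a
# single 'Other' tile before plotting.
#   st.pyplot(plot_treemap(df, 'shape', top_n=10))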
def plot_hist(df, column, bins=10, kde=True):
    fig, ax = plt.subplots(figsize=(12, 6))
    # pass the kde argument through instead of hard-coding kde=True
    sns.histplot(data=df, x=column, kde=kde, bins=bins, color='orange')
    # Set the ticks and frame in orange
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')

    # Set transparent background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    return fig
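# Usage sketch (hypothetical column, for illustration only):
#   st.pyplot(plot_hist(df, 'duration_seconds', bins=30, kde=True))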
def plot_line(df, x_column, y_columns, figsize=(12, 10), color='orange', title=None, rolling_mean_value=2):
    import matplotlib.cm as cm
    # Sort the dataframe by the date column
    df = df.sort_values(by=x_column)

    # Calculate rolling mean for each y_column
    if rolling_mean_value:
        df[y_columns] = df[y_columns].rolling(len(df) // rolling_mean_value).mean()

    # Format x_column as a date *before* plotting if it is datetime-like,
    # so the converted values are what actually get drawn
    if np.issubdtype(df[x_column].dtype, np.datetime64) or np.issubdtype(df[x_column].dtype, np.timedelta64):
        df[x_column] = pd.to_datetime(df[x_column]).dt.date

    # Create the plot
    fig, ax = plt.subplots(figsize=figsize)

    colors = cm.Oranges(np.linspace(0.2, 1, len(y_columns)))

    # Plot each y_column as a separate line with a different color
    for i, y_column in enumerate(y_columns):
        df.plot(x=x_column, y=y_column, ax=ax, color=colors[i], label=y_column, linewidth=.5)

    # Rotate x-axis labels
    ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha='right')

    # Set title, labels, and legend
    ax.set_title(title or f'{", ".join(y_columns)} over {x_column}', color=color, fontweight='bold')
    ax.set_xlabel(x_column, color=color)
    ax.set_ylabel(', '.join(y_columns), color=color)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')

    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    # Remove background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)

    return fig
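# Note: rolling_mean_value sizes the smoothing window as a fraction of the
# series length (window = len(df) // rolling_mean_value), so rolling_mean_value=2
# averages over half the rows and larger values smooth less. Usage sketch
# (hypothetical columns):
#   st.pyplot(plot_line(df, 'date', ['sightings'], rolling_mean_value=4))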
def plot_bar(df, x_column, y_column, figsize=(12, 10), color='orange', title=None):
    fig, ax = plt.subplots(figsize=figsize)

    sns.barplot(data=df, x=x_column, y=y_column, color=color, ax=ax)

    ax.set_title(title if title else f'{y_column} by {x_column}', color=color, fontweight='bold')
    ax.set_xlabel(x_column, color=color)
    ax.set_ylabel(y_column, color=color)

    # Remove background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')
    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    return fig
def plot_grouped_bar(df, x_columns, y_column, figsize=(12, 10), colors=None, title=None):
    fig, ax = plt.subplots(figsize=figsize)

    width = 0.8 / len(x_columns)  # the width of the bars
    # float dtype so the in-place `x += width` below does not fail on an int array
    x = np.arange(len(df), dtype=float)  # the label locations

    for i, x_column in enumerate(x_columns):
        sns.barplot(data=df, x=x, y=y_column, color=colors[i] if colors else None, ax=ax, width=width, label=x_column)
        x += width  # shift the positions by one bar width for the next series

    ax.set_title(title if title else f'{y_column} by {", ".join(x_columns)}', color='orange', fontweight='bold')
    ax.set_xlabel('Groups', color='orange')
    ax.set_ylabel(y_column, color='orange')

    ax.set_xticks(x - width * len(x_columns) / 2)
    ax.set_xticklabels(df.index)

    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')

    # Remove background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.title.set_color('orange')
    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    return fig
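# Layout arithmetic above: with k = len(x_columns) series, each bar is
# width = 0.8/k wide and series i is drawn at base positions x + i*width, so a
# group of k bars spans 0.8 units; after the loop x has advanced by k*width,
# and the ticks are pulled back by width*k/2 to sit near each group's middle.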
def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """
    Adds a UI on top of a dataframe to let viewers filter columns

    Args:
        df (pd.DataFrame): Original dataframe

    Returns:
        pd.DataFrame: Filtered dataframe
    """

    title_font = "Arial"
    body_font = "Arial"
    title_size = 32
    colors = ["red", "green", "blue"]
    interpretation = False
    extract_docx = False
    title = "My Chart"
    regex = ".*"
    img_path = 'default_image.png'

    #try:
    #    modify = st.checkbox("Add filters on raw data")
    #except:
    #    try:
    #        modify = st.checkbox("Add filters on processed data")
    #    except:
    #        try:
    #            modify = st.checkbox("Add filters on parsed data")
    #        except:
    #            pass

    #if not modify:
    #    return df

    df_ = df.copy()
    # Try to convert datetimes into a standard format (datetime, no timezone)

    #modification_container = st.container()

    #with modification_container:
    # Falling back to a slightly different label avoids Streamlit's duplicate
    # widget error when this helper is called more than once per rerun
    try:
        to_filter_columns = st.multiselect("Filter dataframe on", df_.columns)
    except Exception:
        try:
            to_filter_columns = st.multiselect("Filter dataframe", df_.columns)
        except Exception:
            try:
                to_filter_columns = st.multiselect("Filter the dataframe on", df_.columns)
            except Exception:
                to_filter_columns = []  # fall back to no filters rather than a NameError below

    date_column = None
    filtered_columns = []

    for column in to_filter_columns:
        left, right = st.columns((1, 20))
        # Treat columns with < 120 unique values as categorical if not date or numeric
        if is_categorical_dtype(df_[column]) or (df_[column].nunique() < 120 and not is_datetime64_any_dtype(df_[column]) and not is_numeric_dtype(df_[column])):
            user_cat_input = right.multiselect(
                f"Values for {column}",
                df_[column].value_counts().index.tolist(),
                default=list(df_[column].value_counts().index)
            )
            df_ = df_[df_[column].isin(user_cat_input)]
            filtered_columns.append(column)

            with st.status(f"Category Distribution: {column}", expanded=False) as stat:
                st.pyplot(plot_treemap(df_, column))

        elif is_numeric_dtype(df_[column]):
            _min = float(df_[column].min())
            _max = float(df_[column].max())
            step = (_max - _min) / 100
            user_num_input = right.slider(
                f"Values for {column}",
                min_value=_min,
                max_value=_max,
                value=(_min, _max),
                step=step,
            )
            df_ = df_[df_[column].between(*user_num_input)]
            filtered_columns.append(column)

            # Chart_GPT = ChartGPT(df_, title_font, body_font, title_size,
            #      colors, interpretation, extract_docx, img_path)

            with st.status(f"Numerical Distribution: {column}", expanded=False) as stat_:
                # roughly one bin per two unique values
                st.pyplot(plot_hist(df_, column, bins=int(round((len(df_[column].unique()) - 1) / 2))))

        elif is_object_dtype(df_[column]):
            try:
                df_[column] = pd.to_datetime(df_[column], infer_datetime_format=True, errors='coerce')
            except Exception:
                try:
                    df_[column] = df_[column].apply(parser.parse)
                except Exception:
                    pass

            if is_datetime64_any_dtype(df_[column]):
                df_[column] = df_[column].dt.tz_localize(None)
                min_date = df_[column].min().date()
                max_date = df_[column].max().date()
                user_date_input = right.date_input(
                    f"Values for {column}",
                    value=(min_date, max_date),
                    min_value=min_date,
                    max_value=max_date,
                )
                # if len(user_date_input) == 2:
                #     start_date, end_date = user_date_input
                #     df_ = df_.loc[df_[column].dt.date.between(start_date, end_date)]
                if len(user_date_input) == 2:
                    user_date_input = tuple(map(pd.to_datetime, user_date_input))
                    start_date, end_date = user_date_input
                    df_ = df_.loc[df_[column].between(start_date, end_date)]

                date_column = column

                if date_column and filtered_columns:
                    numeric_columns = [col for col in filtered_columns if is_numeric_dtype(df_[col])]
                    if numeric_columns:
                        fig = plot_line(df_, date_column, numeric_columns)
                        #st.pyplot(fig)
                    # now to deal with categorical columns
                    categorical_columns = [col for col in filtered_columns if is_categorical_dtype(df_[col])]
                    if categorical_columns:
                        fig2 = plot_bar(df_, date_column, categorical_columns[0])
                        #st.pyplot(fig2)
                    with st.status(f"Date Distribution: {column}", expanded=False) as stat:
                        try:
                            st.pyplot(fig)
                        except Exception as e:
                            st.error(f"Error plotting line chart: {e}")
                        try:
                            st.pyplot(fig2)
                        except Exception as e:
                            st.error(f"Error plotting bar chart: {e}")

        else:
            user_text_input = right.text_input(
                f"Substring or regex in {column}",
            )
            if user_text_input:
                df_ = df_[df_[column].astype(str).str.contains(user_text_input)]
    # Write the length of df after filtering, with % of the original
    st.write(f"{len(df_)} rows ({len(df_) / len(df) * 100:.2f}%)")
    return df_
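# Usage sketch: wrap any dataframe before displaying it to get per-column
# filter widgets plus the distribution charts above, e.g.
#   filtered = filter_dataframe(st.session_state['parsed_responses'])
#   st.dataframe(filtered)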
def merge_clusters(df, column):
    # NOTE: despite its name, `df` is expected to be an analyzer-like object
    # (e.g. UAPAnalyzer) whose __dict__ carries `cluster_terms` and
    # `cluster_labels`; `column` is currently unused.
    cluster_terms_ = df.__dict__['cluster_terms']
    cluster_labels_ = df.__dict__['cluster_labels']
    label_name_map = {label: cluster_terms_[label] for label in set(cluster_labels_)}
    merge_map = {}
    # Iterate over term pairs and decide on merging based on the distance
    for idx, term1 in enumerate(cluster_terms_):
        for jdx, term2 in enumerate(cluster_terms_):
            if idx < jdx and distance(term1, term2) <= 3:  # Adjust threshold as needed
                # Decide to merge labels corresponding to jdx into labels corresponding to idx
                # Find labels corresponding to jdx and idx
                labels_to_merge = [label for label, term_index in enumerate(cluster_labels_) if term_index == jdx]
                for label in labels_to_merge:
                    merge_map[label] = idx  # Map the label to use the term index of term1

    # Update the analyzer with the merged numeric labels
    updated_cluster_labels_ = [merge_map[label] if label in merge_map else label for label in cluster_labels_]

    df.__dict__['cluster_labels'] = updated_cluster_labels_
    # Optional: Update string labels to reflect merged labels
    updated_string_labels = [cluster_terms_[label] for label in updated_cluster_labels_]
    df.__dict__['string_labels'] = updated_string_labels
    return updated_string_labels
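# Example of the merge rule above: Levenshtein distance <= 3 means at most
# three single-character edits, so near-duplicate cluster names such as
# 'light orb' / 'light orbs' (distance 1) are merged, while clearly distinct
# names such as 'light orb' / 'triangle' are kept apart.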
def analyze_and_predict(data, analyzers, col_names, clusters):
    visualizer = UAPVisualizer()
    new_data = pd.DataFrame()
    for i, column in enumerate(col_names):
        #new_data[f'Analyzer_{column}'] = analyzers[column].__dict__['cluster_labels']
        new_data[f'Analyzer_{column}'] = clusters[column]
        data[f'Analyzer_{column}'] = clusters[column]
        #data[f'Analyzer_{column}'] = analyzer.__dict__['cluster_labels']

        print(f"Cluster terms extracted for {column}")

    for col in data.columns:
        if 'Analyzer' in col:
            data[col] = data[col].astype('category')

    new_data = new_data.fillna('null').astype('category')
    data_nums = new_data.apply(lambda x: x.cat.codes)

    for col in data_nums.columns:
        try:
            categories = new_data[col].cat.categories
            x_train, x_test, y_train, y_test = train_test_split(data_nums.drop(columns=[col]), data_nums[col], test_size=0.2, random_state=42)
            bst, accuracy, preds = visualizer.train_xgboost(x_train, y_train, x_test, y_test, len(categories))
            fig = visualizer.plot_results(new_data, bst, x_test, y_test, preds, categories, accuracy, col)
            with st.status(f"Charts Analyses: {col}", expanded=True) as status:
                st.pyplot(fig)
                status.update(label=f"Chart Processed: {col}", expanded=False)
        except Exception as e:
            print(f"Error processing {col}: {e}")
            continue
    return new_data, data
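# What the loop in analyze_and_predict does per clustered column: hold that
# column's cluster labels out as the target, train an XGBoost classifier on
# the integer category codes of the remaining clustered columns (80/20 split),
# and plot the results; high accuracy means a column's clusters are largely
# predictable from the others, i.e. the fields are mutually informative.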
from config import API_KEY, GEMINI_KEY, FORMAT_LONG

with torch.no_grad():
    torch.cuda.empty_cache()

#st.set_page_config(
#    page_title="UAP ANALYSIS",
#    page_icon=":alien:",
#    layout="wide",
#    initial_sidebar_state="expanded",
#)

st.title('UAP Analysis Dashboard')

# Initialize session state
if 'analyzers' not in st.session_state:
    st.session_state['analyzers'] = []
if 'col_names' not in st.session_state:
    st.session_state['col_names'] = []
if 'clusters' not in st.session_state:
    st.session_state['clusters'] = {}
if 'new_data' not in st.session_state:
    st.session_state['new_data'] = pd.DataFrame()
if 'dataset' not in st.session_state:
    st.session_state['dataset'] = pd.DataFrame()
if 'data_processed' not in st.session_state:
    st.session_state['data_processed'] = False
if 'stage' not in st.session_state:
    st.session_state['stage'] = 0
if 'filtered_data' not in st.session_state:
    st.session_state['filtered_data'] = None
if 'gemini_answer' not in st.session_state:
    st.session_state['gemini_answer'] = None
if 'parsed_responses' not in st.session_state:
    st.session_state['parsed_responses'] = None
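# Streamlit reruns this script top to bottom on every interaction, so the
# expensive artifacts (analyzers, cluster assignments, processed frames) live
# in st.session_state above and are only rebuilt when their key is missing.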
# Load dataset
data_path = 'uap_files_embeds.h5'

my_dataset = st.file_uploader("Upload Parsed DataFrame", type=["csv", "xlsx"])
# if st.session_state['parsed_responses'] is not None:
#     #use_parsed_data = st.checkbox('Analyze recently parsed dataset')
#     #if use_parsed_data:
#     parsed = st.session_state.get('parsed_responses', pd.DataFrame()).copy() # this will overwrite the parsed_responses variable
#     filtered_data = filter_dataframe(parsed)
#     st.dataframe(filtered_data)
if my_dataset is not None:
#     try:
#         data = pd.read_csv(my_dataset) if my_dataset.type == "text/csv" else pd.read_excel(my_dataset)
#         filtered_data = filter_dataframe(data)
#         st.dataframe(filtered_data)
#     except Exception as e:
#         st.error(f"An error occurred while reading the file: {e}")
# #if 'parsed_responses' not in st.session_state:
    try:
        if my_dataset.type == "text/csv":
            data = pd.read_csv(my_dataset)
        elif my_dataset.type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
            data = pd.read_excel(my_dataset)
        else:
            st.error("Unsupported file type. Please upload a CSV, Excel or HDF5 file.")
            st.stop()
        # renamed from `parser` so the dateutil parser import is not shadowed
        parsed_data = filter_dataframe(data)
        st.session_state['parsed_responses'] = parsed_data
        st.dataframe(parsed_data)
        st.success(f"Successfully loaded and displayed data from {my_dataset.name}")
    except Exception as e:
        st.error(f"An error occurred while reading the file: {e}")
else:
    parsed = load_data(data_path).drop(columns=['embeddings']).head(10000)
    parsed_responses = filter_dataframe(parsed)
    st.session_state['parsed_responses'] = parsed_responses
    st.dataframe(parsed_responses)
col1, col2 = st.columns(2)
with col1:
    col_parsed = st.selectbox("Which column do you want to query?", st.session_state['parsed_responses'].columns)
with col2:
    GEMINI_KEY = st.text_input('Gemini API Key', GEMINI_KEY, type='password', help="Enter your Gemini API key")

if col_parsed and GEMINI_KEY:
    selected_column_data = st.session_state['parsed_responses'][col_parsed].tolist()
    question = st.text_input("Ask a question or leave empty for summarization")
    if st.button("Generate Query") and selected_column_data:
        st.write(gemini_query(question, selected_column_data, GEMINI_KEY))
st.session_state['stage'] = 1

if st.session_state['stage'] > 0:
    columns_to_analyze = st.multiselect(
        label='Select columns to analyze',
        options=st.session_state['parsed_responses'].columns
    )
    if columns_to_analyze:
        analyzers = []
        col_names = []
        clusters = {}
        for column in columns_to_analyze:
            with torch.no_grad():
                with st.status(f"Processing {column}", expanded=True) as status:
                    analyzer = UAPAnalyzer(st.session_state['parsed_responses'], column)
                    st.write(f"Processing {column}...")
                    analyzer.preprocess_data(top_n=32)
                    st.write("Reducing dimensionality...")
                    analyzer.reduce_dimensionality(method='UMAP', n_components=2, n_neighbors=15, min_dist=0.1)
                    st.write("Clustering data...")
                    analyzer.cluster_data(method='HDBSCAN', min_cluster_size=15)
                    analyzer.get_tf_idf_clusters(top_n=3)
                    st.write("Naming clusters...")
                    analyzers.append(analyzer)
                    col_names.append(column)
                    clusters[column] = analyzer.merge_similar_clusters(cluster_terms=analyzer.__dict__['cluster_terms'], cluster_labels=analyzer.__dict__['cluster_labels'])

                    # Run the visualization
                    # fig = datamapplot.create_plot(
                    #     analyzer.__dict__['reduced_embeddings'],
                    #     analyzer.__dict__['cluster_labels'].astype(str),
                    #     #label_font_size=11,
                    #     label_wrap_width=20,
                    #     use_medoids=True,
                    # )#.to_html(full_html=False, include_plotlyjs='cdn')
                    # st.pyplot(fig.savefig())
                    status.update(label=f"Processing {column} complete", expanded=False)
        st.session_state['analyzers'] = analyzers
        st.session_state['col_names'] = col_names
        st.session_state['clusters'] = clusters

        # Free the local references to save space; session state keeps the results
        parsed = None
        analyzers = None
        col_names = None
        clusters = None

        if st.session_state['clusters'] is not None:
            try:
                new_data, parsed_responses = analyze_and_predict(st.session_state['parsed_responses'], st.session_state['analyzers'], st.session_state['col_names'], st.session_state['clusters'])
                st.session_state['dataset'] = parsed_responses
                st.session_state['new_data'] = new_data
                st.session_state['data_processed'] = True
            except Exception as e:
                st.write(f"Error processing data: {e}")

        if st.session_state['data_processed']:
            try:
                visualizer = UAPVisualizer(data=st.session_state['new_data'])
                #new_data = pd.DataFrame()  # Assuming new_data is prepared earlier in the code
                fig2 = visualizer.plot_cramers_v_heatmap(data=st.session_state['new_data'], significance_level=0.05)
                with st.status("Cramer's V Chart", expanded=True) as statuss:
                    st.pyplot(fig2)
                    statuss.update(label="Cramer's V chart plotted", expanded=False)
            except Exception as e:
                st.write(f"Error plotting Cramers V: {e}")

            for i, column in enumerate(st.session_state['col_names']):
                #if stateful_button(f"Show {column} clusters {i}", key=f"show_{column}_clusters"):
                # if st.session_state['data_processed']:
                #     with st.status(f"Show clusters {column}", expanded=True) as stats:
                #         fig3 = st.session_state['analyzers'][i].plot_embeddings4(title=f"{column} clusters", cluster_terms=st.session_state['analyzers'][i].__dict__['cluster_terms'], cluster_labels=st.session_state['analyzers'][i].__dict__['cluster_labels'], reduced_embeddings=st.session_state['analyzers'][i].__dict__['reduced_embeddings'], column=f'Analyzer_{column}', data=st.session_state['new_data'])
                #         stats.update(label=f"Show clusters {column} complete", expanded=False)
                if st.session_state['data_processed']:
                    with st.status(f"Show clusters {column}", expanded=True) as stats:
                        fig3 = st.session_state['analyzers'][i].plot_embeddings4(
                            title=f"{column} clusters",
                            cluster_terms=st.session_state['analyzers'][i].__dict__['cluster_terms'],
                            cluster_labels=st.session_state['analyzers'][i].__dict__['cluster_labels'],
         | 
| 629 | 
            +
                                        reduced_embeddings=st.session_state['analyzers'][i].__dict__['reduced_embeddings'], 
         | 
| 630 | 
            +
                                        column=column,  # Use the original column name here
         | 
| 631 | 
            +
                                        data=st.session_state['parsed_responses']  # Use the original dataset here
         | 
| 632 | 
            +
                                    )
         | 
| 633 | 
            +
                                    stats.update(label=f"Show clusters {column} complete", expanded=False)
         | 
| 634 | 
            +
                            st.session_state['analysis_complete'] = True
         | 
| 635 | 
            +
             | 
| 636 | 
            +
             | 
| 637 | 
            +
            # this will check if the dataframe is not empty
         | 
| 638 | 
            +
            # if st.session_state['new_data'] is not None:
         | 
| 639 | 
            +
            #     parsed2 = st.session_state.get('dataset', pd.DataFrame())
         | 
| 640 | 
            +
            #     parsed2 = filter_dataframe(parsed2)
         | 
| 641 | 
            +
            #     col1, col2 = st.columns(2)
         | 
| 642 | 
            +
            #     st.dataframe(parsed2)
         | 
| 643 | 
            +
            #     with col1:
         | 
| 644 | 
            +
            #         col_parsed2 = st.selectbox("Which columns do you want to query?", parsed2.columns)
         | 
| 645 | 
            +
            #     with col2:
         | 
| 646 | 
            +
            #         GEMINI_KEY = st.text_input('Gemini APIs Key', GEMINI_KEY, type='password', help="Enter your Gemini API key")
         | 
| 647 | 
            +
            #     if col_parsed and GEMINI_KEY:
         | 
| 648 | 
            +
            #         selected_column_data2 = parsed2[col_parsed2].tolist()
         | 
| 649 | 
            +
            #         question2 = st.text_input("Ask a questions or leave empty for summarization")
         | 
| 650 | 
            +
            #         if st.button("Generate Query") and selected_column_data2:
         | 
| 651 | 
            +
            #             with st.status(f"Generating Query", expanded=True) as status:
         | 
| 652 | 
            +
            #                 gemini_answer = gemini_query(question2, selected_column_data2, GEMINI_KEY)
         | 
| 653 | 
            +
            #                 st.write(gemini_answer)
         | 
| 654 | 
            +
            #                 st.session_state['gemini_answer'] = gemini_answer
         | 
| 655 | 
            +
             | 
| 656 | 
            +
            if 'analysis_complete' in st.session_state and st.session_state['analysis_complete']:
         | 
| 657 | 
            +
                ticked_analysis = st.checkbox('Query Processed Data')
         | 
| 658 | 
            +
                if ticked_analysis:
         | 
| 659 | 
            +
                    if st.session_state['new_data'] is not None:
         | 
| 660 | 
            +
                        parsed2 = st.session_state.get('dataset', pd.DataFrame()).copy()
         | 
| 661 | 
            +
                        parsed2 = filter_dataframe(parsed2)
         | 
| 662 | 
            +
                        col1, col2 = st.columns(2)
         | 
| 663 | 
            +
                        st.dataframe(parsed2)
         | 
| 664 | 
            +
                        with col1:
         | 
| 665 | 
            +
                            col_parsed2 = st.selectbox("Which columns do you want to query?", parsed2.columns)
         | 
| 666 | 
            +
                        with col2:
         | 
| 667 | 
            +
                            GEMINI_KEY = st.text_input('Gemini APIs Key', GEMINI_KEY, type='password', help="Enter your Gemini API key")
         | 
| 668 | 
            +
                        if col_parsed2 and GEMINI_KEY:
         | 
| 669 | 
            +
                            selected_column_data2 = parsed2[col_parsed2].tolist()
         | 
| 670 | 
            +
                            question2 = st.text_input("Ask a questions or leave empty for summarization")
         | 
| 671 | 
            +
                            if st.button("Generate Queries") and selected_column_data2:
         | 
| 672 | 
            +
                                with st.status(f"Generating Query", expanded=True) as status:
         | 
| 673 | 
            +
                                    gemini_answer = gemini_query(question2, selected_column_data2, GEMINI_KEY)
         | 
| 674 | 
            +
                                    st.write(gemini_answer)
         | 
| 675 | 
            +
                                    st.session_state['gemini_answer'] = gemini_answer
         | 
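The block above gates each stage on st.session_state flags so that Streamlit reruns triggered by widget interaction do not repeat the expensive clustering pipeline. A minimal, self-contained sketch of that pattern (the names and the stand-in computation here are illustrative, not the app's):

import streamlit as st

if 'data_processed' not in st.session_state:
    st.session_state['data_processed'] = False

if st.button("Run analysis") and not st.session_state['data_processed']:
    # Stand-in for the heavy preprocess/reduce/cluster pipeline
    st.session_state['result'] = sum(range(1_000_000))
    st.session_state['data_processed'] = True

if st.session_state['data_processed']:
    st.write(f"Cached result: {st.session_state['result']}")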
    	
final_ufoseti_dataset.h5
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:829fb6660b24626eb5db39952783c6e17dc17c7c4636df0dfc8b641d0c84efe5
size 39219544
    	
global_power_plant_database.csv
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee79da6a4e0948e0df5ffc9fd372ee453e7e1d0b2ead57f568565750014f7d59
size 12758630
    	
magnetic.py
ADDED

@@ -0,0 +1,907 @@
import math
import pandas as pd
import numpy as np
import json
import requests
import datetime
from datetime import timedelta
from PIL import Image
# alternative to PIL
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
import matplotlib.dates as mdates
import seaborn as sns
from IPython.display import Image as image_display
path = os.getcwd()
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from IPython.display import display
from dateutil import parser
from Levenshtein import distance
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from stqdm import stqdm
stqdm.pandas()
import streamlit.components.v1 as components
from sentence_transformers import SentenceTransformer
import torch
import squarify
import matplotlib.colors as mcolors
import textwrap
import datamapplot
import streamlit as st


st.title('Magnetic Correlations Dashboard')

st.set_option('deprecation.showPyplotGlobalUse', False)


from pandas.api.types import (
    is_categorical_dtype,
    is_datetime64_any_dtype,
    is_numeric_dtype,
    is_object_dtype,
)
def plot_treemap(df, column, top_n=32):
    # Get the value counts and the top N labels
    value_counts = df[column].value_counts()
    top_labels = value_counts.iloc[:top_n].index

    # Use np.where to replace all values not in the top N with 'Other'
    revised_column = f'{column}_revised'
    df[revised_column] = np.where(df[column].isin(top_labels), df[column], 'Other')

    # Get the value counts including the 'Other' category
    sizes = df[revised_column].value_counts().values
    labels = df[revised_column].value_counts().index

    # Use a gradient of colours, darkest first
    n_colors = len(sizes)
    colors = plt.cm.Oranges(np.linspace(0.3, 0.9, n_colors))[::-1]

    # Share of each category
    percents = sizes / sizes.sum()

    # Prepare labels with percentages
    labels = [f'{label}\n {percent:.1%}' for label, percent in zip(labels, percents)]

    fig, ax = plt.subplots(figsize=(20, 12))

    # Plot the treemap
    squarify.plot(sizes=sizes, label=labels, alpha=0.7, pad=True, color=colors, ax=ax, text_kwargs={'fontsize': 10})

    # Iterate over text elements and rectangles (patches) to pick a readable label colour
    for text, rect in zip(ax.texts, ax.patches):
        background_color = rect.get_facecolor()
        r, g, b, _ = mcolors.to_rgba(background_color)
        brightness = np.average([r, g, b])
        text.set_color('white' if brightness < 0.5 else 'black')

    return fig  # return the figure so callers can pass it to st.pyplot
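# Usage sketch for plot_treemap (the demo dataframe is illustrative, not app
# data; commented out so the dashboard itself is unchanged):
# demo = pd.DataFrame({'shape': ['disk'] * 5 + ['orb'] * 3 + ['triangle'] * 2})
# st.pyplot(plot_treemap(demo, 'shape', top_n=2))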
def plot_hist(df, column, bins=10, kde=True):
    fig, ax = plt.subplots(figsize=(12, 6))
    sns.histplot(data=df, x=column, kde=kde, bins=bins, color='orange')
    # Style the ticks and frame in orange
    for spine in ['bottom', 'top', 'right', 'left']:
        ax.spines[spine].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')

    # Set transparent background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    return fig
def plot_line(df, x_column, y_columns, figsize=(12, 10), color='orange', title=None, rolling_mean_value=2):
    import matplotlib.cm as cm
    # Sort the dataframe by the date column
    df = df.sort_values(by=x_column)

    # Format x_column as a date if it is datetime-like (done before plotting so the axis picks it up)
    if np.issubdtype(df[x_column].dtype, np.datetime64) or np.issubdtype(df[x_column].dtype, np.timedelta64):
        df[x_column] = pd.to_datetime(df[x_column]).dt.date

    # Smooth each y_column with a rolling mean; the window is len(df) // rolling_mean_value
    # samples, e.g. rolling_mean_value=2 averages over half the rows
    if rolling_mean_value:
        df[y_columns] = df[y_columns].rolling(len(df) // rolling_mean_value).mean()

    # Create the plot
    fig, ax = plt.subplots(figsize=figsize)

    colors = cm.Oranges(np.linspace(0.2, 1, len(y_columns)))

    # Plot each y_column as a separate line with a different colour
    for i, y_column in enumerate(y_columns):
        df.plot(x=x_column, y=y_column, ax=ax, color=colors[i], label=y_column, linewidth=.5)

    # Rotate x-axis labels
    ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha='right')

    # Set title, labels, and legend
    ax.set_title(title or f'{", ".join(y_columns)} over {x_column}', color=color, fontweight='bold')
    ax.set_xlabel(x_column, color=color)
    ax.set_ylabel(', '.join(y_columns), color=color)
    for spine in ['bottom', 'top', 'right', 'left']:
        ax.spines[spine].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')

    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    # Remove background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)

    return fig
def plot_bar(df, x_column, y_column, figsize=(12, 10), color='orange', title=None):
    fig, ax = plt.subplots(figsize=figsize)

    sns.barplot(data=df, x=x_column, y=y_column, color=color, ax=ax)

    ax.set_title(title if title else f'{y_column} by {x_column}', color=color, fontweight='bold')
    ax.set_xlabel(x_column, color=color)
    ax.set_ylabel(y_column, color=color)

    # Orange frame, labels, and ticks on a transparent background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    for spine in ['bottom', 'top', 'right', 'left']:
        ax.spines[spine].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')

    return fig
def plot_grouped_bar(df, x_columns, y_column, figsize=(12, 10), colors=None, title=None):
    fig, ax = plt.subplots(figsize=figsize)

    width = 0.8 / len(x_columns)  # the width of the bars
    x = np.arange(len(df), dtype=float)  # the label locations (float so the offset below works in place)

    for i, x_column in enumerate(x_columns):
        sns.barplot(data=df, x=x, y=y_column, color=colors[i] if colors else None, ax=ax, width=width, label=x_column)
        x += width  # shift the x positions for the next group of bars

    ax.set_title(title if title else f'{y_column} by {", ".join(x_columns)}', color='orange', fontweight='bold')
    ax.set_xlabel('Groups', color='orange')
    ax.set_ylabel(y_column, color='orange')

    ax.set_xticks(x - width * len(x_columns) / 2)
    ax.set_xticklabels(df.index)

    # Orange frame, labels, and ticks on a transparent background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    for spine in ['bottom', 'top', 'right', 'left']:
        ax.spines[spine].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')
    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    return fig
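# The four plot helpers above repeat the same orange-on-transparent styling.
# A helper like this could factor it out (a sketch; `style_axes` is a
# hypothetical name, not used elsewhere in this file):
def style_axes(fig, ax, color='orange'):
    """Apply the dashboard's orange-on-transparent theme to a figure/axes pair."""
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    for spine in ax.spines.values():
        spine.set_color(color)
    ax.xaxis.label.set_color(color)
    ax.yaxis.label.set_color(color)
    ax.tick_params(axis='x', colors=color)
    ax.tick_params(axis='y', colors=color)
    ax.title.set_color(color)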
def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """
    Adds a UI on top of a dataframe to let viewers filter columns

    Args:
        df (pd.DataFrame): Original dataframe

    Returns:
        pd.DataFrame: Filtered dataframe
    """

    title_font = "Arial"
    body_font = "Arial"
    title_size = 32
    colors = ["red", "green", "blue"]
    interpretation = False
    extract_docx = False
    title = "My Chart"
    regex = ".*"
    img_path = 'default_image.png'

    # try:
    #     modify = st.checkbox("Add filters on raw data")
    # except:
    #     try:
    #         modify = st.checkbox("Add filters on processed data")
    #     except:
    #         try:
    #             modify = st.checkbox("Add filters on parsed data")
    #         except:
    #             pass
    # if not modify:
    #     return df

    df_ = df.copy()
    # Try to convert datetimes into a standard format (datetime, no timezone)

    to_filter_columns = st.multiselect("Filter dataframe on", df_.columns)

    date_column = None
    filtered_columns = []

    for column in to_filter_columns:
        left, right = st.columns((1, 20))
        # Treat columns with < 120 unique values as categorical if not date or numeric
        if is_categorical_dtype(df_[column]) or (df_[column].nunique() < 120 and not is_datetime64_any_dtype(df_[column]) and not is_numeric_dtype(df_[column])):
            user_cat_input = right.multiselect(
                f"Values for {column}",
                df_[column].value_counts().index.tolist(),
                default=list(df_[column].value_counts().index),
            )
            df_ = df_[df_[column].isin(user_cat_input)]
            filtered_columns.append(column)

            with st.status(f"Category Distribution: {column}", expanded=False) as stat:
                st.pyplot(plot_treemap(df_, column))

        elif is_numeric_dtype(df_[column]):
            _min = float(df_[column].min())
            _max = float(df_[column].max())
            step = (_max - _min) / 100
            user_num_input = right.slider(
                f"Values for {column}",
                min_value=_min,
                max_value=_max,
                value=(_min, _max),
                step=step,
            )
            df_ = df_[df_[column].between(*user_num_input)]
            filtered_columns.append(column)

            # Chart_GPT = ChartGPT(df_, title_font, body_font, title_size,
            #      colors, interpretation, extract_docx, img_path)

            with st.status(f"Numerical Distribution: {column}", expanded=False) as stat_:
                st.pyplot(plot_hist(df_, column, bins=int(round(len(df_[column].unique())-1)/2)))

        elif is_object_dtype(df_[column]):
            try:
                df_[column] = pd.to_datetime(df_[column], infer_datetime_format=True, errors='coerce')
            except Exception:
                try:
                    df_[column] = df_[column].apply(parser.parse)
                except Exception:
                    pass

            if is_datetime64_any_dtype(df_[column]):
                df_[column] = df_[column].dt.tz_localize(None)
                min_date = df_[column].min().date()
                max_date = df_[column].max().date()
                user_date_input = right.date_input(
                    f"Values for {column}",
                    value=(min_date, max_date),
                    min_value=min_date,
                    max_value=max_date,
                )
                if len(user_date_input) == 2:
                    user_date_input = tuple(map(pd.to_datetime, user_date_input))
                    start_date, end_date = user_date_input
                    df_ = df_.loc[df_[column].between(start_date, end_date)]

                date_column = column

                if date_column and filtered_columns:
                    numeric_columns = [col for col in filtered_columns if is_numeric_dtype(df_[col])]
                    if numeric_columns:
                        fig = plot_line(df_, date_column, numeric_columns)
                    # now deal with categorical columns
                    categorical_columns = [col for col in filtered_columns if is_categorical_dtype(df_[col])]
                    if categorical_columns:
                        fig2 = plot_bar(df_, date_column, categorical_columns[0])
                    with st.status(f"Date Distribution: {column}", expanded=False) as stat:
                        try:
                            st.pyplot(fig)
                        except Exception as e:
                            st.error(f"Error plotting line chart: {e}")
                        try:
                            st.pyplot(fig2)
                        except Exception as e:
                            st.error(f"Error plotting bar chart: {e}")

        else:
            user_text_input = right.text_input(
                f"Substring or regex in {column}",
            )
            if user_text_input:
                df_ = df_[df_[column].astype(str).str.contains(user_text_input)]

    # Report the filtered row count as a share of the original
    st.write(f"{len(df_)} rows ({len(df_) / len(df) * 100:.2f}%)")
    return df_
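# Usage sketch for filter_dataframe (the demo dataframe is illustrative, not
# app data; commented out so the dashboard itself is unchanged). Each selected
# column renders its own filter widget plus a distribution chart:
# demo = pd.DataFrame({
#     'city': ['Lisbon', 'Porto', 'Lisbon'],
#     'value': [1.0, 2.5, 3.7],
#     'seen_at': pd.to_datetime(['2023-01-01', '2023-02-01', '2023-03-01']),
# })
# st.dataframe(filter_dataframe(demo))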
def get_stations():
    # Query the INTERMAGNET GIN web service for the list of observatories
    base_url = 'https://imag-data.bgs.ac.uk/GIN_V1/GINServices?Request=GetCapabilities&format=json'
    response = requests.get(base_url)
    data = response.json()
    dataframe_stations = pd.DataFrame.from_dict(data['ObservatoryList'])
    return dataframe_stations
def get_haversine_distance(lat1, lon1, lat2, lon2):
    # Great-circle distance in km between two (lat, lon) points; R is the Earth radius in km
    R = 6371
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = math.sin(dlat / 2) ** 2 + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) ** 2
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    d = R * c
    return d
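# Sanity check for get_haversine_distance (coordinates are illustrative):
# Paris (48.8566, 2.3522) to London (51.5074, -0.1278) is roughly 344 km.
assert 330 < get_haversine_distance(48.8566, 2.3522, 51.5074, -0.1278) < 360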
def compare_stations(test_lat_lon, data_table, distance=1000, closest=False):
    # Collect every station within `distance` km of the test point
    table_updated = pd.DataFrame()
    distances = dict()
    for lat, lon, names in data_table[['Latitude', 'Longitude', 'Name']].values:
        harv_distance = get_haversine_distance(test_lat_lon[0], test_lat_lon[1], lat, lon)
        if harv_distance < distance:
            # print(f"Station {names} is at {round(harv_distance, 2)} km from the test point")
            table_updated = pd.concat([table_updated, data_table[data_table['Name'] == names]])
            distances[names] = harv_distance
    if closest and distances:  # guard against an empty dict when nothing is in range
        closest_station = min(distances, key=distances.get)
        # print(f"The closest station is {closest_station} at {round(distances[closest_station], 2)} km")
        table_updated = data_table[data_table['Name'] == closest_station].copy()
        table_updated['Distance'] = distances[closest_station]
    return table_updated
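# Usage sketch: find the observatory closest to a point of interest
# (coordinates are illustrative; assumes the ObservatoryList columns used
# above, i.e. 'Latitude', 'Longitude', 'Name'). Commented out to avoid a
# network call at import time:
# stations = get_stations()
# nearest = compare_stations((40.0, -105.3), stations, distance=1500, closest=True)
# print(nearest[['Name', 'Distance']])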
def get_data(IagaCode, start_date, end_date):
    # Parse the requested dates, falling back to pandas if strptime fails
    try:
        start_date_ = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    except ValueError as e:
        print(f"Error: {e}")
        start_date_ = pd.to_datetime(start_date)
    try:
        end_date_ = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    except ValueError as e:
        print(f"Error: {e}")
        end_date_ = pd.to_datetime(end_date)

    duration = end_date_ - start_date_
    # Define the parameters for the request
    params = {
        'Request': 'GetData',
        'format': 'json',
        'testObsys': '0',
        'observatoryIagaCode': IagaCode,
        'samplesPerDay': 'minute',
        'publicationState': 'Best available',
        'dataStartDate': start_date,
        'dataDuration': duration.days,  # number of days between the two dates
        'traceList': '1234',
        'colourTraces': 'true',
        'pictureSize': 'Automatic',
        'dataScale': 'Automatic',
        'pdfSize': '21,29.7',
    }

    base_url_json = 'https://imag-data.bgs.ac.uk/GIN_V1/GINServices?Request=GetData&format=json'
    # base_url_img = 'https://imag-data.bgs.ac.uk/GIN_V1/GINServices?Request=GetData&format=png'

    answer = None
    for base_url in [base_url_json]:  # , base_url_img]:
        response = requests.get(base_url, params=params)
        if response.status_code == 200:
            content_type = response.headers.get('Content-Type', '')
            if 'image' in content_type:
                # An image response would be saved and displayed here; the PNG
                # branch is currently disabled (see the commented-out variant below).
                img_answer = None
            else:
                answer = response.json()
        else:
            print(f"Failed to retrieve data. HTTP Status code: {response.status_code}")
            print("Response content:")
            print(response.content.decode('utf-8'))
    return answer  # , img_answer
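# Usage sketch for get_data (the IAGA code and dates are illustrative;
# commented out to avoid a network call at import time). Returns the GIN
# service's JSON payload of minute-resolution geomagnetic data:
# payload = get_data('ESK', '2015-03-16', '2015-03-18')
# if payload is not None:
#     print(list(payload.keys()))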
# An earlier variant of get_data that also fetched and displayed the PNG plot:
# def get_data(IagaCode, start_date, end_date):
#     # Convert dates to datetime
#     try:
#         start_date_ = pd.to_datetime(start_date)
#         end_date_ = pd.to_datetime(end_date)
#     except ValueError as e:
#         print(f"Error: {e}")
#         return None, None
#
#     duration = (end_date_ - start_date_).days
#
#     # Define the parameters for the request
#     params = {
#         'Request': 'GetData',
#         'format': 'json',
#         'testObsys': '0',
#         'observatoryIagaCode': IagaCode,
#         'samplesPerDay': 'minute',
#         'publicationState': 'Best available',
#         'dataStartDate': start_date_.strftime('%Y-%m-%d'),
#         'dataDuration': duration,
#         'traceList': '1234',
#         'colourTraces': 'true',
#         'pictureSize': 'Automatic',
#         'dataScale': 'Automatic',
#         'pdfSize': '21,29.7',
#     }
#
#     base_url_json = 'https://imag-data.bgs.ac.uk/GIN_V1/GINServices?Request=GetData&format=json'
#     base_url_img = 'https://imag-data.bgs.ac.uk/GIN_V1/GINServices?Request=GetData&format=png'
#
#     try:
#         # Request JSON data
#         response_json = requests.get(base_url_json, params=params)
#         response_json.raise_for_status()  # Raises an error for bad status codes
#         data = response_json.json()
#
#         # Request Image
#         params['format'] = 'png'
#         response_img = requests.get(base_url_img, params=params)
#         response_img.raise_for_status()
#
#         # Save and display image if response is successful
#         if 'image' in response_img.headers.get('Content-Type'):
#             output_image_path = "plot_image.png"
#             with open(output_image_path, 'wb') as file:
#                 file.write(response_img.content)
#             print(f"Image successfully saved as {output_image_path}")
#
#             img = mpimg.imread(output_image_path)
#             plt.imshow(img)
#             plt.axis('off')
#             plt.show()
#             img_answer = Image.open(output_image_path)
#         else:
#             img_answer = None
#
#         return data, img_answer
#
#     except requests.RequestException as e:
#         print(f"Request failed: {e}")
#         return None, None
#     except ValueError as e:
#         print(f"JSON decode error: {e}")
#         return None, None
| 530 | 
            +
            def clean_uap_data(dataset, lat, lon, date):
         | 
| 531 | 
            +
    # Keep only rows where the coordinate and date fields are all present
         | 
| 532 | 
            +
    processed = dataset[dataset[[lat, lon, date]].notnull().all(axis=1)].copy()  # copy so the conversions below do not mutate a view
         | 
| 533 | 
            +
                # Converting 'Lat' and 'Long' columns to floats, handling errors
         | 
| 534 | 
            +
                processed[lat] = pd.to_numeric(processed[lat], errors='coerce')
         | 
| 535 | 
            +
                processed[lon] = pd.to_numeric(processed[lon], errors='coerce')
         | 
| 536 | 
            +
             | 
| 537 | 
            +
                # if processed[date].min() < pd.to_datetime('1677-09-22'):
         | 
| 538 | 
            +
                #     processed.loc[processed[date] < pd.to_datetime('1677-09-22'), 'corrected_date'] = pd.to_datetime('1677-09-22 00:00:00')
         | 
| 539 | 
            +
             | 
| 540 | 
            +
    processed = processed[processed[date] >= '1677-09-22']  # pd.Timestamp cannot represent earlier dates
         | 
| 541 | 
            +
             | 
| 542 | 
            +
                # convert date to str
         | 
| 543 | 
            +
                #processed[date] = processed[date].astype(str)
         | 
| 544 | 
            +
                # Dropping rows where 'Lat' or 'Long' conversion failed (i.e., became NaN)
         | 
| 545 | 
            +
                processed = processed.dropna(subset=[lat, lon])
         | 
| 546 | 
            +
                return processed
         | 
| 547 | 
            +
             | 
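# Hedged usage sketch for clean_uap_data (column names below are
# hypothetical; any sighting table with latitude/longitude/date columns
# should work). The 1677-09-22 cutoff exists because pandas Timestamps
# cannot represent earlier dates (pd.Timestamp.min is 1677-09-21).
#
#     cases = clean_uap_data(df, lat='Lat', lon='Long', date='EventDate')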
| 548 | 
            +
             | 
| 549 | 
            +
            def plot_overlapped_timeseries(data_list, event_times, window_hours=12, save_path=None):
         | 
| 550 | 
            +
                fig, axs = plt.subplots(4, 1, figsize=(12, 16), sharex=True)
         | 
| 551 | 
            +
                fig.patch.set_alpha(0)  # Make figure background transparent
         | 
| 552 | 
            +
             | 
| 553 | 
            +
                components = ['X', 'Y', 'Z', 'S']
         | 
| 554 | 
            +
                colors = ['red', 'green', 'blue', 'black']
         | 
| 555 | 
            +
             | 
| 556 | 
            +
                for i, component in enumerate(components):
         | 
| 557 | 
            +
                    axs[i].patch.set_alpha(0)  # Make subplot background transparent
         | 
| 558 | 
            +
                    axs[i].set_ylabel(component, color='orange')
         | 
| 559 | 
            +
                    axs[i].grid(True, color='orange', alpha=0.3)
         | 
| 560 | 
            +
                    
         | 
| 561 | 
            +
                    for spine in axs[i].spines.values():
         | 
| 562 | 
            +
                        spine.set_color('orange')
         | 
| 563 | 
            +
                    
         | 
| 564 | 
            +
                    axs[i].tick_params(axis='both', colors='orange')  # Change tick color
         | 
| 565 | 
            +
                    axs[i].set_title(f'{component}', color='orange')
         | 
| 566 | 
            +
                    axs[i].set_xlabel('Time Difference from Event (hours)', color='orange')
         | 
| 567 | 
            +
             | 
| 568 | 
            +
                    for j, (df, event_time) in enumerate(zip(data_list, event_times)):
         | 
| 569 | 
            +
                        # Convert datetime column to UTC if it has timezone info, otherwise assume it's UTC
         | 
| 570 | 
            +
                        df['datetime'] = pd.to_datetime(df['datetime']).dt.tz_localize(None)
         | 
| 571 | 
            +
                        
         | 
| 572 | 
            +
                        # Convert event_time to UTC if it has timezone info, otherwise assume it's UTC
         | 
| 573 | 
            +
                        event_time = pd.to_datetime(event_time).tz_localize(None)
         | 
| 574 | 
            +
                        
         | 
| 575 | 
            +
                        # Calculate time difference from event
         | 
| 576 | 
            +
                        df['time_diff'] = (df['datetime'] - event_time).dt.total_seconds() / 3600  # Convert to hours
         | 
| 577 | 
            +
                        
         | 
| 578 | 
            +
                        # Filter data within the specified window
         | 
| 579 | 
            +
            df_window = df[(df['time_diff'] >= -window_hours) & (df['time_diff'] <= window_hours)].copy()
         | 
| 580 | 
            +
             | 
| 581 | 
            +
                        # normalize component data
         | 
| 582 | 
            +
                        df_window[component] = (df_window[component] - df_window[component].mean()) / df_window[component].std()
         | 
| 583 | 
            +
                        
         | 
| 584 | 
            +
                        axs[i].plot(df_window['time_diff'], df_window[component], color=colors[i], alpha=0.7, label=f'Event {j+1}', linewidth=1)
         | 
| 585 | 
            +
                    
         | 
| 586 | 
            +
                    axs[i].axvline(x=0, color='red', linewidth=2, linestyle='--', label='Event Time')
         | 
| 587 | 
            +
                    axs[i].set_xlim(-window_hours, window_hours)
         | 
| 588 | 
            +
                    #axs[i].legend(loc='upper left', bbox_to_anchor=(1, 1))
         | 
| 589 | 
            +
             | 
| 590 | 
            +
                axs[-1].set_xlabel('Hours from Event', color='orange')
         | 
| 591 | 
            +
                fig.suptitle('Overlapped Time Series of Components', fontsize=16, color='orange')
         | 
| 592 | 
            +
                
         | 
| 593 | 
            +
                plt.tight_layout()
         | 
| 594 | 
            +
                plt.subplots_adjust(top=0.95, right=0.85)
         | 
| 595 | 
            +
             | 
| 596 | 
            +
                if save_path:
         | 
| 597 | 
            +
                    fig.savefig(save_path, transparent=True, bbox_inches='tight')
         | 
| 598 | 
            +
                    plt.close(fig)
         | 
| 599 | 
            +
                    return save_path
         | 
| 600 | 
            +
                else:
         | 
| 601 | 
            +
                    return fig
         | 
| 602 | 
            +
                
         | 
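# Sketch of the expected inputs (illustrative names): each frame in
# data_list carries 'datetime', 'X', 'Y', 'Z', 'S' columns, and
# event_times[k] is the event timestamp for data_list[k].
#
#     fig = plot_overlapped_timeseries([df_a, df_b], [t_a, t_b], window_hours=12)
#     st.pyplot(fig)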
| 603 | 
            +
            def plot_average_timeseries(data_list, event_times, window_hours=12, save_path=None):
         | 
| 604 | 
            +
                fig, axs = plt.subplots(4, 1, figsize=(12, 16), sharex=True)
         | 
| 605 | 
            +
                fig.patch.set_alpha(0)  # Make figure background transparent
         | 
| 606 | 
            +
             | 
| 607 | 
            +
                components = ['X', 'Y', 'Z', 'S']
         | 
| 608 | 
            +
                colors = ['red', 'green', 'blue', 'black']
         | 
| 609 | 
            +
             | 
| 610 | 
            +
                for i, component in enumerate(components):
         | 
| 611 | 
            +
                    axs[i].patch.set_alpha(0)
         | 
| 612 | 
            +
                    axs[i].set_ylabel(component, color='orange')
         | 
| 613 | 
            +
                    axs[i].grid(True, color='orange', alpha=0.3)
         | 
| 614 | 
            +
             | 
| 615 | 
            +
                    for spine in axs[i].spines.values():
         | 
| 616 | 
            +
                            spine.set_color('orange')
         | 
| 617 | 
            +
                    
         | 
| 618 | 
            +
                    axs[i].tick_params(axis='both', colors='orange')
         | 
| 619 | 
            +
             | 
| 620 | 
            +
                    all_data = []
         | 
| 621 | 
            +
                    time_diffs = []
         | 
| 622 | 
            +
             | 
| 623 | 
            +
                    for j, (df, event_time) in enumerate(zip(data_list, event_times)):
         | 
| 624 | 
            +
                        # Convert datetime column to UTC if it has timezone info, otherwise assume it's UTC
         | 
| 625 | 
            +
                        df['datetime'] = pd.to_datetime(df['datetime']).dt.tz_localize(None)
         | 
| 626 | 
            +
             | 
| 627 | 
            +
                        # Convert event_time to UTC if it has timezone info, otherwise assume it's UTC
         | 
| 628 | 
            +
                        event_time = pd.to_datetime(event_time).tz_localize(None)
         | 
| 629 | 
            +
             | 
| 630 | 
            +
                        # Calculate time difference from event
         | 
| 631 | 
            +
                        df['time_diff'] = (df['datetime'] - event_time).dt.total_seconds() / 3600  # Convert to hours
         | 
| 632 | 
            +
             | 
| 633 | 
            +
                        # Filter data within the specified window
         | 
| 634 | 
            +
            df_window = df[(df['time_diff'] >= -window_hours) & (df['time_diff'] <= window_hours)].copy()
         | 
| 635 | 
            +
             | 
| 636 | 
            +
                        # Normalize component data
         | 
| 637 | 
            +
                        df_window[component] = (df_window[component] - df_window[component].mean())# / df_window[component].std()
         | 
| 638 | 
            +
             | 
| 639 | 
            +
                        all_data.append(df_window[component].values)
         | 
| 640 | 
            +
                        time_diffs.append(df_window['time_diff'].values)
         | 
| 641 | 
            +
             | 
| 642 | 
            +
                    # Calculate average and standard deviation
         | 
| 643 | 
            +
                    try:
         | 
| 644 | 
            +
                        avg_data = np.mean(all_data, axis=0)
         | 
| 645 | 
            +
        except Exception:  # windows of unequal length make np.mean raise
         | 
| 646 | 
            +
                        avg_data = np.zeros_like(all_data[0])
         | 
| 647 | 
            +
                    try:
         | 
| 648 | 
            +
                        std_data = np.std(all_data, axis=0)
         | 
| 649 | 
            +
        except Exception:
         | 
| 650 | 
            +
                        std_data = np.zeros_like(avg_data)
         | 
| 651 | 
            +
             | 
| 654 | 
            +
             | 
| 655 | 
            +
                    # Plot average line
         | 
| 656 | 
            +
                    axs[i].plot(time_diffs[0], avg_data, color=colors[i], label='Average')
         | 
| 657 | 
            +
             | 
| 658 | 
            +
                    # Plot standard deviation as shaded region
         | 
| 659 | 
            +
                    try:
         | 
| 660 | 
            +
                        axs[i].fill_between(time_diffs[0], avg_data - std_data, avg_data + std_data, color=colors[i], alpha=0.2)
         | 
| 661 | 
            +
        except Exception:
         | 
| 662 | 
            +
                        pass
         | 
| 663 | 
            +
             | 
| 664 | 
            +
                    axs[i].axvline(x=0, color='red', linewidth=2, linestyle='--', label='Event Time')
         | 
| 665 | 
            +
                    axs[i].set_xlim(-window_hours, window_hours)
         | 
| 666 | 
            +
                    # orange frame, orange label legend
         | 
| 667 | 
            +
                    axs[i].legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')
         | 
| 668 | 
            +
             | 
| 669 | 
            +
    axs[-1].set_xlabel('Hours from Event', color='orange')
    fig.suptitle('Average Time Series of Components', fontsize=16, color='orange')
    plt.tight_layout()
         | 
| 670 | 
            +
                plt.subplots_adjust(top=0.95, right=0.85)
         | 
| 671 | 
            +
             | 
| 672 | 
            +
                if save_path:
         | 
| 673 | 
            +
                    fig.savefig(save_path, transparent=True, bbox_inches='tight')
         | 
| 674 | 
            +
                    plt.close(fig)
         | 
| 675 | 
            +
                    return save_path
         | 
| 676 | 
            +
                else:
         | 
| 677 | 
            +
                    return fig
         | 
| 678 | 
            +
                
         | 
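# Note on the plain average above: np.mean(all_data, axis=0) assumes every
# event window has the same length and sampling, e.g.
# np.mean([[1, 2], [3, 4]], axis=0) -> array([2., 3.]); ragged windows
# (minute data with gaps) trip the except branches and fall back to zeros.
# The DTW-aligned variant further down relaxes that assumption.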
| 679 | 
            +
            def align_series(reference, series):
         | 
| 680 | 
            +
                reference = reference.flatten()
         | 
| 681 | 
            +
                series = series.flatten()
         | 
| 682 | 
            +
                _, path = fastdtw(reference, series, dist=euclidean)
         | 
| 683 | 
            +
                aligned = np.zeros(len(reference))
         | 
| 684 | 
            +
                for ref_idx, series_idx in path:
         | 
| 685 | 
            +
                    aligned[ref_idx] = series[series_idx]
         | 
| 686 | 
            +
                return aligned
         | 
| 687 | 
            +
             | 
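# fastdtw returns (distance, path), where path is a list of
# (reference_index, series_index) pairs; the loop above projects the warped
# series onto the reference timeline, with later pairs overwriting earlier
# ones when several series points map to the same reference index.
# Tiny illustration (values are made up):
#
#     ref     = np.array([0.0, 1.0, 0.0])
#     series  = np.array([0.0, 0.0, 1.0, 0.0])
#     aligned = align_series(ref, series)   # len(aligned) == len(ref)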
| 688 | 
            +
            def plot_average_timeseries_with_dtw(data_list, event_times, window_hours=12, save_path=None):
         | 
| 689 | 
            +
                fig, axs = plt.subplots(4, 1, figsize=(12, 16), sharex=True)
         | 
| 690 | 
            +
                fig.patch.set_alpha(0)  # Make figure background transparent
         | 
| 691 | 
            +
             | 
| 692 | 
            +
                components = ['X', 'Y', 'Z', 'S']
         | 
| 693 | 
            +
                colors = ['red', 'green', 'blue', 'black']
         | 
| 694 | 
            +
                fig.text(0.02, 0.5, 'Geomagnetic Variation (nT)', va='center', rotation='vertical', color='orange')
         | 
| 695 | 
            +
             | 
| 696 | 
            +
             | 
| 697 | 
            +
                for i, component in enumerate(components):
         | 
| 698 | 
            +
                    axs[i].patch.set_alpha(0)
         | 
| 699 | 
            +
                    axs[i].set_ylabel(component, color='orange', rotation=90)
         | 
| 700 | 
            +
                    axs[i].grid(True, color='orange', alpha=0.3)
         | 
| 701 | 
            +
                    
         | 
| 702 | 
            +
                    for spine in axs[i].spines.values():
         | 
| 703 | 
            +
                        spine.set_color('orange')
         | 
| 704 | 
            +
                    
         | 
| 705 | 
            +
                    axs[i].tick_params(axis='both', colors='orange')
         | 
| 706 | 
            +
             | 
| 707 | 
            +
                    all_aligned_data = []
         | 
| 708 | 
            +
                    reference_df = None
         | 
| 709 | 
            +
             | 
| 710 | 
            +
                    for j, (df, event_time) in enumerate(zip(data_list, event_times)):
         | 
| 711 | 
            +
                        df['datetime'] = pd.to_datetime(df['datetime']).dt.tz_localize(None)
         | 
| 712 | 
            +
                        event_time = pd.to_datetime(event_time).tz_localize(None)
         | 
| 713 | 
            +
                        df['time_diff'] = (df['datetime'] - event_time).dt.total_seconds() / 3600
         | 
| 714 | 
            +
            df_window = df[(df['time_diff'] >= -window_hours) & (df['time_diff'] <= window_hours)].copy()
         | 
| 715 | 
            +
                        df_window[component] = (df_window[component] - df_window[component].mean())# / df_window[component].std()
         | 
| 716 | 
            +
                        
         | 
| 717 | 
            +
                        if reference_df is None:
         | 
| 718 | 
            +
                            reference_df = df_window
         | 
| 719 | 
            +
                            all_aligned_data.append(reference_df[component].values)
         | 
| 720 | 
            +
                        else:
         | 
| 721 | 
            +
                            try:
         | 
| 722 | 
            +
                                aligned_series = align_series(reference_df[component].values, df_window[component].values)
         | 
| 723 | 
            +
                                all_aligned_data.append(aligned_series)
         | 
| 724 | 
            +
                except Exception:  # skip windows that cannot be aligned
         | 
| 725 | 
            +
                                pass
         | 
| 726 | 
            +
             | 
| 727 | 
            +
                    # Calculate average and standard deviation of aligned data
         | 
| 728 | 
            +
                    all_aligned_data = np.array(all_aligned_data)
         | 
| 729 | 
            +
                    avg_data = np.mean(all_aligned_data, axis=0)
         | 
| 730 | 
            +
             | 
| 731 | 
            +
        # Single scalar band width: np.std over the whole aligned array
         | 
| 732 | 
            +
                    def calculate_std(data):
         | 
| 733 | 
            +
                        if data is not None and len(data) > 0:
         | 
| 734 | 
            +
                            data = np.array(data)
         | 
| 735 | 
            +
                            std_data = np.std(data)
         | 
| 736 | 
            +
                            return std_data
         | 
| 737 | 
            +
                        else:
         | 
| 738 | 
            +
                            return "Data is empty or not a list"
         | 
| 739 | 
            +
                        
         | 
| 740 | 
            +
                    std_data = calculate_std(all_aligned_data)
         | 
| 741 | 
            +
             | 
| 742 | 
            +
                    # Plot average line
         | 
| 743 | 
            +
                    axs[i].plot(reference_df['time_diff'], avg_data, color=colors[i], label='Average')
         | 
| 744 | 
            +
             | 
| 745 | 
            +
                    # Plot standard deviation as shaded region
         | 
| 746 | 
            +
                    try:
         | 
| 747 | 
            +
                        axs[i].fill_between(reference_df['time_diff'], avg_data - std_data, avg_data + std_data, color=colors[i], alpha=0.2)
         | 
| 748 | 
            +
                    except TypeError as e:
         | 
| 749 | 
            +
                        #print(f"Error: {e}")
         | 
| 750 | 
            +
                        pass
         | 
| 751 | 
            +
                        
         | 
| 752 | 
            +
             | 
| 753 | 
            +
                    axs[i].axvline(x=0, color='red', linewidth=2, linestyle='--', label='Event Time')
         | 
| 754 | 
            +
                    axs[i].set_xlim(-window_hours, window_hours)
         | 
| 755 | 
            +
                    axs[i].legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.2, labelcolor='orange', edgecolor='orange')
         | 
| 756 | 
            +
             | 
| 757 | 
            +
             | 
| 758 | 
            +
                axs[-1].set_xlabel('Hours from Event', color='orange')
         | 
| 759 | 
            +
                fig.suptitle('Average Time Series of Components (FastDTW Aligned)', fontsize=16, color='orange')
         | 
| 760 | 
            +
             | 
| 761 | 
            +
                plt.tight_layout()
         | 
| 762 | 
            +
                plt.subplots_adjust(top=0.85, right=0.85, left=0.1)
         | 
| 763 | 
            +
             | 
| 764 | 
            +
                if save_path:
         | 
| 765 | 
            +
                    fig.savefig(save_path, transparent=True, bbox_inches='tight')
         | 
| 766 | 
            +
                    plt.close(fig)
         | 
| 767 | 
            +
                    return save_path
         | 
| 768 | 
            +
                else:
         | 
| 769 | 
            +
                    return fig
         | 
| 770 | 
            +
             | 
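# Hedged usage sketch (same inputs as the overlap plot): the first window
# in data_list acts as the DTW reference, and windows that fail to align
# are skipped silently, so the average may rest on fewer events than given.
#
#     fig = plot_average_timeseries_with_dtw(all_data, all_event_times)
#     st.pyplot(fig)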
| 771 | 
            +
            def plot_data_custom(df, date, save_path=None, subtitle=None):
         | 
| 772 | 
            +
                df['datetime'] = pd.to_datetime(df['datetime'])
         | 
| 773 | 
            +
                event = pd.to_datetime(date)
         | 
| 774 | 
            +
                window = timedelta(hours=12)
         | 
| 775 | 
            +
                x_min = event - window
         | 
| 776 | 
            +
                x_max = event + window
         | 
| 777 | 
            +
             | 
| 778 | 
            +
                fig, axs = plt.subplots(4, 1, figsize=(12, 12), sharex=True)
         | 
| 779 | 
            +
                fig.patch.set_alpha(0)  # Make figure background transparent
         | 
| 780 | 
            +
             | 
| 781 | 
            +
                components = ['X', 'Y', 'Z', 'S']
         | 
| 782 | 
            +
                colors = ['red', 'green', 'blue', 'black']
         | 
| 783 | 
            +
             | 
| 784 | 
            +
                fig.text(0.02, 0.5, 'Geomagnetic Variation (nT)', va='center', rotation='vertical', color='orange')
         | 
| 785 | 
            +
             | 
| 786 | 
            +
                # if df[component].isnull().all().all():
         | 
| 787 | 
            +
                #     return None
         | 
| 788 | 
            +
                        
         | 
| 789 | 
            +
                for i, component in enumerate(components):
         | 
| 790 | 
            +
                    axs[i].plot(df['datetime'], df[component], label=component, color=colors[i])
         | 
| 791 | 
            +
                    axs[i].axvline(x=event, color='red', linewidth=2, label='Event', linestyle='--')
         | 
| 792 | 
            +
                    axs[i].set_ylabel(component, color='orange', rotation=90)
         | 
| 793 | 
            +
                    axs[i].set_xlim(x_min, x_max)
         | 
| 794 | 
            +
                    axs[i].legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.2, labelcolor='orange', edgecolor='orange')
         | 
| 795 | 
            +
                    axs[i].grid(True, color='orange', alpha=0.3)
         | 
| 796 | 
            +
                    axs[i].patch.set_alpha(0)  # Make subplot background transparent
         | 
| 797 | 
            +
                    
         | 
| 798 | 
            +
                    for spine in axs[i].spines.values():
         | 
| 799 | 
            +
                        spine.set_color('orange')
         | 
| 800 | 
            +
                    
         | 
| 801 | 
            +
                    axs[i].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
         | 
| 802 | 
            +
                    axs[i].xaxis.set_major_locator(mdates.HourLocator(interval=1))
         | 
| 803 | 
            +
                    axs[i].tick_params(axis='both', colors='orange')
         | 
| 804 | 
            +
             | 
| 805 | 
            +
                plt.setp(axs[-1].xaxis.get_majorticklabels(), rotation=45)
         | 
| 806 | 
            +
                axs[-1].set_xlabel('Hours', color='orange')
         | 
| 807 | 
            +
                fig.suptitle(f'Time Series of Components with Event Marks\n{subtitle}', fontsize=12, color='orange')
         | 
| 808 | 
            +
                
         | 
| 809 | 
            +
                plt.tight_layout()
         | 
| 810 | 
            +
                #plt.subplots_adjust(top=0.85)
         | 
| 811 | 
            +
                plt.subplots_adjust(top=0.85, right=0.85, left=0.1)
         | 
| 812 | 
            +
             | 
| 813 | 
            +
             | 
| 814 | 
            +
                if save_path:
         | 
| 815 | 
            +
                    fig.savefig(save_path, transparent=True)
         | 
| 816 | 
            +
                    plt.close(fig)
         | 
| 817 | 
            +
                    return save_path
         | 
| 818 | 
            +
                else:
         | 
| 819 | 
            +
                    return fig
         | 
| 820 | 
            +
             | 
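# Minimal single-event sketch (hypothetical values):
#
#     plotted = pd.DataFrame({'datetime': ts, 'X': x, 'Y': y, 'Z': z, 'S': s})
#     fig = plot_data_custom(plotted, date='2004-11-14T12:00:00',
#                            subtitle='Closest station: ABC, 42 km')
#     st.pyplot(fig)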
| 821 | 
            +
             | 
| 822 | 
            +
            def batch_requests(stations, dataset, lon, lat, date, distance=100):
         | 
| 823 | 
            +
                results = {"station": [], "data": [], "image": [], "custom_image": []}
         | 
| 824 | 
            +
                all_data = []
         | 
| 825 | 
            +
                all_event_times = []
         | 
| 826 | 
            +
             | 
| 827 | 
            +
                for lon_, lat_, date_ in dataset[[lon, lat, date]].values:
         | 
| 828 | 
            +
                    test_lat_lon = (lat_, lon_)
         | 
| 829 | 
            +
                    try:
         | 
| 830 | 
            +
                        str_date = pd.to_datetime(date_).strftime('%Y-%m-%dT%H:%M:%S')
         | 
| 831 | 
            +
        except Exception:  # fall back to the raw value if it cannot be parsed
         | 
| 832 | 
            +
                        str_date = date_
         | 
| 833 | 
            +
                    twelve_hours = pd.Timedelta(hours=12)
         | 
| 834 | 
            +
                    forty_eight_hours = pd.Timedelta(hours=48)
         | 
| 835 | 
            +
                    try:
         | 
| 836 | 
            +
                        str_date_start = (pd.to_datetime(str_date) - twelve_hours).strftime('%Y-%m-%dT%H:%M:%S')
         | 
| 837 | 
            +
                        str_date_end = (pd.to_datetime(str_date) + forty_eight_hours).strftime('%Y-%m-%dT%H:%M:%S')
         | 
| 838 | 
            +
                    except Exception as e:
         | 
| 839 | 
            +
                        print(f"Error: {e}")
         | 
| 840 | 
            +
            continue  # without a parseable date there is no query window for this event
         | 
| 841 | 
            +
                    
         | 
| 842 | 
            +
                    try:
         | 
| 843 | 
            +
                        new_dataset = compare_stations(test_lat_lon, stations, distance=distance, closest=True)
         | 
| 844 | 
            +
                        station_name = new_dataset['Name']
         | 
| 845 | 
            +
                        station_distance = new_dataset['Distance']
         | 
| 846 | 
            +
                        test_ = get_data(new_dataset.iloc[0]['IagaCode'], str_date_start, str_date_end)
         | 
| 847 | 
            +
             | 
| 848 | 
            +
                        if test_:
         | 
| 849 | 
            +
                            results["station"].append(new_dataset.iloc[0]['IagaCode'])
         | 
| 850 | 
            +
                            results["data"].append(test_)
         | 
| 851 | 
            +
                            plotted = pd.DataFrame({
         | 
| 852 | 
            +
                                'datetime': test_['datetime'],
         | 
| 853 | 
            +
                                'X': test_['X'],
         | 
| 854 | 
            +
                                'Y': test_['Y'],
         | 
| 855 | 
            +
                                'Z': test_['Z'],
         | 
| 856 | 
            +
                                'S': test_['S'],
         | 
| 857 | 
            +
                            })
         | 
| 858 | 
            +
                            all_data.append(plotted)
         | 
| 859 | 
            +
                            all_event_times.append(pd.to_datetime(date_))
         | 
| 860 | 
            +
                            # print(date_)
         | 
| 861 | 
            +
                            additional_data = f"Date: {date_}\nLat/Lon: {lat_}, {lon_}\nClosest station: {station_name.values[0]}\n Distance:{round(station_distance.values[0],2)} km"
         | 
| 862 | 
            +
                            fig = plot_data_custom(plotted, date=pd.to_datetime(date_), save_path=None, subtitle =additional_data)
         | 
| 863 | 
            +
                            with st.status(f'Magnetic Data: {date_}', expanded=False) as status:
         | 
| 864 | 
            +
                                st.pyplot(fig)
         | 
| 865 | 
            +
                    status.update(label=f'Magnetic Data: {date_} - Finished!', state='complete')  # update() takes keyword-only arguments
         | 
| 866 | 
            +
                    except Exception as e:
         | 
| 867 | 
            +
                        #print(f"An error occurred: {e}")
         | 
| 868 | 
            +
                        pass
         | 
| 869 | 
            +
             | 
| 870 | 
            +
                if all_data:
         | 
| 871 | 
            +
                    fig_overlapped = plot_overlapped_timeseries(all_data, all_event_times)
         | 
| 872 | 
            +
        st.pyplot(fig_overlapped)  # display() is IPython-only; render through Streamlit
         | 
| 873 | 
            +
                    plt.close(fig_overlapped)
         | 
| 874 | 
            +
                    # fig_average = plot_average_timeseries(all_data, all_event_times)
         | 
| 875 | 
            +
                    # st.pyplot(fig_average)
         | 
| 876 | 
            +
                    fig_average_aligned = plot_average_timeseries_with_dtw(all_data, all_event_times)
         | 
| 877 | 
            +
        with st.status('Dynamic Time Warping Data', expanded=False) as stts:
         | 
| 878 | 
            +
                        st.pyplot(fig_average_aligned)
         | 
| 879 | 
            +
                return results
         | 
| 880 | 
            +
             | 
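# batch_requests ties the pieces together: for each (lon, lat, date) row it
# finds the closest observatory within `distance` km, pulls a -12h/+48h
# window of minute data, and renders per-event plots plus the overlapped
# and DTW-averaged summaries. Sketch (names as used elsewhere in this file):
#
#     results = batch_requests(stations, cases, 'Long', 'Lat', 'EventDate', distance=100)
#     results['station']   # IAGA codes of the stations that returned data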
| 881 | 
            +
             | 
| 882 | 
            +
            df = pd.DataFrame()
         | 
| 883 | 
            +
             | 
| 884 | 
            +
             | 
| 885 | 
            +
            # Upload dataset
         | 
| 886 | 
            +
            uploaded_file = st.file_uploader("Choose a file", type=["csv", "xlsx"])
         | 
| 887 | 
            +
             | 
| 888 | 
            +
            if uploaded_file is not None:
         | 
| 889 | 
            +
                if uploaded_file.name.endswith('.csv'):
         | 
| 890 | 
            +
                    df = pd.read_csv(uploaded_file)
         | 
| 891 | 
            +
                else:
         | 
| 892 | 
            +
                    df = pd.read_excel(uploaded_file)
         | 
| 893 | 
            +
                stations = get_stations()
         | 
| 894 | 
            +
                st.write("Dataset Loaded:")
         | 
| 895 | 
            +
                df = filter_dataframe(df)
         | 
| 896 | 
            +
                st.dataframe(df)
         | 
| 897 | 
            +
             | 
| 898 | 
            +
                # Select columns
         | 
| 899 | 
            +
                lon_col = st.selectbox("Select Longitude Column", df.columns)
         | 
| 900 | 
            +
                lat_col = st.selectbox("Select Latitude Column", df.columns)
         | 
| 901 | 
            +
                date_col = st.selectbox("Select Date Column", df.columns)
         | 
| 902 | 
            +
                distance = st.number_input("Enter Distance", min_value=0, value=100)
         | 
| 903 | 
            +
             | 
| 904 | 
            +
                # Process data
         | 
| 905 | 
            +
                if st.button("Process Data"):
         | 
| 906 | 
            +
                    cases = clean_uap_data(df, lat_col, lon_col, date_col)
         | 
| 907 | 
            +
                    results = batch_requests(stations, cases, lon_col, lat_col, date_col, distance=distance)
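# End-to-end flow of this page: upload a CSV/XLSX, filter it, pick the
# lat/lon/date columns, then batch_requests finds the nearest observatory
# to each event, pulls its -12h/+48h window, and renders the per-event,
# overlapped, and DTW-averaged plots.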
         | 
    	
        map.py
    ADDED
    
    | @@ -0,0 +1,506 @@ | |
| 1 | 
            +
            import json
         | 
| 2 | 
            +
            import streamlit as st
         | 
| 3 | 
            +
            #import geopandas as gpd
         | 
| 4 | 
            +
            from keplergl import keplergl
         | 
| 5 | 
            +
            import pandas as pd
         | 
| 8 | 
            +
            import numpy as np
         | 
| 9 | 
            +
            import matplotlib.pyplot as plt
         | 
| 10 | 
            +
            import seaborn as sns
         | 
| 11 | 
            +
            from uap_analyzer import UAPParser, UAPAnalyzer, UAPVisualizer
         | 
| 12 | 
            +
            # import ChartGen
         | 
| 13 | 
            +
            # from ChartGen import ChartGPT
         | 
| 14 | 
            +
            from Levenshtein import distance
         | 
| 15 | 
            +
            from sklearn.model_selection import train_test_split
         | 
| 16 | 
            +
            from sklearn.metrics import confusion_matrix
         | 
| 17 | 
            +
            from stqdm import stqdm
         | 
| 18 | 
            +
            stqdm.pandas()
         | 
| 19 | 
            +
            import streamlit.components.v1 as components
         | 
| 20 | 
            +
            from dateutil import parser
         | 
| 21 | 
            +
            from sentence_transformers import SentenceTransformer
         | 
| 22 | 
            +
            import torch
         | 
| 23 | 
            +
            import squarify
         | 
| 24 | 
            +
            import matplotlib.colors as mcolors
         | 
| 25 | 
            +
            import textwrap
         | 
| 26 | 
            +
            import datamapplot
         | 
| 27 | 
            +
            from streamlit_extras.stateful_button import button as stateful_button
         | 
| 28 | 
            +
            from streamlit_keplergl import keplergl_static
         | 
| 29 | 
            +
            from keplergl import KeplerGl
         | 
| 30 | 
            +
             | 
| 31 | 
            +
             | 
| 32 | 
            +
            st.set_option('deprecation.showPyplotGlobalUse', False)
         | 
| 33 | 
            +
             | 
| 34 | 
            +
            from pandas.api.types import (
         | 
| 35 | 
            +
                is_categorical_dtype,
         | 
| 36 | 
            +
                is_datetime64_any_dtype,
         | 
| 37 | 
            +
                is_numeric_dtype,
         | 
| 38 | 
            +
                is_object_dtype,
         | 
| 39 | 
            +
            )
         | 
| 40 | 
            +
             | 
| 41 | 
            +
            st.title('Interactive Map')
         | 
| 42 | 
            +
             | 
| 43 | 
            +
            # Initialize session state
         | 
| 44 | 
            +
            if 'analyzers' not in st.session_state:
         | 
| 45 | 
            +
                st.session_state['analyzers'] = []
         | 
| 46 | 
            +
            if 'col_names' not in st.session_state:
         | 
| 47 | 
            +
                st.session_state['col_names'] = []
         | 
| 48 | 
            +
            if 'clusters' not in st.session_state:
         | 
| 49 | 
            +
                st.session_state['clusters'] = {}
         | 
| 50 | 
            +
            if 'new_data' not in st.session_state:
         | 
| 51 | 
            +
                st.session_state['new_data'] = pd.DataFrame()
         | 
| 52 | 
            +
            if 'dataset' not in st.session_state:
         | 
| 53 | 
            +
                st.session_state['dataset'] = pd.DataFrame()
         | 
| 54 | 
            +
            if 'data_processed' not in st.session_state:
         | 
| 55 | 
            +
                st.session_state['data_processed'] = False
         | 
| 56 | 
            +
            if 'stage' not in st.session_state:
         | 
| 57 | 
            +
                st.session_state['stage'] = 0
         | 
| 58 | 
            +
            if 'filtered_data' not in st.session_state:
         | 
| 59 | 
            +
                st.session_state['filtered_data'] = None
         | 
| 60 | 
            +
            if 'gemini_answer' not in st.session_state:
         | 
| 61 | 
            +
                st.session_state['gemini_answer'] = None
         | 
| 62 | 
            +
            if 'parsed_responses' not in st.session_state:
         | 
| 63 | 
            +
                st.session_state['parsed_responses'] = None
         | 
| 64 | 
            +
            if 'map_generated' not in st.session_state:
         | 
| 65 | 
            +
                st.session_state['map_generated'] = False
         | 
| 66 | 
            +
if 'data_loaded' not in st.session_state:  # key must match the one initialized below
         | 
| 67 | 
            +
                st.session_state['data_loaded'] = False
         | 
| 68 | 
            +
             | 
| 69 | 
            +
             | 
| 70 | 
            +
            if "datasets" not in st.session_state:
         | 
| 71 | 
            +
                st.session_state.datasets = []
         | 
| 72 | 
            +
             | 
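# Streamlit reruns the whole script on every widget interaction, so each
# key is seeded exactly once here; st.session_state is dict-like, so
# st.session_state.setdefault('map_generated', False) is an equivalent
# one-liner for any of the guards above.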
| 73 | 
            +
            # sf_zip_geo_gdf = gpd.read_file("sf_zip_geo.geojson")
         | 
| 74 | 
            +
            # sf_zip_geo_gdf.label = "SF Zip Geo"
         | 
| 75 | 
            +
            # sf_zip_geo_gdf.id = "sf-zip-geo"
         | 
| 76 | 
            +
            # st.session_state.datasets.append(sf_zip_geo_gdf)
         | 
| 77 | 
            +
             | 
| 78 | 
            +
            def plot_treemap(df, column, top_n=32):
         | 
| 79 | 
            +
                    # Get the value counts and the top N labels
         | 
| 80 | 
            +
                    value_counts = df[column].value_counts()
         | 
| 81 | 
            +
                    top_labels = value_counts.iloc[:top_n].index
         | 
| 82 | 
            +
                    
         | 
| 83 | 
            +
                    # Use np.where to replace all values not in the top N with 'Other'
         | 
| 84 | 
            +
                    revised_column = f'{column}_revised'
         | 
| 85 | 
            +
                    df[revised_column] = np.where(df[column].isin(top_labels), df[column], 'Other')
         | 
| 86 | 
            +
             | 
| 87 | 
            +
                    # Get the value counts including the 'Other' category
         | 
| 88 | 
            +
                    sizes = df[revised_column].value_counts().values
         | 
| 89 | 
            +
                    labels = df[revised_column].value_counts().index
         | 
| 90 | 
            +
             | 
| 91 | 
            +
                    # Get a gradient of colors
         | 
| 92 | 
            +
                    # colors = list(mcolors.TABLEAU_COLORS.values())
         | 
| 93 | 
            +
             | 
| 94 | 
            +
                    n_colors = len(sizes)
         | 
| 95 | 
            +
                    colors = plt.cm.Oranges(np.linspace(0.3, 0.9, n_colors))[::-1]
         | 
| 96 | 
            +
             | 
| 97 | 
            +
             | 
| 98 | 
            +
                    # Get % of each category
         | 
| 99 | 
            +
                    percents = sizes / sizes.sum()
         | 
| 100 | 
            +
             | 
| 101 | 
            +
                    # Prepare labels with percentages
         | 
| 102 | 
            +
                    labels = [f'{label}\n {percent:.1%}' for label, percent in zip(labels, percents)]
         | 
| 103 | 
            +
             | 
| 104 | 
            +
                    fig, ax = plt.subplots(figsize=(20, 12))
         | 
| 105 | 
            +
             | 
| 106 | 
            +
                    # Plot the treemap
         | 
| 107 | 
            +
                    squarify.plot(sizes=sizes, label=labels, alpha=0.7, pad=True, color=colors, text_kwargs={'fontsize': 10})
         | 
| 108 | 
            +
             | 
| 109 | 
            +
                    ax = plt.gca()
         | 
| 110 | 
            +
                    # Iterate over text elements and rectangles (patches) in the axes for color adjustment
         | 
| 111 | 
            +
                    for text, rect in zip(ax.texts, ax.patches):
         | 
| 112 | 
            +
                        background_color = rect.get_facecolor()
         | 
| 113 | 
            +
                        r, g, b, _ = mcolors.to_rgba(background_color)
         | 
| 114 | 
            +
                        brightness = np.average([r, g, b])
         | 
| 115 | 
            +
                        text.set_color('white' if brightness < 0.5 else 'black')
         | 
| 116 | 
            +
             | 
| 117 | 
            +
                        # Adjust font size based on rectangle's area and wrap long text
         | 
| 118 | 
            +
                        coef = 0.8
         | 
| 119 | 
            +
                        font_size = np.sqrt(rect.get_width() * rect.get_height()) * coef
         | 
| 120 | 
            +
                        text.set_fontsize(font_size)
         | 
| 121 | 
            +
                        wrapped_text = textwrap.fill(text.get_text(), width=20)
         | 
| 122 | 
            +
                        text.set_text(wrapped_text)
         | 
| 123 | 
            +
             | 
| 124 | 
            +
                    plt.axis('off')
         | 
| 125 | 
            +
                    plt.gca().invert_yaxis()
         | 
| 126 | 
            +
                    plt.gcf().set_size_inches(20, 12)
         | 
| 127 | 
            +
             | 
| 128 | 
            +
                    fig.patch.set_alpha(0)
         | 
| 129 | 
            +
             | 
| 130 | 
            +
                    ax.patch.set_alpha(0)
         | 
| 131 | 
            +
                    return fig
         | 
| 132 | 
            +
             | 
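# Hedged sketch (hypothetical column): treemap of the 32 most frequent
# values, with the long tail lumped into 'Other'. Note the function also
# writes a '<column>_revised' helper column back onto df.
#
#     fig = plot_treemap(df, 'Shape', top_n=32)
#     st.pyplot(fig)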
| 133 | 
            +
            def plot_hist(df, column, bins=10, kde=True):
         | 
| 134 | 
            +
                    fig, ax = plt.subplots(figsize=(12, 6))
         | 
| 135 | 
            +
        sns.histplot(data=df, x=column, kde=kde, bins=bins, color='orange')  # honor the kde parameter
         | 
| 136 | 
            +
                    # set the ticks and frame in orange
         | 
| 137 | 
            +
                    ax.spines['bottom'].set_color('orange')
         | 
| 138 | 
            +
                    ax.spines['top'].set_color('orange')
         | 
| 139 | 
            +
                    ax.spines['right'].set_color('orange')
         | 
| 140 | 
            +
                    ax.spines['left'].set_color('orange')
         | 
| 141 | 
            +
                    ax.xaxis.label.set_color('orange')
         | 
| 142 | 
            +
                    ax.yaxis.label.set_color('orange')
         | 
| 143 | 
            +
                    ax.tick_params(axis='x', colors='orange')
         | 
| 144 | 
            +
                    ax.tick_params(axis='y', colors='orange')
         | 
| 145 | 
            +
                    ax.title.set_color('orange')
         | 
| 146 | 
            +
             | 
| 147 | 
            +
                    # Set transparent background
         | 
| 148 | 
            +
                    fig.patch.set_alpha(0)
         | 
| 149 | 
            +
                    ax.patch.set_alpha(0)
         | 
| 150 | 
            +
                    return fig
         | 
| 151 | 
            +
             | 
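# Sketch (hypothetical column name):
#
#     fig = plot_hist(df, 'DurationSeconds', bins=30)
#     st.pyplot(fig)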
| 152 | 
            +
             | 
| 153 | 
            +
            def plot_line(df, x_column, y_columns, figsize=(12, 10), color='orange', title=None, rolling_mean_value=2):
         | 
| 154 | 
            +
                import matplotlib.cm as cm
         | 
| 155 | 
            +
                # Sort the dataframe by the date column
         | 
| 156 | 
            +
    df = df.sort_values(by=x_column)

    # Show calendar dates on the x-axis; this must run before df.plot below
    if np.issubdtype(df[x_column].dtype, np.datetime64):
        df[x_column] = pd.to_datetime(df[x_column]).dt.date
         | 
| 157 | 
            +
             | 
| 158 | 
            +
                # Calculate rolling mean for each y_column
         | 
| 159 | 
            +
                if rolling_mean_value:
         | 
| 160 | 
            +
                    df[y_columns] = df[y_columns].rolling(len(df) // rolling_mean_value).mean()
         | 
| 161 | 
            +
             | 
| 162 | 
            +
                # Create the plot
         | 
| 163 | 
            +
                fig, ax = plt.subplots(figsize=figsize)
         | 
| 164 | 
            +
             | 
| 165 | 
            +
                colors = cm.Oranges(np.linspace(0.2, 1, len(y_columns)))
         | 
| 166 | 
            +
             | 
| 167 | 
            +
                # Plot each y_column as a separate line with a different color
         | 
| 168 | 
            +
                for i, y_column in enumerate(y_columns):
         | 
| 169 | 
            +
                    df.plot(x=x_column, y=y_column, ax=ax, color=colors[i], label=y_column, linewidth=.5)
         | 
| 170 | 
            +
             | 
| 171 | 
            +
                # Rotate x-axis labels
         | 
| 172 | 
            +
                ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha='right')
         | 
| 173 | 
            +
             | 
| 177 | 
            +
             | 
| 178 | 
            +
                # Set title, labels, and legend
         | 
| 179 | 
            +
                ax.set_title(title or f'{", ".join(y_columns)} over {x_column}', color=color, fontweight='bold')
         | 
| 180 | 
            +
                ax.set_xlabel(x_column, color=color)
         | 
| 181 | 
            +
                ax.set_ylabel(', '.join(y_columns), color=color)
         | 
| 182 | 
            +
                ax.spines['bottom'].set_color('orange')
         | 
| 183 | 
            +
                ax.spines['top'].set_color('orange')
         | 
| 184 | 
            +
                ax.spines['right'].set_color('orange')
         | 
| 185 | 
            +
                ax.spines['left'].set_color('orange')
         | 
| 186 | 
            +
                ax.xaxis.label.set_color('orange')
         | 
| 187 | 
            +
                ax.yaxis.label.set_color('orange')
         | 
| 188 | 
            +
                ax.tick_params(axis='x', colors='orange')
         | 
| 189 | 
            +
                ax.tick_params(axis='y', colors='orange')
         | 
| 190 | 
            +
                ax.title.set_color('orange')
         | 
| 191 | 
            +
             | 
| 192 | 
            +
                ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')
         | 
| 193 | 
            +
             | 
| 194 | 
            +
                # Remove background
         | 
| 195 | 
            +
                fig.patch.set_alpha(0)
         | 
| 196 | 
            +
                ax.patch.set_alpha(0)
         | 
| 197 | 
            +
             | 
| 198 | 
            +
                return fig
         | 
| 199 | 
            +
             | 
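# Sketch (hypothetical columns): one line per y-column in an orange
# gradient, smoothed by a rolling mean whose window is
# len(df) // rolling_mean_value samples.
#
#     fig = plot_line(df, 'EventDate', ['Sightings', 'Reports'], rolling_mean_value=4)
#     st.pyplot(fig)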
| 200 | 
            +
def plot_bar(df, x_column, y_column, figsize=(12, 10), color='orange', title=None):
    fig, ax = plt.subplots(figsize=figsize)

    # Pass a label through to matplotlib so the legend call below has an artist to show
    sns.barplot(data=df, x=x_column, y=y_column, color=color, ax=ax, label=y_column)

    ax.set_title(title if title else f'{y_column} by {x_column}', color=color, fontweight='bold')
    ax.set_xlabel(x_column, color=color)
    ax.set_ylabel(y_column, color=color)

    # Remove background and apply the orange theme (tick colors are set once here,
    # instead of twice as before)
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')
    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    return fig

def plot_grouped_bar(df, x_columns, y_column, figsize=(12, 10), colors=None, title=None):
    fig, ax = plt.subplots(figsize=figsize)

    width = 0.8 / len(x_columns)  # the width of the bars
    x = np.arange(len(df), dtype=float)  # the label locations (float: an in-place `x += width` on an int array raises)

    for i, x_column in enumerate(x_columns):
        # Plain matplotlib bars at offset positions; seaborn's barplot would
        # re-bin the positional x values as categories rather than shift them.
        ax.bar(x + i * width, df[y_column], width=width, color=colors[i] if colors else None, label=x_column)

    ax.set_title(title if title else f'{y_column} by {", ".join(x_columns)}', color='orange', fontweight='bold')
    ax.set_xlabel('Groups', color='orange')
    ax.set_ylabel(y_column, color='orange')

    # Center the group tick labels under each cluster of bars
    ax.set_xticks(x + width * (len(x_columns) - 1) / 2)
    ax.set_xticklabels(df.index)

    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')

    # Remove background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.title.set_color('orange')
    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    return fig

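# Usage sketch (illustrative names): the y_column series is drawn once per entry
# in the list, each run shifted by one bar-width so the bars sit side by side.
#   fig = plot_grouped_bar(df, ['observed', 'reported'], 'count')
#   st.pyplot(fig)
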
def generate_kepler_map(data):
    # KeplerGl takes datasets as a name -> DataFrame mapping; the original
    # lowercase `keplergl(data, height=400)` call was not a defined function.
    map_config = KeplerGl(height=400, data={'data': data})
    return map_config

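# Minimal sketch of rendering such a map (keplergl_static is the same helper
# used at the bottom of this script):
#   m = generate_kepler_map(df)
#   keplergl_static(m, center_map=True)
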
def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """
    Adds a UI on top of a dataframe to let viewers filter columns

    Args:
        df (pd.DataFrame): Original dataframe

    Returns:
        pd.DataFrame: Filtered dataframe
    """

    title_font = "Arial"
    body_font = "Arial"
    title_size = 32
    colors = ["red", "green", "blue"]
    interpretation = False
    extract_docx = False
    title = "My Chart"
    regex = ".*"
    img_path = 'default_image.png'

    #try:
    #    modify = st.checkbox("Add filters on raw data")
    #except:
    #    try:
    #        modify = st.checkbox("Add filters on processed data")
    #    except:
    #        try:
    #            modify = st.checkbox("Add filters on parsed data")
    #        except:
    #            pass

    #if not modify:
    #    return df

    df_ = df.copy()
    # Try to convert datetimes into a standard format (datetime, no timezone)

    #modification_container = st.container()

    #with modification_container:
    # Fall back through alternative widget labels so a second call on the same
    # page does not trip Streamlit's duplicate-widget-ID check.
    try:
        to_filter_columns = st.multiselect("Filter dataframe on", df_.columns)
    except Exception:
        try:
            to_filter_columns = st.multiselect("Filter dataframe", df_.columns)
        except Exception:
            try:
                to_filter_columns = st.multiselect("Filter the dataframe on", df_.columns)
            except Exception:
                pass

    date_column = None
    filtered_columns = []

    for column in to_filter_columns:
        left, right = st.columns((1, 20))
        # Treat columns with fewer than 120 unique values as categorical if they
        # are neither dates nor numeric
        if is_categorical_dtype(df_[column]) or (df_[column].nunique() < 120 and not is_datetime64_any_dtype(df_[column]) and not is_numeric_dtype(df_[column])):
            user_cat_input = right.multiselect(
                f"Values for {column}",
                df_[column].value_counts().index.tolist(),
                default=list(df_[column].value_counts().index)
            )
            df_ = df_[df_[column].isin(user_cat_input)]
            filtered_columns.append(column)

            with st.status(f"Category Distribution: {column}", expanded=False) as stat:
                st.pyplot(plot_treemap(df_, column))

        elif is_numeric_dtype(df_[column]):
            _min = float(df_[column].min())
            _max = float(df_[column].max())
            step = (_max - _min) / 100 or 1.0  # avoid a zero step when min == max
            user_num_input = right.slider(
                f"Values for {column}",
                min_value=_min,
                max_value=_max,
                value=(_min, _max),
                step=step,
            )
            df_ = df_[df_[column].between(*user_num_input)]
            filtered_columns.append(column)

            # Chart_GPT = ChartGPT(df_, title_font, body_font, title_size,
            #      colors, interpretation, extract_docx, img_path)

            with st.status(f"Numerical Distribution: {column}", expanded=False) as stat_:
                # Guard against zero bins when a column has a single unique value
                st.pyplot(plot_hist(df_, column, bins=max(1, int(round(len(df_[column].unique()) - 1) / 2))))

        elif is_object_dtype(df_[column]):
            try:
                # infer_datetime_format is deprecated in pandas 2.x; the default parser suffices
                df_[column] = pd.to_datetime(df_[column], errors='coerce')
            except Exception:
                try:
                    df_[column] = df_[column].apply(parser.parse)
                except Exception:
                    pass

            if is_datetime64_any_dtype(df_[column]):
                df_[column] = df_[column].dt.tz_localize(None)
                min_date = df_[column].min().date()
                max_date = df_[column].max().date()
                user_date_input = right.date_input(
                    f"Values for {column}",
                    value=(min_date, max_date),
                    min_value=min_date,
                    max_value=max_date,
                )
                # if len(user_date_input) == 2:
                #     start_date, end_date = user_date_input
                #     df_ = df_.loc[df_[column].dt.date.between(start_date, end_date)]
                if len(user_date_input) == 2:
                    user_date_input = tuple(map(pd.to_datetime, user_date_input))
                    start_date, end_date = user_date_input
                    df_ = df_.loc[df_[column].between(start_date, end_date)]

                date_column = column

                if date_column and filtered_columns:
                    numeric_columns = [col for col in filtered_columns if is_numeric_dtype(df_[col])]
                    if numeric_columns:
                        fig = plot_line(df_, date_column, numeric_columns)
                        #st.pyplot(fig)
                    # now to deal with categorical columns
                    categorical_columns = [col for col in filtered_columns if is_categorical_dtype(df_[col])]
                    if categorical_columns:
                        fig2 = plot_bar(df_, date_column, categorical_columns[0])
                        #st.pyplot(fig2)
                    with st.status(f"Date Distribution: {column}", expanded=False) as stat:
                        try:
                            st.pyplot(fig)
                        except Exception as e:
                            st.error(f"Error plotting line chart: {e}")
                        try:
                            st.pyplot(fig2)
                        except Exception as e:
                            st.error(f"Error plotting bar chart: {e}")

        else:
            user_text_input = right.text_input(
                f"Substring or regex in {column}",
            )
            if user_text_input:
                df_ = df_[df_[column].astype(str).str.contains(user_text_input)]

    # Report the row count after filtering, with % of the original
    st.write(f"{len(df_)} rows ({len(df_) / len(df) * 100:.2f}%)")
    return df_

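# Usage sketch: wrap any loaded frame to get the filter widgets plus the
# distribution plots above (the file name here is hypothetical).
#   filtered = filter_dataframe(pd.read_csv('uap_sightings.csv'))
#   st.dataframe(filtered)
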
def find_lat_lon_columns(df):
    # Case-insensitive search for the first latitude/longitude-like column pair
    lat_columns = df.columns[df.columns.str.lower().str.contains('lat')]
    lon_columns = df.columns[df.columns.str.lower().str.contains('lon|lng')]

    if len(lat_columns) > 0 and len(lon_columns) > 0:
        return lat_columns[0], lon_columns[0]
    else:
        return None, None

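# e.g. columns ['Latitude', 'Longitude', 'shape'] -> ('Latitude', 'Longitude');
# a frame with no such pair -> (None, None).
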
my_dataset = st.file_uploader("Upload Parsed DataFrame", type=["csv", "xlsx"])
map_1 = KeplerGl(height=800)
# The full power plant database is loaded here; the multiSelect filter preset in
# military_config.kgl narrows the layer to primary_fuel == "Nuclear".
powerplant = pd.read_csv('global_power_plant_database.csv')
secret_bases = pd.read_csv('secret_bases.csv')

map_1.add_data(
            data=secret_bases, name="secret_bases"
        )
map_1.add_data(
        data=powerplant, name='nuclear_powerplants'
        )


if my_dataset is not None:
    try:
        if my_dataset.type == "text/csv":
            data = pd.read_csv(my_dataset)
        elif my_dataset.type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
            data = pd.read_excel(my_dataset)
        else:
            st.error("Unsupported file type. Please upload a CSV or Excel file.")
            st.stop()
        # Named parsed_df rather than `parser` so the dateutil parser used inside
        # filter_dataframe is not shadowed by this assignment.
        parsed_df = filter_dataframe(data)
        st.session_state['parsed_responses'] = parsed_df
        st.dataframe(parsed_df)
        st.success(f"Successfully loaded and displayed data from {my_dataset.name}")
        #h3_hex_id_df = pd.read_csv("keplergl/h3_data.csv")
        st.session_state['data_loaded'] = True
        # Load the base config
        with open('military_config.kgl', 'r') as f:
            base_config = json.load(f)

        with open('uap_config.kgl', 'r') as f:
            uap_config = json.load(f)

        if parsed_df.columns.str.contains('date').any():
            # Get the date column name
            date_column = parsed_df.columns[parsed_df.columns.str.contains('date')].values[0]

            # Create a new filter bound to the uploaded sightings dataset
            new_filter = {
                "dataId": "uap_sightings",
                "name": date_column
            }

            # Append the new filter to the existing filters
            base_config['config']['visState']['filters'].append(new_filter)

            # Update the map config
            map_1.config = base_config

        map_1.add_data(
            data=parsed_df, name="uap_sightings"
            )

        # Find the latitude and longitude columns in the dataframe
        lat_col, lon_col = find_lat_lon_columns(parsed_df)

        if lat_col and lon_col:
            # Point the UAP layers at whatever lat/lon columns the upload uses
            for layer in uap_config['config']['visState']['layers']:
                if 'config' in layer and 'columns' in layer['config']:
                    if 'lat' in layer['config']['columns']:
                        layer['config']['columns']['lat'] = lat_col
                    if 'lng' in layer['config']['columns']:
                        layer['config']['columns']['lng'] = lon_col

            # Now extend the base_config with the updated uap_config layers
            base_config['config']['visState']['layers'].extend(uap_config['config']['visState']['layers'])
            map_1.config = base_config
        else:
            base_config['config']['visState']['layers'].extend(uap_config['config']['visState']['layers'])
            map_1.config = base_config

        keplergl_static(map_1, center_map=True)
        st.session_state['map_generated'] = True
    except Exception as e:
        st.error(f"An error occurred while reading the file: {e}")
else:
    st.warning("Please upload a file to get started.")
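# For reference, the filter appended above ends up next to the preset
# primary_fuel filter in military_config.kgl, shaped like:
#   {"dataId": "uap_sightings", "name": "<detected date column>"}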
    	
        military_config.kgl
    ADDED
    
    | @@ -0,0 +1,264 @@ | |
{
  "version": "v1",
  "config": {
    "visState": {
      "filters": [
        {
          "dataId": [
            "nuclear_powerplants"
          ],
          "id": "c40zwvx0v",
          "name": [
            "primary_fuel"
          ],
          "type": "multiSelect",
          "value": [
            "Nuclear"
          ],
          "enlarged": false,
          "plotType": "histogram",
          "animationWindow": "free",
          "yAxis": null,
          "speed": 1
        }
      ],
      "layers": [
        {
          "id": "k1xxw47",
          "type": "icon",
          "config": {
            "dataId": "secret_bases",
            "label": "Underground Bases",
            "color": [
              210,
              0,
              0
            ],
            "highlightColor": [
              252,
              242,
              26,
              255
            ],
            "columns": {
              "lat": "latitude",
              "lng": "longitude",
              "icon": "icon"
            },
            "isVisible": true,
            "visConfig": {
              "radius": 35.9,
              "fixedRadius": false,
              "opacity": 0.8,
              "colorRange": {
                "name": "Global Warming",
                "type": "sequential",
                "category": "Uber",
                "colors": [
                  "#5A1846",
                  "#900C3F",
                  "#C70039",
                  "#E3611C",
                  "#F1920E",
                  "#FFC300"
                ]
              },
              "radiusRange": [
                0,
                50
              ]
            },
            "hidden": false,
            "textLabel": [
              {
                "field": null,
                "color": [
                  255,
                  255,
                  255
                ],
                "size": 18,
                "offset": [
                  0,
                  0
                ],
                "anchor": "start",
                "alignment": "center"
              }
            ]
          },
          "visualChannels": {
            "colorField": null,
            "colorScale": "quantile",
            "sizeField": null,
            "sizeScale": "linear"
          }
        },
        {
          "id": "i53syw",
          "type": "icon",
          "config": {
            "dataId": "nuclear_powerplants",
            "label": "Nuclear Facilities",
            "color": [
              253,
              167,
              0
            ],
            "highlightColor": [
              252,
              242,
              26,
              255
            ],
            "columns": {
              "lat": "latitude",
              "lng": "longitude",
              "icon": "icon"
            },
            "isVisible": true,
            "visConfig": {
              "radius": 29.1,
              "fixedRadius": false,
              "opacity": 0.8,
              "colorRange": {
                "name": "Global Warming",
                "type": "sequential",
                "category": "Uber",
                "colors": [
                  "#5A1846",
                  "#900C3F",
                  "#C70039",
                  "#E3611C",
                  "#F1920E",
                  "#FFC300"
                ]
              },
              "radiusRange": [
                0,
                50
              ]
            },
            "hidden": false,
            "textLabel": [
              {
                "field": null,
                "color": [
                  255,
                  255,
                  255
                ],
                "size": 18,
                "offset": [
                  0,
                  0
                ],
                "anchor": "start",
                "alignment": "center"
              }
            ]
          },
          "visualChannels": {
            "colorField": null,
            "colorScale": "quantile",
            "sizeField": null,
            "sizeScale": "linear"
          }
        }
      ],
      "interactionConfig": {
        "tooltip": {
          "fieldsToShow": {
            "qw5zqkhrp": [
              {
                "name": "0",
                "format": null
              },
              {
                "name": "country",
                "format": null
              },
              {
                "name": "country_long",
                "format": null
              },
              {
                "name": "name",
                "format": null
              },
              {
                "name": "gppd_idnr",
                "format": null
              }
            ],
            "hmakkovr9": [
              {
                "name": "0",
                "format": null
              },
              {
                "name": "id",
                "format": null
              },
              {
                "name": "name",
                "format": null
              },
              {
                "name": "icon",
                "format": null
              }
            ]
          },
          "compareMode": false,
          "compareType": "absolute",
          "enabled": true
        },
        "brush": {
          "size": 0.5,
          "enabled": false
        },
        "geocoder": {
          "enabled": false
        },
        "coordinate": {
          "enabled": false
        }
      },
      "layerBlending": "normal",
      "splitMaps": [],
      "animationConfig": {
        "currentTime": null,
        "speed": 1
      }
    },
    "mapState": {
      "bearing": 0,
      "dragRotate": false,
      "latitude": 34.502289455408366,
      "longitude": -27.82946603675378,
      "pitch": 0,
      "zoom": 2.745704196646382,
      "isSplit": false
    },
    "mapStyle": {
      "styleType": "dark",
      "topLayerGroups": {},
      "visibleLayerGroups": {
        "label": true,
        "road": true,
        "border": false,
        "building": true,
        "water": true,
        "land": true,
        "3d building": false
      },
      "threeDBuildingColor": [
        9.665468314072013,
        17.18305478057247,
        31.1442867897876
      ],
      "mapStyles": {}
    }
  }
}
    	
        navigation.py
    ADDED
    
    | @@ -0,0 +1,27 @@ | |
import streamlit as st
from st_paywall import add_auth

# add_auth(required=False)

# st.write(st.session_state.email)
# st.write(st.session_state.user_subscribed)

# if "buttons" in st.session_state:
#    st.session_state.buttons = st.session_state.buttons

st.set_page_config(
    page_title="UAP Analytics",
    page_icon="🛸",
    layout="wide",
    initial_sidebar_state="expanded",
)

pg = st.navigation([
            st.Page("rag_search.py", title="Smart-Search (Retrieval Augmented Generations)", icon="🔍"),
            st.Page("parsing.py", title="UAP Feature Extraction (Shape, Speed, Color)", icon="📄"),
            st.Page("analyzing.py", title="Statistical Analysis (UMAP+HDBSCAN, XGBoost, V-Cramer)", icon="🧠"),
            st.Page("magnetic.py", title="Magnetic Anomaly Detection (InterMagnet Stations)", icon="🧲"),
            st.Page("map.py", title="Interactive Map", icon="🗺️"),
        ])

pg.run()
    	
        parsing.py
    ADDED
    
    | @@ -0,0 +1,678 @@ | |
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from uap_analyzer import UAPParser, UAPAnalyzer, UAPVisualizer
# import ChartGen
# from ChartGen import ChartGPT
from Levenshtein import distance
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from stqdm import stqdm
stqdm.pandas()
import streamlit.components.v1 as components
from dateutil import parser
from sentence_transformers import SentenceTransformer
import torch
import squarify
import matplotlib.colors as mcolors
import textwrap
import datamapplot
import openai
from openai import OpenAI
import os
import json
import plotly.graph_objects as go

st.set_option('deprecation.showPyplotGlobalUse', False)

from pandas.api.types import (
    is_categorical_dtype,
    is_datetime64_any_dtype,
    is_numeric_dtype,
    is_object_dtype,
)


def load_data(file_path, key='df'):
    return pd.read_hdf(file_path, key=key)

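# e.g. df = load_data('final_ufoseti_dataset.h5')  # the bundled HDF5 dataset; key defaults to 'df'
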
            def gemini_query(question, selected_data, gemini_key):
         | 
| 45 | 
            +
             | 
| 46 | 
            +
                if question == "":
         | 
| 47 | 
            +
                    question = "Summarize the following data in relevant bullet points"
         | 
| 48 | 
            +
             | 
| 49 | 
            +
                import pathlib
         | 
| 50 | 
            +
                import textwrap
         | 
| 51 | 
            +
             | 
| 52 | 
            +
                import google.generativeai as genai
         | 
| 53 | 
            +
             | 
| 54 | 
            +
                from IPython.display import display
         | 
| 55 | 
            +
                from IPython.display import Markdown
         | 
| 56 | 
            +
             | 
| 57 | 
            +
             | 
| 58 | 
            +
                def to_markdown(text):
         | 
| 59 | 
            +
                    text = text.replace('•', '  *')
         | 
| 60 | 
            +
                    return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))
         | 
| 61 | 
            +
                
         | 
| 62 | 
            +
                # selected_data is a list
         | 
| 63 | 
            +
                # remove empty
         | 
| 64 | 
            +
             | 
| 65 | 
            +
                filtered = [str(x) for x in selected_data if str(x) != '' and x is not None]
         | 
| 66 | 
            +
                # make a string
         | 
| 67 | 
            +
                context = '\n'.join(filtered)
         | 
| 68 | 
            +
             | 
| 69 | 
            +
                genai.configure(api_key=gemini_key)
         | 
| 70 | 
            +
                query_model = genai.GenerativeModel('models/gemini-1.5-pro-latest')
         | 
| 71 | 
            +
                response = query_model.generate_content([f"{question}\n Answer based on this context: {context}\n\n"])
         | 
| 72 | 
            +
    return response.text
         | 
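# Illustrative wrapper (assumes GEMINI_KEY comes from config and `rows` holds
# the currently filtered report texts); an empty question falls back to the
# default bullet-point summary prompt above.
def _gemini_summary_example(rows, key):
    return gemini_query("", rows, key)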
| 73 | 
            +
             | 
| 74 | 
            +
            def plot_treemap(df, column, top_n=32):
         | 
| 75 | 
            +
        # Work on a copy so the temporary '<column>_revised' helper column
        # does not leak back into the caller's dataframe
        df = df.copy()
        # Get the value counts and the top N labels
         | 
| 76 | 
            +
                    value_counts = df[column].value_counts()
         | 
| 77 | 
            +
                    top_labels = value_counts.iloc[:top_n].index
         | 
| 78 | 
            +
                    
         | 
| 79 | 
            +
                    # Use np.where to replace all values not in the top N with 'Other'
         | 
| 80 | 
            +
                    revised_column = f'{column}_revised'
         | 
| 81 | 
            +
                    df[revised_column] = np.where(df[column].isin(top_labels), df[column], 'Other')
         | 
| 82 | 
            +
             | 
| 83 | 
            +
                    # Get the value counts including the 'Other' category
         | 
| 84 | 
            +
                    sizes = df[revised_column].value_counts().values
         | 
| 85 | 
            +
                    labels = df[revised_column].value_counts().index
         | 
| 86 | 
            +
             | 
| 87 | 
            +
                    # Get a gradient of colors
         | 
| 88 | 
            +
                    # colors = list(mcolors.TABLEAU_COLORS.values())
         | 
| 89 | 
            +
             | 
| 90 | 
            +
                    n_colors = len(sizes)
         | 
| 91 | 
            +
                    colors = plt.cm.Oranges(np.linspace(0.3, 0.9, n_colors))[::-1]
         | 
| 92 | 
            +
             | 
| 93 | 
            +
             | 
| 94 | 
            +
                    # Get % of each category
         | 
| 95 | 
            +
                    percents = sizes / sizes.sum()
         | 
| 96 | 
            +
             | 
| 97 | 
            +
                    # Prepare labels with percentages
         | 
| 98 | 
            +
                    labels = [f'{label}\n {percent:.1%}' for label, percent in zip(labels, percents)]
         | 
| 99 | 
            +
             | 
| 100 | 
            +
                    fig, ax = plt.subplots(figsize=(20, 12))
         | 
| 101 | 
            +
             | 
| 102 | 
            +
                    # Plot the treemap
         | 
| 103 | 
            +
                    squarify.plot(sizes=sizes, label=labels, alpha=0.7, pad=True, color=colors, text_kwargs={'fontsize': 10})
         | 
| 104 | 
            +
             | 
| 105 | 
            +
                    ax = plt.gca()
         | 
| 106 | 
            +
                    # Iterate over text elements and rectangles (patches) in the axes for color adjustment
         | 
| 107 | 
            +
                    for text, rect in zip(ax.texts, ax.patches):
         | 
| 108 | 
            +
                        background_color = rect.get_facecolor()
         | 
| 109 | 
            +
                        r, g, b, _ = mcolors.to_rgba(background_color)
         | 
| 110 | 
            +
                        brightness = np.average([r, g, b])
         | 
| 111 | 
            +
                        text.set_color('white' if brightness < 0.5 else 'black')
         | 
| 112 | 
            +
             | 
| 113 | 
            +
            # Adjust font size based on the rectangle's area and wrap long text
            coef = 0.8
            font_size = np.sqrt(rect.get_width() * rect.get_height()) * coef
            text.set_fontsize(font_size)
            wrapped_text = textwrap.fill(text.get_text(), width=20)
            text.set_text(wrapped_text)

        # Hand the figure back so callers can render it with st.pyplot
        return fig
         | 
| 114 | 
            +
              
         | 
| 115 | 
            +
             | 
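# Standalone sketch of the label-contrast rule used in the loop above: white
# text on dark tiles, black on light ones, from the mean RGB brightness.
def _label_color(facecolor):
    r, g, b, _ = mcolors.to_rgba(facecolor)
    return 'white' if np.average([r, g, b]) < 0.5 else 'black'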
| 124 | 
            +
             | 
| 125 | 
            +
             | 
| 126 | 
            +
            class CachedUAPParser(UAPParser):
         | 
| 127 | 
            +
                def __init__(self, *args, **kwargs):
         | 
| 128 | 
            +
                    super().__init__(*args, **kwargs)
         | 
| 129 | 
            +
                    if 'parsed_responses' not in st.session_state:
         | 
| 130 | 
            +
                        st.session_state['parsed_responses'] = {}
         | 
| 131 | 
            +
             | 
| 132 | 
            +
                def parse_responses(self):
         | 
| 133 | 
            +
                    parsed_responses = {}
         | 
| 134 | 
            +
                    not_parsed = 0
         | 
| 135 | 
            +
                    try:
         | 
| 136 | 
            +
                        for k, v in self.responses.items():
         | 
| 137 | 
            +
                            try:
         | 
| 138 | 
            +
                                parsed_responses[k] = json.loads(v)
         | 
| 139 | 
            +
                except (json.JSONDecodeError, TypeError):
         | 
| 140 | 
            +
                                try:
         | 
| 141 | 
            +
                                    parsed_responses[k] = json.loads(v.replace("'", '"'))
         | 
| 142 | 
            +
                    except Exception:  # the quote repair can itself fail
         | 
| 143 | 
            +
                                    not_parsed += 1
         | 
| 144 | 
            +
             | 
| 145 | 
            +
                        # Update the cached responses
         | 
| 146 | 
            +
                        st.session_state['parsed_responses'] = parsed_responses
         | 
| 147 | 
            +
                    except Exception as e:
         | 
| 148 | 
            +
                        st.error(f"Error parsing responses: {e}")
         | 
| 149 | 
            +
             
         | 
| 150 | 
            +
                    st.write(f"Number of unparsed responses: {not_parsed}")
         | 
| 151 | 
            +
                    st.write(f"Number of parsed responses: {len(parsed_responses)}")
         | 
| 152 | 
            +
                    return st.session_state['parsed_responses']
         | 
| 153 | 
            +
             | 
| 154 | 
            +
                def responses_to_df(self, col, parsed_responses):
         | 
| 155 | 
            +
                    try:
         | 
| 156 | 
            +
                        parsed_df = pd.DataFrame(parsed_responses).T
         | 
| 157 | 
            +
                        if col is not None:
         | 
| 158 | 
            +
                            parsed_df2 = pd.json_normalize(parsed_df[col])
         | 
| 159 | 
            +
                            parsed_df2.index = parsed_df.index
         | 
| 160 | 
            +
                        else:
         | 
| 161 | 
            +
                # json_normalize expects dicts, not a DataFrame; the frame is already flat here
                parsed_df2 = parsed_df.copy()
         | 
| 162 | 
            +
                            parsed_df2.index = parsed_df.index
         | 
| 163 | 
            +
                        
         | 
| 164 | 
            +
                        # Convert problematic columns to string
         | 
| 165 | 
            +
                        for column in parsed_df2.columns:
         | 
| 166 | 
            +
                            if parsed_df2[column].dtype == 'object':
         | 
| 167 | 
            +
                                parsed_df2[column] = parsed_df2[column].astype(str)
         | 
| 168 | 
            +
                        
         | 
| 169 | 
            +
                        return parsed_df2
         | 
| 170 | 
            +
                    except Exception as e:
         | 
| 171 | 
            +
                        st.error(f"Error converting responses to DataFrame: {e}")
         | 
| 172 | 
            +
                        return pd.DataFrame()  # Return an empty DataFrame if conversion fails
         | 
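# The JSON-repair fallback above, shown standalone: strict parse first, then a
# naive single-to-double-quote rewrite (lossy when values contain apostrophes,
# which is why remaining failures are only counted, not retried further).
def _coerce_json(raw):
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return json.loads(raw.replace("'", '"'))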
| 173 | 
            +
             | 
| 174 | 
            +
             | 
| 208 | 
            +
             | 
| 209 | 
            +
             | 
| 210 | 
            +
            def plot_hist(df, column, bins=10, kde=True):
         | 
| 211 | 
            +
                    fig, ax = plt.subplots(figsize=(12, 6))
         | 
| 212 | 
            +
        sns.histplot(data=df, x=column, kde=kde, bins=bins, color='orange')
         | 
| 213 | 
            +
                    # set the ticks and frame in orange
         | 
| 214 | 
            +
                    ax.spines['bottom'].set_color('orange')
         | 
| 215 | 
            +
                    ax.spines['top'].set_color('orange')
         | 
| 216 | 
            +
                    ax.spines['right'].set_color('orange')
         | 
| 217 | 
            +
                    ax.spines['left'].set_color('orange')
         | 
| 218 | 
            +
                    ax.xaxis.label.set_color('orange')
         | 
| 219 | 
            +
                    ax.yaxis.label.set_color('orange')
         | 
| 220 | 
            +
                    ax.tick_params(axis='x', colors='orange')
         | 
| 221 | 
            +
                    ax.tick_params(axis='y', colors='orange')
         | 
| 222 | 
            +
                    ax.title.set_color('orange')
         | 
| 223 | 
            +
             | 
| 224 | 
            +
                    # Set transparent background
         | 
| 225 | 
            +
                    fig.patch.set_alpha(0)
         | 
| 226 | 
            +
                    ax.patch.set_alpha(0)
         | 
| 227 | 
            +
                    return fig
         | 
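# Illustrative call (hypothetical column name); the returned figure is meant
# for st.pyplot rather than plt.show().
def _hist_example(df):
    return plot_hist(df, 'duration_seconds', bins=20)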
| 228 | 
            +
             | 
| 229 | 
            +
             | 
| 230 | 
            +
             | 
| 231 | 
            +
            def is_api_key_valid(api_key, model='gpt-3.5-turbo'):
         | 
| 232 | 
            +
                try:
         | 
| 233 | 
            +
                    os.environ['OPENAI_API_KEY'] = api_key
         | 
| 234 | 
            +
                    client = OpenAI()
         | 
| 235 | 
            +
                    response = client.chat.completions.create(
         | 
| 236 | 
            +
                        model=model,
         | 
| 237 | 
            +
                        messages=[{"role": "user", "content": 'Say Hello World!'}])
         | 
| 238 | 
            +
                    text = response.choices[0].message.content
         | 
| 239 | 
            +
        # Any successful completion means the key works
        return True
         | 
| 241 | 
            +
                except Exception as e:
         | 
| 242 | 
            +
        st.error(f"Error with the API key: {e}")
         | 
| 243 | 
            +
        return False
         | 
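# Typical gate before any OpenAI-backed step: halt the Streamlit run early if
# the probe above fails.
def _require_valid_key(key):
    if not is_api_key_valid(key):
        st.stop()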
| 244 | 
            +
             | 
| 245 | 
            +
            def download_json(data):
         | 
| 246 | 
            +
                json_str = json.dumps(data, indent=2)
         | 
| 247 | 
            +
                return json_str
         | 
| 248 | 
            +
             | 
| 249 | 
            +
             | 
| 250 | 
            +
            def convert_cached_data_to_df(parser):
         | 
| 251 | 
            +
                if 'parsed_responses' in st.session_state:
         | 
| 252 | 
            +
                    #parser = CachedUAPParser(api_key=API_KEY, model='gpt-3.5-turbo-0125')
         | 
| 253 | 
            +
                    try:
         | 
| 254 | 
            +
                        responses_df = parser.responses_to_df('sightingDetails', st.session_state['parsed_responses'])
         | 
| 255 | 
            +
                    except Exception as e:
         | 
| 256 | 
            +
                        st.warning(f"Error parsing with 'sightingDetails': {e}")
         | 
| 257 | 
            +
                        responses_df = parser.responses_to_df(None, st.session_state['parsed_responses'])
         | 
| 258 | 
            +
                    if not responses_df.empty:
         | 
| 259 | 
            +
                        st.dataframe(responses_df)
         | 
| 260 | 
            +
                        st.session_state['parsed_responses_df'] = responses_df.copy()
         | 
| 261 | 
            +
                        st.success("Successfully converted cached data to DataFrame.")
         | 
| 262 | 
            +
                    else:
         | 
| 263 | 
            +
                        st.error("Failed to create DataFrame from cached responses.")
         | 
| 264 | 
            +
                else:
         | 
| 265 | 
            +
                    st.warning("No cached data available. Please parse the dataset first.")
         | 
| 266 | 
            +
             | 
| 267 | 
            +
            def plot_line(df, x_column, y_columns, figsize=(12, 10), color='orange', title=None, rolling_mean_value=2):
         | 
| 268 | 
            +
                import matplotlib.cm as cm
         | 
| 269 | 
            +
    # Normalize datetime-like x values first so the axis sorts and renders
    # chronologically, then sort by the x column
    if np.issubdtype(df[x_column].dtype, np.datetime64) or np.issubdtype(df[x_column].dtype, np.timedelta64):
        df[x_column] = pd.to_datetime(df[x_column]).dt.date
    df = df.sort_values(by=x_column)
         | 
| 271 | 
            +
             | 
| 272 | 
            +
                # Calculate rolling mean for each y_column
         | 
| 273 | 
            +
                if rolling_mean_value:
         | 
| 274 | 
            +
                    df[y_columns] = df[y_columns].rolling(len(df) // rolling_mean_value).mean()
         | 
| 275 | 
            +
             | 
| 276 | 
            +
                # Create the plot
         | 
| 277 | 
            +
                fig, ax = plt.subplots(figsize=figsize)
         | 
| 278 | 
            +
             | 
| 279 | 
            +
                colors = cm.Oranges(np.linspace(0.2, 1, len(y_columns)))
         | 
| 280 | 
            +
             | 
| 281 | 
            +
                # Plot each y_column as a separate line with a different color
         | 
| 282 | 
            +
                for i, y_column in enumerate(y_columns):
         | 
| 283 | 
            +
                    df.plot(x=x_column, y=y_column, ax=ax, color=colors[i], label=y_column, linewidth=.5)
         | 
| 284 | 
            +
             | 
| 285 | 
            +
                # Rotate x-axis labels
         | 
| 286 | 
            +
    plt.setp(ax.get_xticklabels(), rotation=30, ha='right')  # avoids the set_xticklabels FixedLocator warning
         | 
| 287 | 
            +
             | 
| 288 | 
            +
         | 
| 291 | 
            +
             | 
| 292 | 
            +
                # Set title, labels, and legend
         | 
| 293 | 
            +
                ax.set_title(title or f'{", ".join(y_columns)} over {x_column}', color=color, fontweight='bold')
         | 
| 294 | 
            +
                ax.set_xlabel(x_column, color=color)
         | 
| 295 | 
            +
                ax.set_ylabel(', '.join(y_columns), color=color)
         | 
| 296 | 
            +
                ax.spines['bottom'].set_color('orange')
         | 
| 297 | 
            +
                ax.spines['top'].set_color('orange')
         | 
| 298 | 
            +
                ax.spines['right'].set_color('orange')
         | 
| 299 | 
            +
                ax.spines['left'].set_color('orange')
         | 
| 300 | 
            +
                ax.xaxis.label.set_color('orange')
         | 
| 301 | 
            +
                ax.yaxis.label.set_color('orange')
         | 
| 302 | 
            +
                ax.tick_params(axis='x', colors='orange')
         | 
| 303 | 
            +
                ax.tick_params(axis='y', colors='orange')
         | 
| 304 | 
            +
                ax.title.set_color('orange')
         | 
| 305 | 
            +
             | 
| 306 | 
            +
                ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')
         | 
| 307 | 
            +
             | 
| 308 | 
            +
                # Remove background
         | 
| 309 | 
            +
                fig.patch.set_alpha(0)
         | 
| 310 | 
            +
                ax.patch.set_alpha(0)
         | 
| 311 | 
            +
             | 
| 312 | 
            +
                return fig
         | 
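# Illustrative call (hypothetical columns). Note the smoothing rule above:
# rolling_mean_value=2 means a window of len(df) // 2 rows, so larger values
# give a smaller window and therefore *less* smoothing.
def _line_example(df):
    return plot_line(df, 'date', ['sightings'], rolling_mean_value=2)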
| 313 | 
            +
             | 
| 314 | 
            +
            def plot_bar(df, x_column, y_column, figsize=(12, 10), color='orange', title=None):
         | 
| 315 | 
            +
                fig, ax = plt.subplots(figsize=figsize)
         | 
| 316 | 
            +
             | 
| 317 | 
            +
                sns.barplot(data=df, x=x_column, y=y_column, color=color, ax=ax)
         | 
| 318 | 
            +
             | 
| 319 | 
            +
                ax.set_title(title if title else f'{y_column} by {x_column}', color=color, fontweight='bold')
         | 
| 320 | 
            +
                ax.set_xlabel(x_column, color=color)
         | 
| 321 | 
            +
                ax.set_ylabel(y_column, color=color)
         | 
| 322 | 
            +
             | 
| 323 | 
            +
                ax.tick_params(axis='x', colors=color)
         | 
| 324 | 
            +
                ax.tick_params(axis='y', colors=color)
         | 
| 325 | 
            +
             | 
| 326 | 
            +
                # Remove background
         | 
| 327 | 
            +
                fig.patch.set_alpha(0)
         | 
| 328 | 
            +
                ax.patch.set_alpha(0)
         | 
| 329 | 
            +
                ax.spines['bottom'].set_color('orange')
         | 
| 330 | 
            +
                ax.spines['top'].set_color('orange')
         | 
| 331 | 
            +
                ax.spines['right'].set_color('orange')
         | 
| 332 | 
            +
                ax.spines['left'].set_color('orange')
         | 
| 333 | 
            +
                ax.xaxis.label.set_color('orange')
         | 
| 334 | 
            +
                ax.yaxis.label.set_color('orange')
         | 
| 335 | 
            +
                ax.tick_params(axis='x', colors='orange')
         | 
| 336 | 
            +
                ax.tick_params(axis='y', colors='orange')
         | 
| 337 | 
            +
                ax.title.set_color('orange')
         | 
| 338 | 
            +
    # Legend omitted: a single bar series has no labeled artists to show
         | 
| 339 | 
            +
             | 
| 340 | 
            +
                return fig
         | 
| 341 | 
            +
             | 
| 342 | 
            +
            def plot_grouped_bar(df, x_columns, y_column, figsize=(12, 10), colors=None, title=None):
         | 
| 343 | 
            +
                fig, ax = plt.subplots(figsize=figsize)
         | 
| 344 | 
            +
             | 
| 345 | 
            +
                width = 0.8 / len(x_columns)  # the width of the bars
         | 
| 346 | 
            +
                x = np.arange(len(df))  # the label locations
         | 
| 347 | 
            +
             | 
| 348 | 
            +
    for i, x_column in enumerate(x_columns):
        # Offset each series by i * width so the bars sit side by side;
        # ax.bar takes explicit positions, which sns.barplot does not support
        ax.bar(x + i * width, df[y_column], width=width,
               color=colors[i] if colors else None, label=x_column)
         | 
| 351 | 
            +
             | 
| 352 | 
            +
                ax.set_title(title if title else f'{y_column} by {", ".join(x_columns)}', color='orange', fontweight='bold')
         | 
| 353 | 
            +
                ax.set_xlabel('Groups', color='orange')
         | 
| 354 | 
            +
                ax.set_ylabel(y_column, color='orange')
         | 
| 355 | 
            +
             | 
| 356 | 
            +
    ax.set_xticks(x + width * (len(x_columns) - 1) / 2)  # center the ticks under each group
         | 
| 357 | 
            +
                ax.set_xticklabels(df.index)
         | 
| 358 | 
            +
             | 
| 359 | 
            +
                ax.tick_params(axis='x', colors='orange')
         | 
| 360 | 
            +
                ax.tick_params(axis='y', colors='orange')
         | 
| 361 | 
            +
             | 
| 362 | 
            +
                # Remove background
         | 
| 363 | 
            +
                fig.patch.set_alpha(0)
         | 
| 364 | 
            +
                ax.patch.set_alpha(0)
         | 
| 365 | 
            +
                ax.spines['bottom'].set_color('orange')
         | 
| 366 | 
            +
                ax.spines['top'].set_color('orange')
         | 
| 367 | 
            +
                ax.spines['right'].set_color('orange')
         | 
| 368 | 
            +
                ax.spines['left'].set_color('orange')
         | 
| 369 | 
            +
                ax.xaxis.label.set_color('orange')
         | 
| 370 | 
            +
                ax.yaxis.label.set_color('orange')
         | 
| 371 | 
            +
                ax.title.set_color('orange')
         | 
| 372 | 
            +
                ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')
         | 
| 373 | 
            +
             | 
| 374 | 
            +
                return fig
         | 
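# The position arithmetic used above, shown standalone: series i in group g is
# drawn at g + i * width, so each group of bars spans 0.8 of one x unit.
def _grouped_positions(n_groups, n_series):
    width = 0.8 / n_series
    return [[g + i * width for g in range(n_groups)] for i in range(n_series)]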
| 375 | 
            +
             | 
| 376 | 
            +
            @st.cache_data
         | 
| 377 | 
            +
            def convert_df(df):
         | 
| 378 | 
            +
                # IMPORTANT: Cache the conversion to prevent computation on every rerun
         | 
| 379 | 
            +
                try:
         | 
| 380 | 
            +
                    csv = df.to_csv().encode("utf-8")
         | 
| 381 | 
            +
    except Exception:
         | 
| 382 | 
            +
                    csv = df.to_csv().encode("utf-8-sig")
         | 
| 383 | 
            +
                return csv
         | 
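# Streamlit reruns the whole script on every interaction; the cache above
# keeps the CSV bytes from being re-encoded each time. A companion sketch for
# JSON payloads (name and shape are illustrative):
@st.cache_data
def _convert_json(data):
    return json.dumps(data, indent=2).encode("utf-8")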
| 384 | 
            +
             | 
| 385 | 
            +
             | 
| 386 | 
            +
            def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
         | 
| 387 | 
            +
                """
         | 
| 388 | 
            +
                Adds a UI on top of a dataframe to let viewers filter columns
         | 
| 389 | 
            +
             | 
| 390 | 
            +
                Args:
         | 
| 391 | 
            +
                    df (pd.DataFrame): Original dataframe
         | 
| 392 | 
            +
             | 
| 393 | 
            +
                Returns:
         | 
| 394 | 
            +
                    pd.DataFrame: Filtered dataframe
         | 
| 395 | 
            +
                """
         | 
| 396 | 
            +
             | 
| 397 | 
            +
                title_font = "Arial"
         | 
| 398 | 
            +
                body_font = "Arial"
         | 
| 399 | 
            +
                title_size = 32
         | 
| 400 | 
            +
                colors = ["red", "green", "blue"]
         | 
| 401 | 
            +
                interpretation = False
         | 
| 402 | 
            +
                extract_docx = False
         | 
| 403 | 
            +
                title = "My Chart"
         | 
| 404 | 
            +
                regex = ".*"
         | 
| 405 | 
            +
                img_path = 'default_image.png'
         | 
| 406 | 
            +
             | 
| 407 | 
            +
             | 
| 408 | 
            +
                #try:
         | 
| 409 | 
            +
                #    modify = st.checkbox("Add filters on raw data")
         | 
| 410 | 
            +
                #except:
         | 
| 411 | 
            +
                #    try:
         | 
| 412 | 
            +
                #        modify = st.checkbox("Add filters on processed data")
         | 
| 413 | 
            +
                #    except:
         | 
| 414 | 
            +
                #        try:
         | 
| 415 | 
            +
                #            modify = st.checkbox("Add filters on parsed data")
         | 
| 416 | 
            +
                #        except:
         | 
| 417 | 
            +
                #            pass
         | 
| 418 | 
            +
             | 
| 419 | 
            +
                #if not modify:
         | 
| 420 | 
            +
                #    return df
         | 
| 421 | 
            +
             | 
| 422 | 
            +
                df_ = df.copy()
         | 
| 423 | 
            +
                # Try to convert datetimes into a standard format (datetime, no timezone)
         | 
| 424 | 
            +
             | 
| 425 | 
            +
            #modification_container = st.container()
         | 
| 426 | 
            +
             | 
| 427 | 
            +
            #with modification_container:
         | 
| 428 | 
            +
                to_filter_columns = st.multiselect("Filter dataframe on", df_.columns)
         | 
| 429 | 
            +
             | 
| 430 | 
            +
                date_column = None
         | 
| 431 | 
            +
                filtered_columns = []
         | 
| 432 | 
            +
             | 
| 433 | 
            +
                for column in to_filter_columns:
         | 
| 434 | 
            +
                    left, right = st.columns((1, 20))
         | 
| 435 | 
            +
        # Treat columns with < 120 unique values as categorical if not date or numeric
         | 
| 436 | 
            +
                    if is_categorical_dtype(df_[column]) or (df_[column].nunique() < 120 and not is_datetime64_any_dtype(df_[column]) and not is_numeric_dtype(df_[column])):
         | 
| 437 | 
            +
                        user_cat_input = right.multiselect(
         | 
| 438 | 
            +
                            f"Values for {column}",
         | 
| 439 | 
            +
                            df_[column].value_counts().index.tolist(),
         | 
| 440 | 
            +
                            default=list(df_[column].value_counts().index)
         | 
| 441 | 
            +
                        )
         | 
| 442 | 
            +
                        df_ = df_[df_[column].isin(user_cat_input)]
         | 
| 443 | 
            +
                        filtered_columns.append(column)
         | 
| 444 | 
            +
             | 
| 445 | 
            +
                        with st.status(f"Category Distribution: {column}", expanded=False) as stat:
         | 
| 446 | 
            +
                            st.pyplot(plot_treemap(df_, column))
         | 
| 447 | 
            +
             | 
| 448 | 
            +
                    elif is_numeric_dtype(df_[column]):
         | 
| 449 | 
            +
                        _min = float(df_[column].min())
         | 
| 450 | 
            +
                        _max = float(df_[column].max())
         | 
| 451 | 
            +
                        step = (_max - _min) / 100
         | 
| 452 | 
            +
                        user_num_input = right.slider(
         | 
| 453 | 
            +
                            f"Values for {column}",
         | 
| 454 | 
            +
                            min_value=_min,
         | 
| 455 | 
            +
                            max_value=_max,
         | 
| 456 | 
            +
                            value=(_min, _max),
         | 
| 457 | 
            +
                            step=step,
         | 
| 458 | 
            +
                        )
         | 
| 459 | 
            +
                        df_ = df_[df_[column].between(*user_num_input)]
         | 
| 460 | 
            +
                        filtered_columns.append(column)
         | 
| 461 | 
            +
             | 
| 462 | 
            +
                        # Chart_GPT = ChartGPT(df_, title_font, body_font, title_size,
         | 
| 463 | 
            +
                        #      colors, interpretation, extract_docx, img_path)
         | 
| 464 | 
            +
             | 
| 465 | 
            +
                        with st.status(f"Numerical Distribution: {column}", expanded=False) as stat_:
         | 
| 466 | 
            +
            st.pyplot(plot_hist(df_, column, bins=max((df_[column].nunique() - 1) // 2, 1)))
         | 
| 467 | 
            +
             | 
| 468 | 
            +
                    elif is_object_dtype(df_[column]):
         | 
| 469 | 
            +
                        try:
         | 
| 470 | 
            +
                df_[column] = pd.to_datetime(df_[column], errors='coerce')  # infer_datetime_format is deprecated in pandas 2.x
         | 
| 471 | 
            +
                        except Exception:
         | 
| 472 | 
            +
                            try:
         | 
| 473 | 
            +
                                df_[column] = df_[column].apply(parser.parse)
         | 
| 474 | 
            +
                            except Exception:
         | 
| 475 | 
            +
                                pass
         | 
| 476 | 
            +
             | 
| 477 | 
            +
                        if is_datetime64_any_dtype(df_[column]):
         | 
| 478 | 
            +
                            df_[column] = df_[column].dt.tz_localize(None)
         | 
| 479 | 
            +
                            min_date = df_[column].min().date()
         | 
| 480 | 
            +
                            max_date = df_[column].max().date()
         | 
| 481 | 
            +
                            user_date_input = right.date_input(
         | 
| 482 | 
            +
                                f"Values for {column}",
         | 
| 483 | 
            +
                                value=(min_date, max_date),
         | 
| 484 | 
            +
                                min_value=min_date,
         | 
| 485 | 
            +
                                max_value=max_date,
         | 
| 486 | 
            +
                            )
         | 
| 487 | 
            +
                            # if len(user_date_input) == 2:
         | 
| 488 | 
            +
                            #     start_date, end_date = user_date_input
         | 
| 489 | 
            +
                            #     df_ = df_.loc[df_[column].dt.date.between(start_date, end_date)]
         | 
| 490 | 
            +
                            if len(user_date_input) == 2:
         | 
| 491 | 
            +
                                user_date_input = tuple(map(pd.to_datetime, user_date_input))
         | 
| 492 | 
            +
                                start_date, end_date = user_date_input
         | 
| 493 | 
            +
                                df_ = df_.loc[df_[column].between(start_date, end_date)]
         | 
| 494 | 
            +
             | 
| 495 | 
            +
                            date_column = column
         | 
| 496 | 
            +
             | 
| 497 | 
            +
                            if date_column and filtered_columns:
         | 
| 498 | 
            +
                                numeric_columns = [col for col in filtered_columns if is_numeric_dtype(df_[col])]
         | 
| 499 | 
            +
                                if numeric_columns:
         | 
| 500 | 
            +
                                    fig = plot_line(df_, date_column, numeric_columns)
         | 
| 501 | 
            +
                                    #st.pyplot(fig)
         | 
| 502 | 
            +
                                # now to deal with categorical columns
         | 
| 503 | 
            +
                                categorical_columns = [col for col in filtered_columns if is_categorical_dtype(df_[col])]
         | 
| 504 | 
            +
                                if categorical_columns:
         | 
| 505 | 
            +
                                    fig2 = plot_bar(df_, date_column, categorical_columns[0])
         | 
| 506 | 
            +
                                    #st.pyplot(fig2)
         | 
| 507 | 
            +
                                with st.status(f"Date Distribution: {column}", expanded=False) as stat:
         | 
| 508 | 
            +
                                    try:
         | 
| 509 | 
            +
                                        st.pyplot(fig)
         | 
| 510 | 
            +
                                    except Exception as e:
         | 
| 511 | 
            +
                                        st.error(f"Error plotting line chart: {e}")
         | 
| 512 | 
            +
                                        pass
         | 
| 513 | 
            +
                                    try:
         | 
| 514 | 
            +
                                        st.pyplot(fig2)
         | 
| 515 | 
            +
                                    except Exception as e:
         | 
| 516 | 
            +
                                        st.error(f"Error plotting bar chart: {e}")
         | 
| 517 | 
            +
             | 
| 518 | 
            +
             | 
| 519 | 
            +
                    else:
         | 
| 520 | 
            +
                        user_text_input = right.text_input(
         | 
| 521 | 
            +
                            f"Substring or regex in {column}",
         | 
| 522 | 
            +
                        )
         | 
| 523 | 
            +
                        if user_text_input:
         | 
| 524 | 
            +
                            df_ = df_[df_[column].astype(str).str.contains(user_text_input)]
         | 
| 525 | 
            +
                # write len of df after filtering with % of original
         | 
| 526 | 
            +
                st.write(f"{len(df_)} rows ({len(df_) / len(df) * 100:.2f}%)")
         | 
| 527 | 
            +
                return df_
         | 
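# Illustrative wiring (mirrors the uploader flow below): filter, preview, and
# hand back the filtered rows.
def _filter_and_preview(df):
    filtered = filter_dataframe(df)
    st.dataframe(filtered)
    return filtered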
| 528 | 
            +
             | 
| 529 | 
            +
             | 
| 530 | 
            +
            from config import API_KEY, GEMINI_KEY, FORMAT_LONG
         | 
| 531 | 
            +
             | 
| 532 | 
            +
# Free cached GPU memory left over from earlier model runs; empty_cache is a
# no-op without CUDA, and no_grad is unnecessary for this call
if torch.cuda.is_available():
    torch.cuda.empty_cache()
         | 
| 534 | 
            +
             | 
| 535 | 
            +
            #st.set_page_config(
         | 
| 536 | 
            +
            #    page_title="UAP ANALYSIS",
         | 
| 537 | 
            +
            #    page_icon=":alien:",
         | 
| 538 | 
            +
            #    layout="wide",
         | 
| 539 | 
            +
            #    initial_sidebar_state="expanded",
         | 
| 540 | 
            +
            #)
         | 
| 541 | 
            +
             | 
| 542 | 
            +
            st.title('UAP Feature Extraction')
         | 
| 543 | 
            +
             | 
| 544 | 
            +
            # Initialize session state
         | 
| 545 | 
            +
            if 'analyzers' not in st.session_state:
         | 
| 546 | 
            +
                st.session_state['analyzers'] = []
         | 
| 547 | 
            +
            if 'col_names' not in st.session_state:
         | 
| 548 | 
            +
                st.session_state['col_names'] = []
         | 
| 549 | 
            +
            if 'clusters' not in st.session_state:
         | 
| 550 | 
            +
                st.session_state['clusters'] = {}
         | 
| 551 | 
            +
            if 'new_data' not in st.session_state:
         | 
| 552 | 
            +
                st.session_state['new_data'] = pd.DataFrame()
         | 
| 553 | 
            +
            if 'dataset' not in st.session_state:
         | 
| 554 | 
            +
                st.session_state['dataset'] = pd.DataFrame()
         | 
| 555 | 
            +
            if 'data_processed' not in st.session_state:
         | 
| 556 | 
            +
                st.session_state['data_processed'] = False
         | 
| 557 | 
            +
            if 'stage' not in st.session_state:
         | 
| 558 | 
            +
                st.session_state['stage'] = 0
         | 
| 559 | 
            +
            if 'filtered_data' not in st.session_state:
         | 
| 560 | 
            +
                st.session_state['filtered_data'] = None
         | 
| 561 | 
            +
            if 'gemini_answer' not in st.session_state:
         | 
| 562 | 
            +
                st.session_state['gemini_answer'] = None
         | 
| 563 | 
            +
            if 'parsed_responses' not in st.session_state:
         | 
| 564 | 
            +
                st.session_state['parsed_responses'] = None
         | 
| 565 | 
            +
            if 'parsed_responses_df' not in st.session_state:
         | 
| 566 | 
            +
                st.session_state['parsed_responses_df'] = None
         | 
| 567 | 
            +
            if 'json_format' not in st.session_state:
         | 
| 568 | 
            +
                st.session_state['json_format'] = None
         | 
| 569 | 
            +
            if 'api_key_valid' not in st.session_state:
         | 
| 570 | 
            +
                st.session_state['api_key_valid'] = False
         | 
| 571 | 
            +
            if 'previous_api_key' not in st.session_state:
         | 
| 572 | 
            +
                st.session_state['previous_api_key'] = None
         | 
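# Equivalent, more compact initialization for the block above.
def _init_state(defaults):
    for key, value in defaults.items():
        st.session_state.setdefault(key, value)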
| 573 | 
            +
             | 
| 574 | 
            +
                
         | 
| 575 | 
            +
            # Unparsed data
         | 
| 576 | 
            +
            #unparsed_tickbox = st.checkbox('Data Parsing')
         | 
| 577 | 
            +
            #if unparsed_tickbox:
         | 
| 578 | 
            +
            unparsed = st.file_uploader("Upload Raw DataFrame", type=["csv", "xlsx"])
         | 
| 579 | 
            +
            if unparsed is not None:
         | 
| 580 | 
            +
                try:
         | 
| 581 | 
            +
                    data = pd.read_csv(unparsed) if unparsed.type == "text/csv" else pd.read_excel(unparsed)
         | 
| 582 | 
            +
                    filtered_data = filter_dataframe(data)
         | 
| 583 | 
            +
                    st.dataframe(filtered_data)
         | 
| 584 | 
            +
                except Exception as e:
         | 
| 585 | 
            +
                    st.error(f"An error occurred while reading the file: {e}")
         | 
| 586 | 
            +
                
         | 
| 587 | 
            +
                modify_json = st.checkbox('Custom JSON')
         | 
| 588 | 
            +
                API_KEY = st.text_input('OpenAI API Key', API_KEY, type='password', help="Enter your OpenAI API key")       
         | 
| 589 | 
            +
                    
         | 
| 590 | 
            +
                
         | 
| 591 | 
            +
                
         | 
| 592 | 
            +
                if modify_json:
         | 
| 593 | 
            +
                    FORMAT_LONG = st.text_area('Custom JSON', FORMAT_LONG, height=500)
         | 
| 594 | 
            +
                    st.download_button("Save Format", FORMAT_LONG)
         | 
| 595 | 
            +
                try:
         | 
| 596 | 
            +
                    json.loads(FORMAT_LONG)
         | 
| 597 | 
            +
                    st.session_state['json_format'] = True
         | 
| 598 | 
            +
                except json.JSONDecodeError as e:
         | 
| 599 | 
            +
                    st.error(f"Invalid JSON format: {str(e)}")
         | 
| 600 | 
            +
                    st.session_state['json_format'] = False
         | 
| 601 | 
            +
                    st.stop()  # Stop execution if JSON is invalid
         | 
| 602 | 
            +
             | 
| 603 | 
            +
                # If the DataFrame is successfully created, allow the user to select a column
         | 
| 604 | 
            +
                col_unparsed = st.selectbox("Select column corresponding to text", data.columns)
         | 
| 605 | 
            +
                
         | 
| 606 | 
            +
                    
         | 
| 607 | 
            +
                if st.button("Parse Dataset") and st.session_state['json_format']:
         | 
| 608 | 
            +
                    if API_KEY:
         | 
| 609 | 
            +
                        # Only validate if the API key has changed
         | 
| 610 | 
            +
                        if API_KEY != st.session_state['previous_api_key']:
         | 
| 611 | 
            +
                            if is_api_key_valid(API_KEY):
         | 
| 612 | 
            +
                                st.session_state['api_key_valid'] = True
         | 
| 613 | 
            +
                                st.session_state['previous_api_key'] = API_KEY
         | 
| 614 | 
            +
                                st.success("API key is valid!")
         | 
| 615 | 
            +
                            else:
         | 
| 616 | 
            +
                                st.session_state['api_key_valid'] = False
         | 
| 617 | 
            +
                                st.error("Invalid API key. Please check and try again.")
         | 
| 618 | 
            +
                        elif st.session_state['api_key_valid']:
         | 
| 619 | 
            +
                            st.success("API key is valid!")
         | 
| 620 | 
            +
                    if not API_KEY:# or not st.session_state['api_key_valid']:
         | 
| 621 | 
            +
                        st.warning("Please enter your API key to proceed.")
         | 
| 622 | 
            +
                        st.stop()
         | 
| 623 | 
            +
                    selected_column_data = filtered_data[col_unparsed].tolist()
         | 
| 624 | 
            +
                    st.session_state.result = selected_column_data
         | 
| 625 | 
            +
                    with st.status("Parsing...", expanded=True) as stat:
         | 
| 626 | 
            +
                        try:
         | 
| 627 | 
            +
                            st.write("Parsing descriptions...")
         | 
| 628 | 
            +
                            parser = CachedUAPParser(api_key=API_KEY, model='gpt-3.5-turbo-0125', col=st.session_state.result)
         | 
| 629 | 
            +
                            descriptions = st.session_state.result
         | 
| 630 | 
            +
                            format_long = FORMAT_LONG
         | 
| 631 | 
            +
                            parser.process_descriptions(descriptions, format_long)
         | 
| 632 | 
            +
                            st.session_state['parsed_responses'] = parser.parse_responses()
         | 
| 633 | 
            +
                            try:
         | 
| 634 | 
            +
                                responses_df = parser.responses_to_df('sightingDetails', st.session_state['parsed_responses'])
         | 
| 635 | 
            +
                            except Exception as e:
         | 
| 636 | 
            +
                                st.warning(f"Error parsing with 'sightingDetails': {e}")
         | 
| 637 | 
            +
                                responses_df = parser.responses_to_df(None, st.session_state['parsed_responses'])
         | 
| 638 | 
            +
                            
         | 
| 639 | 
            +
                            if not responses_df.empty:
         | 
| 640 | 
            +
                                st.dataframe(responses_df)
         | 
| 641 | 
            +
                                st.session_state['parsed_responses_df'] = responses_df.copy()
         | 
| 642 | 
            +
                                stat.update(label="Parsing complete", state="complete", expanded=False)
         | 
| 643 | 
            +
                            else:
         | 
| 644 | 
            +
                                st.error("Failed to create DataFrame from parsed responses.")
         | 
| 645 | 
            +
                        except Exception as e:
         | 
| 646 | 
            +
                            st.error(f"An error occurred during parsing: {str(e)}")
         | 
| 647 | 
            +
             | 
| 648 | 
            +
                # Add download button for parsed data
         | 
| 649 | 
            +
                if st.session_state['parsed_responses'] is not None:
         | 
| 650 | 
            +
                    json_str = download_json(st.session_state['parsed_responses'])
         | 
| 651 | 
            +
                    st.download_button(
         | 
| 652 | 
            +
                        label="Download Parsed Data as JSON",
         | 
| 653 | 
            +
                        data=json_str,
         | 
| 654 | 
            +
                        file_name="parsed_responses.json",
         | 
| 655 | 
            +
                        mime="application/json"
         | 
| 656 | 
            +
                    )
         | 
| 657 | 
            +
                    # Add button to convert cached data to DataFrame
         | 
| 658 | 
            +
                    if st.button("Convert Cached Data to DataFrame"):
         | 
| 659 | 
            +
            # The helper expects a parser instance, not the raw responses dict;
            # col=None here is an assumption about CachedUAPParser's signature
            convert_cached_data_to_df(CachedUAPParser(api_key=API_KEY, model='gpt-3.5-turbo-0125', col=None))
         | 
| 660 | 
            +
                        
         | 
| 661 | 
            +
                if st.session_state['parsed_responses_df'] is not None:
         | 
| 662 | 
            +
                    st.download_button(
         | 
| 663 | 
            +
                    label="Save CSV",
         | 
| 664 | 
            +
                    data=convert_df(st.session_state['parsed_responses_df']),
         | 
| 665 | 
            +
                    file_name="uap_data.csv",
         | 
| 666 | 
            +
                    mime="text/csv",
         | 
| 667 | 
            +
                    )
         | 
| 668 | 
            +
             | 
| 669 | 
            +
                
         | 
| 670 | 
            +
                    
         | 
| 671 | 
            +
             | 
| 672 | 
            +
             | 
| 673 | 
            +
             | 
| 674 | 
            +
            #except Exception as e:
         | 
| 675 | 
            +
            #    stat.update(label=f"Parsing failed: {e}", state="error")
         | 
| 676 | 
            +
            # st.write("Parsing descriptions...")
         | 
| 677 | 
            +
            # st.update_status("Parsing descriptions...")
         | 
| 678 | 
            +
             
         | 
    	
        rag_search.py
    ADDED
    
    | @@ -0,0 +1,438 @@ | |
| 1 | 
            +
             | 
| 2 | 
            +
            import streamlit as st
         | 
| 3 | 
            +
            import pandas as pd
         | 
| 4 | 
            +
import cohere
         | 
| 7 | 
            +
            import numpy as np
         | 
| 8 | 
            +
            import matplotlib.pyplot as plt
         | 
| 9 | 
            +
            import seaborn as sns
         | 
| 10 | 
            +
            from uap_analyzer import UAPParser, UAPAnalyzer, UAPVisualizer
         | 
| 11 | 
            +
            # import ChartGen
         | 
| 12 | 
            +
            # from ChartGen import ChartGPT
         | 
| 13 | 
            +
            from Levenshtein import distance
         | 
| 14 | 
            +
            from sklearn.model_selection import train_test_split
         | 
| 15 | 
            +
            from sklearn.metrics import confusion_matrix
         | 
| 16 | 
            +
            from stqdm import stqdm
         | 
| 17 | 
            +
            stqdm.pandas()
         | 
| 18 | 
            +
            import streamlit.components.v1 as components
         | 
| 19 | 
            +
            from dateutil import parser
         | 
| 20 | 
            +
            from sentence_transformers import SentenceTransformer
         | 
| 21 | 
            +
            import torch
         | 
| 22 | 
            +
            import squarify
         | 
| 23 | 
            +
            import matplotlib.colors as mcolors
         | 
| 24 | 
            +
            import textwrap
         | 
| 25 | 
            +
            import datamapplot
         | 
| 26 | 
            +
            import json
         | 
| 27 | 
            +
             | 
| 28 | 
            +
            st.set_option('deprecation.showPyplotGlobalUse', False)
         | 
| 29 | 
            +
             | 
| 30 | 
            +
            from pandas.api.types import (
         | 
| 31 | 
            +
                is_categorical_dtype,
         | 
| 32 | 
            +
                is_datetime64_any_dtype,
         | 
| 33 | 
            +
                is_numeric_dtype,
         | 
| 34 | 
            +
                is_object_dtype,
         | 
| 35 | 
            +
            )
         | 
| 36 | 
            +
             | 
| 37 | 
            +
             | 
| 38 | 
            +
            def plot_treemap(df, column, top_n=32):
         | 
| 39 | 
            +
        # Work on a copy so the temporary '<column>_revised' helper column
        # does not leak back into the caller's dataframe
        df = df.copy()
        # Get the value counts and the top N labels
         | 
| 40 | 
            +
                    value_counts = df[column].value_counts()
         | 
| 41 | 
            +
                    top_labels = value_counts.iloc[:top_n].index
         | 
| 42 | 
            +
                    
         | 
| 43 | 
            +
                    # Use np.where to replace all values not in the top N with 'Other'
         | 
| 44 | 
            +
                    revised_column = f'{column}_revised'
         | 
| 45 | 
            +
                    df[revised_column] = np.where(df[column].isin(top_labels), df[column], 'Other')
         | 
| 46 | 
            +
             | 
| 47 | 
            +
                    # Get the value counts including the 'Other' category
         | 
| 48 | 
            +
                    sizes = df[revised_column].value_counts().values
         | 
| 49 | 
            +
                    labels = df[revised_column].value_counts().index
         | 
| 50 | 
            +
             | 
| 51 | 
            +
                    # Get a gradient of colors
         | 
| 52 | 
            +
                    # colors = list(mcolors.TABLEAU_COLORS.values())
         | 
| 53 | 
            +
             | 
| 54 | 
            +
                    n_colors = len(sizes)
         | 
| 55 | 
            +
                    colors = plt.cm.Oranges(np.linspace(0.3, 0.9, n_colors))[::-1]
         | 
| 56 | 
            +
             | 
| 57 | 
            +
             | 
| 58 | 
            +
                    # Get % of each category
         | 
| 59 | 
            +
                    percents = sizes / sizes.sum()
         | 
| 60 | 
            +
             | 
| 61 | 
            +
                    # Prepare labels with percentages
         | 
| 62 | 
            +
                    labels = [f'{label}\n {percent:.1%}' for label, percent in zip(labels, percents)]
         | 
| 63 | 
            +
             | 
| 64 | 
            +
                    fig, ax = plt.subplots(figsize=(20, 12))
         | 
| 65 | 
            +
             | 
| 66 | 
            +
                    # Plot the treemap
         | 
| 67 | 
            +
                    squarify.plot(sizes=sizes, label=labels, alpha=0.7, pad=True, color=colors, text_kwargs={'fontsize': 10})
         | 
| 68 | 
            +
             | 
| 69 | 
            +
                    ax = plt.gca()
         | 
| 70 | 
            +
                    # Iterate over text elements and rectangles (patches) in the axes for color adjustment
         | 
| 71 | 
            +
                    for text, rect in zip(ax.texts, ax.patches):
         | 
| 72 | 
            +
                        background_color = rect.get_facecolor()
         | 
| 73 | 
            +
                        r, g, b, _ = mcolors.to_rgba(background_color)
         | 
| 74 | 
            +
                        brightness = np.average([r, g, b])
         | 
| 75 | 
            +
                        text.set_color('white' if brightness < 0.5 else 'black')
         | 
| 76 | 
            +
             | 
| 77 | 
            +
                        # Adjust font size based on rectangle's area and wrap long text
         | 
| 78 | 
            +
                        coef = 0.8
         | 
| 79 | 
            +
                        font_size = np.sqrt(rect.get_width() * rect.get_height()) * coef
         | 
| 80 | 
            +
                        text.set_fontsize(font_size)
         | 
| 81 | 
            +
                        wrapped_text = textwrap.fill(text.get_text(), width=20)
         | 
| 82 | 
            +
                        text.set_text(wrapped_text)
         | 
| 83 | 
            +
             | 
| 84 | 
            +
                    plt.axis('off')
         | 
| 85 | 
            +
                    plt.gca().invert_yaxis()
         | 
| 86 | 
            +
                    plt.gcf().set_size_inches(20, 12)
         | 
| 87 | 
            +
             | 
| 88 | 
            +
                    fig.patch.set_alpha(0)
         | 
| 89 | 
            +
             | 
| 90 | 
            +
                    ax.patch.set_alpha(0)
         | 
| 91 | 
            +
                    return fig
         | 
| 92 | 
            +
             | 
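# Illustrative usage sketch (not part of the original upload; 'shape' is a
# hypothetical column name): render the treemap for a categorical column
# inside the Streamlit app:
#   fig = plot_treemap(df, 'shape', top_n=20)
#   st.pyplot(fig)
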
def plot_hist(df, column, bins=10, kde=True):
    fig, ax = plt.subplots(figsize=(12, 6))
    # Pass the kde argument through instead of hard-coding kde=True
    sns.histplot(data=df, x=column, kde=kde, bins=bins, color='orange')
    # Set the ticks and frame in orange
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')

    # Set transparent background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    return fig

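# Illustrative usage sketch (assumed column name, not from the original file):
#   fig = plot_hist(df, 'duration_seconds', bins=30)
#   st.pyplot(fig)
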
def plot_line(df, x_column, y_columns, figsize=(12, 10), color='orange', title=None, rolling_mean_value=2):
    import matplotlib.cm as cm
    # Sort the dataframe by the date column
    df = df.sort_values(by=x_column)

    # Format x_column as date *before* plotting so the conversion actually
    # affects the chart (in the original, this ran after the plot was drawn)
    if np.issubdtype(df[x_column].dtype, np.datetime64) or np.issubdtype(df[x_column].dtype, np.timedelta64):
        df[x_column] = pd.to_datetime(df[x_column]).dt.date

    # Calculate rolling mean for each y_column
    if rolling_mean_value:
        df[y_columns] = df[y_columns].rolling(len(df) // rolling_mean_value).mean()

    # Create the plot
    fig, ax = plt.subplots(figsize=figsize)

    colors = cm.Oranges(np.linspace(0.2, 1, len(y_columns)))

    # Plot each y_column as a separate line with a different color
    for i, y_column in enumerate(y_columns):
        df.plot(x=x_column, y=y_column, ax=ax, color=colors[i], label=y_column, linewidth=.5)

    # Rotate x-axis labels
    ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha='right')

    # Set title, labels, and legend
    ax.set_title(title or f'{", ".join(y_columns)} over {x_column}', color=color, fontweight='bold')
    ax.set_xlabel(x_column, color=color)
    ax.set_ylabel(', '.join(y_columns), color=color)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')

    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    # Remove background
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)

    return fig

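# Illustrative usage sketch (assumed column names): plot smoothed series over a
# datetime column, e.g.:
#   fig = plot_line(df, 'date', ['sightings', 'reports'], rolling_mean_value=4)
#   st.pyplot(fig)
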
def plot_bar(df, x_column, y_column, figsize=(12, 10), color='orange', title=None):
    fig, ax = plt.subplots(figsize=figsize)

    sns.barplot(data=df, x=x_column, y=y_column, color=color, ax=ax)

    ax.set_title(title if title else f'{y_column} by {x_column}', color=color, fontweight='bold')
    ax.set_xlabel(x_column, color=color)
    ax.set_ylabel(y_column, color=color)

    # Remove background and color the frame, labels, and ticks in orange
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')
    # Note: the original also called ax.legend() here, but a single-series
    # barplot has no labeled artists, so the call only emitted a warning.

    return fig

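# Illustrative usage sketch (assumed column names):
#   fig = plot_bar(df, 'state', 'sightings', title='Sightings per state')
#   st.pyplot(fig)
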
def plot_grouped_bar(df, x_columns, y_column, figsize=(12, 10), colors=None, title=None):
    fig, ax = plt.subplots(figsize=figsize)

    width = 0.8 / len(x_columns)  # the width of the bars
    x = np.arange(len(df), dtype=float)  # the label locations (float so the offset below works)

    for i, x_column in enumerate(x_columns):
        # Draw each group with plain matplotlib bars; the original passed the
        # positional array to sns.barplot, which treats it as categories and
        # loses the per-group offsets.
        ax.bar(x, df[y_column], width=width, color=colors[i] if colors else None, label=x_column)
        x += width  # add the width of the bar to the x position for the next bar

    ax.set_title(title if title else f'{y_column} by {", ".join(x_columns)}', color='orange', fontweight='bold')
    ax.set_xlabel('Groups', color='orange')
    ax.set_ylabel(y_column, color='orange')

    ax.set_xticks(x - width * len(x_columns) / 2)
    ax.set_xticklabels(df.index)

    # Remove background and color the frame, labels, and ticks in orange
    fig.patch.set_alpha(0)
    ax.patch.set_alpha(0)
    ax.spines['bottom'].set_color('orange')
    ax.spines['top'].set_color('orange')
    ax.spines['right'].set_color('orange')
    ax.spines['left'].set_color('orange')
    ax.xaxis.label.set_color('orange')
    ax.yaxis.label.set_color('orange')
    ax.tick_params(axis='x', colors='orange')
    ax.tick_params(axis='y', colors='orange')
    ax.title.set_color('orange')
    ax.legend(loc='upper right', bbox_to_anchor=(1, 1), facecolor='black', framealpha=.4, labelcolor='orange', edgecolor='orange')

    return fig

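# Illustrative usage sketch (assumed column names; one bar series is drawn per
# entry in the first list, all against the shared metric column):
#   fig = plot_grouped_bar(df, ['shape', 'state'], 'sightings')
#   st.pyplot(fig)
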
def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """
    Adds a UI on top of a dataframe to let viewers filter columns

    Args:
        df (pd.DataFrame): Original dataframe

    Returns:
        pd.DataFrame: Filtered dataframe
    """

    title_font = "Arial"
    body_font = "Arial"
    title_size = 32
    colors = ["red", "green", "blue"]
    interpretation = False
    extract_docx = False
    title = "My Chart"
    regex = ".*"
    img_path = 'default_image.png'

    df_ = df.copy()

    # modification_container = st.container()

    # with modification_container:
    to_filter_columns = st.multiselect("Filter dataframe on", df_.columns)

    date_column = None
    filtered_columns = []

    for column in to_filter_columns:
        left, right = st.columns((1, 20))
        # Treat columns with < 120 unique values as categorical if not date or numeric
        if is_categorical_dtype(df_[column]) or (df_[column].nunique() < 120 and not is_datetime64_any_dtype(df_[column]) and not is_numeric_dtype(df_[column])):
            user_cat_input = right.multiselect(
                f"Values for {column}",
                df_[column].value_counts().index.tolist(),
                default=list(df_[column].value_counts().index)
            )
            df_ = df_[df_[column].isin(user_cat_input)]
            filtered_columns.append(column)

            with st.status(f"Category Distribution: {column}", expanded=False) as stat:
                st.pyplot(plot_treemap(df_, column))

        elif is_numeric_dtype(df_[column]):
            _min = float(df_[column].min())
            _max = float(df_[column].max())
            step = (_max - _min) / 100
            user_num_input = right.slider(
                f"Values for {column}",
                min_value=_min,
                max_value=_max,
                value=(_min, _max),
                step=step,
            )
            df_ = df_[df_[column].between(*user_num_input)]
            filtered_columns.append(column)

            # Chart_GPT = ChartGPT(df_, title_font, body_font, title_size,
            #      colors, interpretation, extract_docx, img_path)

            with st.status(f"Numerical Distribution: {column}", expanded=False) as stat_:
                # Parenthesized so the bin count is (n_unique - 1) / 2, with a floor of 1;
                # the original's int(round(len(...)-1)/2) misplaced the parentheses
                st.pyplot(plot_hist(df_, column, bins=max(1, int(round((df_[column].nunique() - 1) / 2)))))

        elif is_object_dtype(df_[column]):
            try:
                # infer_datetime_format is deprecated in pandas 2.x; to_datetime infers by default
                df_[column] = pd.to_datetime(df_[column], errors='coerce')
            except Exception:
                try:
                    df_[column] = df_[column].apply(parser.parse)
                except Exception:
                    pass

            if is_datetime64_any_dtype(df_[column]):
                df_[column] = df_[column].dt.tz_localize(None)
                min_date = df_[column].min().date()
                max_date = df_[column].max().date()
                user_date_input = right.date_input(
                    f"Values for {column}",
                    value=(min_date, max_date),
                    min_value=min_date,
                    max_value=max_date,
                )
                # if len(user_date_input) == 2:
                #     start_date, end_date = user_date_input
                #     df_ = df_.loc[df_[column].dt.date.between(start_date, end_date)]
                if len(user_date_input) == 2:
                    user_date_input = tuple(map(pd.to_datetime, user_date_input))
                    start_date, end_date = user_date_input
                    df_ = df_.loc[df_[column].between(start_date, end_date)]

                date_column = column

                if date_column and filtered_columns:
                    numeric_columns = [col for col in filtered_columns if is_numeric_dtype(df_[col])]
                    if numeric_columns:
                        fig = plot_line(df_, date_column, numeric_columns)
                        with st.status(f"Date Numerical Distributions: {column}", expanded=False) as stat:
                            try:
                                st.pyplot(fig)
                            except Exception as e:
                                st.error(f"Error plotting line chart: {e}")
                    # Now deal with categorical columns
                    categorical_columns = [col for col in filtered_columns if is_categorical_dtype(df_[col])]
                    if categorical_columns:
                        fig2 = plot_grouped_bar(df_, categorical_columns, date_column)
                        with st.status(f"Date Categorical Distributions: {column}", expanded=False) as sta:
                            try:
                                st.pyplot(fig2)
                            except Exception as e:
                                st.error(f"Error plotting bar chart: {e}")

        else:
            user_text_input = right.text_input(
                f"Substring or regex in {column}",
            )
            if user_text_input:
                df_ = df_[df_[column].astype(str).str.contains(user_text_input)]

    # Write the length of df after filtering, with % of the original
    st.write(f"{len(df_)} rows ({len(df_) / len(df) * 100:.2f}%)")
    return df_

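# Illustrative usage sketch (not part of the original file; 'sightings.csv' is a
# hypothetical file name):
#   raw = pd.read_csv('sightings.csv')
#   filtered = filter_dataframe(raw)   # renders the filter widgets, returns the subset
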
# Initialize session state
if 'analyzers' not in st.session_state:
    st.session_state['analyzers'] = []
if 'col_names' not in st.session_state:
    st.session_state['col_names'] = []
if 'clusters' not in st.session_state:
    st.session_state['clusters'] = {}
if 'new_data' not in st.session_state:
    st.session_state['new_data'] = pd.DataFrame()
if 'dataset' not in st.session_state:
    st.session_state['dataset'] = pd.DataFrame()
if 'data_processed' not in st.session_state:
    st.session_state['data_processed'] = False
if 'stage' not in st.session_state:
    st.session_state['stage'] = 0
if 'filtered_data' not in st.session_state:
    st.session_state['filtered_data'] = None
if 'gemini_answer' not in st.session_state:
    st.session_state['gemini_answer'] = None
if 'parsed_responses' not in st.session_state:
    st.session_state['parsed_responses'] = None
if 'json_format' not in st.session_state:
    st.session_state['json_format'] = None
if 'api_key_valid' not in st.session_state:
    st.session_state['api_key_valid'] = False
if 'previous_api_key' not in st.session_state:
    st.session_state['previous_api_key'] = None


def load_data(file_path, key='df'):
    return pd.read_hdf(file_path, key=key)

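# Illustrative usage sketch: the repository ships final_ufoseti_dataset.h5, so the
# helper above could presumably be called as:
#   df = load_data('final_ufoseti_dataset.h5')   # uses the default key='df'
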
            +
            datasett = st.file_uploader("Upload Raw DataFrame", type=["csv", "xlsx"])
         | 
| 382 | 
            +
            if datasett is not None:
         | 
| 383 | 
            +
                try:
         | 
| 384 | 
            +
                    data = pd.read_csv(datasett) if datasett.type == "text/csv" else pd.read_excel(datasett)
         | 
| 385 | 
            +
                    filtered_data = filter_dataframe(data)
         | 
| 386 | 
            +
                    st.session_state['parsed_responses'] = filtered_data
         | 
| 387 | 
            +
                    st.dataframe(filtered_data)
         | 
| 388 | 
            +
                except Exception as e:
         | 
| 389 | 
            +
                    st.error(f"An error occurred while reading the file: {e}")
         | 
| 390 | 
            +
             | 
| 391 | 
            +
                col1, col2 = st.columns(2)
         | 
| 392 | 
            +
                with col1:
         | 
| 393 | 
            +
                    columns_to_query = st.multiselect(
         | 
| 394 | 
            +
                    label='Select columns to analyze',
         | 
| 395 | 
            +
                    options=st.session_state['parsed_responses'].columns)
         | 
| 396 | 
            +
                with col2:
         | 
| 397 | 
            +
                    COHERE_KEY = st.text_input('Cohere APIs Key', '', type='password', help="Enter your Cohere API key")
         | 
| 398 | 
            +
                
         | 
| 399 | 
            +
                question = st.text_input("Ask a question")
         | 
| 400 | 
            +
             | 
| 401 | 
            +
                if st.session_state['parsed_responses'] is not None and question and COHERE_KEY:
         | 
| 402 | 
            +
                    co = cohere.Client(api_key = COHERE_KEY)
         | 
| 403 | 
            +
                    documents = st.session_state['parsed_responses'][columns_to_query].to_dict('records')
         | 
| 404 | 
            +
                    json_documents = [json.dumps(doc) for doc in documents]
         | 
| 405 | 
            +
                    try:
         | 
| 406 | 
            +
                        results = co.rerank(
         | 
| 407 | 
            +
                            model="rerank-english-v3.0",
         | 
| 408 | 
            +
                            query=question,
         | 
| 409 | 
            +
                            documents=json_documents,
         | 
| 410 | 
            +
                            top_n=5,
         | 
| 411 | 
            +
                            return_documents=True
         | 
| 412 | 
            +
                        )
         | 
| 413 | 
            +
                        
         | 
| 414 | 
            +
                        st.subheader("Reranked Results:")
         | 
| 415 | 
            +
                        # Create a new dataframe with reranked results
         | 
| 416 | 
            +
                        reranked_indices = [result.index for result in results.results]
         | 
| 417 | 
            +
                        reranked_scores = [result.relevance_score for result in results.results]
         | 
| 418 | 
            +
                        
         | 
| 419 | 
            +
                        reranked_df = st.session_state['parsed_responses'].iloc[reranked_indices].copy()
         | 
| 420 | 
            +
                        reranked_df['relevance_score'] = reranked_scores
         | 
| 421 | 
            +
                        reranked_df['rank'] = range(1, len(reranked_indices) + 1)
         | 
| 422 | 
            +
                        
         | 
| 423 | 
            +
                        # Set the new index to be the rank
         | 
| 424 | 
            +
                        reranked_df.set_index('rank', inplace=True)
         | 
| 425 | 
            +
                        
         | 
| 426 | 
            +
                        # Display the reranked dataframe
         | 
| 427 | 
            +
                        st.dataframe(reranked_df)
         | 
| 428 | 
            +
             | 
| 429 | 
            +
                        # markdown format
         | 
| 430 | 
            +
                        #for idx, result in enumerate(results.results, 1):
         | 
| 431 | 
            +
                        #    st.write(f"Result {idx}:")
         | 
| 432 | 
            +
                        #    st.write(f"Index: {result.index}")
         | 
| 433 | 
            +
                        #    st.write(f"Relevance Score: {result.relevance_score}")
         | 
| 434 | 
            +
                        #    st.write(f"Document: {json.loads(json_documents[result.index])}")
         | 
| 435 | 
            +
                        #    st.write("---")
         | 
| 436 | 
            +
                            
         | 
| 437 | 
            +
                    except Exception as e:
         | 
| 438 | 
            +
                        st.error(f"An error occurred during reranking: {e}")
         | 
    	
secret_bases.csv
ADDED
,id,name,coordinates,latitude,longitude,icon
0,10172,"633rd Air Base Wing, Joint Base LangleyEustis, VA","37.082, -76.360",37.082, -76.360,draw-shape
1,10173,"Air Force Materiel Command, Wright Patterson AFB, Ohio","39.826, -84.048",39.826, -84.048,draw-shape
2,10016,Air Force Office of Special Investigations,"38.871, -77.056",38.871, -77.056,draw-shape
3,10017,Air Force Plant 42,"34.637, -118.084",34.637, -118.084,draw-shape
4,10018,Air Force Weapons Laboratory Kirtland,"35.049, -106.609",35.049, -106.609,draw-shape
5,10019,Area 51,"37.234, -115.806",37.234, -115.806,draw-shape
6,10020,Area S4,"37.020, -115.787",37.020, -115.787,draw-shape
7,10021,AT&T,"32.779, -96.808",32.779, -96.808,draw-shape
8,10022,Avon Park Air Force Range,"27.647, -81.342",27.647, -81.342,draw-shape
9,10023,"Azores Islands, Portugal, Lajes Field USAF","38.761, -27.091",38.761, -27.091,draw-shape
10,10024,Barksdale AFB,"32.501, -93.663",32.501, -93.663,draw-shape
11,10025,Battell Memorial Institute,"44.479, -73.196",44.479, -73.196,draw-shape
12,10026,Beale Air Force Base,"39.136, -121.436",39.136, -121.436,draw-shape
13,10027,Bechtel Corp.,"37.789, -122.396",37.789, -122.396,draw-shape
14,10028,Bell Labs,"40.684, -74.401",40.684, -74.401,draw-shape
15,10029,Berkeley University,"37.872, -122.259",37.872, -122.259,draw-shape
16,10030,Blackjack Control,"36.234, -116.806",36.234, -116.806,draw-shape
17,10031,Boeing Phantom Works,"38.676, -90.444",38.676, -90.444,draw-shape
18,10032,"Booz-Allen and Hamilton, Inc.","38.924, -77.226",38.924, -77.226,draw-shape
19,10033,Brooks AFB,"29.384, -98.581",29.384, -98.581,draw-shape
20,10034,Buckley Space Force Base,"39.702, -104.751",39.702, -104.751,draw-shape
21,10035,"C Martin Co – Gov't contractor at Dugway Proving Grounds, UT","40.197, -112.936",40.197, -112.936,draw-shape
22,10036,Camp Peary,"37.341, -76.640",37.341, -76.640,draw-shape
23,10037,Carswell Air Force Base,"32.769, -97.442",32.769, -97.442,draw-shape
24,10038,China Lake Naval Air Weapons Station,"35.650, -117.669",35.650, -117.669,draw-shape
25,10039,CIA Headquarters,"38.952, -77.146",38.952, -77.146,draw-shape
26,10040,"CIA/160th operating under the NSC near Nashville, TN","36.167, -86.778",36.167, -86.778,draw-shape
27,10041,Council on Foreign Relations,"40.769, -73.968",40.769, -73.968,draw-shape
28,10042,Coyote Canyon Test Site,"35.049, -106.609",35.049, -106.609,draw-shape
29,10043,"Crane, Indiana","38.890, -86.842",38.890, -86.842,draw-shape
30,10044,DARPA,"38.883, -77.092",38.883, -77.092,draw-shape
31,10045,DIA,"38.871, -77.056",38.871, -77.056,draw-shape
32,10046,"Dugway Proving Grounds outside Provo, UT","40.197, -112.936",40.197, -112.936,draw-shape
33,10047,Dulce NM,"36.940, -107.000",36.940, -107.000,draw-shape
34,10048,E-Systems,"32.903, -96.461",32.903, -96.461,draw-shape
35,10049,Eagle's Nest aka Air Force T.O.C. and The Dragon's Den,"38.871, -77.056",38.871, -77.056,draw-shape
36,10050,Edwards North Base Complex,"34.957, -117.884",34.957, -117.884,draw-shape
37,10051,EG&G,"42.380, -71.264",42.380, -71.264,draw-shape
38,10052,EG&G Terminal Building,"36.080, -115.152",36.080, -115.152,draw-shape
39,10053,Eglin Air Force Base,"30.462, -86.559",30.462, -86.559,draw-shape
40,10054,FBI Headquarters,"38.895, -77.025",38.895, -77.025,draw-shape
41,10055,Ford,"42.314, -83.209",42.314, -83.209,draw-shape
42,10176,"Ford Island, Hawaii","21.364, -157.961",21.364, -157.961,draw-shape
43,10056,Fort Benning,"32.357, -84.958",32.357, -84.958,draw-shape
44,10057,"Fort Bragg, NC","35.141, -79.008",35.141, -79.008,draw-shape
45,10058,Fort Chaffee Maneuver Training Center,"35.289, -94.296",35.289, -94.296,draw-shape
46,10059,Fort Hood Army Base,"31.132, -97.781",31.132, -97.781,draw-shape
47,10060,Fort Huachuca,"31.556, -110.346",31.556, -110.346,draw-shape
48,10061,Fort Irwin/Raytheon ARV,"35.263, -116.687",35.263, -116.687,draw-shape
49,10062,"Fort Jackson, South Carolina Army base","34.021, -80.897",34.021, -80.897,draw-shape
50,10063,Fort Knox,"37.892, -85.964",37.892, -85.964,draw-shape
51,10064,Fort Polk,"31.046, -93.208",31.046, -93.208,draw-shape
52,10065,"Fort Sill, near Lawton, OK","34.665, -98.402",34.665, -98.402,draw-shape
53,10165,Ft. Bliss,"31.812, -106.422",31.812, -106.422,draw-shape
54,10066,Ft. Monmouth,"40.312, -74.045",40.312, -74.045,draw-shape
55,10067,General Motors,"42.331, -83.046",42.331, -83.046,draw-shape
56,10068,George AFB,"34.597, -117.384",34.597, -117.384,draw-shape
57,10069,Goddard Spaceflight Center,"38.998, -76.852",38.998, -76.852,draw-shape
58,10070,Grand Forks AFB,"47.961, -97.401",47.961, -97.401,draw-shape
59,10071,Guggenheim Foundation Laboratory,"40.779, -73.960",40.779, -73.960,draw-shape
60,10072,Hanger One Moffett Field,"37.416, -122.049",37.416, -122.049,draw-shape
61,10073,Hanscom AFB,"42.469, -71.289",42.469, -71.289,draw-shape
62,10074,Haystack Butte,"47.032, -111.956",47.032, -111.956,draw-shape
63,10075,HITT Construction,"38.852, -77.322",38.852, -77.322,draw-shape
64,10076,Holloman AFB,"32.852, -106.106",32.852, -106.106,draw-shape
65,10077,Homestead Air Force Base,"25.488, -80.383",25.488, -80.383,draw-shape
66,10078,Houma AFB,"29.567, -90.736",29.567, -90.736,draw-shape
67,10079,Hughes Aircraft Company,"33.932, -118.379",33.932, -118.379,draw-shape
68,10080,Hunter Liggett Military Reservation,"35.975, -121.229",35.975, -121.229,draw-shape
69,10081,"Irvine, CA","33.684, -117.827",33.684, -117.827,draw-shape
70,10082,ITT,"40.745, -73.977",40.745, -73.977,draw-shape
71,10083,"James F Hanley Federal Bldg, Syracuse, NY","43.051, -76.150",43.051, -76.150,draw-shape
72,10084,Jason Society,"38.895, -77.036",38.895, -77.036,draw-shape
73,10085,John Hopkins Hospital,"39.296, -76.592",39.296, -76.592,draw-shape
74,10086,Kelly AFB,"29.383, -98.582",29.383, -98.582,draw-shape
75,10087,Kirtland Air Force Base,"35.049, -106.609",35.049, -106.609,draw-shape
76,10088,Langley Air Force Base,"37.082, -76.360",37.082, -76.360,draw-shape
77,10089,Lawrence Livermore Labs,"37.688, -121.706",37.688, -121.706,draw-shape
78,10090,"Lewis McChord AFB, McChord, WA","47.137, -122.487",47.137, -122.487,draw-shape
79,10091,Lockheed Martin,"39.595, -105.071",39.595, -105.071,draw-shape
80,10093,Lockheed Martin Skunk Works,"34.637, -118.084",34.637, -118.084,draw-shape
81,10092,Lockheed-Martin Helendale Plant,"34.742, -117.319",34.742, -117.319,draw-shape
82,10094,Lookout Mountain Air Force Station,"34.109, -118.386",34.109, -118.386,draw-shape
83,10095,Los Alamos National Labs,"35.838, -106.314",35.838, -106.314,draw-shape
84,10096,"Lucerne, Switzerland secret, underground facility beneath CERN","46.234, 6.055",46.234, 6.055,draw-shape
85,10097,Luke AFB,"33.535, -112.383",33.535, -112.383,draw-shape
86,10098,MacDill Air Force Base,"27.849, -82.521",27.849, -82.521,draw-shape
87,10099,Manzano Mountain Weapons Storage Facility,"34.998, -106.475",34.998, -106.475,draw-shape
88,10100,Marshal Space Flight Center,"34.662, -86.672",34.662, -86.672,draw-shape
89,10101,Masonic Temple,"38.895, -77.036",38.895, -77.036,draw-shape
90,10102,Maxwell AFB,"32.380, -86.365",32.380, -86.365,draw-shape
91,10113,Nellis AFB,"36.2, -115.0",36.2, -115.0,draw-shape
92,10114,Nevada Test Site,"37.1, -116.1",37.1, -116.1,draw-shape
93,10115,NORAD Cheyenne Mountain,"38.7, -104.8",38.7, -104.8,draw-shape
94,10117,"Northrop ""Anthill""","34.8, -118.9",34.8, -118.9,draw-shape
95,10119,Norton Air Force Base,"34.1, -117.2",34.1, -117.2,draw-shape
96,10122,Oak Ridge National Laboratory,"35.9, -84.3",35.9, -84.3,draw-shape
97,10123,Offutt AFB,"41.1, -95.9",41.1, -95.9,draw-shape
98,10127,Pease Air National Guard Base,"43.1, -70.8",43.1, -70.8,draw-shape
99,10128,Pentagon,"38.9, -77.1",38.9, -77.1,draw-shape
100,10132,Pueblo Army Depot,"38.3, -104.3",38.3, -104.3,draw-shape
101,10134,Red Stone Arsenal,"34.6, -86.6",34.6, -86.6,draw-shape
102,10136,Rickenbacker Air National Guard Base,"39.8, -82.9",39.8, -82.9,draw-shape
103,10140,Scott AFB,"38.5, -89.8",38.5, -89.8,draw-shape
104,10144,Seymour-Johnson Air Force Base,"35.3, -77.9",35.3, -77.9,draw-shape
105,10152,Tinker AFB,"35.4, -97.4",35.4, -97.4,draw-shape
106,10154,Travis Air Force Base,"38.3, -121.9",38.3, -121.9,draw-shape
107,10158,Walter Reed Hospital,"38.9, -77.0",38.9, -77.0,draw-shape
108,10160,Wright Patterson Air Force Base,"39.8, -84.0",39.8, -84.0,draw-shape
109,10130,Pine Gap,"-23.8, 133.7",-23.8, 133.7,draw-shape
110,10143,"Seoul, Korea - Secret mountain facility","37.5, 127.0",37.5, 127.0,draw-shape
111,10125,"Padang, Indonesia","-0.9, 100.4",-0.9, 100.4,draw-shape
112,10171,Peasemore,"51.5, -1.3",51.5, -1.3,draw-shape
113,10102,Maxwell AFB,"32.4, -86.4",32.4, -86.4,draw-shape
114,10103,McClellan Air Force Base,"38.7, -121.4",38.7, -121.4,draw-shape
115,10104,McDonald Douglas Llano Plant,"34.5, -117.8",34.5, -117.8,draw-shape
116,10105,Miramar Naval Base,"32.9, -117.1",32.9, -117.1,draw-shape
117,10106,MIT,"42.4, -71.1",42.4, -71.1,draw-shape
118,10169,Mount Hough,"39.9, -120.9",39.9, -120.9,draw-shape
119,10109,NASA Ames Research Center,"37.4, -122.1",37.4, -122.1,draw-shape
120,10110,NASA Johnson Space Center,"29.6, -95.1",29.6, -95.1,draw-shape
121,10111,Naval Air Station,"44.9, -66.9",44.9, -66.9,draw-shape
122,10112,Naval Station Great Lakes,"42.3, -87.8",42.3, -87.8,draw-shape
123,10113,Nellis AFB,"36.2, -115.0",36.2, -115.0,draw-shape
124,10114,Nevada Test Site,"37.1, -116.1",37.1, -116.1,draw-shape
125,10115,NORAD Cheyenne Mountain,"38.7, -104.8",38.7, -104.8,draw-shape
126,10117,"Northrop ""Anthill""","34.8, -118.9",34.8, -118.9,draw-shape
127,10119,Norton Air Force Base,"34.1, -117.2",34.1, -117.2,draw-shape
128,10122,Oak Ridge National Laboratory,"35.9, -84.3",35.9, -84.3,draw-shape
129,10123,Offutt AFB,"41.1, -95.9",41.1, -95.9,draw-shape
130,10127,Pease Air National Guard Base,"43.1, -70.8",43.1, -70.8,draw-shape
131,10128,Pentagon,"38.9, -77.1",38.9, -77.1,draw-shape
132,10132,Pueblo Army Depot,"38.3, -104.3",38.3, -104.3,draw-shape
133,10134,Red Stone Arsenal,"34.6, -86.6",34.6, -86.6,draw-shape
134,10136,Rickenbacker Air National Guard Base,"39.8, -82.9",39.8, -82.9,draw-shape
135,10140,Scott AFB,"38.5, -89.8",38.5, -89.8,draw-shape
136,10144,Seymour-Johnson Air Force Base,"35.3, -77.9",35.3, -77.9,draw-shape
137,10152,Tinker AFB,"35.4, -97.4",35.4, -97.4,draw-shape
138,10154,Travis Air Force Base,"38.3, -121.9",38.3, -121.9,draw-shape
139,10158,Walter Reed Hospital,"38.9, -77.0",38.9, -77.0,draw-shape
140,10160,Wright Patterson Air Force Base,"39.8, -84.0",39.8, -84.0,draw-shape
141,10130,Pine Gap,"-23.8, 133.7",-23.8, 133.7,draw-shape
142,10143,"Seoul, Korea - Secret mountain facility","37.5, 127.0",37.5, 127.0,draw-shape
143,10125,"Padang, Indonesia","-0.9, 100.4",-0.9, 100.4,draw-shape
144,10171,Peasemore,"51.5, -1.3",51.5, -1.3,draw-shape
uap_analyzer.py
ADDED
import logging
import os
import time
import json
import pickle
import textwrap
import concurrent.futures

import pandas as pd
import numpy as np
import torch
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import squarify
import plotly.graph_objects as go
import plotly.express as px
import streamlit as st

from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
from scipy.stats import chi2_contingency
from statsmodels.graphics.mosaicplot import mosaic
from statsmodels.api import stats
from cuml.manifold import umap
from cuml.cluster import hdbscan
from xgboost import plot_importance
# Aliased so the function is not shadowed by the 'distance' string parameters
# of the merge methods below.
from Levenshtein import distance as levenshtein_distance
from requests.exceptions import HTTPError
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import pytorch_cos_sim, pairwise_cos_sim
from stqdm import stqdm

stqdm.pandas()
st.set_option('deprecation.showPyplotGlobalUse', False)

# Load the shared sentence-embedding model once at import time and move it to the GPU
with torch.no_grad():
    embed_model = SentenceTransformer('embaas/sentence-transformers-e5-large-v2')
    embed_model.to('cuda')

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

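# Illustrative end-to-end usage (a sketch, not part of the original module; assumes a
# DataFrame `df` with a free-text column named 'summary'):
#
#   analyzer = UAPAnalyzer(df, 'summary')
#   analyzer.preprocess_data(trim=True, top_n=32)
#   analyzer.reduce_dimensionality(method='UMAP', n_components=2)
#   analyzer.cluster_data(method='HDBSCAN', min_cluster_size=15)
#   analyzer.get_tf_idf_clusters(top_n=2)
#
# The methods below are defined in roughly that calling order.
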
class UAPAnalyzer:
    """
    A class for analyzing and clustering textual data within a pandas DataFrame using
    Natural Language Processing (NLP) techniques and machine learning models.

    Attributes:
        data (pd.DataFrame): The dataset containing textual data for analysis.
        column (str): The name of the column in the DataFrame to be analyzed.
        embeddings (np.ndarray): The vector representations of textual data.
        reduced_embeddings (np.ndarray): The dimensionality-reduced embeddings.
        cluster_labels (np.ndarray): The labels assigned to each data point after clustering.
        cluster_terms (list): The list of terms associated with each cluster.
        tfidf_matrix (sparse matrix): The Term Frequency-Inverse Document Frequency (TF-IDF) matrix.
        models (dict): A dictionary to store trained machine learning models.
        evaluations (dict): A dictionary to store evaluation results of models.
        data_nums (pd.DataFrame): The DataFrame with numerical encoding of categorical data.
    """

    def __init__(self, data, column, has_embeddings=False):
        """
        Initializes the UAPAnalyzer with a dataset and a specified column for analysis.

        Args:
            data (pd.DataFrame): The dataset for analysis.
            column (str): The column within the dataset to analyze.
            has_embeddings (bool): Whether the DataFrame already has an 'embeddings' column.
        """
        assert isinstance(data, pd.DataFrame), "Data must be a pandas DataFrame"
        assert column in data.columns, f"Column '{column}' not found in DataFrame"
        self.has_embeddings = has_embeddings
        self.data = data
        self.column = column
        self.embeddings = None
        self.reduced_embeddings = None
        self.cluster_labels = None
        self.cluster_names = None
        self.cluster_terms = None
        self.cluster_terms_embeddings = None
        self.tfidf_matrix = None
        self.models = {}  # To store trained models
        self.evaluations = {}  # To store evaluation results
        self.data_nums = None  # Encoded numerical data
        self.x_train = None
        self.y_train = None
        self.x_test = None
        self.y_test = None
        self.preds = None
        self.new_dataset = None
        self.model = embed_model  # Shared model, already moved to CUDA at module setup

        logging.info("UAPAnalyzer initialized")

    def preprocess_data(self, trim=False, has_embeddings=False, top_n=32):
        """
        Preprocesses the data by optionally trimming the dataset to include only the
        top N labels and extracting embeddings.

        Args:
            trim (bool): Whether to trim the dataset to include only the top N labels.
            has_embeddings (bool): Whether to reuse precomputed embeddings from the 'embeddings' column.
            top_n (int): The number of top labels to retain if trimming is enabled.
        """
        logging.info("Preprocessing data")

        if trim:
            # Identify the top labels based on value counts
            top_labels = self.data[self.column].value_counts().nlargest(top_n).index.tolist()
            # Revise the column data, setting values to 'Other' if they are not in the top labels
            self.data[f'{self.column}_revised'] = np.where(self.data[self.column].isin(top_labels), self.data[self.column], 'Other')
        # Convert the column data to string type before extracting embeddings; note that
        # embeddings are computed from the original column, not the '_revised' copy
        string_data = self.data[f'{self.column}'].astype(str)
        # Reuse precomputed embeddings when available, otherwise encode from scratch
        if has_embeddings:
            self.embeddings = self.data['embeddings'].to_list()
        else:
            self.embeddings = self._extract_embeddings(string_data)
        logging.info("Data preprocessing complete")

    def _extract_embeddings(self, data_column):
        """
        Extracts embeddings from the given data column.

        Args:
            data_column (pd.Series): The column from which to extract embeddings.

        Returns:
            np.ndarray: The extracted embeddings.
        """
        logging.info("Extracting embeddings")
        # Encode the documents with the shared GPU model
        return embed_model.encode(data_column.tolist(), show_progress_bar=True)

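    # Aside (not original to this file): the upstream e5 model family was trained with
    # "query: " / "passage: " input prefixes. If this checkpoint follows that convention,
    # prefixing documents may sharpen similarity scores. Hypothetical example:
    #
    #   embed_model.encode([f"passage: {t}" for t in texts], show_progress_bar=True)
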
    def reduce_dimensionality(self, method='UMAP', n_components=2, **kwargs):
        """
        Reduces the dimensionality of embeddings using the specified method.

        Args:
            method (str): The dimensionality reduction method to use ('UMAP' or 'PCA').
            n_components (int): The number of dimensions to reduce to.
            **kwargs: Additional keyword arguments for the dimensionality reduction method.
        """
        logging.info(f"Reducing dimensionality using {method}")
        if method == 'UMAP':
            reducer = umap.UMAP(n_components=n_components, **kwargs)
        elif method == 'PCA':
            reducer = PCA(n_components=n_components)
        else:
            raise ValueError("Unsupported dimensionality reduction method")

        self.reduced_embeddings = reducer.fit_transform(self.embeddings)
        logging.info(f"Dimensionality reduced using {method}")

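    # Example call (hypothetical parameter values; n_neighbors and min_dist are
    # standard UMAP knobs forwarded via **kwargs):
    #
    #   analyzer.reduce_dimensionality(method='UMAP', n_components=2,
    #                                  n_neighbors=15, min_dist=0.1)
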
    def cluster_data(self, method='HDBSCAN', **kwargs):
        """
        Clusters the reduced dimensionality data using the specified clustering method.

        Args:
            method (str): The clustering method to use ('HDBSCAN' or 'KMeans').
            **kwargs: Additional keyword arguments for the clustering method.
        """
        logging.info(f"Clustering data using {method}")
        if method == 'HDBSCAN':
            clusterer = hdbscan.HDBSCAN(**kwargs)
        elif method == 'KMeans':
            clusterer = KMeans(**kwargs)
        else:
            raise ValueError("Unsupported clustering method")

        clusterer.fit(self.reduced_embeddings)
        self.cluster_labels = clusterer.labels_
        logging.info(f"Data clustering complete using {method}")

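    # Example call (hypothetical values). Note that HDBSCAN labels outliers as -1,
    # which get_tf_idf_clusters below skips when naming clusters:
    #
    #   analyzer.cluster_data(method='HDBSCAN', min_cluster_size=10, min_samples=5)
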
    def get_tf_idf_clusters(self, top_n=2):
        """
        Names clusters using the most frequent terms based on TF-IDF analysis.

        Args:
            top_n (int): The number of top terms to consider for naming each cluster.
        """
        logging.info("Naming clusters based on top TF-IDF terms.")

        # Ensure data has been clustered
        assert self.cluster_labels is not None, "Data has not been clustered yet."
        vectorizer = TfidfVectorizer(max_features=1000, stop_words='english')

        # Fit the vectorizer to the text data and transform it into a TF-IDF matrix
        tfidf_matrix = vectorizer.fit_transform(self.data[f'{self.column}'].astype(str))
        terms = vectorizer.get_feature_names_out()

        # Initialize an empty list to store the cluster terms
        self.cluster_terms = []

        for cluster_id in np.unique(self.cluster_labels):
            # Skip noise if present (-1 in HDBSCAN)
            if cluster_id == -1:
                continue

            # Find indices of documents in the current cluster
            indices = np.where(self.cluster_labels == cluster_id)[0]

            # Compute the mean TF-IDF score for each term in the cluster
            cluster_tfidf_mean = np.mean(tfidf_matrix[indices], axis=0)

            # Ensure the mean vector is a flat dense array before indexing
            if hasattr(cluster_tfidf_mean, "toarray"):
                dense_mean = cluster_tfidf_mean.toarray().flatten()
            else:
                dense_mean = np.asarray(cluster_tfidf_mean).flatten()

            # Get the indices of the top_n terms
            top_n_indices = np.argsort(dense_mean)[-top_n:]

            # Join the corresponding top_n terms with a hyphen to form the cluster name
            top_terms = [terms[i] for i in top_n_indices]
            cluster_name = '-'.join(top_terms)

            # Append the cluster name to the list
            self.cluster_terms.append(cluster_name)

        # Convert the list of cluster terms to a categorical data type
        self.cluster_terms = pd.Categorical(self.cluster_terms)
        logging.info("Cluster naming completed.")

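    # Toy illustration (made-up data): with top_n=2, a cluster whose reports are
    # dominated by the terms 'orb' and 'light' would be named 'orb-light', so the
    # legend of the plots below reads as a two-word TF-IDF summary per cluster.
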
    def merge_similar_clusters(self, distance='cosine', char_diff_threshold=3, similarity_threshold=0.92, embeddings='SBERT'):
        """
        Merges similar clusters based on the similarity of their associated terms.

        Args:
            distance (str): 'cosine' or 'levenshtein', the metric used to compare cluster names.
            char_diff_threshold (int): Maximum Levenshtein distance at which two names are merged.
            similarity_threshold (float): The cosine similarity above which clusters are merged.
            embeddings (str): Embedding backend identifier (currently unused).
        """
        from collections import defaultdict
        logging.info("Merging similar clusters")

        # A mapping from cluster names to the set of cluster names to be merged into them
        merge_mapping = defaultdict(set)

        if distance == 'levenshtein':
            for i, name1 in enumerate(self.cluster_terms):
                for j, name2 in enumerate(self.cluster_terms[i + 1:], start=i + 1):
                    # Use the aliased Levenshtein function; the bare name 'distance'
                    # is shadowed by this method's string parameter
                    dist = levenshtein_distance(name1, name2)
                    if dist <= char_diff_threshold:
                        logging.info(f"Merging '{name2}' into '{name1}'")
                        merge_mapping[name1].add(name2)

        elif distance == 'cosine':
            self.cluster_terms_embeddings = embed_model.encode(list(self.cluster_terms))
            cos_sim_matrix = pytorch_cos_sim(self.cluster_terms_embeddings, self.cluster_terms_embeddings)
            for i, name1 in enumerate(self.cluster_terms):
                for j, name2 in enumerate(self.cluster_terms[i + 1:], start=i + 1):
                    if cos_sim_matrix[i][j] > similarity_threshold:
                        logging.info(f"Merging cluster '{name2}' into cluster '{name1}' based on cosine similarity")
                        merge_mapping[name1].add(name2)

        # Flatten the merge mapping to a simple name change mapping
        name_change_mapping = {}
        for cluster_name, merges in merge_mapping.items():
            for merge_name in merges:
                name_change_mapping[merge_name] = cluster_name

        # Update cluster terms, deduplicating merged names and remembering new indices
        updated_cluster_terms = []
        original_to_updated_index = {}
        for i, name in enumerate(self.cluster_terms):
            updated_name = name_change_mapping.get(name, name)
            if updated_name not in updated_cluster_terms:
                updated_cluster_terms.append(updated_name)
                original_to_updated_index[i] = len(updated_cluster_terms) - 1
            else:
                original_to_updated_index[i] = updated_cluster_terms.index(updated_name)

        # Per-point labels: points whose original label is not in the mapping (HDBSCAN
        # noise, -1) stay -1, and cluster terms are expanded to one entry per data point,
        # following the commented-out variant in the original source. Also fixed here:
        # the original assigned to 'self.clusters_labels' (typo), leaving labels unchanged.
        self.cluster_labels = np.array([original_to_updated_index.get(label, -1) for label in self.cluster_labels])
        self.cluster_terms = [updated_cluster_terms[label] if label != -1 else 'Noise' for label in self.cluster_labels]

        # Log the total number of merges
        total_merges = sum(len(merges) for merges in merge_mapping.values())
        logging.info(f"Total clusters merged: {total_merges}")

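    # Threshold intuition (illustrative numbers, not from the original): with
    # char_diff_threshold=3, 'orb-light' and 'orbs-light' merge (edit distance 1);
    # with similarity_threshold=0.92, only near-paraphrase cluster names merge,
    # while lowering it toward ~0.8 collapses loosely related clusters.
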
    def merge_similar_clusters2(self, distance='cosine', char_diff_threshold=3, similarity_threshold=0.92):
        logging.info("Merging similar clusters based on distance: {}".format(distance))
        from collections import defaultdict
        merge_mapping = defaultdict(set)

        if distance == 'levenshtein':
            for i, name1 in enumerate(self.cluster_terms):
                for j, name2 in enumerate(self.cluster_terms[i + 1:], start=i + 1):
                    # Aliased to avoid shadowing by the 'distance' string parameter
                    dist = levenshtein_distance(name1, name2)
                    if dist <= char_diff_threshold:
                        merge_mapping[name1].add(name2)
                        logging.info(f"Merging '{name2}' into '{name1}' based on Levenshtein distance")

        elif distance == 'cosine':
            if self.cluster_terms_embeddings is None:
                self.cluster_terms_embeddings = embed_model.encode(list(self.cluster_terms))
            cos_sim_matrix = pytorch_cos_sim(self.cluster_terms_embeddings, self.cluster_terms_embeddings)
            for i in range(len(self.cluster_terms)):
                for j in range(i + 1, len(self.cluster_terms)):
                    if cos_sim_matrix[i][j] > similarity_threshold:
                        merge_mapping[self.cluster_terms[i]].add(self.cluster_terms[j])
                        logging.info(f"Merging cluster '{self.cluster_terms[j]}' into cluster '{self.cluster_terms[i]}'")

        self._update_cluster_terms_and_labels(merge_mapping)

    def _update_cluster_terms_and_labels(self, merge_mapping):
        # Flatten the merge mapping to a simple old-name -> new-name mapping
        name_change_mapping = {old: new for new, olds in merge_mapping.items() for old in olds}
        # Apply the renames, then rebuild dense labels against the deduplicated term
        # list. The original indexed only merged names and assigned -1 to everything
        # else, which silently turned untouched clusters into noise.
        renamed_terms = [name_change_mapping.get(term, term) for term in self.cluster_terms]
        unique_terms = list(dict.fromkeys(renamed_terms))
        self.cluster_terms = renamed_terms
        self.cluster_labels = np.array([unique_terms.index(term) for term in renamed_terms])

        logging.info(f"Total clusters merged: {len(merge_mapping)}")

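    # Flattening example (hypothetical): merge_mapping {'disk-light': {'disc-light'}}
    # becomes name_change_mapping {'disc-light': 'disk-light'}, i.e. old name -> new name.
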
    def cluster_levenshtein(self, cluster_terms, cluster_labels, char_diff_threshold=3):
        from Levenshtein import distance  # Local import of the edit-distance function

        merge_map = {}
        # Iterate over term pairs and decide on merging based on the edit distance
        for idx, term1 in enumerate(cluster_terms):
            for jdx, term2 in enumerate(cluster_terms):
                if idx < jdx and distance(term1, term2) <= char_diff_threshold:
                    # Remap term2's label onto term1's (the original built this map from
                    # data-point positions, which mixed positions with label values)
                    merge_map[jdx] = idx
                    logging.info(f"Merging '{term2}' into '{term1}'")
                    st.write(f"Merging '{term2}' into '{term1}'")
        # Update the cluster labels
        updated_cluster_labels = [merge_map.get(label, label) for label in cluster_labels]
        # Update string labels to reflect merged labels
        updated_string_labels = [cluster_terms[label] for label in updated_cluster_labels]
        return updated_string_labels

    def cluster_cosine(self, cluster_terms, cluster_labels, similarity_threshold):
        from sklearn.metrics.pairwise import cosine_similarity
        cluster_terms_embeddings = embed_model.encode(cluster_terms)
        # Compute the cosine similarity matrix in a vectorized form
        cos_sim_matrix = cosine_similarity(cluster_terms_embeddings, cluster_terms_embeddings)

        merge_map = {}
        n_terms = len(cluster_terms)
        # Iterate only over the upper triangle, excluding the diagonal, to avoid
        # redundant comparisons and self-comparison
        for idx in range(n_terms):
            for jdx in range(idx + 1, n_terms):
                if cos_sim_matrix[idx, jdx] >= similarity_threshold:
                    # Remap term jdx's label onto term idx's (fixed from the original,
                    # which keyed the map on data-point positions)
                    merge_map[jdx] = idx
                    st.write(f"Merging '{cluster_terms[jdx]}' into '{cluster_terms[idx]}'")
                    logging.info(f"Merging '{cluster_terms[jdx]}' into '{cluster_terms[idx]}'")
        # Update the cluster labels
        updated_cluster_labels = [merge_map.get(label, label) for label in cluster_labels]
        # Return the per-point string labels reflecting the merges
        updated_string_labels = [cluster_terms[label] for label in updated_cluster_labels]
        return updated_string_labels

    def merge_similar_clusters(self, cluster_terms, cluster_labels, distance_type='cosine', char_diff_threshold=3, similarity_threshold=0.92):
        # Note: this definition overrides the earlier merge_similar_clusters method;
        # Python keeps only the last definition of a name in a class body.
        if distance_type == 'levenshtein':
            return self.cluster_levenshtein(cluster_terms, cluster_labels, char_diff_threshold)
        elif distance_type == 'cosine':
            return self.cluster_cosine(cluster_terms, cluster_labels, similarity_threshold)
        else:
            raise ValueError("Unsupported distance type; use 'levenshtein' or 'cosine'")

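    # Example dispatcher usage (hypothetical inputs): given per-cluster names and
    # per-point labels, this returns per-point string labels after merging:
    #
    #   merged = analyzer.merge_similar_clusters(cluster_terms, cluster_labels,
    #                                            distance_type='cosine',
    #                                            similarity_threshold=0.92)
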
    def plot_embeddings2(self, title=None):
        assert self.reduced_embeddings is not None, "Dimensionality reduction has not been performed yet."
        assert self.cluster_terms is not None, "Cluster TF-IDF analysis has not been performed yet."

        logging.info("Plotting embeddings with TF-IDF colors")

        fig = go.Figure()

        unique_cluster_terms = np.unique(self.cluster_terms)

        for cluster_term in unique_cluster_terms:
            # Compute indices up front; the original only set them in the non-noise
            # branch, so the 'Noise' trace reused stale indices
            indices = np.where(np.array(self.cluster_terms) == cluster_term)[0]
            if cluster_term != 'Noise':
                # Plot points in the current cluster
                fig.add_trace(
                    go.Scatter(
                        x=self.reduced_embeddings[indices, 0],
                        y=self.reduced_embeddings[indices, 1],
                        mode='markers',
                        marker=dict(
                            size=5,
                            opacity=0.8,
                        ),
                        name=cluster_term,
                        text=self.data[f'{self.column}'].iloc[indices],
                        hoverinfo='text',
                    )
                )
            else:
                # Plot noise points in grey with lower opacity
                fig.add_trace(
                    go.Scatter(
                        x=self.reduced_embeddings[indices, 0],
                        y=self.reduced_embeddings[indices, 1],
                        mode='markers',
                        marker=dict(
                            size=5,
                            opacity=0.5,
                            color='grey'
                        ),
                        name='Noise',
                        text=self.data[f'{self.column}'].iloc[indices],
                        hoverinfo='text',
                    )
                )

        fig.update_layout(title=title, showlegend=True, legend_title_text='Top TF-IDF Terms')
        st.plotly_chart(fig, use_container_width=True)

    def plot_embeddings3(self, title=None):
        assert self.reduced_embeddings is not None, "Dimensionality reduction has not been performed yet."
        assert self.cluster_terms is not None, "Cluster TF-IDF analysis has not been performed yet."

        logging.info("Plotting embeddings with TF-IDF colors")

        fig = go.Figure()

        unique_cluster_terms = np.unique(self.cluster_terms)
        terms_order = {term: i for i, term in enumerate(unique_cluster_terms)}

        # Assign colors from Plotly Express's qualitative palette, keeping 'Noise' grey
        color_map = px.colors.qualitative.Plotly
        color_idx = 0
        for cluster_term in unique_cluster_terms:
            indices = np.where(np.array(self.cluster_terms) == cluster_term)[0]
            if cluster_term != 'Noise':
                marker_color = color_map[color_idx % len(color_map)]
                color_idx += 1
            else:
                marker_color = 'grey'

            fig.add_trace(
                go.Scatter(
                    x=self.reduced_embeddings[indices, 0],
                    y=self.reduced_embeddings[indices, 1],
                    mode='markers',
                    marker=dict(
                        size=5,
                        opacity=(0.5 if cluster_term == 'Noise' else 0.8),
                        color=marker_color
                    ),
                    name=cluster_term,
                    text=self.data[f'{self.column}'].iloc[indices],
                    hoverinfo='text'
                )
            )
        # Keep the legend in a stable, deterministic order
        fig.data = sorted(fig.data, key=lambda trace: terms_order[trace.name])
        fig.update_layout(title=title if title else "Embeddings Visualized", showlegend=True, legend_title_text='Top TF-IDF Terms')
        st.plotly_chart(fig, use_container_width=True)

    def plot_embeddings(self, title=None):
        """
        Plots the reduced dimensionality embeddings with clusters indicated.

        Args:
            title (str): The title of the plot.
        """
        # Ensure dimensionality reduction and TF-IDF based cluster naming have been performed
        assert self.reduced_embeddings is not None, "Dimensionality reduction has not been performed yet."
        assert self.cluster_terms is not None, "Cluster TF-IDF analysis has not been performed yet."

        logging.info("Plotting embeddings with TF-IDF colors")

        fig = go.Figure()

        # Pair each label with its own term. Zipping two independently sorted
        # np.unique outputs (as the original did) can pair labels with the wrong
        # terms, since terms sort alphabetically while labels sort numerically.
        cluster_terms_arr = np.array(self.cluster_terms)
        per_point_terms = len(cluster_terms_arr) == len(self.cluster_labels)
        for cluster_id in np.unique(self.cluster_labels):
            indices = np.where(self.cluster_labels == cluster_id)[0]
            if per_point_terms:
                # After merging, cluster_terms holds one entry per data point
                cluster_term = cluster_terms_arr[indices[0]]
            elif 0 <= cluster_id < len(cluster_terms_arr):
                # Before merging, cluster_terms is one entry per cluster id
                cluster_term = cluster_terms_arr[cluster_id]
            else:
                cluster_term = 'Noise'

            # Plot points in the current cluster
            fig.add_trace(
                go.Scatter(
                    x=self.reduced_embeddings[indices, 0],
                    y=self.reduced_embeddings[indices, 1],
                    mode='markers',
                    marker=dict(
                        size=5,
                        opacity=0.8,
                    ),
                    name=cluster_term,
                    text=self.data[f'{self.column}'].iloc[indices],
                    hoverinfo='text',
                )
            )

        fig.update_layout(title=title, showlegend=True, legend_title_text='Top TF-IDF Terms')
        st.plotly_chart(fig, use_container_width=True)
        logging.info("Embeddings plotted with TF-IDF colors")

    def plot_embeddings4(self, title=None, cluster_terms=None, cluster_labels=None, reduced_embeddings=None, column=None, data=None):
        """
        Plots the reduced-dimensionality embeddings with clusters indicated.

        Args:
            title (str): The title of the plot.
            cluster_terms (list): Top TF-IDF term per cluster, indexed by cluster ID.
            cluster_labels (np.ndarray): Cluster ID for each point.
            reduced_embeddings (np.ndarray): 2D coordinates for each point.
            column (str): Name of the source text column, used for hover text.
            data (pd.DataFrame): The original data frame.
        """
        # Ensure dimensionality reduction and TF-IDF based cluster naming have been performed
        assert reduced_embeddings is not None, "Dimensionality reduction has not been performed yet."
        assert cluster_terms is not None, "Cluster TF-IDF analysis has not been performed yet."

        logging.info("Plotting embeddings with TF-IDF colors")

        fig = go.Figure()

        # Determine unique cluster IDs and their terms, keeping a consistent color mapping
        unique_cluster_ids = np.unique(cluster_labels)
        unique_cluster_terms = [cluster_terms[i] for i in unique_cluster_ids]

        color_map = px.colors.qualitative.Plotly  # Plotly Express's qualitative palette for consistency

        # Map each cluster ID to a color, cycling through the palette if needed
        cluster_colors = {}
        for color_idx, cid in enumerate(unique_cluster_ids):
            cluster_colors[cid] = color_map[color_idx % len(color_map)]

        for cluster_id, cluster_term in zip(unique_cluster_ids, unique_cluster_terms):
            indices = np.where(cluster_labels == cluster_id)[0]
            fig.add_trace(
                go.Scatter(
                    x=reduced_embeddings[indices, 0],
                    y=reduced_embeddings[indices, 1],
                    mode='markers',
                    marker=dict(
                        color=cluster_colors[cluster_id],
                        size=5,
                        opacity=0.8,
                    ),
                    name=cluster_term,
                    text=data[column].iloc[indices],  # Use the original column for hover text
                    hoverinfo='text',
                )
            )

        fig.update_layout(
            title=title if title else "Embeddings Visualized",
            showlegend=True,
            legend_title_text='Top TF-IDF Terms',
            legend=dict(
                traceorder='normal',  # traces appear in the order they were added
                itemsizing='constant'
            )
        )
        st.plotly_chart(fig, use_container_width=True)
        logging.info("Embeddings plotted with TF-IDF colors")

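
# Usage sketch (illustrative; all demo_* names are hypothetical): builds the
# inputs plot_embeddings4 expects -- `cluster_labels` holds one cluster ID per
# row, `cluster_terms` is indexed by those IDs, and `reduced_embeddings` is an
# (n, 2) array -- and renders the same per-cluster traces standalone with
# Plotly, without the Streamlit context the method itself requires.
def _demo_cluster_scatter():
    rng = np.random.default_rng(0)
    n = 60
    demo_embeddings = rng.normal(size=(n, 2))      # stand-in for UMAP/PCA output
    demo_labels = rng.integers(0, 3, size=n)       # cluster IDs 0..2
    demo_terms = ["lights", "triangle", "orb"]     # cluster_terms[id] -> legend name

    fig = go.Figure()
    for cid in np.unique(demo_labels):
        idx = np.where(demo_labels == cid)[0]
        fig.add_trace(go.Scatter(
            x=demo_embeddings[idx, 0], y=demo_embeddings[idx, 1],
            mode='markers', name=demo_terms[cid],
        ))
    fig.show()
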
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def analyze_and_predict(data, analyzers, col_names):
    """
    Performs analysis on the data using provided analyzers and makes predictions on specified columns.

    Args:
        data (pd.DataFrame): The dataset for analysis.
        analyzers (list): A list of UAPAnalyzer instances.
        col_names (list): Column names to be analyzed and predicted.

    Returns:
        pd.DataFrame: One categorical column of cluster terms per analyzer.
    """
    new_data = pd.DataFrame()
    for column, analyzer in zip(col_names, analyzers):
        new_data[f'Analyzer_{column}'] = analyzer.cluster_terms
        logging.info(f"Cluster terms extracted for {column}")

    # Encode the categorical cluster terms as integer codes for XGBoost
    new_data = new_data.fillna('null').astype('category')
    data_nums = new_data.apply(lambda x: x.cat.codes)

    # Predict each column from all the others; skip columns that fail
    for col in data_nums.columns:
        try:
            categories = new_data[col].cat.categories
            x_train, x_test, y_train, y_test = train_test_split(data_nums.drop(columns=[col]), data_nums[col], test_size=0.2, random_state=42)
            bst, accuracy, preds = train_xgboost(x_train, y_train, x_test, y_test, len(categories))
            plot_results(new_data, bst, x_test, y_test, preds, categories, accuracy, col)
        except Exception as e:
            logging.error(f"Error processing {col}: {e}")
    return new_data

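
# Minimal sketch of the categorical encoding used above (illustrative data):
# each string column becomes integer codes, and `cat.categories` recovers the
# mapping that later labels the confusion-matrix axes.
def _demo_category_codes():
    demo = pd.DataFrame({"Analyzer_shape": ["orb", "triangle", "orb", "null"]}).astype("category")
    codes = demo.apply(lambda x: x.cat.codes)
    print(demo["Analyzer_shape"].cat.categories)   # Index(['null', 'orb', 'triangle'], ...)
    print(codes["Analyzer_shape"].tolist())        # [1, 2, 1, 0]
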
def train_xgboost(x_train, y_train, x_test, y_test, num_classes):
    """
    Trains an XGBoost model and evaluates its performance.

    Args:
        x_train (pd.DataFrame): Training features.
        y_train (pd.Series): Training labels.
        x_test (pd.DataFrame): Test features.
        y_test (pd.Series): Test labels.
        num_classes (int): The number of unique classes in the target variable.

    Returns:
        bst (Booster): The trained XGBoost model.
        accuracy (float): The accuracy of the model on the test set.
        preds (np.ndarray): Predicted class labels for the test set.
    """
    dtrain = xgb.DMatrix(x_train, label=y_train, enable_categorical=True)
    dtest = xgb.DMatrix(x_test, label=y_test, enable_categorical=True)

    # 'device': 'cuda' assumes a GPU-enabled XGBoost build; drop it to train on CPU
    params = {'device': 'cuda', 'objective': 'multi:softmax', 'num_class': num_classes, 'max_depth': 6, 'eta': 0.3}
    num_round = 100
    bst = xgb.train(params, dtrain, num_boost_round=num_round)
    preds = bst.predict(dtest)  # 'multi:softmax' returns class labels directly
    accuracy = accuracy_score(y_test, preds)

    logging.info(f"XGBoost trained with accuracy: {accuracy:.2f}")
    return bst, accuracy, preds

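
# Usage sketch (illustrative; synthetic data): calls train_xgboost the same
# way analyze_and_predict does, on integer-coded features. Note that the
# 'device': 'cuda' entry in the params above assumes a GPU build of XGBoost;
# remove it to run this demo on CPU.
def _demo_train_xgboost():
    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.integers(0, 5, size=(200, 3)), columns=["a", "b", "c"])
    y = pd.Series(rng.integers(0, 3, size=200))  # three classes
    x_tr, x_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)
    bst, acc, preds = train_xgboost(x_tr, y_tr, x_te, y_te, num_classes=3)
    print(f"demo accuracy: {acc:.2f}")
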
def plot_results(new_data, bst, x_test, y_test, preds, categories, accuracy, col):
    """
    Plots the feature importance, confusion matrix, and contingency table.

    Args:
        new_data (pd.DataFrame): The categorical data used for the contingency table.
        bst (Booster): The trained XGBoost model.
        x_test (pd.DataFrame): Test features.
        y_test (pd.Series): Test labels.
        preds (np.array): Predictions made by the model.
        categories (Index): Category names for the target variable.
        accuracy (float): The accuracy of the model on the test set.
        col (str): The target column name being analyzed and predicted.
    """
    fig, axs = plt.subplots(1, 3, figsize=(25, 5), dpi=300)
    fig.suptitle(f'{col.split(sep=".")[-1]} prediction', fontsize=35)

    plot_importance(bst, ax=axs[0], importance_type='gain', show_values=False)
    conf_matrix = confusion_matrix(y_test, preds)
    sns.heatmap(conf_matrix, annot=True, fmt='g', cmap='Blues', xticklabels=categories, yticklabels=categories, ax=axs[1])
    axs[1].set_title(f'Confusion Matrix\nAccuracy: {accuracy * 100:.2f}%')
    # Rotate the y-axis labels for readability
    axs[1].set_yticklabels(axs[1].get_yticklabels(), rotation=30, ha='right')

    # The most important feature is the first element once sorted by gain
    sorted_features = sorted(bst.get_score(importance_type="gain").items(), key=lambda x: x[1], reverse=True)
    most_important_feature = sorted_features[0][0]

    # Create a contingency table between the target and its strongest predictor
    contingency_table = pd.crosstab(new_data[col], new_data[most_important_feature])

    # Pearson residuals highlight the cells that deviate most from independence
    table = stats.Table(contingency_table).resid_pearson

    # Perform the chi-squared test and report the p-value
    chi2, p, dof, expected = chi2_contingency(contingency_table)
    print(f"Chi-squared test for {col} and {most_important_feature}: p-value = {p}")

    sns.heatmap(table, annot=True, cmap='Greens', ax=axs[2])
    axs[2].set_yticklabels(axs[2].get_yticklabels(), rotation=30, ha='right')
    axs[2].set_title(f'Contingency Table between {col.split(sep=".")[-1]} and {most_important_feature.split(sep=".")[-1]}\np-value = {p}')

    plt.tight_layout()
    #plt.savefig(f"{col}_{accuracy:.2f}_prediction_XGB.jpeg", dpi=300)
    return plt

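
# Minimal sketch of the Pearson-residual computation above, on a tiny
# hand-made crosstab (illustrative). Positive residuals mark cells whose
# counts exceed what independence of the two variables would predict.
def _demo_pearson_residuals():
    from statsmodels.stats.contingency_tables import Table

    crosstab = pd.DataFrame([[30, 10], [10, 30]],
                            index=["shape: orb", "shape: triangle"],
                            columns=["color: white", "color: red"])
    print(Table(crosstab).resid_pearson)  # diagonal cells get positive residuals
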
def cramers_v(confusion_matrix):
    """Calculate the bias-corrected Cramér's V statistic for categorical-categorical association."""
    chi2 = chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    # Bergsma's bias correction: shrink phi2 and the effective table dimensions
    phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))
    r_corr = r - ((r-1)**2)/(n-1)
    k_corr = k - ((k-1)**2)/(n-1)
    return np.sqrt(phi2corr / min((k_corr-1), (r_corr-1)))

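
# Worked example (illustrative): a perfectly associated 2x2 table yields a
# corrected Cramér's V close to 1, a uniform table yields exactly 0.
def _demo_cramers_v():
    associated = pd.DataFrame([[20, 0], [0, 20]])
    independent = pd.DataFrame([[10, 10], [10, 10]])
    print(cramers_v(associated))   # ~0.95 (bias correction keeps it below 1)
    print(cramers_v(independent))  # 0.0
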
def plot_cramers_v_heatmap(data, significance_level=0.05):
    """Plot a heatmap of Cramér's V for each pair of categorical variables in a DataFrame."""
    # Initialize a DataFrame to store Cramér's V values
    cramers_v_df = pd.DataFrame(index=data.columns, columns=data.columns, data=np.nan)

    # Compute Cramér's V for each pair of columns
    for col1 in data.columns:
        for col2 in data.columns:
            if col1 != col2:  # Avoid self-comparison
                confusion_matrix = pd.crosstab(data[col1], data[col2])
                chi2, p, dof, expected = chi2_contingency(confusion_matrix)
                # To keep only significant pairs, guard this assignment with
                # `if p < significance_level:`; here all pairs are included.
                cramers_v_df.at[col1, col2] = cramers_v(confusion_matrix)

    # Plot the lower triangle of the symmetric matrix
    plt.figure(figsize=(12, 10), dpi=200)
    mask = np.triu(np.ones_like(cramers_v_df, dtype=bool))  # Mask for the upper triangle
    sns.heatmap(cramers_v_df, annot=True, fmt=".2f", cmap='coolwarm', cbar=True, mask=mask, square=True)
    plt.title(f"Heatmap of Cramér's V (p < {significance_level})")
    return plt

class UAPVisualizer:
    def __init__(self, data=None):
        pass  # Initialization can be added if needed

    def analyze_and_predict(self, data, analyzers, col_names):
        new_data = pd.DataFrame()
        for column, analyzer in zip(col_names, analyzers):
            new_data[f'Analyzer_{column}'] = analyzer.cluster_terms
            print(f"Cluster terms extracted for {column}")

        new_data = new_data.fillna('null').astype('category')
        data_nums = new_data.apply(lambda x: x.cat.codes)

        for col in data_nums.columns:
            try:
                categories = new_data[col].cat.categories
                x_train, x_test, y_train, y_test = train_test_split(data_nums.drop(columns=[col]), data_nums[col], test_size=0.2, random_state=42)
                bst, accuracy, preds = self.train_xgboost(x_train, y_train, x_test, y_test, len(categories))
                self.plot_results(new_data, bst, x_test, y_test, preds, categories, accuracy, col)
            except Exception as e:
                print(f"Error processing {col}: {e}")

    def train_xgboost(self, x_train, y_train, x_test, y_test, num_classes):
        dtrain = xgb.DMatrix(x_train, label=y_train, enable_categorical=True)
        dtest = xgb.DMatrix(x_test, label=y_test, enable_categorical=True)

        params = {'objective': 'multi:softmax', 'num_class': num_classes, 'max_depth': 6, 'eta': 0.3}
        num_round = 100
        bst = xgb.train(params, dtrain, num_boost_round=num_round)
        preds = bst.predict(dtest)
        accuracy = accuracy_score(y_test, preds)

        print(f"XGBoost trained with accuracy: {accuracy:.2f}")
        return bst, accuracy, preds

    def plot_results(self, new_data, bst, x_test, y_test, preds, categories, accuracy, col):
        fig, axs = plt.subplots(1, 3, figsize=(25, 5))
        fig.suptitle(f'{col.split(sep=".")[-1]} prediction', fontsize=35)

        plot_importance(bst, ax=axs[0], importance_type='gain', show_values=False)
        conf_matrix = confusion_matrix(y_test, preds)
        sns.heatmap(conf_matrix, annot=True, fmt='g', cmap='Blues', xticklabels=categories, yticklabels=categories, ax=axs[1])
        axs[1].set_title(f'Confusion Matrix\nAccuracy: {accuracy * 100:.2f}%')

        sorted_features = sorted(bst.get_score(importance_type="gain").items(), key=lambda x: x[1], reverse=True)
        most_important_feature = sorted_features[0][0]
        contingency_table = pd.crosstab(new_data[col], new_data[most_important_feature])
        chi2, p, dof, expected = chi2_contingency(contingency_table)
        print(f"Chi-squared test for {col} and {most_important_feature}: p-value = {p}")

        sns.heatmap(contingency_table, annot=True, cmap='Greens', ax=axs[2])
        axs[2].set_title(f'Contingency Table between {col.split(sep=".")[-1]} and {most_important_feature.split(sep=".")[-1]}\np-value = {p}')

        plt.tight_layout()
        plt.savefig(f"{col}_{accuracy:.2f}_prediction_XGB.jpeg", dpi=300)
        plt.show()

    @staticmethod
    def cramers_v(confusion_matrix):
        chi2 = chi2_contingency(confusion_matrix)[0]
        n = confusion_matrix.sum().sum()
        phi2 = chi2 / n
        r, k = confusion_matrix.shape
        phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))
        r_corr = r - ((r-1)**2)/(n-1)
        k_corr = k - ((k-1)**2)/(n-1)
        return np.sqrt(phi2corr / min((k_corr-1), (r_corr-1)))

    def plot_cramers_v_heatmap(self, data, significance_level=0.05):
        cramers_v_df = pd.DataFrame(index=data.columns, columns=data.columns, data=np.nan)

        for col1 in data.columns:
            for col2 in data.columns:
                if col1 != col2:
                    confusion_matrix = pd.crosstab(data[col1], data[col2])
                    chi2, p, dof, expected = chi2_contingency(confusion_matrix)
                    if p < significance_level:
                        cramers_v_df.at[col1, col2] = UAPVisualizer.cramers_v(confusion_matrix)

        plt.figure(figsize=(10, 8))
        mask = np.triu(np.ones_like(cramers_v_df, dtype=bool))
        sns.heatmap(cramers_v_df, annot=True, fmt=".2f", cmap='coolwarm', cbar=True, mask=mask, square=True)
        plt.title(f"Heatmap of Cramér's V (p < {significance_level})")
        plt.show()

    def plot_treemap(self, df, column, top_n=32):
        # Get the value counts and the top N labels
        value_counts = df[column].value_counts()
        top_labels = value_counts.iloc[:top_n].index

        # Replace all values not in the top N with 'Other'
        revised_column = f'{column}_revised'
        df[revised_column] = np.where(df[column].isin(top_labels), df[column], 'Other')

        # Get the value counts including the 'Other' category
        sizes = df[revised_column].value_counts().values
        labels = df[revised_column].value_counts().index

        # Use the Tableau palette for the tiles
        colors = list(mcolors.TABLEAU_COLORS.values())

        # Get % of each category
        percents = sizes / sizes.sum()

        # Prepare labels with percentages
        labels = [f'{label}\n {percent:.1%}' for label, percent in zip(labels, percents)]

        # Plot the treemap
        squarify.plot(sizes=sizes, label=labels, alpha=0.7, pad=True, color=colors, text_kwargs={'fontsize': 10})

        ax = plt.gca()

        # Iterate over text elements and rectangles (patches) for color adjustment
        for text, rect in zip(ax.texts, ax.patches):
            background_color = rect.get_facecolor()
            r, g, b, _ = mcolors.to_rgba(background_color)
            brightness = np.average([r, g, b])
            text.set_color('white' if brightness < 0.5 else 'black')

            # Adjust font size based on the rectangle's area and wrap long text
            coef = 0.8
            font_size = np.sqrt(rect.get_width() * rect.get_height()) * coef
            text.set_fontsize(font_size)
            wrapped_text = textwrap.fill(text.get_text(), width=20)
            text.set_text(wrapped_text)

        plt.axis('off')
        plt.gca().invert_yaxis()
        plt.gcf().set_size_inches(20, 12)
        plt.show()

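
# Usage sketch (illustrative; synthetic data): plot_treemap collapses the long
# tail of a high-cardinality column into 'Other' before drawing, so only the
# top_n categories get their own tile.
def _demo_plot_treemap():
    rng = np.random.default_rng(0)
    shapes = rng.choice(["orb", "triangle", "disk", "cigar", "rare"],
                        size=300, p=[0.4, 0.3, 0.15, 0.1, 0.05])
    UAPVisualizer().plot_treemap(pd.DataFrame({"shape": shapes}), "shape", top_n=3)
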
class UAPParser:
    def __init__(self, api_key, model="gpt-3.5-turbo-0125", col=None, format_long=None):
        os.environ['OPENAI_API_KEY'] = api_key
        self.client = OpenAI()
        self.model = model
        self.responses = {}
        self.col = col

    def fetch_response(self, description, format_long):
        INITIAL_WAIT_TIME = 5
        MAX_WAIT_TIME = 600
        MAX_RETRIES = 10

        wait_time = INITIAL_WAIT_TIME
        for attempt in range(MAX_RETRIES):
            try:
                response = self.client.chat.completions.create(
                    model=self.model,
                    response_format={"type": "json_object"},
                    messages=[
                        {"role": "system", "content": "You are a helpful assistant which is tasked to help parse data."},
                        {"role": "user", "content": f'Input report: {description}\n\n Parse data following this json structure; leave missing data empty: {format_long}  Output:'}
                    ]
                )
                return response
            except HTTPError as e:
                if 'TooManyRequests' in str(e):
                    time.sleep(wait_time)
                    wait_time = min(wait_time * 2, MAX_WAIT_TIME)  # Exponential backoff
                else:
                    raise
            except Exception as e:
                print(f"Unexpected error: {e}")
                break

        return None  # Return None if all retries fail

    def process_descriptions(self, descriptions, format_long, max_workers=32):
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_desc = {executor.submit(self.fetch_response, desc, format_long): desc for desc in descriptions}

            for future in stqdm(concurrent.futures.as_completed(future_to_desc), total=len(descriptions)):
                desc = future_to_desc[future]
                try:
                    response = future.result()
                    response_text = response.choices[0].message.content if response else None
                    if response_text:
                        self.responses[desc] = response_text
                except Exception as exc:
                    print(f'Error occurred for description {desc}: {exc}')

    def parse_responses(self):
        parsed_responses = {}
        not_parsed = 0
        try:
            for k, v in self.responses.items():
                try:
                    parsed_responses[k] = json.loads(v)
                except json.JSONDecodeError:
                    # Retry after swapping single quotes for double quotes
                    try:
                        parsed_responses[k] = json.loads(v.replace("'", '"'))
                    except json.JSONDecodeError:
                        not_parsed += 1
        except Exception as e:
            print(f"Error parsing responses: {e}")

        print(f"Number of unparsed responses: {not_parsed}")
        print(f"Number of parsed responses: {len(parsed_responses)}")
        return parsed_responses

    def responses_to_df(self, col, parsed_responses):
        parsed_df = pd.DataFrame(parsed_responses).T
        if col is not None:
            parsed_df2 = pd.json_normalize(parsed_df[col])
        else:
            # json_normalize expects records, not a DataFrame
            parsed_df2 = pd.json_normalize(parsed_df.to_dict(orient='records'))
        parsed_df2.index = parsed_df.index
        return parsed_df2

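
# End-to-end sketch (illustrative; requires a valid OpenAI API key, and the
# schema string here is a hypothetical stand-in for the real format_long):
def _demo_uap_parser():
    schema = '{"sighting": {"shape": "", "color": "", "duration": ""}}'
    parser = UAPParser(api_key="sk-...", col="sighting")  # placeholder key
    parser.process_descriptions(
        ["A silent orange orb hovered over the field for ten minutes."],
        schema, max_workers=1)
    parsed = parser.parse_responses()
    df = parser.responses_to_df("sighting", parsed)
    print(df.head())
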
    	
uap_config.kgl
ADDED
@@ -0,0 +1,239 @@
{
  "version": "v1",
  "config": {
    "visState": {
      "filters": [],
      "layers": [
        {
          "id": "cl6utnk",
          "type": "point",
          "config": {
            "dataId": "uap_sightings",
            "label": "Sightings",
            "color": [243, 251, 237],
            "highlightColor": [252, 242, 26, 255],
            "columns": {
              "lat": "latitude",
              "lng": "longitude",
              "altitude": null
            },
            "isVisible": true,
            "visConfig": {
              "radius": 9.7,
              "fixedRadius": false,
              "opacity": 0.8,
              "outline": false,
              "thickness": 2,
              "strokeColor": null,
              "colorRange": {
                "name": "Global Warming",
                "type": "sequential",
                "category": "Uber",
                "colors": ["#5A1846", "#900C3F", "#C70039", "#E3611C", "#F1920E", "#FFC300"]
              },
              "strokeColorRange": {
                "name": "Global Warming",
                "type": "sequential",
                "category": "Uber",
                "colors": ["#5A1846", "#900C3F", "#C70039", "#E3611C", "#F1920E", "#FFC300"]
              },
              "radiusRange": [0, 50],
              "filled": true
            },
            "hidden": false,
            "textLabel": [
              {
                "field": null,
                "color": [255, 255, 255],
                "size": 18,
                "offset": [0, 0],
                "anchor": "start",
                "alignment": "center"
              }
            ]
          },
          "visualChannels": {
            "colorField": null,
            "colorScale": "quantile",
            "strokeColorField": null,
            "strokeColorScale": "quantile",
            "sizeField": null,
            "sizeScale": "linear"
          }
        },
        {
          "id": "bqpymx4",
          "type": "heatmap",
          "config": {
            "dataId": "uap_sightings",
            "label": "HeatMap",
            "color": [130, 154, 227],
            "highlightColor": [252, 242, 26, 255],
            "columns": {
              "lat": "latitude",
              "lng": "longitude"
            },
            "isVisible": true,
            "visConfig": {
              "opacity": 0.8,
              "colorRange": {
                "name": "ColorBrewer BuPu-6",
                "type": "sequential",
                "category": "ColorBrewer",
                "colors": ["#810f7c", "#8856a7", "#8c96c6", "#9ebcda", "#bfd3e6", "#edf8fb"],
                "reversed": true
              },
              "radius": 42.2
            },
            "hidden": false,
            "textLabel": [
              {
                "field": null,
                "color": [255, 255, 255],
                "size": 18,
                "offset": [0, 0],
                "anchor": "start",
                "alignment": "center"
              }
            ]
          },
          "visualChannels": {
            "weightField": null,
            "weightScale": "linear"
          }
        }
      ],
      "interactionConfig": {
        "tooltip": {
          "fieldsToShow": {
            "040l4v2ys": [
              {"name": "0", "format": null},
              {"name": "labels", "format": null},
              {"name": "text", "format": null},
              {"name": "city", "format": null},
              {"name": "country", "format": null}
            ]
          },
          "compareMode": false,
          "compareType": "absolute",
          "enabled": true
        },
        "brush": {"size": 0.5, "enabled": false},
        "geocoder": {"enabled": false},
        "coordinate": {"enabled": false}
      },
      "layerBlending": "normal",
      "splitMaps": [],
      "animationConfig": {"currentTime": null, "speed": 1}
    },
    "mapState": {
      "bearing": 0,
      "dragRotate": false,
      "latitude": 42.965001236045275,
      "longitude": -72.7338233315822,
      "pitch": 0,
      "zoom": 2.1144352988031674,
      "isSplit": false
    },
    "mapStyle": {
      "styleType": "dark",
      "topLayerGroups": {},
      "visibleLayerGroups": {
        "label": true,
        "road": true,
        "border": false,
        "building": true,
        "water": true,
        "land": true,
        "3d building": false
      },
      "threeDBuildingColor": [9.665468314072013, 17.18305478057247, 31.1442867897876],
      "mapStyles": {}
    }
  }
}
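
A minimal sketch of loading this config with the keplergl Python package (assumed available; the toy DataFrame is a hypothetical stand-in for the real sightings data). The key passed in the data dict must match the config's "dataId" of "uap_sightings" for the two layers to bind; note the tooltip section still references an older dataset id ("040l4v2ys").

import json
import pandas as pd
from keplergl import KeplerGl

with open("uap_config.kgl") as f:
    config = json.load(f)

# Toy frame with the latitude/longitude columns the layers expect
df = pd.DataFrame({"latitude": [42.96, 34.05], "longitude": [-72.73, -118.24]})
kepler_map = KeplerGl(height=600, data={"uap_sightings": df}, config=config)
kepler_map.save_to_html(file_name="uap_map.html")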

