import os os.environ['HF_HOME'] = '/tmp' import time import streamlit as st import pandas as pd import io import plotly.express as px import zipfile import json from cryptography.fernet import Fernet from streamlit_extras.stylable_container import stylable_container from typing import Optional from gliner import GLiNER from comet_ml import Experiment st.markdown( """ """, unsafe_allow_html=True ) # --- Page Configuration and UI Elements --- st.set_page_config(layout="wide", page_title="Named Entity Recognition App") st.subheader("Legal", divider="gray") st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary") expander = st.expander("**Important notes on the ProductTag**") expander.write(""" **Named Entities:** This ProductTag predicts twenty-four (24) labels: "Product", "Service", "Organization", "Company", "Currency", "City", "Country", "Region", "Market", "Store", "Shop", "Customer_segment", "Demographics", "Target_market", "Market_segment", "Fiscal_period", "Timeframe", "Date", "Campaign", "Advertisement", "Event", "Media_platform", "Media_channel", "Social_media_platform" Results are presented in easy-to-read tables, visualized in an interactive tree map, pie chart and bar chart, and are available for download along with a Glossary of tags. **How to Use:** Type or paste your text into the text area below, then press Ctrl + Enter. Click the 'Results' button to extract and tag entities in your text data. **Usage Limits:** You can request results unlimited times for one (1) week. **Supported Languages:** English **Technical issues:** If your connection times out, please refresh the page or reopen the app's URL. For any errors or inquiries, please contact us at info@nlpblogs.com """) with st.sidebar: st.subheader("Build your own NER Web App in a minute without writing a single line of code.", divider="gray") st.link_button("NER File Builder", "https://nlpblogs.com/shop/named-entity-recognition-ner/ner-file-builder/", type="primary") st.text("") st.text("") st.write("Use the following code to embed the ProductTag web app on your website. Feel free to adjust the width and height values to fit your page.") code = ''' ''' st.code(code, language="html") # --- Comet ML Setup --- COMET_API_KEY = os.environ.get("COMET_API_KEY") COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE") COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME") comet_initialized = bool(COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME) if not comet_initialized: st.warning("Comet ML not initialized. Check environment variables.") # --- Label Definitions --- # --- Label Definitions --- labels = [ "Plaintiff", "Defendant", "Appellant", "Appellee", "Debtor", "Creditor", "Signer", "Witness", "Courts", "Judges", "Lawyers", "Attorneys", "Statutes", "Laws", "Provisions", "Case_citations", "Legal_documents", "Effective_dates", "Execution_dates", "Expiration_dates", "Money", "Amounts", "Contract_terms", "Case_number", "Witnesses", "Crimes", "Offenses", "Victims" ] # Create a mapping dictionary for labels to categories category_mapping = { "Parties": [ "Plaintiff", "Defendant", "Appellant", "Appellee", "Debtor", "Creditor", "Signer", "Witness" ], "Judicial & Governmental Bodies": [ "Courts", "Judges", "Lawyers", "Attorneys" ], "Legal Instruments & Concepts": [ "Statutes", "Laws", "Provisions", "Case_citations", "Legal_documents" ], "Dates & Timeframes": [ "Effective_dates", "Execution_dates", "Expiration_dates" ], "Financial & Monetary Entities": [ "Money", "Amounts" ], "Contracts": [ "Contract_terms" ], "Court Judgments": [ "Case_number", "Witnesses", ], "Criminal Law:": [ "Crimes", "Offenses", "Victims" ] } # --- Model Loading --- @st.cache_resource def load_ner_model(): """Loads the GLiNER model and caches it.""" try: return GLiNER.from_pretrained("gliner-community/gliner_xxl-v2.5", nested_ner=True, num_gen_sequences=2, gen_constraints= labels) except Exception as e: st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}") st.stop() model = load_ner_model() # Flatten the mapping to a single dictionary reverse_category_mapping = {label: category for category, label_list in category_mapping.items() for label in label_list} # --- Text Input and Clear Button --- text = st.text_area("Type or paste your text below, and then press Ctrl + Enter", height=250, key='my_text_area') def clear_text(): """Clears the text area.""" st.session_state['my_text_area'] = "" st.button("Clear text", on_click=clear_text) st.divider() # --- Results Section --- if st.button("Results"): start_time = time.time() if not text.strip(): st.warning("Please enter some text to extract entities.") else: with st.spinner("Extracting entities...", show_time=True): entities = model.predict_entities(text, labels) df = pd.DataFrame(entities) if not df.empty: df['category'] = df['label'].map(reverse_category_mapping) if comet_initialized: experiment = Experiment( api_key=COMET_API_KEY, workspace=COMET_WORKSPACE, project_name=COMET_PROJECT_NAME, ) experiment.log_parameter("input_text", text) experiment.log_table("predicted_entities", df) st.subheader("Extracted Entities", divider = "gray") st.dataframe(df.style.set_properties(**{"border": "2px solid gray", "color": "blue", "font-size": "16px"})) with st.expander("See Glossary of tags"): st.write(''' - **text**: ['entity extracted from your text data'] - **score**: ['accuracy score; how accurately a tag has been assigned to a given entity'] - **label**: ['label (tag) assigned to a given extracted entity'] - **category**: ['the high-level category for the label'] - **start**: ['index of the start of the corresponding entity'] - **end**: ['index of the end of the corresponding entity'] ''') st.divider() # Tree map st.subheader("Tree map", divider = "gray") fig_treemap = px.treemap(df, path=[px.Constant("all"), 'category', 'label', 'text'], values='score', color='category') fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25)) st.plotly_chart(fig_treemap) # Pie and Bar charts grouped_counts = df['category'].value_counts().reset_index() grouped_counts.columns = ['category', 'count'] col1, col2 = st.columns(2) with col1: st.subheader("Pie chart", divider = "gray") fig_pie = px.pie(grouped_counts, values='count', names='category', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted categories') fig_pie.update_traces(textposition='inside', textinfo='percent+label') st.plotly_chart(fig_pie) with col2: st.subheader("Bar chart", divider = "gray") fig_bar = px.bar(grouped_counts, x="count", y="category", color="category", text_auto=True, title='Occurrences of predicted categories') st.plotly_chart(fig_bar) # Most Frequent Entities st.subheader("Most Frequent Entities", divider="gray") word_counts = df['text'].value_counts().reset_index() word_counts.columns = ['Entity', 'Count'] repeating_entities = word_counts[word_counts['Count'] > 1] if not repeating_entities.empty: st.dataframe(repeating_entities, use_container_width=True) fig_repeating_bar = px.bar(repeating_entities, x='Entity', y='Count', color='Entity') fig_repeating_bar.update_layout(xaxis={'categoryorder': 'total descending'}) st.plotly_chart(fig_repeating_bar) else: st.warning("No entities were found that occur more than once.") # Download Section st.divider() dfa = pd.DataFrame( data={ 'Column Name': ['text', 'label', 'score', 'start', 'end', 'category'], 'Description': [ 'entity extracted from your text data', 'label (tag) assigned to a given extracted entity', 'accuracy score; how accurately a tag has been assigned to a given entity', 'index of the start of the corresponding entity', 'index of the end of the corresponding entity', 'the broader category the entity belongs to', ] } ) buf = io.BytesIO() with zipfile.ZipFile(buf, "w") as myzip: myzip.writestr("Summary of the results.csv", df.to_csv(index=False)) myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False)) with stylable_container( key="download_button", css_styles="""button { background-color: red; border: 1px solid black; padding: 5px; color: white; }""", ): st.download_button( label="Download results and glossary (zip)", data=buf.getvalue(), file_name="markettag_results.zip", mime="application/zip", ) if comet_initialized: experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap_categories") experiment.end() else: # If df is empty st.warning("No entities were found in the provided text.") end_time = time.time() elapsed_time = end_time - start_time st.text("") st.text("") st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")