Update app.py
app.py CHANGED
@@ -15,7 +15,9 @@ import io
 import streamlit.components.v1 as components
 import functools
 import threading
-from typing import Dict, Tuple, List, Any, Optional
+from typing import Dict, Tuple, List, Any, Optional, Union
+from concurrent.futures import ThreadPoolExecutor
+import numpy as np
 
 # Suppress warnings for a clean console
 logging.getLogger("torch").setLevel(logging.CRITICAL)
@@ -23,6 +25,14 @@ logging.getLogger("transformers").setLevel(logging.CRITICAL)
 warnings.filterwarnings("ignore")
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
+# Check if NumPy is available
+try:
+    test_array = np.array([1, 2, 3])
+    torch.from_numpy(test_array)
+except Exception as e:
+    st.error(f"NumPy is not available or incompatible with PyTorch: {str(e)}. Ensure 'numpy' is in requirements.txt and reinstall dependencies.")
+    st.stop()
+
 # Check if CUDA is available, otherwise use CPU
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 print(f"Using device: {device}")
@@ -744,7 +754,7 @@ def display_analysis_results(transcribed_text):
     else:
         st.write("No emotions detected.")
 
-    # Expert analysis section
+    # Expert analysis section
     with st.expander("Expert Analysis", expanded=False):
         col1, col2 = st.columns(2)
 
@@ -791,7 +801,7 @@ def display_analysis_results(transcribed_text):
     elif sentiment == "POSITIVE" and not is_sarcastic:
         st.markdown("π **Clear Positive Expression:** The content expresses genuine positive sentiment without sarcasm.")
 
-    #
+    # Debug expander
     with st.expander("Debug Information", expanded=False):
         st.write("Debugging information for troubleshooting:")
         for i, debug_line in enumerate(st.session_state.debug_info[-10:]):
@@ -802,7 +812,7 @@ def display_analysis_results(transcribed_text):
         if score > 0.01:  # Only show non-negligible scores
             st.text(f"{emotion}: {score:.4f}")
 
-    #
+    # Analysis details expander
    with st.expander("Analysis Details", expanded=False):
         st.write("""
         *How this works:*