# app.py
import gradio as gr
from tabs.speech_stress_analysis import create_voice_stress_tab
from tabs.speech_emotion_recognition import create_emotion_recognition_tab
from tabs.FACS_analysis import create_facs_analysis_tab
from tabs.heart_rate_variability import create_heart_rate_variability_tab
from tabs.deception_detection import create_deception_detection_tab, load_models
import logging
import torch
from typing import Dict
# Configure logging
# Module-level logger named after this module so log records are attributable.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Custom CSS for better styling
# Injected into gr.Blocks(css=...) by create_demo(); styles the page container,
# header, tab navigation, content/result panels, and the disclaimer box.
CUSTOM_CSS = """
/* Global styles */
.gradio-container {
font-family: 'Arial', sans-serif;
max-width: 1200px;
margin: auto;
padding: 20px;
background-color: #f8f9fa;
}
/* Header styling */
h1 {
color: #2c3e50;
text-align: center;
padding: 20px 0;
margin-bottom: 30px;
border-bottom: 2px solid #3498db;
}
/* Tab navigation styling */
.gradio-tabs-nav {
background-color: #ffffff;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
margin-bottom: 20px;
}
/* Content areas */
.content-area {
background: white;
padding: 20px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
margin-top: 20px;
}
/* Results area */
.results-area {
background-color: #ffffff;
padding: 20px;
border-radius: 8px;
margin-top: 20px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
/* Disclaimer styling */
.disclaimer {
background-color: #f8f9fa;
border-left: 4px solid #3498db;
padding: 15px;
margin-top: 30px;
font-size: 0.9em;
color: #666;
}
"""
# HTML content
# Static HTML fragments rendered with gr.HTML() in create_demo():
# HEADER_HTML at the top of the page, DISCLAIMER_HTML at the bottom.
HEADER_HTML = """
<div style="text-align: center; padding: 20px;">
<h1>AI-Driven Multimodal Emotional State Analysis</h1>
<p style="font-size: 1.2em; color: #666;">
Comprehensive analysis of stress, emotion, and truthfulness through facial expressions,
heart rate variability, and speech patterns.
</p>
</div>
"""
DISCLAIMER_HTML = """
<div class="disclaimer">
<h3>Important Notice</h3>
<p>This application provides AI-driven analysis for:</p>
<ul>
<li>Stress and emotion detection</li>
<li>Heart rate variability analysis</li>
<li>Speech pattern analysis</li>
<li>Truth/deception indication</li>
</ul>
<p><strong>Disclaimer:</strong> This tool is for research and informational purposes only.
It should not be used as a substitute for professional medical advice, diagnosis, or treatment.
The deception detection feature is experimental and should not be used as definitive proof
of truthfulness or deception.</p>
</div>
"""
# Tab structure: (main-tab label, [(sub-tab label, tab-builder callable), ...]).
# create_demo() iterates this to build the UI. Every builder is called with no
# arguments, except the Truth/Deception Detection builder, which create_demo()
# special-cases by label and calls with the preloaded models dict.
TAB_STRUCTURE = [
    ("Visual Analysis", [
        ("FACS Analysis", create_facs_analysis_tab),
        ("Heart Rate Variability", create_heart_rate_variability_tab),
        ("Truth/Deception Detection", create_deception_detection_tab)  # receives models (see create_demo)
    ]),
    ("Speech Analysis", [
        ("Speech Stress", create_voice_stress_tab),
        ("Speech Emotion", create_emotion_recognition_tab)
    ])
]
def create_demo(models: Dict[str, torch.nn.Module]):
    """Assemble and return the Gradio Blocks interface.

    Args:
        models: Loaded torch modules keyed by name; forwarded to the
            Truth/Deception Detection tab builder only.

    Returns:
        The configured ``gr.Blocks`` demo (not yet launched).
    """
    # Per-group usage tips rendered beneath each group's sub-tabs.
    help_markdown = {
        "Visual Analysis": """
### Visual Analysis Features
- **FACS Analysis**: Facial Action Coding System for emotion detection
- **Heart Rate Variability**: Stress and wellness indicators
- **Truth/Deception Detection**: Physiological response analysis
**For best results:**
1. Use good lighting
2. Face the camera directly
3. Minimize movement during recording
""",
        "Speech Analysis": """
### Speech Analysis Features
- **Speech Stress**: Voice stress analysis
- **Speech Emotion**: Emotional content detection
**For best results:**
1. Use a quiet environment
2. Speak clearly
3. Avoid background noise
""",
    }

    with gr.Blocks(css=CUSTOM_CSS, title="Multimodal Emotional State Analysis") as demo:
        # Page header
        gr.HTML(HEADER_HTML)
        # Main content area with Tabs
        with gr.Tabs():
            for group_label, children in TAB_STRUCTURE:
                with gr.Tab(group_label):
                    with gr.Column():
                        with gr.Tabs():
                            for child_label, build_tab in children:
                                with gr.Tab(child_label):
                                    # Only the deception tab needs the preloaded models.
                                    if group_label == "Visual Analysis" and child_label == "Truth/Deception Detection":
                                        build_tab(models)
                                    else:
                                        build_tab()
                        # Help information below the sub-tabs.
                        if group_label in help_markdown:
                            gr.Markdown(help_markdown[group_label])
        # Research-use disclaimer
        gr.HTML(DISCLAIMER_HTML)
    return demo
def main():
    """Entry point: load models, build the interface, and serve it."""
    # Models are loaded exactly once and shared with the deception-detection tab.
    models = load_models()
    if not models:
        logger.error("No models loaded. Exiting application.")
        return
    demo = create_demo(models)
    # Enable request queuing (default concurrency), then launch the server.
    demo.queue()
    demo.launch(
        server_name="127.0.0.1",
        server_port=7860,
        share=True,
        debug=True,
        show_error=True,
    )


if __name__ == "__main__":
    main()