Delete app.py
app.py (DELETED)
@@ -1,451 +0,0 @@
#!/usr/bin/env python3
"""
Madverse Music: AI Music Detection Web App
A Streamlit interface for testing AI-generated music detection
"""

import streamlit as st
import torch
import librosa
import numpy as np
import tempfile
import os
from pathlib import Path
import time

# Configure the page
st.set_page_config(
    page_title="Madverse Music: AI Music Detector",
    page_icon="🎵",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS for styling
st.markdown("""
<style>
.main-header {
    font-size: 3rem;
    color: #1f77b4;
    text-align: center;
    margin-bottom: 2rem;
    background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    font-weight: bold;
}

.sub-header {
    font-size: 1.2rem;
    color: #666;
    text-align: center;
    margin-bottom: 3rem;
}

.result-box {
    padding: 1.5rem;
    border-radius: 10px;
    margin: 1rem 0;
    border-left: 5px solid;
}

.real-music {
    background-color: #d4edda;
    border-left-color: #28a745;
    color: #155724;
}

.fake-music {
    background-color: #f8d7da;
    border-left-color: #dc3545;
    color: #721c24;
}

.info-box {
    background-color: #e3f2fd;
    padding: 1rem;
    border-radius: 8px;
    border-left: 4px solid #2196f3;
    margin: 1rem 0;
}

.metric-card {
    background: white;
    padding: 1rem;
    border-radius: 8px;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    text-align: center;
    margin: 0.5rem;
}
</style>
""", unsafe_allow_html=True)

# Initialize session state
if 'model' not in st.session_state:
    st.session_state.model = None
if 'model_loaded' not in st.session_state:
    st.session_state.model_loaded = False

@st.cache_resource
def load_model():
    """Load the AI model (cached for performance)"""
    try:
        from sonics import HFAudioClassifier
        model = HFAudioClassifier.from_pretrained("awsaf49/sonics-spectttra-alpha-120s")
        model.eval()
        return model
    except Exception as e:
        st.error(f"Failed to load model: {e}")
        return None

def classify_audio_file(audio_file, model):
    """Classify uploaded audio file"""
    try:
        # Create temporary file
        with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
            tmp_file.write(audio_file.getvalue())
            tmp_file_path = tmp_file.name

        # Load audio (model uses 16kHz sample rate)
        audio, sr = librosa.load(tmp_file_path, sr=16000)

        # Convert to tensor and add batch dimension
        audio_tensor = torch.FloatTensor(audio).unsqueeze(0)

        # Get prediction
        with torch.no_grad():
            output = model(audio_tensor)

        # Convert logit to probability using sigmoid
        prob = torch.sigmoid(output).item()
        # Classify: prob < 0.5 = Real, prob >= 0.5 = Fake
        if prob < 0.5:
            classification = "Real"
            confidence = (0.5 - prob) * 2  # Distance from the 0.5 threshold, mapped to a 0-1 scale
        else:
            classification = "Fake"
            confidence = (prob - 0.5) * 2  # Distance from the 0.5 threshold, mapped to a 0-1 scale
        # Cleanup
        os.unlink(tmp_file_path)

        return {
            "classification": classification,
            "confidence": min(confidence, 1.0),  # Cap at 1.0
            "probability": prob,
            "raw_score": output.item(),
            "duration": len(audio) / sr
        }

    except Exception as e:
        # Cleanup on error
        if 'tmp_file_path' in locals() and os.path.exists(tmp_file_path):
            os.unlink(tmp_file_path)
        raise e

# Main UI
def main():
    # Header
    st.markdown('<h1 class="main-header">🎵 Madverse Music AI Detector</h1>', unsafe_allow_html=True)
    st.markdown('<p class="sub-header">Upload audio files to detect if they\'re AI-generated or human-created</p>', unsafe_allow_html=True)

    # Sidebar
    with st.sidebar:
        st.header("🔬 Model Information")

        # Model loading status
        if not st.session_state.model_loaded:
            if st.button("🚀 Load AI Model", type="primary"):
                with st.spinner("Loading Madverse Music AI model..."):
                    st.session_state.model = load_model()
                    if st.session_state.model:
                        st.session_state.model_loaded = True
                        st.success("✅ Model loaded successfully!")
                        st.rerun()
                    else:
                        st.error("❌ Failed to load model")
        else:
            st.success("✅ Model Ready")

        # Model specs
        st.markdown("""
**Model Details:**
- **Architecture**: SpecTTTra-α
- **Duration**: 120 seconds
- **Sample Rate**: 16kHz
- **F1 Score**: 97%
- **Sensitivity**: 96%
- **Specificity**: 99%
""")

        if st.button("🔄 Reload Model"):
            st.session_state.model_loaded = False
            st.session_state.model = None
            st.cache_resource.clear()
            st.rerun()

    # Main content
    if st.session_state.model_loaded:
        st.markdown("### 📁 Upload Audio File")

        # File uploader with bulk support
        uploaded_files = st.file_uploader(
            "Choose audio file(s)",
            type=['wav', 'mp3', 'flac', 'm4a', 'ogg'],
            accept_multiple_files=True,
            help="Supported formats: WAV, MP3, FLAC, M4A, OGG (Max 200MB each). You can upload multiple files for bulk analysis."
        )

        if uploaded_files:
            # Display summary info for multiple files
            if len(uploaded_files) == 1:
                # Single file - show detailed info
                uploaded_file = uploaded_files[0]
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.metric("📝 Filename", uploaded_file.name)
                with col2:
                    file_size_mb = uploaded_file.size / (1024 * 1024)
                    st.metric("📊 File Size", f"{file_size_mb:.2f} MB")
                with col3:
                    st.metric("🎵 Format", uploaded_file.type)

                # Audio player
                st.audio(uploaded_file, format='audio/wav')
            else:
                # Multiple files - show summary
                total_size = sum(f.size for f in uploaded_files) / (1024 * 1024)
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.metric("📝 Files Selected", len(uploaded_files))
                with col2:
                    st.metric("📊 Total Size", f"{total_size:.2f} MB")
                with col3:
                    formats = list(set(f.type.split('/')[-1].upper() for f in uploaded_files))
                    st.metric("🎵 Formats", ", ".join(formats))

                # Show file list
                with st.expander(f"📋 File List ({len(uploaded_files)} files)"):
                    for i, file in enumerate(uploaded_files, 1):
                        size_mb = file.size / (1024 * 1024)
                        st.write(f"{i}. **{file.name}** ({size_mb:.2f} MB)")

            # Analyze button
            if len(uploaded_files) == 1:
                button_text = "🔍 Analyze Audio"
            else:
                button_text = f"🔍 Analyze {len(uploaded_files)} Files"

            if st.button(button_text, type="primary", use_container_width=True):
                try:
                    if len(uploaded_files) == 1:
                        # Single file analysis
                        with st.spinner("🧠 Analyzing audio with AI..."):
                            start_time = time.time()
                            result = classify_audio_file(uploaded_files[0], st.session_state.model)
                            processing_time = time.time() - start_time

                        # Display results for single file
                        st.markdown("### 🎯 Analysis Results")

                        # Main result
                        if result["classification"] == "Real":
                            st.markdown(f"""
<div class="result-box real-music">
    <h3>🎤 Human-Created Music</h3>
    <p>This audio appears to be created by human artists.</p>
</div>
""", unsafe_allow_html=True)
                        else:
                            st.markdown(f"""
<div class="result-box fake-music">
    <h3>🤖 AI-Generated Music</h3>
    <p>This audio appears to be generated by AI (like Suno, Udio, etc.)</p>
</div>
""", unsafe_allow_html=True)

                        # Detailed metrics for single file
                        col1, col2, col3, col4 = st.columns(4)

                        with col1:
                            st.metric(
                                "🎯 Classification",
                                result["classification"],
                                help="AI model's prediction"
                            )

                        with col2:
                            confidence_pct = result["confidence"] * 100
                            st.metric(
                                "📊 Confidence",
                                f"{confidence_pct:.1f}%",
                                help="How confident the model is in its prediction"
                            )

                        with col3:
                            st.metric(
                                "⏱️ Duration",
                                f"{result['duration']:.1f}s",
                                help="Audio file duration"
                            )

                        with col4:
                            st.metric(
                                "⚡ Processing Time",
                                f"{processing_time:.2f}s",
                                help="Time taken to analyze"
                            )

                        # Technical details (expandable)
                        with st.expander("🔬 Technical Details"):
                            col1, col2 = st.columns(2)
                            with col1:
                                st.metric("Raw Sigmoid Probability", f"{result['probability']:.4f}")
                                st.metric("Raw Model Output", f"{result['raw_score']:.4f}")
                            with col2:
                                st.info("""
**How it works:**
- Audio is resampled to 16kHz
- Processed by SpecTTTra transformer
- Output < 0.5 = Real, ≥ 0.5 = Fake
""")

                        # Progress bars for visualization
                        st.markdown("### 📈 Confidence Visualization")
                        if result["classification"] == "Real":
                            st.progress(result["confidence"], text=f"Human Confidence: {result['confidence']:.1%}")
                        else:
                            st.progress(result["confidence"], text=f"AI Confidence: {result['confidence']:.1%}")

                    else:
                        # Multiple files analysis
                        progress_bar = st.progress(0, text="🧠 Analyzing files...")
                        results = []
                        start_time = time.time()

                        for i, file in enumerate(uploaded_files):
                            progress = (i + 1) / len(uploaded_files)
                            progress_bar.progress(progress, text=f"🧠 Analyzing file {i+1}/{len(uploaded_files)}: {file.name}")

                            try:
                                file_result = classify_audio_file(file, st.session_state.model)
                                file_result["filename"] = file.name
                                file_result["success"] = True
                                results.append(file_result)
                            except Exception as e:
                                results.append({
                                    "filename": file.name,
                                    "success": False,
                                    "error": str(e)
                                })

                        total_time = time.time() - start_time
                        progress_bar.progress(1.0, text="✅ Analysis complete!")

                        # Display bulk results
                        st.markdown("### 🎯 Bulk Analysis Results")

                        # Summary metrics
                        successful = sum(1 for r in results if r["success"])
                        failed = len(results) - successful

                        col1, col2, col3, col4 = st.columns(4)
                        with col1:
                            st.metric("📁 Total Files", len(results))
                        with col2:
                            st.metric("✅ Successful", successful)
                        with col3:
                            st.metric("❌ Failed", failed)
                        with col4:
                            st.metric("⏱️ Total Time", f"{total_time:.1f}s")

                        # Results breakdown
                        if successful > 0:
                            real_count = sum(1 for r in results if r.get("success") and r.get("classification") == "Real")
                            fake_count = sum(1 for r in results if r.get("success") and r.get("classification") == "Fake")

                            col1, col2 = st.columns(2)
                            with col1:
                                st.markdown(f"""
<div class="result-box real-music">
    <h4>🎤 Human Music: {real_count} files</h4>
</div>
""", unsafe_allow_html=True)
                            with col2:
                                st.markdown(f"""
<div class="result-box fake-music">
    <h4>🤖 AI Music: {fake_count} files</h4>
</div>
""", unsafe_allow_html=True)

                        # Detailed results table
                        st.markdown("### 📋 Detailed Results")

                        for i, result in enumerate(results, 1):
                            with st.expander(f"📄 {i}. {result['filename']}" +
                                             (" ✅" if result["success"] else " ❌")):
                                if result["success"]:
                                    col1, col2, col3 = st.columns(3)
                                    with col1:
                                        st.metric("Classification", result["classification"])
                                    with col2:
                                        st.metric("Confidence", f"{result['confidence']:.1%}")
                                    with col3:
                                        st.metric("Duration", f"{result['duration']:.1f}s")

                                    # Confidence bar
                                    if result["classification"] == "Real":
                                        st.progress(result["confidence"],
                                                    text=f"Human Confidence: {result['confidence']:.1%}")
                                    else:
                                        st.progress(result["confidence"],
                                                    text=f"AI Confidence: {result['confidence']:.1%}")
                                else:
                                    st.error(f"❌ Analysis failed: {result['error']}")

                except Exception as e:
                    st.error(f"❌ Error analyzing audio: {str(e)}")

    else:
        # Model not loaded
        st.markdown("""
<div class="info-box">
    <h3>🚀 Getting Started</h3>
    <p>Click "Load AI Model" in the sidebar to begin analyzing audio files.</p>
    <p><strong>Note:</strong> The first load may take a moment as the model downloads.</p>
</div>
""", unsafe_allow_html=True)

        # Show supported formats
        st.markdown("### 📁 Supported Audio Formats")
        col1, col2, col3, col4, col5 = st.columns(5)
        formats = [
            ("🎵 WAV", ".wav"),
            ("🎧 MP3", ".mp3"),
            ("💿 FLAC", ".flac"),
            ("📱 M4A", ".m4a"),
            ("🎼 OGG", ".ogg")
        ]

        for i, (icon_name, ext) in enumerate(formats):
            with [col1, col2, col3, col4, col5][i]:
                st.markdown(f"""
<div class="metric-card">
    <h4>{icon_name}</h4>
    <p>{ext}</p>
</div>
""", unsafe_allow_html=True)

    # Footer
    st.markdown("---")
    st.markdown("""
<div style="text-align: center; color: #666; padding: 1rem;">
    <p>🎵 <strong>Madverse Music</strong> - AI Music Detection Technology</p>
    <p>Visit <a href="https://madverse.co" target="_blank">madverse.co</a> for more AI music tools</p>
    <p><em>This tool is designed for research and testing purposes.</em></p>
</div>
""", unsafe_allow_html=True)

if __name__ == "__main__":
    main()
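For reference, the inference path that the deleted app wrapped in a Streamlit UI can be exercised on its own. The following is a minimal standalone sketch that reuses the same sonics checkpoint, 16 kHz resampling, sigmoid, and 0.5 threshold shown in app.py above; the script itself (its name and command-line handling) is illustrative and was not part of the repository.

# classify_cli.py - minimal sketch, not part of the deleted Space
import sys

import librosa
import torch
from sonics import HFAudioClassifier


def classify(path):
    # Load the same pretrained SONICS classifier used by app.py
    model = HFAudioClassifier.from_pretrained("awsaf49/sonics-spectttra-alpha-120s")
    model.eval()

    # Resample to the 16 kHz rate the model expects and add a batch dimension
    audio, sr = librosa.load(path, sr=16000)
    audio_tensor = torch.FloatTensor(audio).unsqueeze(0)

    with torch.no_grad():
        prob = torch.sigmoid(model(audio_tensor)).item()

    # Same decision rule as the app: < 0.5 means Real, >= 0.5 means Fake (AI-generated)
    label = "Real" if prob < 0.5 else "Fake"
    print(f"{path}: {label} (sigmoid probability {prob:.4f})")


if __name__ == "__main__":
    classify(sys.argv[1])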