Michael Hu committed · Commit 6613cd9
1 Parent(s): f7492cb
Implement comprehensive error handling
Files changed:
- src/application/error_handling/__init__.py +1 -0
- src/application/error_handling/error_mapper.py +375 -0
- src/application/error_handling/recovery_manager.py +508 -0
- src/application/error_handling/structured_logger.py +362 -0
- src/application/services/audio_processing_service.py +365 -95
- src/application/services/configuration_service.py +72 -67
- tests/unit/application/error_handling/__init__.py +1 -0
- tests/unit/application/error_handling/test_error_mapper.py +155 -0
- tests/unit/application/error_handling/test_structured_logger.py +298 -0
src/application/error_handling/__init__.py
ADDED
@@ -0,0 +1 @@
"""Error handling utilities for the application layer."""

src/application/error_handling/error_mapper.py
ADDED
@@ -0,0 +1,375 @@
"""Error message mapping for user-friendly error handling."""

import logging
from typing import Dict, Optional, Any
from enum import Enum

from ...domain.exceptions import (
    DomainException,
    InvalidAudioFormatException,
    InvalidTextContentException,
    TranslationFailedException,
    SpeechRecognitionException,
    SpeechSynthesisException,
    InvalidVoiceSettingsException,
    AudioProcessingException
)

logger = logging.getLogger(__name__)


class ErrorSeverity(Enum):
    """Error severity levels."""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"


class ErrorCategory(Enum):
    """Error categories for classification."""
    VALIDATION = "validation"
    PROCESSING = "processing"
    SYSTEM = "system"
    EXTERNAL = "external"
    CONFIGURATION = "configuration"


class ErrorMapping:
    """Error mapping configuration."""

    def __init__(self, user_message: str, error_code: str,
                 severity: ErrorSeverity, category: ErrorCategory,
                 recovery_suggestions: Optional[list] = None,
                 technical_details: Optional[str] = None):
        self.user_message = user_message
        self.error_code = error_code
        self.severity = severity
        self.category = category
        self.recovery_suggestions = recovery_suggestions or []
        self.technical_details = technical_details


class ErrorMapper:
    """Maps domain exceptions to user-friendly error messages."""

    def __init__(self):
        """Initialize error mapper with predefined mappings."""
        self._mappings: Dict[type, ErrorMapping] = {
            # Audio format errors
            InvalidAudioFormatException: ErrorMapping(
                user_message="The uploaded audio file format is not supported. Please use WAV, MP3, FLAC, or OGG format.",
                error_code="INVALID_AUDIO_FORMAT",
                severity=ErrorSeverity.MEDIUM,
                category=ErrorCategory.VALIDATION,
                recovery_suggestions=[
                    "Convert your audio file to a supported format (WAV, MP3, FLAC, or OGG)",
                    "Check that your file is not corrupted",
                    "Ensure the file has a proper audio extension"
                ]
            ),

            # Text content errors
            InvalidTextContentException: ErrorMapping(
                user_message="The text content is invalid or too long. Please check your input.",
                error_code="INVALID_TEXT_CONTENT",
                severity=ErrorSeverity.MEDIUM,
                category=ErrorCategory.VALIDATION,
                recovery_suggestions=[
                    "Ensure text is not empty",
                    "Check that text length is within limits (max 10,000 characters)",
                    "Verify text encoding is valid"
                ]
            ),

            # Translation errors
            TranslationFailedException: ErrorMapping(
                user_message="Translation failed. This might be due to unsupported language pairs or service issues.",
                error_code="TRANSLATION_FAILED",
                severity=ErrorSeverity.HIGH,
                category=ErrorCategory.PROCESSING,
                recovery_suggestions=[
                    "Try a different target language",
                    "Check if the source language is correctly detected",
                    "Retry the operation after a few moments",
                    "Ensure the text is in a supported language"
                ]
            ),

            # Speech recognition errors
            SpeechRecognitionException: ErrorMapping(
                user_message="Speech recognition failed. The audio might be unclear or in an unsupported language.",
                error_code="SPEECH_RECOGNITION_FAILED",
                severity=ErrorSeverity.HIGH,
                category=ErrorCategory.PROCESSING,
                recovery_suggestions=[
                    "Ensure audio quality is good (clear speech, minimal background noise)",
                    "Try a different speech recognition model",
                    "Check that the audio contains speech in a supported language",
                    "Verify audio file is not corrupted"
                ]
            ),

            # Speech synthesis errors
            SpeechSynthesisException: ErrorMapping(
                user_message="Text-to-speech generation failed. This might be due to voice availability or text issues.",
                error_code="SPEECH_SYNTHESIS_FAILED",
                severity=ErrorSeverity.HIGH,
                category=ErrorCategory.PROCESSING,
                recovery_suggestions=[
                    "Try a different voice",
                    "Check if the text contains unsupported characters",
                    "Reduce text length if it's very long",
                    "Retry with a different TTS provider"
                ]
            ),

            # Voice settings errors
            InvalidVoiceSettingsException: ErrorMapping(
                user_message="Voice settings are invalid. Please check speed, voice selection, and language settings.",
                error_code="INVALID_VOICE_SETTINGS",
                severity=ErrorSeverity.MEDIUM,
                category=ErrorCategory.VALIDATION,
                recovery_suggestions=[
                    "Ensure speed is between 0.5 and 2.0",
                    "Select a valid voice from the available options",
                    "Check that the language is supported",
                    "Verify voice is available for the selected language"
                ]
            ),

            # General audio processing errors
            AudioProcessingException: ErrorMapping(
                user_message="Audio processing failed. There was an issue processing your audio file.",
                error_code="AUDIO_PROCESSING_FAILED",
                severity=ErrorSeverity.HIGH,
                category=ErrorCategory.PROCESSING,
                recovery_suggestions=[
                    "Check that your audio file is valid and not corrupted",
                    "Try uploading a different audio file",
                    "Ensure file size is within limits",
                    "Retry the operation"
                ]
            ),

            # Validation errors
            ValueError: ErrorMapping(
                user_message="Invalid input provided. Please check your request parameters.",
                error_code="VALIDATION_ERROR",
                severity=ErrorSeverity.MEDIUM,
                category=ErrorCategory.VALIDATION,
                recovery_suggestions=[
                    "Check all required fields are provided",
                    "Verify parameter values are within valid ranges",
                    "Ensure file formats are supported"
                ]
            ),

            # Permission errors
            PermissionError: ErrorMapping(
                user_message="Permission denied. Unable to access required files or directories.",
                error_code="PERMISSION_ERROR",
                severity=ErrorSeverity.HIGH,
                category=ErrorCategory.SYSTEM,
                recovery_suggestions=[
                    "Check file permissions",
                    "Ensure temporary directory is writable",
                    "Contact system administrator if issue persists"
                ]
            ),

            # Memory errors
            MemoryError: ErrorMapping(
                user_message="Insufficient memory to process the request. Try with a smaller file.",
                error_code="MEMORY_ERROR",
                severity=ErrorSeverity.HIGH,
                category=ErrorCategory.SYSTEM,
                recovery_suggestions=[
                    "Use a smaller audio file",
                    "Try processing shorter audio segments",
                    "Retry the operation later"
                ]
            ),

            # Timeout errors
            TimeoutError: ErrorMapping(
                user_message="Processing timed out. The operation took too long to complete.",
                error_code="TIMEOUT_ERROR",
                severity=ErrorSeverity.HIGH,
                category=ErrorCategory.SYSTEM,
                recovery_suggestions=[
                    "Try with a shorter audio file",
                    "Retry the operation",
                    "Check system load and try again later"
                ]
            )
        }

        # Default mapping for unknown errors
        self._default_mapping = ErrorMapping(
            user_message="An unexpected error occurred. Please try again or contact support.",
            error_code="UNKNOWN_ERROR",
            severity=ErrorSeverity.CRITICAL,
            category=ErrorCategory.SYSTEM,
            recovery_suggestions=[
                "Retry the operation",
                "Check your input parameters",
                "Contact support if the issue persists"
            ]
        )

    def map_exception(self, exception: Exception, context: Optional[Dict[str, Any]] = None) -> ErrorMapping:
        """
        Map an exception to user-friendly error information.

        Args:
            exception: The exception to map
            context: Optional context information

        Returns:
            ErrorMapping: Mapped error information
        """
        try:
            # Get mapping for exception type
            mapping = self._mappings.get(type(exception))

            if mapping is None:
                # Try parent classes for domain exceptions
                for exc_type in type(exception).__mro__:
                    if exc_type in self._mappings:
                        mapping = self._mappings[exc_type]
                        break

            if mapping is None:
                # Use default mapping
                mapping = self._default_mapping
                logger.warning(f"No mapping found for exception type: {type(exception).__name__}")

            # Add context-specific information if available
            if context:
                mapping = self._enhance_mapping_with_context(mapping, exception, context)

            logger.debug(f"Mapped {type(exception).__name__} to {mapping.error_code}")
            return mapping

        except Exception as e:
            logger.error(f"Error mapping exception {type(exception).__name__}: {e}")
            return self._default_mapping

    def _enhance_mapping_with_context(self, mapping: ErrorMapping, exception: Exception,
                                      context: Dict[str, Any]) -> ErrorMapping:
        """
        Enhance error mapping with context-specific information.

        Args:
            mapping: Base error mapping
            exception: Original exception
            context: Context information

        Returns:
            ErrorMapping: Enhanced error mapping
        """
        try:
            # Create a copy of the mapping to avoid modifying the original
            enhanced_mapping = ErrorMapping(
                user_message=mapping.user_message,
                error_code=mapping.error_code,
                severity=mapping.severity,
                category=mapping.category,
                recovery_suggestions=mapping.recovery_suggestions.copy(),
                technical_details=mapping.technical_details
            )

            # Add context-specific enhancements
            if 'file_name' in context:
                enhanced_mapping.user_message += f" (File: {context['file_name']})"

            if 'operation' in context:
                enhanced_mapping.technical_details = f"Failed during {context['operation']}: {str(exception)}"

            if 'correlation_id' in context:
                enhanced_mapping.technical_details = (
                    f"{enhanced_mapping.technical_details or str(exception)} "
                    f"[ID: {context['correlation_id']}]"
                )

            # Add provider-specific suggestions
            if 'provider' in context:
                provider = context['provider']
                if isinstance(exception, (SpeechRecognitionException, SpeechSynthesisException, TranslationFailedException)):
                    enhanced_mapping.recovery_suggestions.append(f"Try switching from {provider} to an alternative provider")

            # Add file size specific suggestions
            if 'file_size' in context and context['file_size'] > 50 * 1024 * 1024:  # 50MB
                enhanced_mapping.recovery_suggestions.insert(0, "Try with a smaller file (under 50MB)")

            return enhanced_mapping

        except Exception as e:
            logger.error(f"Error enhancing mapping with context: {e}")
            return mapping

    def get_error_code_from_exception(self, exception: Exception) -> str:
        """
        Get error code from exception.

        Args:
            exception: Exception to get code for

        Returns:
            str: Error code
        """
        mapping = self.map_exception(exception)
        return mapping.error_code

    def get_user_message_from_exception(self, exception: Exception,
                                        context: Optional[Dict[str, Any]] = None) -> str:
        """
        Get user-friendly message from exception.

        Args:
            exception: Exception to get message for
            context: Optional context information

        Returns:
            str: User-friendly error message
        """
        mapping = self.map_exception(exception, context)
        return mapping.user_message

    def get_recovery_suggestions(self, exception: Exception,
                                 context: Optional[Dict[str, Any]] = None) -> list:
        """
        Get recovery suggestions for an exception.

        Args:
            exception: Exception to get suggestions for
            context: Optional context information

        Returns:
            list: List of recovery suggestions
        """
        mapping = self.map_exception(exception, context)
        return mapping.recovery_suggestions

    def add_custom_mapping(self, exception_type: type, mapping: ErrorMapping) -> None:
        """
        Add a custom error mapping.

        Args:
            exception_type: Exception type to map
            mapping: Error mapping configuration
        """
        self._mappings[exception_type] = mapping
        logger.info(f"Added custom mapping for {exception_type.__name__}")

    def get_all_error_codes(self) -> list:
        """
        Get all available error codes.

        Returns:
            list: List of all error codes
        """
        codes = [mapping.error_code for mapping in self._mappings.values()]
        codes.append(self._default_mapping.error_code)
        return list(set(codes))
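
For illustration only, not part of the commit: a minimal sketch of how ErrorMapper might be used at an application boundary. The import path and the context values are assumptions for the example (they presume the package and its domain.exceptions module are importable); the TimeoutError is just a stand-in failure.

    # Hypothetical usage sketch (not part of this commit).
    from src.application.error_handling.error_mapper import ErrorMapper

    mapper = ErrorMapper()
    try:
        raise TimeoutError("ASR backend did not respond")  # stand-in failure
    except Exception as exc:
        mapping = mapper.map_exception(
            exc,
            context={"operation": "speech_recognition", "correlation_id": "demo-123"},
        )
        print(mapping.error_code)            # TIMEOUT_ERROR
        print(mapping.user_message)          # message safe to show to the user
        print(mapping.recovery_suggestions)  # actionable hints for the UI
        print(mapping.technical_details)     # includes the operation and [ID: demo-123]
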
src/application/error_handling/recovery_manager.py
ADDED
@@ -0,0 +1,508 @@
"""Error recovery and fallback mechanisms."""

import logging
import time
from typing import Dict, List, Optional, Any, Callable, TypeVar, Union
from dataclasses import dataclass
from enum import Enum
from functools import wraps

from .structured_logger import StructuredLogger, LogContext, get_structured_logger
from ...domain.exceptions import (
    DomainException,
    SpeechRecognitionException,
    TranslationFailedException,
    SpeechSynthesisException,
    AudioProcessingException
)

logger = get_structured_logger(__name__)

T = TypeVar('T')


class RecoveryStrategy(Enum):
    """Recovery strategy types."""
    RETRY = "retry"
    FALLBACK = "fallback"
    CIRCUIT_BREAKER = "circuit_breaker"
    GRACEFUL_DEGRADATION = "graceful_degradation"


class CircuitBreakerState(Enum):
    """Circuit breaker states."""
    CLOSED = "closed"
    OPEN = "open"
    HALF_OPEN = "half_open"


@dataclass
class RetryConfig:
    """Retry configuration."""
    max_attempts: int = 3
    base_delay: float = 1.0
    max_delay: float = 60.0
    exponential_backoff: bool = True
    jitter: bool = True
    retryable_exceptions: Optional[List[type]] = None


@dataclass
class CircuitBreakerConfig:
    """Circuit breaker configuration."""
    failure_threshold: int = 5
    recovery_timeout: float = 60.0
    success_threshold: int = 2
    timeout: float = 30.0


@dataclass
class FallbackConfig:
    """Fallback configuration."""
    fallback_providers: List[str]
    fallback_function: Optional[Callable] = None
    enable_graceful_degradation: bool = True


class CircuitBreaker:
    """Circuit breaker implementation."""

    def __init__(self, config: CircuitBreakerConfig, name: str = "default"):
        """
        Initialize circuit breaker.

        Args:
            config: Circuit breaker configuration
            name: Circuit breaker name
        """
        self.config = config
        self.name = name
        self.state = CircuitBreakerState.CLOSED
        self.failure_count = 0
        self.success_count = 0
        self.last_failure_time = 0.0
        self.logger = get_structured_logger(f"{__name__}.CircuitBreaker.{name}")

    def call(self, func: Callable[..., T], *args, **kwargs) -> T:
        """
        Call function through circuit breaker.

        Args:
            func: Function to call
            *args: Function arguments
            **kwargs: Function keyword arguments

        Returns:
            T: Function result

        Raises:
            Exception: If circuit is open or function fails
        """
        context = LogContext(
            correlation_id=kwargs.get('correlation_id', 'unknown'),
            component=f"circuit_breaker_{self.name}",
            operation=func.__name__
        )

        if self.state == CircuitBreakerState.OPEN:
            if time.time() - self.last_failure_time < self.config.recovery_timeout:
                self.logger.warning(
                    f"Circuit breaker {self.name} is OPEN, rejecting call",
                    context=context
                )
                raise AudioProcessingException(f"Circuit breaker {self.name} is open")
            else:
                self.state = CircuitBreakerState.HALF_OPEN
                self.success_count = 0
                self.logger.info(
                    f"Circuit breaker {self.name} transitioning to HALF_OPEN",
                    context=context
                )

        try:
            result = func(*args, **kwargs)
            self._on_success(context)
            return result

        except Exception as e:
            self._on_failure(e, context)
            raise

    def _on_success(self, context: LogContext) -> None:
        """Handle successful call."""
        if self.state == CircuitBreakerState.HALF_OPEN:
            self.success_count += 1
            if self.success_count >= self.config.success_threshold:
                self.state = CircuitBreakerState.CLOSED
                self.failure_count = 0
                self.logger.info(
                    f"Circuit breaker {self.name} transitioning to CLOSED",
                    context=context
                )
        elif self.state == CircuitBreakerState.CLOSED:
            self.failure_count = 0

    def _on_failure(self, exception: Exception, context: LogContext) -> None:
        """Handle failed call."""
        self.failure_count += 1
        self.last_failure_time = time.time()

        if self.state == CircuitBreakerState.HALF_OPEN:
            self.state = CircuitBreakerState.OPEN
            self.logger.warning(
                f"Circuit breaker {self.name} transitioning to OPEN after failure in HALF_OPEN",
                context=context,
                exception=exception
            )
        elif (self.state == CircuitBreakerState.CLOSED and
              self.failure_count >= self.config.failure_threshold):
            self.state = CircuitBreakerState.OPEN
            self.logger.warning(
                f"Circuit breaker {self.name} transitioning to OPEN after {self.failure_count} failures",
                context=context,
                exception=exception
            )


class RecoveryManager:
    """Manages error recovery and fallback mechanisms."""

    def __init__(self):
        """Initialize recovery manager."""
        self.circuit_breakers: Dict[str, CircuitBreaker] = {}
        self.logger = get_structured_logger(__name__)

        # Default retry configurations for different exception types
        self.default_retry_configs = {
            SpeechRecognitionException: RetryConfig(
                max_attempts=2,
                base_delay=2.0,
                retryable_exceptions=[SpeechRecognitionException]
            ),
            TranslationFailedException: RetryConfig(
                max_attempts=3,
                base_delay=1.0,
                retryable_exceptions=[TranslationFailedException]
            ),
            SpeechSynthesisException: RetryConfig(
                max_attempts=2,
                base_delay=1.5,
                retryable_exceptions=[SpeechSynthesisException]
            ),
            ConnectionError: RetryConfig(
                max_attempts=3,
                base_delay=2.0,
                exponential_backoff=True,
                retryable_exceptions=[ConnectionError, TimeoutError]
            )
        }

    def get_circuit_breaker(self, name: str, config: Optional[CircuitBreakerConfig] = None) -> CircuitBreaker:
        """
        Get or create circuit breaker.

        Args:
            name: Circuit breaker name
            config: Optional configuration

        Returns:
            CircuitBreaker: Circuit breaker instance
        """
        if name not in self.circuit_breakers:
            if config is None:
                config = CircuitBreakerConfig()
            self.circuit_breakers[name] = CircuitBreaker(config, name)

        return self.circuit_breakers[name]

    def retry_with_backoff(self, func: Callable[..., T],
                           config: Optional[RetryConfig] = None,
                           correlation_id: Optional[str] = None,
                           *args, **kwargs) -> T:
        """
        Execute function with retry and exponential backoff.

        Args:
            func: Function to execute
            config: Retry configuration
            correlation_id: Correlation ID for logging
            *args: Function arguments
            **kwargs: Function keyword arguments

        Returns:
            T: Function result

        Raises:
            Exception: If all retry attempts fail
        """
        if config is None:
            config = RetryConfig()

        context = LogContext(
            correlation_id=correlation_id or 'unknown',
            component='retry_manager',
            operation=func.__name__
        )

        last_exception = None

        for attempt in range(config.max_attempts):
            try:
                if attempt > 0:
                    delay = self._calculate_delay(attempt, config)
                    self.logger.info(
                        f"Retrying {func.__name__} (attempt {attempt + 1}/{config.max_attempts}) after {delay:.2f}s delay",
                        context=context
                    )
                    time.sleep(delay)

                result = func(*args, **kwargs)

                if attempt > 0:
                    self.logger.info(
                        f"Retry successful for {func.__name__} on attempt {attempt + 1}",
                        context=context
                    )

                return result

            except Exception as e:
                last_exception = e

                # Check if exception is retryable
                if not self._is_retryable_exception(e, config):
                    self.logger.error(
                        f"Non-retryable exception in {func.__name__}: {type(e).__name__}",
                        context=context,
                        exception=e
                    )
                    raise

                self.logger.warning(
                    f"Attempt {attempt + 1}/{config.max_attempts} failed for {func.__name__}: {e}",
                    context=context,
                    exception=e
                )

        # All attempts failed
        self.logger.error(
            f"All {config.max_attempts} retry attempts failed for {func.__name__}",
            context=context,
            exception=last_exception
        )
        raise last_exception

    def execute_with_fallback(self, primary_func: Callable[..., T],
                              fallback_funcs: List[Callable[..., T]],
                              correlation_id: Optional[str] = None,
                              *args, **kwargs) -> T:
        """
        Execute function with fallback options.

        Args:
            primary_func: Primary function to execute
            fallback_funcs: List of fallback functions
            correlation_id: Correlation ID for logging
            *args: Function arguments
            **kwargs: Function keyword arguments

        Returns:
            T: Function result

        Raises:
            Exception: If all functions fail
        """
        context = LogContext(
            correlation_id=correlation_id or 'unknown',
            component='fallback_manager',
            operation=primary_func.__name__
        )

        # Try primary function first
        try:
            result = primary_func(*args, **kwargs)
            return result

        except Exception as e:
            self.logger.warning(
                f"Primary function {primary_func.__name__} failed, trying fallbacks",
                context=context,
                exception=e
            )

            last_exception = e

        # Try fallback functions
        for i, fallback_func in enumerate(fallback_funcs):
            try:
                self.logger.info(
                    f"Trying fallback {i + 1}/{len(fallback_funcs)}: {fallback_func.__name__}",
                    context=context
                )

                result = fallback_func(*args, **kwargs)

                self.logger.info(
                    f"Fallback {fallback_func.__name__} succeeded",
                    context=context
                )

                return result

            except Exception as fallback_e:
                last_exception = fallback_e
                self.logger.warning(
                    f"Fallback {fallback_func.__name__} failed: {fallback_e}",
                    context=context,
                    exception=fallback_e
                )

        # All functions failed
        self.logger.error(
            f"All functions failed (primary + {len(fallback_funcs)} fallbacks)",
            context=context,
            exception=last_exception
        )
        raise last_exception

    def execute_with_circuit_breaker(self, func: Callable[..., T],
                                     circuit_breaker_name: str,
                                     config: Optional[CircuitBreakerConfig] = None,
                                     correlation_id: Optional[str] = None,
                                     *args, **kwargs) -> T:
        """
        Execute function with circuit breaker protection.

        Args:
            func: Function to execute
            circuit_breaker_name: Circuit breaker name
            config: Optional circuit breaker configuration
            correlation_id: Correlation ID for logging
            *args: Function arguments
            **kwargs: Function keyword arguments

        Returns:
            T: Function result

        Raises:
            Exception: If circuit is open or function fails
        """
        circuit_breaker = self.get_circuit_breaker(circuit_breaker_name, config)

        # Add correlation ID to kwargs for circuit breaker logging
        if correlation_id:
            kwargs['correlation_id'] = correlation_id

        return circuit_breaker.call(func, *args, **kwargs)

    def _calculate_delay(self, attempt: int, config: RetryConfig) -> float:
        """
        Calculate retry delay with exponential backoff and jitter.

        Args:
            attempt: Attempt number (0-based)
            config: Retry configuration

        Returns:
            float: Delay in seconds
        """
        if config.exponential_backoff:
            delay = config.base_delay * (2 ** attempt)
        else:
            delay = config.base_delay

        # Apply maximum delay limit
        delay = min(delay, config.max_delay)

        # Add jitter to prevent thundering herd
        if config.jitter:
            import random
            delay *= (0.5 + random.random() * 0.5)

        return delay

    def _is_retryable_exception(self, exception: Exception, config: RetryConfig) -> bool:
        """
        Check if exception is retryable.

        Args:
            exception: Exception to check
            config: Retry configuration

        Returns:
            bool: True if exception is retryable
        """
        if config.retryable_exceptions is None:
            # Default retryable exceptions
            retryable_types = (
                ConnectionError,
                TimeoutError,
                SpeechRecognitionException,
                TranslationFailedException,
                SpeechSynthesisException
            )
            return isinstance(exception, retryable_types)

        return any(isinstance(exception, exc_type) for exc_type in config.retryable_exceptions)

    def get_retry_config_for_exception(self, exception: Exception) -> RetryConfig:
        """
        Get appropriate retry configuration for exception type.

        Args:
            exception: Exception to get config for

        Returns:
            RetryConfig: Retry configuration
        """
        for exc_type, config in self.default_retry_configs.items():
            if isinstance(exception, exc_type):
                return config

        # Return default config
        return RetryConfig()


def with_retry(config: Optional[RetryConfig] = None):
    """
    Decorator for automatic retry with backoff.

    Args:
        config: Optional retry configuration

    Returns:
        Decorated function
    """
    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        @wraps(func)
        def wrapper(*args, **kwargs) -> T:
            recovery_manager = RecoveryManager()
            correlation_id = kwargs.get('correlation_id')
            return recovery_manager.retry_with_backoff(
                func, config, correlation_id, *args, **kwargs
            )
        return wrapper
    return decorator


def with_circuit_breaker(name: str, config: Optional[CircuitBreakerConfig] = None):
    """
    Decorator for circuit breaker protection.

    Args:
        name: Circuit breaker name
        config: Optional circuit breaker configuration

    Returns:
        Decorated function
    """
    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        @wraps(func)
        def wrapper(*args, **kwargs) -> T:
            recovery_manager = RecoveryManager()
            correlation_id = kwargs.get('correlation_id')
            return recovery_manager.execute_with_circuit_breaker(
                func, name, config, correlation_id, *args, **kwargs
            )
        return wrapper
    return decorator
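
For illustration only, not part of the commit: a sketch of RecoveryManager combining retry-with-backoff and a fallback chain. The functions primary_transcribe and backup_transcribe are hypothetical stand-ins for provider calls, and the import path is an assumption.

    # Hypothetical usage sketch (not part of this commit).
    from src.application.error_handling.recovery_manager import RecoveryManager, RetryConfig

    manager = RecoveryManager()

    def primary_transcribe():   # stand-in for the preferred STT provider
        raise ConnectionError("provider unreachable")

    def backup_transcribe():    # stand-in for an alternative provider
        return "transcribed text"

    # Retry the primary provider with exponential backoff, then fall back.
    def primary_with_retry():
        return manager.retry_with_backoff(
            primary_transcribe,
            RetryConfig(max_attempts=2, base_delay=0.1),
            "demo-123",
        )

    text = manager.execute_with_fallback(
        primary_with_retry,
        [backup_transcribe],
        correlation_id="demo-123",
    )
    # -> "transcribed text" once the primary path is exhausted
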
src/application/error_handling/structured_logger.py
ADDED
@@ -0,0 +1,362 @@
"""Structured logging with correlation IDs and context management."""

import logging
import json
import uuid
import time
from typing import Dict, Any, Optional, Union
from datetime import datetime
from contextvars import ContextVar
from dataclasses import dataclass, asdict
from enum import Enum

# Context variable for correlation ID
correlation_id_context: ContextVar[Optional[str]] = ContextVar('correlation_id', default=None)


class LogLevel(Enum):
    """Log levels."""
    DEBUG = "DEBUG"
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
    CRITICAL = "CRITICAL"


@dataclass
class LogContext:
    """Structured log context."""
    correlation_id: str
    operation: Optional[str] = None
    component: Optional[str] = None
    user_id: Optional[str] = None
    session_id: Optional[str] = None
    request_id: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {k: v for k, v in asdict(self).items() if v is not None}


class StructuredLogger:
    """Structured logger with correlation ID support."""

    def __init__(self, name: str, enable_json_logging: bool = True):
        """
        Initialize structured logger.

        Args:
            name: Logger name
            enable_json_logging: Whether to use JSON format
        """
        self.logger = logging.getLogger(name)
        self.enable_json_logging = enable_json_logging
        self._setup_formatter()

    def _setup_formatter(self) -> None:
        """Setup log formatter."""
        if self.enable_json_logging:
            formatter = JsonFormatter()
        else:
            formatter = ContextFormatter()

        # Apply formatter to all handlers
        for handler in self.logger.handlers:
            handler.setFormatter(formatter)

    def _get_log_data(self, message: str, level: str,
                      context: Optional[LogContext] = None,
                      extra: Optional[Dict[str, Any]] = None,
                      exception: Optional[Exception] = None) -> Dict[str, Any]:
        """
        Prepare log data structure.

        Args:
            message: Log message
            level: Log level
            context: Log context
            extra: Extra data
            exception: Exception if any

        Returns:
            Dict[str, Any]: Structured log data
        """
        # Get correlation ID from context or generate new one
        correlation_id = correlation_id_context.get()
        if not correlation_id and context:
            correlation_id = context.correlation_id
        if not correlation_id:
            correlation_id = str(uuid.uuid4())

        log_data = {
            'timestamp': datetime.utcnow().isoformat() + 'Z',
            'level': level,
            'message': message,
            'correlation_id': correlation_id,
            'logger_name': self.logger.name
        }

        # Add context information
        if context:
            log_data.update(context.to_dict())

        # Add extra data
        if extra:
            log_data['extra'] = extra

        # Add exception information
        if exception:
            log_data['exception'] = {
                'type': type(exception).__name__,
                'message': str(exception),
                'module': getattr(exception, '__module__', None)
            }

            # Add stack trace for debugging
            import traceback
            log_data['exception']['traceback'] = traceback.format_exc()

        return log_data

    def debug(self, message: str, context: Optional[LogContext] = None,
              extra: Optional[Dict[str, Any]] = None) -> None:
        """Log debug message."""
        if self.logger.isEnabledFor(logging.DEBUG):
            log_data = self._get_log_data(message, LogLevel.DEBUG.value, context, extra)
            # Nest the structured payload under 'extra' so it is exposed as
            # record.extra (read by the formatters below) and cannot clash
            # with reserved LogRecord fields such as 'message'.
            self.logger.debug(message, extra={'extra': log_data})

    def info(self, message: str, context: Optional[LogContext] = None,
             extra: Optional[Dict[str, Any]] = None) -> None:
        """Log info message."""
        if self.logger.isEnabledFor(logging.INFO):
            log_data = self._get_log_data(message, LogLevel.INFO.value, context, extra)
            self.logger.info(message, extra={'extra': log_data})

    def warning(self, message: str, context: Optional[LogContext] = None,
                extra: Optional[Dict[str, Any]] = None,
                exception: Optional[Exception] = None) -> None:
        """Log warning message."""
        if self.logger.isEnabledFor(logging.WARNING):
            log_data = self._get_log_data(message, LogLevel.WARNING.value, context, extra, exception)
            self.logger.warning(message, extra={'extra': log_data})

    def error(self, message: str, context: Optional[LogContext] = None,
              extra: Optional[Dict[str, Any]] = None,
              exception: Optional[Exception] = None) -> None:
        """Log error message."""
        if self.logger.isEnabledFor(logging.ERROR):
            log_data = self._get_log_data(message, LogLevel.ERROR.value, context, extra, exception)
            self.logger.error(message, extra={'extra': log_data})

    def critical(self, message: str, context: Optional[LogContext] = None,
                 extra: Optional[Dict[str, Any]] = None,
                 exception: Optional[Exception] = None) -> None:
        """Log critical message."""
        if self.logger.isEnabledFor(logging.CRITICAL):
            log_data = self._get_log_data(message, LogLevel.CRITICAL.value, context, extra, exception)
            self.logger.critical(message, extra={'extra': log_data})

    def log_operation_start(self, operation: str, context: Optional[LogContext] = None,
                            extra: Optional[Dict[str, Any]] = None) -> str:
        """
        Log operation start and return correlation ID.

        Args:
            operation: Operation name
            context: Log context
            extra: Extra data

        Returns:
            str: Correlation ID for the operation
        """
        correlation_id = str(uuid.uuid4())

        if context:
            context.correlation_id = correlation_id
            context.operation = operation
        else:
            context = LogContext(correlation_id=correlation_id, operation=operation)

        # Set correlation ID in context
        correlation_id_context.set(correlation_id)

        self.info(f"Operation started: {operation}", context, extra)
        return correlation_id

    def log_operation_end(self, operation: str, correlation_id: str,
                          success: bool = True, duration: Optional[float] = None,
                          context: Optional[LogContext] = None,
                          extra: Optional[Dict[str, Any]] = None) -> None:
        """
        Log operation end.

        Args:
            operation: Operation name
            correlation_id: Correlation ID
            success: Whether operation succeeded
            duration: Operation duration in seconds
            context: Log context
            extra: Extra data
        """
        if context:
            context.correlation_id = correlation_id
            context.operation = operation
        else:
            context = LogContext(correlation_id=correlation_id, operation=operation)

        # Add performance data
        if extra is None:
            extra = {}
        extra['success'] = success
        if duration is not None:
            extra['duration_seconds'] = duration

        status = "completed successfully" if success else "failed"
        message = f"Operation {status}: {operation}"

        if success:
            self.info(message, context, extra)
        else:
            self.error(message, context, extra)

    def log_performance_metric(self, metric_name: str, value: Union[int, float],
                               unit: str = None, context: Optional[LogContext] = None,
                               extra: Optional[Dict[str, Any]] = None) -> None:
        """
        Log performance metric.

        Args:
            metric_name: Name of the metric
            value: Metric value
            unit: Unit of measurement
            context: Log context
            extra: Extra data
        """
        if extra is None:
            extra = {}

        extra['metric'] = {
            'name': metric_name,
            'value': value,
            'unit': unit,
            'timestamp': time.time()
        }

        message = f"Performance metric: {metric_name}={value}"
        if unit:
            message += f" {unit}"

        self.info(message, context, extra)


class JsonFormatter(logging.Formatter):
    """JSON log formatter."""

    def format(self, record: logging.LogRecord) -> str:
        """Format log record as JSON."""
        try:
            # Get structured data from extra
            log_data = getattr(record, 'extra', {})

            # Ensure basic fields are present
            if 'timestamp' not in log_data:
                log_data['timestamp'] = datetime.utcnow().isoformat() + 'Z'
            if 'level' not in log_data:
                log_data['level'] = record.levelname
            if 'message' not in log_data:
                log_data['message'] = record.getMessage()
            if 'logger_name' not in log_data:
                log_data['logger_name'] = record.name

            # Add standard log record fields
            log_data.update({
                'module': record.module,
                'function': record.funcName,
                'line': record.lineno,
                'thread': record.thread,
                'process': record.process
            })

            return json.dumps(log_data, default=str, ensure_ascii=False)

        except Exception as e:
            # Fallback to standard formatting
            return f"JSON formatting error: {e} | Original: {record.getMessage()}"


class ContextFormatter(logging.Formatter):
    """Context-aware log formatter."""

    def __init__(self):
        """Initialize formatter."""
        super().__init__(
            fmt='%(asctime)s - %(name)s - %(levelname)s - [%(correlation_id)s] - %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )

    def format(self, record: logging.LogRecord) -> str:
        """Format log record with context."""
        try:
            # Get correlation ID from context or record
            correlation_id = correlation_id_context.get()
            if not correlation_id:
                extra_data = getattr(record, 'extra', {})
                correlation_id = extra_data.get('correlation_id', 'unknown')

            # Add correlation ID to record
            record.correlation_id = correlation_id

            # Add context information if available
            extra_data = getattr(record, 'extra', {})
            if 'operation' in extra_data:
                record.message = f"[{extra_data['operation']}] {record.getMessage()}"

            return super().format(record)

        except Exception as e:
            # Fallback formatting
            return f"Formatting error: {e} | Original: {record.getMessage()}"


def get_structured_logger(name: str, enable_json_logging: bool = True) -> StructuredLogger:
    """
    Get a structured logger instance.

    Args:
        name: Logger name
        enable_json_logging: Whether to use JSON format

    Returns:
        StructuredLogger: Configured structured logger
    """
    return StructuredLogger(name, enable_json_logging)


def set_correlation_id(correlation_id: str) -> None:
    """
    Set correlation ID in context.

    Args:
        correlation_id: Correlation ID to set
    """
    correlation_id_context.set(correlation_id)


def get_correlation_id() -> Optional[str]:
    """
    Get current correlation ID from context.

    Returns:
        Optional[str]: Current correlation ID
    """
    return correlation_id_context.get()


def generate_correlation_id() -> str:
    """
    Generate a new correlation ID.

    Returns:
        str: New correlation ID
    """
    return str(uuid.uuid4())
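
For illustration only, not part of the commit: a sketch of the structured logger and correlation-ID helpers wrapped around one operation. The logger name, operation name, and import path are made up for the example.

    # Hypothetical usage sketch (not part of this commit).
    import time
    from src.application.error_handling.structured_logger import (
        LogContext, get_structured_logger, get_correlation_id
    )

    log = get_structured_logger("demo", enable_json_logging=True)

    corr_id = log.log_operation_start("audio_processing")
    started = time.time()
    try:
        pass  # ... the actual pipeline work would run here ...
        log.log_operation_end("audio_processing", corr_id, success=True,
                              duration=time.time() - started)
    except Exception as exc:
        log.error("audio_processing failed",
                  context=LogContext(correlation_id=corr_id, operation="audio_processing"),
                  exception=exc)

    assert get_correlation_id() == corr_id  # the ID is carried in a ContextVar
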
src/application/services/audio_processing_service.py
CHANGED
@@ -12,6 +12,9 @@ from contextlib import contextmanager
 from ..dtos.audio_upload_dto import AudioUploadDto
 from ..dtos.processing_request_dto import ProcessingRequestDto
 from ..dtos.processing_result_dto import ProcessingResultDto
 from ...domain.interfaces.speech_recognition import ISpeechRecognitionService
 from ...domain.interfaces.translation import ITranslationService
 from ...domain.interfaces.speech_synthesis import ISpeechSynthesisService
@@ -30,7 +33,7 @@ from ...domain.exceptions import (
 from ...infrastructure.config.app_config import AppConfig
 from ...infrastructure.config.dependency_container import DependencyContainer

-logger =


 class AudioProcessingApplicationService:
@@ -51,30 +54,34 @@ class AudioProcessingApplicationService:
         self._container = container
         self._config = config or container.resolve(AppConfig)
         self._temp_files: Dict[str, str] = {}  # Track temporary files for cleanup
-
         # Setup logging
         self._setup_logging()
-
         logger.info("AudioProcessingApplicationService initialized")

     def _setup_logging(self) -> None:
         """Setup logging configuration."""
         try:
             log_config = self._config.get_logging_config()
-
             # Configure logger level
             logger.setLevel(getattr(logging, log_config['level'].upper(), logging.INFO))
-
             # Add file handler if enabled
             if log_config.get('enable_file_logging', False):
                 file_handler = logging.FileHandler(log_config['log_file_path'])
                 file_handler.setLevel(logger.level)
-
                 formatter = logging.Formatter(log_config['format'])
                 file_handler.setFormatter(formatter)
-
                 logger.addHandler(file_handler)
-
         except Exception as e:
             logger.warning(f"Failed to setup logging configuration: {e}")

@@ -88,37 +95,53 @@ class AudioProcessingApplicationService:
         Returns:
             ProcessingResultDto: Result of the complete processing pipeline
         """
-
         start_time = time.time()
-
-
-
         try:
             # Validate request
             self._validate_request(request)
-
             # Create temporary working directory
             with self._create_temp_directory(correlation_id) as temp_dir:
                 # Step 1: Convert uploaded audio to domain model
                 audio_content = self._convert_upload_to_audio_content(request.audio, temp_dir)
-
-                # Step 2: Speech-to-Text
-                original_text = self.
-                    audio_content,
                     request.asr_model,
                     correlation_id
                 )
-
-                # Step 3: Translation (if needed)
-                translated_text =
-
-
-
-
-
-
-
                     translated_text,
                     request.voice,
                     request.speed,
@@ -126,10 +149,10 @@ class AudioProcessingApplicationService:
                     temp_dir,
                     correlation_id
                 )
-
                 # Calculate processing time
                 processing_time = time.time() - start_time
-
                 # Create successful result
                 result = ProcessingResultDto.success_result(
                     original_text=original_text.text,
@@ -145,46 +168,114 @@ class AudioProcessingApplicationService:
                         'translation_required': request.requires_translation
                     }
                 )
-
-
-
-
                 )
-
                 return result
-
         except DomainException as e:
             processing_time = time.time() - start_time
-
-
             logger.error(
-                f"Domain error in audio processing pipeline: {
-
             )
-
             return ProcessingResultDto.error_result(
-                error_message=
-                error_code=error_code,
                 processing_time=processing_time,
-                metadata={
             )
-
         except Exception as e:
             processing_time = time.time() - start_time
-
-
-
-
-
             )
-
             return ProcessingResultDto.error_result(
-                error_message=
-                error_code=
                 processing_time=processing_time,
-                metadata={
             )
-
         finally:
             # Cleanup temporary files
             self._cleanup_temp_files()
@@ -201,10 +292,10 @@ class AudioProcessingApplicationService:
         """
        if not isinstance(request, ProcessingRequestDto):
            raise ValueError("Request must be a ProcessingRequestDto instance")
-
        # Additional validation beyond DTO validation
        processing_config = self._config.get_processing_config()
-
        # Check file size limits
        max_size_bytes = processing_config['max_file_size_mb'] * 1024 * 1024
        if request.audio.size > max_size_bytes:
@@ -212,7 +303,7 @@ class AudioProcessingApplicationService:
                f"Audio file too large: {request.audio.size} bytes. "
                f"Maximum allowed: {max_size_bytes} bytes"
            )
-
        # Check supported audio formats
        supported_formats = processing_config['supported_audio_formats']
        file_ext = request.audio.file_extension.lstrip('.')
@@ -235,15 +326,15 @@ class AudioProcessingApplicationService:
        """
        processing_config = self._config.get_processing_config()
        base_temp_dir = processing_config['temp_dir']
-
        # Create unique temp directory
        temp_dir = os.path.join(base_temp_dir, f"processing_{correlation_id}")
-
        try:
            os.makedirs(temp_dir, exist_ok=True)
            logger.debug(f"Created temporary directory: {temp_dir}")
            yield temp_dir
-
        finally:
            # Cleanup temp directory if configured
            if processing_config.get('cleanup_temp_files', True):
@@ -255,8 +346,8 @@ class AudioProcessingApplicationService:
                    logger.warning(f"Failed to cleanup temp directory {temp_dir}: {e}")

    def _convert_upload_to_audio_content(
-        self,
-        upload: AudioUploadDto,
        temp_dir: str
    ) -> AudioContent:
        """
@@ -275,16 +366,16 @@ class AudioProcessingApplicationService:
        try:
            # Save uploaded content to temporary file
            temp_file_path = os.path.join(temp_dir, f"input_{upload.filename}")
-
            with open(temp_file_path, 'wb') as f:
                f.write(upload.content)
-
            # Track temp file for cleanup
            self._temp_files[temp_file_path] = temp_file_path
-
            # Determine audio format from file extension
            audio_format = upload.file_extension.lstrip('.').lower()
-
            # Create AudioContent (simplified - in real implementation would extract metadata)
            audio_content = AudioContent(
                data=upload.content,
@@ -292,17 +383,17 @@ class AudioProcessingApplicationService:
                sample_rate=16000,  # Default, would be extracted from actual file
                duration=0.0  # Would be calculated from actual file
            )
-
            logger.debug(f"Converted upload to AudioContent: {upload.filename}")
            return audio_content
-
        except Exception as e:
            logger.error(f"Failed to convert upload to AudioContent: {e}")
            raise AudioProcessingException(f"Failed to process uploaded audio: {str(e)}")

    def _perform_speech_recognition(
-        self,
-        audio: AudioContent,
        model: str,
        correlation_id: str
    ) -> TextContent:
@@ -322,20 +413,20 @@ class AudioProcessingApplicationService:
        """
        try:
            logger.debug(f"Starting STT with model: {model} [correlation_id={correlation_id}]")
-
            # Get STT provider from container
            stt_provider = self._container.get_stt_provider(model)
-
            # Perform transcription
            text_content = stt_provider.transcribe(audio, model)
-
            logger.info(
                f"STT completed successfully [correlation_id={correlation_id}, "
                f"text_length={len(text_content.text)}]"
            )
-
            return text_content
-
        except Exception as e:
            logger.error(f"STT failed: {e} [correlation_id={correlation_id}]")
            raise SpeechRecognitionException(f"Speech recognition failed: {str(e)}")
@@ -367,27 +458,27 @@ class AudioProcessingApplicationService:
                f"Starting translation: {source_language or 'auto'} -> {target_language} "
                f"[correlation_id={correlation_id}]"
            )
-
            # Get translation provider from container
            translation_provider = self._container.get_translation_provider()
-
            # Create translation request
            translation_request = TranslationRequest(
                text=text.text,
                source_language=source_language or 'auto',
                target_language=target_language
            )
-
            # Perform translation
            translated_text = translation_provider.translate(translation_request)
-
            logger.info(
                f"Translation completed successfully [correlation_id={correlation_id}, "
                f"source_length={len(text.text)}, target_length={len(translated_text.text)}]"
            )
-
            return translated_text
-
        except Exception as e:
            logger.error(f"Translation failed: {e} [correlation_id={correlation_id}]")
            raise TranslationFailedException(f"Translation failed: {str(e)}")
@@ -423,43 +514,43 @@ class AudioProcessingApplicationService:
                f"Starting TTS with voice: {voice}, speed: {speed} "
                f"[correlation_id={correlation_id}]"
            )
-
            # Get TTS provider from container
            tts_provider = self._container.get_tts_provider(voice)
-
            # Create voice settings
            voice_settings = VoiceSettings(
                voice_id=voice,
                speed=speed,
                language=language
            )
-
            # Create synthesis request
|
438 |
synthesis_request = SpeechSynthesisRequest(
|
439 |
text=text.text,
|
440 |
voice_settings=voice_settings
|
441 |
)
|
442 |
-
|
443 |
# Perform synthesis
|
444 |
audio_content = tts_provider.synthesize(synthesis_request)
|
445 |
-
|
446 |
# Save output to file
|
447 |
output_filename = f"output_{correlation_id}.{audio_content.format}"
|
448 |
output_path = os.path.join(temp_dir, output_filename)
|
449 |
-
|
450 |
with open(output_path, 'wb') as f:
|
451 |
f.write(audio_content.data)
|
452 |
-
|
453 |
# Track temp file for cleanup
|
454 |
self._temp_files[output_path] = output_path
|
455 |
-
|
456 |
logger.info(
|
457 |
f"TTS completed successfully [correlation_id={correlation_id}, "
|
458 |
f"output_file={output_path}]"
|
459 |
)
|
460 |
-
|
461 |
return output_path
|
462 |
-
|
463 |
except Exception as e:
|
464 |
logger.error(f"TTS failed: {e} [correlation_id={correlation_id}]")
|
465 |
raise SpeechSynthesisException(f"Speech synthesis failed: {str(e)}")
|
@@ -538,10 +629,10 @@ class AudioProcessingApplicationService:
|
|
538 |
def cleanup(self) -> None:
|
539 |
"""Cleanup application service resources."""
|
540 |
logger.info("Cleaning up AudioProcessingApplicationService")
|
541 |
-
|
542 |
# Cleanup temporary files
|
543 |
self._cleanup_temp_files()
|
544 |
-
|
545 |
logger.info("AudioProcessingApplicationService cleanup completed")
|
546 |
|
547 |
def __enter__(self):
|
@@ -550,4 +641,183 @@ class AudioProcessingApplicationService:
|
|
550 |
|
551 |
def __exit__(self, exc_type, exc_val, exc_tb):
|
552 |
"""Context manager exit with cleanup."""
|
553 |
-
self.cleanup()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
(added side of the same file)

 12   from ..dtos.audio_upload_dto import AudioUploadDto
 13   from ..dtos.processing_request_dto import ProcessingRequestDto
 14   from ..dtos.processing_result_dto import ProcessingResultDto
 15 + from ..error_handling.error_mapper import ErrorMapper
 16 + from ..error_handling.structured_logger import StructuredLogger, LogContext, get_structured_logger
 17 + from ..error_handling.recovery_manager import RecoveryManager, RetryConfig, CircuitBreakerConfig
 18   from ...domain.interfaces.speech_recognition import ISpeechRecognitionService
 19   from ...domain.interfaces.translation import ITranslationService
 20   from ...domain.interfaces.speech_synthesis import ISpeechSynthesisService

 33   from ...infrastructure.config.app_config import AppConfig
 34   from ...infrastructure.config.dependency_container import DependencyContainer
 35
 36 + logger = get_structured_logger(__name__)
 37
 38
 39   class AudioProcessingApplicationService:

 54           self._container = container
 55           self._config = config or container.resolve(AppConfig)
 56           self._temp_files: Dict[str, str] = {}  # Track temporary files for cleanup
 57 +
 58 +         # Initialize error handling components
 59 +         self._error_mapper = ErrorMapper()
 60 +         self._recovery_manager = RecoveryManager()
 61 +
 62           # Setup logging
 63           self._setup_logging()
 64 +
 65           logger.info("AudioProcessingApplicationService initialized")
 66
 67       def _setup_logging(self) -> None:
 68           """Setup logging configuration."""
 69           try:
 70               log_config = self._config.get_logging_config()
 71 +
 72               # Configure logger level
 73               logger.setLevel(getattr(logging, log_config['level'].upper(), logging.INFO))
 74 +
 75               # Add file handler if enabled
 76               if log_config.get('enable_file_logging', False):
 77                   file_handler = logging.FileHandler(log_config['log_file_path'])
 78                   file_handler.setLevel(logger.level)
 79 +
 80                   formatter = logging.Formatter(log_config['format'])
 81                   file_handler.setFormatter(formatter)
 82 +
 83                   logger.addHandler(file_handler)
 84 +
 85           except Exception as e:
 86               logger.warning(f"Failed to setup logging configuration: {e}")
 87

 95           Returns:
 96               ProcessingResultDto: Result of the complete processing pipeline
 97           """
 98 +         # Generate correlation ID and start operation logging
 99 +         correlation_id = logger.log_operation_start(
100 +             "audio_processing_pipeline",
101 +             extra={
102 +                 'asr_model': request.asr_model,
103 +                 'target_language': request.target_language,
104 +                 'voice': request.voice,
105 +                 'file_name': request.audio.filename,
106 +                 'file_size': request.audio.size
107 +             }
108 +         )
109 +
110           start_time = time.time()
111 +         context = LogContext(
112 +             correlation_id=correlation_id,
113 +             operation="audio_processing_pipeline",
114 +             component="AudioProcessingApplicationService"
115 +         )
116 +
117           try:
118               # Validate request
119               self._validate_request(request)
120 +
121               # Create temporary working directory
122               with self._create_temp_directory(correlation_id) as temp_dir:
123                   # Step 1: Convert uploaded audio to domain model
124                   audio_content = self._convert_upload_to_audio_content(request.audio, temp_dir)
125 +
126 +                 # Step 2: Speech-to-Text with retry and fallback
127 +                 original_text = self._perform_speech_recognition_with_recovery(
128 +                     audio_content,
129                       request.asr_model,
130                       correlation_id
131                   )
132 +
133 +                 # Step 3: Translation (if needed) with retry
134 +                 translated_text = original_text
135 +                 if request.requires_translation:
136 +                     translated_text = self._perform_translation_with_recovery(
137 +                         original_text,
138 +                         request.source_language,
139 +                         request.target_language,
140 +                         correlation_id
141 +                     )
142 +
143 +                 # Step 4: Text-to-Speech with fallback providers
144 +                 output_audio_path = self._perform_speech_synthesis_with_recovery(
145                       translated_text,
146                       request.voice,
147                       request.speed,
149                       temp_dir,
150                       correlation_id
151                   )
152 +
153                   # Calculate processing time
154                   processing_time = time.time() - start_time
155 +
156                   # Create successful result
157                   result = ProcessingResultDto.success_result(
158                       original_text=original_text.text,
168                           'translation_required': request.requires_translation
169                       }
170                   )
171 +
172 +                 # Log successful completion
173 +                 logger.log_operation_end(
174 +                     "audio_processing_pipeline",
175 +                     correlation_id,
176 +                     success=True,
177 +                     duration=processing_time,
178 +                     context=context,
179 +                     extra={
180 +                         'original_text_length': len(original_text.text),
181 +                         'translated_text_length': len(translated_text.text) if translated_text != original_text else 0,
182 +                         'output_file': output_audio_path
183 +                     }
184                   )
185 +
186                   return result
187 +
188           except DomainException as e:
189               processing_time = time.time() - start_time
190 +
191 +             # Map exception to user-friendly error
192 +             error_context = {
193 +                 'file_name': request.audio.filename,
194 +                 'file_size': request.audio.size,
195 +                 'operation': 'audio_processing_pipeline',
196 +                 'correlation_id': correlation_id
197 +             }
198 +
199 +             error_mapping = self._error_mapper.map_exception(e, error_context)
200 +
201               logger.error(
202 +                 f"Domain error in audio processing pipeline: {error_mapping.user_message}",
203 +                 context=context,
204 +                 exception=e,
205 +                 extra={
206 +                     'error_code': error_mapping.error_code,
207 +                     'error_category': error_mapping.category.value,
208 +                     'error_severity': error_mapping.severity.value,
209 +                     'recovery_suggestions': error_mapping.recovery_suggestions
210 +                 }
211               )
212 +
213 +             # Log operation failure
214 +             logger.log_operation_end(
215 +                 "audio_processing_pipeline",
216 +                 correlation_id,
217 +                 success=False,
218 +                 duration=processing_time,
219 +                 context=context
220 +             )
221 +
222               return ProcessingResultDto.error_result(
223 +                 error_message=error_mapping.user_message,
224 +                 error_code=error_mapping.error_code,
225                   processing_time=processing_time,
226 +                 metadata={
227 +                     'correlation_id': correlation_id,
228 +                     'error_category': error_mapping.category.value,
229 +                     'error_severity': error_mapping.severity.value,
230 +                     'recovery_suggestions': error_mapping.recovery_suggestions,
231 +                     'technical_details': error_mapping.technical_details
232 +                 }
233               )
234 +
235           except Exception as e:
236               processing_time = time.time() - start_time
237 +
238 +             # Map unexpected exception
239 +             error_context = {
240 +                 'file_name': request.audio.filename,
241 +                 'operation': 'audio_processing_pipeline',
242 +                 'correlation_id': correlation_id
243 +             }
244 +
245 +             error_mapping = self._error_mapper.map_exception(e, error_context)
246 +
247 +             logger.critical(
248 +                 f"Unexpected error in audio processing pipeline: {error_mapping.user_message}",
249 +                 context=context,
250 +                 exception=e,
251 +                 extra={
252 +                     'error_code': error_mapping.error_code,
253 +                     'error_category': error_mapping.category.value,
254 +                     'error_severity': error_mapping.severity.value
255 +                 }
256 +             )
257 +
258 +             # Log operation failure
259 +             logger.log_operation_end(
260 +                 "audio_processing_pipeline",
261 +                 correlation_id,
262 +                 success=False,
263 +                 duration=processing_time,
264 +                 context=context
265               )
266 +
267               return ProcessingResultDto.error_result(
268 +                 error_message=error_mapping.user_message,
269 +                 error_code=error_mapping.error_code,
270                   processing_time=processing_time,
271 +                 metadata={
272 +                     'correlation_id': correlation_id,
273 +                     'error_category': error_mapping.category.value,
274 +                     'error_severity': error_mapping.severity.value,
275 +                     'technical_details': error_mapping.technical_details
276 +                 }
277               )
278 +
279           finally:
280               # Cleanup temporary files
281               self._cleanup_temp_files()

Between added-side lines 292 and 638 the hunks only restore blank lines and indentation inside the existing helpers — _validate_request, _create_temp_directory, _convert_upload_to_audio_content, _perform_speech_recognition, _perform_translation, _perform_speech_synthesis and cleanup — whose logic is unchanged.

@@ -550,4 +641,183 @@ class AudioProcessingApplicationService:
641
642       def __exit__(self, exc_type, exc_val, exc_tb):
643           """Context manager exit with cleanup."""
644 +         self.cleanup()
645 +
646 +     def _perform_speech_recognition_with_recovery(
647 +         self,
648 +         audio: AudioContent,
649 +         model: str,
650 +         correlation_id: str
651 +     ) -> TextContent:
652 +         """
653 +         Perform speech-to-text recognition with retry and fallback.
654 +
655 +         Args:
656 +             audio: Audio content to transcribe
657 +             model: STT model to use
658 +             correlation_id: Correlation ID for tracking
659 +
660 +         Returns:
661 +             TextContent: Transcribed text
662 +
663 +         Raises:
664 +             SpeechRecognitionException: If all attempts fail
665 +         """
666 +         context = LogContext(
667 +             correlation_id=correlation_id,
668 +             operation="speech_recognition",
669 +             component="AudioProcessingApplicationService"
670 +         )
671 +
672 +         # Configure retry for STT
673 +         retry_config = RetryConfig(
674 +             max_attempts=2,
675 +             base_delay=1.0,
676 +             retryable_exceptions=[SpeechRecognitionException, ConnectionError, TimeoutError]
677 +         )
678 +
679 +         def stt_operation():
680 +             return self._perform_speech_recognition(audio, model, correlation_id)
681 +
682 +         try:
683 +             # Try with retry
684 +             return self._recovery_manager.retry_with_backoff(
685 +                 stt_operation,
686 +                 retry_config,
687 +                 correlation_id
688 +             )
689 +
690 +         except Exception as e:
691 +             # Try fallback models if primary fails
692 +             stt_config = self._config.get_stt_config()
693 +             fallback_models = [m for m in stt_config['preferred_providers'] if m != model]
694 +
695 +             if fallback_models:
696 +                 logger.warning(
697 +                     f"STT model {model} failed, trying fallbacks: {fallback_models}",
698 +                     context=context,
699 +                     exception=e
700 +                 )
701 +
702 +                 fallback_funcs = [
703 +                     lambda m=fallback_model: self._perform_speech_recognition(audio, m, correlation_id)
704 +                     for fallback_model in fallback_models
705 +                 ]
706 +
707 +                 return self._recovery_manager.execute_with_fallback(
708 +                     stt_operation,
709 +                     fallback_funcs,
710 +                     correlation_id
711 +                 )
712 +             else:
713 +                 raise
714 +
715 +     def _perform_translation_with_recovery(
716 +         self,
717 +         text: TextContent,
718 +         source_language: Optional[str],
719 +         target_language: str,
720 +         correlation_id: str
721 +     ) -> TextContent:
722 +         """
723 +         Perform text translation with retry.
724 +
725 +         Args:
726 +             text: Text to translate
727 +             source_language: Source language (optional, auto-detect if None)
728 +             target_language: Target language
729 +             correlation_id: Correlation ID for tracking
730 +
731 +         Returns:
732 +             TextContent: Translated text
733 +
734 +         Raises:
735 +             TranslationFailedException: If all attempts fail
736 +         """
737 +         # Configure retry for translation
738 +         retry_config = RetryConfig(
739 +             max_attempts=3,
740 +             base_delay=1.0,
741 +             exponential_backoff=True,
742 +             retryable_exceptions=[TranslationFailedException, ConnectionError, TimeoutError]
743 +         )
744 +
745 +         def translation_operation():
746 +             return self._perform_translation(text, source_language, target_language, correlation_id)
747 +
748 +         return self._recovery_manager.retry_with_backoff(
749 +             translation_operation,
750 +             retry_config,
751 +             correlation_id
752 +         )
753 +
754 +     def _perform_speech_synthesis_with_recovery(
755 +         self,
756 +         text: TextContent,
757 +         voice: str,
758 +         speed: float,
759 +         language: str,
760 +         temp_dir: str,
761 +         correlation_id: str
762 +     ) -> str:
763 +         """
764 +         Perform text-to-speech synthesis with fallback providers.
765 +
766 +         Args:
767 +             text: Text to synthesize
768 +             voice: Voice to use
769 +             speed: Speech speed
770 +             language: Target language
771 +             temp_dir: Temporary directory for output
772 +             correlation_id: Correlation ID for tracking
773 +
774 +         Returns:
775 +             str: Path to generated audio file
776 +
777 +         Raises:
778 +             SpeechSynthesisException: If all providers fail
779 +         """
780 +         context = LogContext(
781 +             correlation_id=correlation_id,
782 +             operation="speech_synthesis",
783 +             component="AudioProcessingApplicationService"
784 +         )
785 +
786 +         def tts_operation():
787 +             return self._perform_speech_synthesis(text, voice, speed, language, temp_dir, correlation_id)
788 +
789 +         try:
790 +             # Try with circuit breaker protection
791 +             return self._recovery_manager.execute_with_circuit_breaker(
792 +                 tts_operation,
793 +                 f"tts_{voice}",
794 +                 CircuitBreakerConfig(failure_threshold=3, recovery_timeout=30.0),
795 +                 correlation_id
796 +             )
797 +
798 +         except Exception as e:
799 +             # Try fallback TTS providers
800 +             tts_config = self._config.get_tts_config()
801 +             fallback_voices = [v for v in tts_config['preferred_providers'] if v != voice]
802 +
803 +             if fallback_voices:
804 +                 logger.warning(
805 +                     f"TTS voice {voice} failed, trying fallbacks: {fallback_voices}",
806 +                     context=context,
807 +                     exception=e
808 +                 )
809 +
810 +                 fallback_funcs = [
811 +                     lambda v=fallback_voice: self._perform_speech_synthesis(
812 +                         text, v, speed, language, temp_dir, correlation_id
813 +                     )
814 +                     for fallback_voice in fallback_voices
815 +                 ]
816 +
817 +                 return self._recovery_manager.execute_with_fallback(
818 +                     tts_operation,
819 +                     fallback_funcs,
820 +                     correlation_id
821 +                 )
822 +             else:
823 +                 raise
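The recovery helpers above delegate to RecoveryManager.retry_with_backoff, execute_with_fallback and execute_with_circuit_breaker, whose implementation (src/application/error_handling/recovery_manager.py) is not shown in this commit view. As a rough illustration of the retry pattern only — the RetryConfig fields and call shapes are taken from the call sites above, everything else in this sketch is an assumption, not the repository's actual code:

    import random
    import time
    from typing import Callable, List, Optional, Type


    class RetryConfig:
        """Assumed shape, inferred from the call sites above (max_attempts >= 1)."""

        def __init__(self, max_attempts: int = 3, base_delay: float = 1.0,
                     exponential_backoff: bool = True,
                     retryable_exceptions: Optional[List[Type[BaseException]]] = None):
            self.max_attempts = max_attempts
            self.base_delay = base_delay
            self.exponential_backoff = exponential_backoff
            self.retryable_exceptions = retryable_exceptions or [Exception]


    def retry_with_backoff(operation: Callable, config: RetryConfig, correlation_id: str):
        """Retry `operation`; sleep base_delay * 2**attempt (plus jitter) between tries."""
        last_error = None
        for attempt in range(config.max_attempts):
            try:
                return operation()
            except tuple(config.retryable_exceptions) as exc:  # non-retryable errors propagate
                last_error = exc
                if attempt == config.max_attempts - 1:
                    break
                delay = config.base_delay * (2 ** attempt if config.exponential_backoff else 1)
                time.sleep(delay + random.uniform(0, 0.1))  # small jitter to avoid retry bursts
        raise last_error

A fallback chain then simply wraps the primary callable and each fallback callable in turn with this helper, returning the first successful result, which is the behaviour the _with_recovery methods rely on.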
src/application/services/configuration_service.py
CHANGED

@@ -7,11 +7,13 @@ from typing import Dict, List, Any, Optional, Union
  7   from pathlib import Path
  8   from dataclasses import asdict
  9
 10 + from ..error_handling.error_mapper import ErrorMapper
 11 + from ..error_handling.structured_logger import StructuredLogger, LogContext, get_structured_logger
 12   from ...infrastructure.config.app_config import AppConfig, TTSConfig, STTConfig, TranslationConfig, ProcessingConfig, LoggingConfig
 13   from ...infrastructure.config.dependency_container import DependencyContainer
 14   from ...domain.exceptions import DomainException
 15
 16 + logger = get_structured_logger(__name__)
 17
 18
 19   class ConfigurationException(DomainException):

(The removed side of this hunk drops the old module-level `logger = …` assignment at old line 14; the removed line's text is not preserved by the viewer.)

@@ -36,7 +38,10 @@ class ConfigurationApplicationService:
 38           """
 39           self._container = container
 40           self._config = config or container.resolve(AppConfig)
 41 +
 42 +         # Initialize error handling
 43 +         self._error_mapper = ErrorMapper()
 44 +
 45           logger.info("ConfigurationApplicationService initialized")
 46
 47       def get_current_configuration(self) -> Dict[str, Any]:

The remaining hunks in this file (@@ -139,10 +144,10 @@ through @@ -629,17 +634,17 @@) only reinstate blank lines inside otherwise unchanged methods: the TTS/STT/translation/processing update paths and their _validate_*_updates helpers, the configuration load/reload routines, the provider-availability check, the current-configuration validation and reset_to_defaults. None of their logic changes.
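The validation helpers in this service reject bad values before any configuration object is touched. A minimal usage sketch, assuming the public update method is named update_tts_configuration (only the private _validate_tts_updates call is visible in these hunks) and that a DependencyContainer can be default-constructed — the real bootstrap wiring is not part of this diff:

    from src.application.services.configuration_service import (
        ConfigurationApplicationService,
        ConfigurationException,
    )
    from src.infrastructure.config.dependency_container import DependencyContainer

    container = DependencyContainer()  # assumed default construction; wiring not shown here
    service = ConfigurationApplicationService(container)

    try:
        # 5.0 falls outside the 0.1–3.0 range enforced by _validate_tts_updates,
        # so the update is rejected and no config object is modified.
        service.update_tts_configuration({'default_speed': 5.0})  # assumed method name
    except ConfigurationException as exc:
        print(f"Rejected: {exc}")

    # A valid update passes validation and returns the refreshed TTS config dict.
    updated = service.update_tts_configuration({'default_speed': 1.25, 'enable_streaming': True})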
tests/unit/application/error_handling/__init__.py
ADDED
@@ -0,0 +1 @@
+"""Tests for application error handling."""
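The test module that follows pins down the ErrorMapper contract: stable error codes, severity and category enums, user-facing messages and recovery suggestions. A minimal usage sketch of that same contract, importing the package exactly as the tests do; the field and method names come from the tests below, everything else is illustrative:

    from src.application.error_handling.error_mapper import ErrorMapper
    from src.domain.exceptions import TranslationFailedException

    mapper = ErrorMapper()

    try:
        raise TranslationFailedException("Translation service unavailable")
    except TranslationFailedException as exc:
        mapping = mapper.map_exception(exc, {'correlation_id': 'demo-001'})
        # Per the tests: TRANSLATION_FAILED, severity HIGH, category PROCESSING.
        print(mapping.error_code, mapping.severity.value, mapping.category.value)
        for suggestion in mapping.recovery_suggestions:
            print("-", suggestion)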
tests/unit/application/error_handling/test_error_mapper.py
ADDED
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Tests for error mapper functionality."""

import pytest
from unittest.mock import Mock

from src.application.error_handling.error_mapper import (
    ErrorMapper, ErrorMapping, ErrorSeverity, ErrorCategory
)
from src.domain.exceptions import (
    InvalidAudioFormatException,
    TranslationFailedException,
    SpeechRecognitionException,
    SpeechSynthesisException
)


class TestErrorMapper:
    """Test cases for ErrorMapper."""

    def setup_method(self):
        """Set up test fixtures."""
        self.error_mapper = ErrorMapper()

    def test_map_domain_exception(self):
        """Test mapping of domain exceptions."""
        exception = InvalidAudioFormatException("Unsupported format: xyz")
        context = {
            'file_name': 'test.xyz',
            'correlation_id': 'test-123'
        }

        mapping = self.error_mapper.map_exception(exception, context)

        assert mapping.error_code == "INVALID_AUDIO_FORMAT"
        assert mapping.severity == ErrorSeverity.MEDIUM
        assert mapping.category == ErrorCategory.VALIDATION
        assert "supported format" in mapping.user_message.lower()
        assert len(mapping.recovery_suggestions) > 0

    def test_map_translation_exception(self):
        """Test mapping of translation exceptions."""
        exception = TranslationFailedException("Translation service unavailable")

        mapping = self.error_mapper.map_exception(exception)

        assert mapping.error_code == "TRANSLATION_FAILED"
        assert mapping.severity == ErrorSeverity.HIGH
        assert mapping.category == ErrorCategory.PROCESSING
        assert "translation failed" in mapping.user_message.lower()

    def test_map_speech_recognition_exception(self):
        """Test mapping of speech recognition exceptions."""
        exception = SpeechRecognitionException("Audio quality too poor")
        context = {'provider': 'whisper'}

        mapping = self.error_mapper.map_exception(exception, context)

        assert mapping.error_code == "SPEECH_RECOGNITION_FAILED"
        assert mapping.severity == ErrorSeverity.HIGH
        assert mapping.category == ErrorCategory.PROCESSING
        assert any("whisper" in suggestion for suggestion in mapping.recovery_suggestions)

    def test_map_speech_synthesis_exception(self):
        """Test mapping of speech synthesis exceptions."""
        exception = SpeechSynthesisException("Voice not available")

        mapping = self.error_mapper.map_exception(exception)

        assert mapping.error_code == "SPEECH_SYNTHESIS_FAILED"
        assert mapping.severity == ErrorSeverity.HIGH
        assert mapping.category == ErrorCategory.PROCESSING

    def test_map_unknown_exception(self):
        """Test mapping of unknown exceptions."""
        exception = RuntimeError("Unknown error")

        mapping = self.error_mapper.map_exception(exception)

        assert mapping.error_code == "UNKNOWN_ERROR"
        assert mapping.severity == ErrorSeverity.CRITICAL
        assert mapping.category == ErrorCategory.SYSTEM

    def test_context_enhancement(self):
        """Test context enhancement of error mappings."""
        exception = ValueError("Invalid parameter")
        context = {
            'file_name': 'large_file.wav',
            'file_size': 100 * 1024 * 1024,  # 100 MB
            'correlation_id': 'test-456',
            'operation': 'audio_processing'
        }

        mapping = self.error_mapper.map_exception(exception, context)

        assert 'large_file.wav' in mapping.user_message
        assert 'test-456' in mapping.technical_details
        assert any("smaller file" in suggestion for suggestion in mapping.recovery_suggestions)

    def test_get_error_code_from_exception(self):
        """Test getting error code from exception."""
        exception = TranslationFailedException("Test error")

        error_code = self.error_mapper.get_error_code_from_exception(exception)

        assert error_code == "TRANSLATION_FAILED"

    def test_get_user_message_from_exception(self):
        """Test getting user message from exception."""
        exception = InvalidAudioFormatException("Test error")

        message = self.error_mapper.get_user_message_from_exception(exception)

        assert "supported" in message.lower()
        assert "format" in message.lower()

    def test_get_recovery_suggestions(self):
        """Test getting recovery suggestions."""
        exception = SpeechRecognitionException("Test error")

        suggestions = self.error_mapper.get_recovery_suggestions(exception)

        assert len(suggestions) > 0
        assert any("audio quality" in suggestion.lower() for suggestion in suggestions)

    def test_add_custom_mapping(self):
        """Test adding custom error mapping."""
        custom_mapping = ErrorMapping(
            user_message="Custom error message",
            error_code="CUSTOM_ERROR",
            severity=ErrorSeverity.LOW,
            category=ErrorCategory.VALIDATION
        )

        self.error_mapper.add_custom_mapping(CustomException, custom_mapping)

        exception = CustomException("Test")
        mapping = self.error_mapper.map_exception(exception)

        assert mapping.error_code == "CUSTOM_ERROR"
        assert mapping.user_message == "Custom error message"

    def test_get_all_error_codes(self):
        """Test getting all error codes."""
        error_codes = self.error_mapper.get_all_error_codes()

        assert "INVALID_AUDIO_FORMAT" in error_codes
        assert "TRANSLATION_FAILED" in error_codes
        assert "SPEECH_RECOGNITION_FAILED" in error_codes
        assert "SPEECH_SYNTHESIS_FAILED" in error_codes
        assert "UNKNOWN_ERROR" in error_codes


class CustomException(Exception):
    """Custom exception for testing."""
    pass
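Taken together, these tests pin down the ErrorMapper surface. Below is a minimal usage sketch at an application boundary, restricted to the calls exercised by the tests above; the surrounding wiring (which exception is caught, the correlation ID value) is illustrative only.

from src.application.error_handling.error_mapper import ErrorMapper
from src.domain.exceptions import TranslationFailedException

mapper = ErrorMapper()

try:
    raise TranslationFailedException("Translation service unavailable")
except Exception as exc:
    # map_exception() takes an optional context dict (file_name, correlation_id,
    # operation, ...) and returns an ErrorMapping with user-facing fields.
    mapping = mapper.map_exception(exc, context={"correlation_id": "req-42"})

    print(mapping.error_code)       # e.g. "TRANSLATION_FAILED"
    print(mapping.user_message)     # safe to show to the user
    for suggestion in mapping.recovery_suggestions:
        print("-", suggestion)
    # mapping.technical_details carries the internals and belongs in logs only.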
tests/unit/application/error_handling/test_structured_logger.py
ADDED
@@ -0,0 +1,298 @@
"""Tests for structured logger functionality."""

import pytest
import json
import logging
from unittest.mock import Mock, patch

from src.application.error_handling.structured_logger import (
    StructuredLogger, LogContext, JsonFormatter, ContextFormatter,
    get_structured_logger, set_correlation_id, get_correlation_id,
    generate_correlation_id
)


class TestLogContext:
    """Test cases for LogContext."""

    def test_log_context_creation(self):
        """Test creating log context."""
        context = LogContext(
            correlation_id="test-123",
            operation="test_operation",
            component="test_component"
        )

        assert context.correlation_id == "test-123"
        assert context.operation == "test_operation"
        assert context.component == "test_component"

    def test_log_context_to_dict(self):
        """Test converting log context to dictionary."""
        context = LogContext(
            correlation_id="test-123",
            operation="test_operation",
            metadata={"key": "value"}
        )

        context_dict = context.to_dict()

        assert context_dict["correlation_id"] == "test-123"
        assert context_dict["operation"] == "test_operation"
        assert context_dict["metadata"] == {"key": "value"}
        assert "user_id" not in context_dict  # None values should be excluded


class TestStructuredLogger:
    """Test cases for StructuredLogger."""

    def setup_method(self):
        """Set up test fixtures."""
        self.logger = StructuredLogger("test_logger", enable_json_logging=False)

    def test_logger_creation(self):
        """Test creating structured logger."""
        assert self.logger.logger.name == "test_logger"
        assert not self.logger.enable_json_logging

    def test_debug_logging(self):
        """Test debug logging."""
        context = LogContext(correlation_id="test-123", operation="test_op")

        with patch.object(self.logger.logger, 'debug') as mock_debug:
            self.logger.debug("Test debug message", context=context)

            mock_debug.assert_called_once()
            args, kwargs = mock_debug.call_args
            assert "Test debug message" in args[0]
            assert "extra" in kwargs

    def test_info_logging(self):
        """Test info logging."""
        context = LogContext(correlation_id="test-123")
        extra = {"key": "value"}

        with patch.object(self.logger.logger, 'info') as mock_info:
            self.logger.info("Test info message", context=context, extra=extra)

            mock_info.assert_called_once()
            args, kwargs = mock_info.call_args
            assert "Test info message" in args[0]
            assert kwargs["extra"]["extra"] == extra

    def test_error_logging_with_exception(self):
        """Test error logging with exception."""
        context = LogContext(correlation_id="test-123")
        exception = ValueError("Test error")

        with patch.object(self.logger.logger, 'error') as mock_error:
            self.logger.error("Test error message", context=context, exception=exception)

            mock_error.assert_called_once()
            args, kwargs = mock_error.call_args
            assert "Test error message" in args[0]
            assert kwargs["extra"]["exception"]["type"] == "ValueError"
            assert kwargs["extra"]["exception"]["message"] == "Test error"

    def test_log_operation_start(self):
        """Test logging operation start."""
        extra = {"param": "value"}

        with patch.object(self.logger.logger, 'info') as mock_info:
            correlation_id = self.logger.log_operation_start("test_operation", extra=extra)

            assert correlation_id is not None
            mock_info.assert_called_once()
            args, kwargs = mock_info.call_args
            assert "Operation started: test_operation" in args[0]

    def test_log_operation_end_success(self):
        """Test logging successful operation end."""
        correlation_id = "test-123"

        with patch.object(self.logger.logger, 'info') as mock_info:
            self.logger.log_operation_end(
                "test_operation",
                correlation_id,
                success=True,
                duration=1.5
            )

            mock_info.assert_called_once()
            args, kwargs = mock_info.call_args
            assert "completed successfully" in args[0]
            assert kwargs["extra"]["extra"]["success"] is True
            assert kwargs["extra"]["extra"]["duration_seconds"] == 1.5

    def test_log_operation_end_failure(self):
        """Test logging failed operation end."""
        correlation_id = "test-123"

        with patch.object(self.logger.logger, 'error') as mock_error:
            self.logger.log_operation_end(
                "test_operation",
                correlation_id,
                success=False
            )

            mock_error.assert_called_once()
            args, kwargs = mock_error.call_args
            assert "failed" in args[0]
            assert kwargs["extra"]["extra"]["success"] is False

    def test_log_performance_metric(self):
        """Test logging performance metric."""
        context = LogContext(correlation_id="test-123")

        with patch.object(self.logger.logger, 'info') as mock_info:
            self.logger.log_performance_metric(
                "response_time",
                150.5,
                "ms",
                context=context
            )

            mock_info.assert_called_once()
            args, kwargs = mock_info.call_args
            assert "Performance metric: response_time=150.5 ms" in args[0]
            assert kwargs["extra"]["extra"]["metric"]["name"] == "response_time"
            assert kwargs["extra"]["extra"]["metric"]["value"] == 150.5
            assert kwargs["extra"]["extra"]["metric"]["unit"] == "ms"


class TestJsonFormatter:
    """Test cases for JsonFormatter."""

    def setup_method(self):
        """Set up test fixtures."""
        self.formatter = JsonFormatter()

    def test_format_log_record(self):
        """Test formatting log record as JSON."""
        record = logging.LogRecord(
            name="test_logger",
            level=logging.INFO,
            pathname="test.py",
            lineno=10,
            msg="Test message",
            args=(),
            exc_info=None
        )

        # Add extra data
        record.extra = {
            "correlation_id": "test-123",
            "operation": "test_op"
        }

        formatted = self.formatter.format(record)

        # Should be valid JSON
        log_data = json.loads(formatted)
        assert log_data["message"] == "Test message"
        assert log_data["level"] == "INFO"
        assert log_data["correlation_id"] == "test-123"
        assert log_data["operation"] == "test_op"

    def test_format_error_handling(self):
        """Test formatter error handling."""
        record = logging.LogRecord(
            name="test_logger",
            level=logging.INFO,
            pathname="test.py",
            lineno=10,
            msg="Test message",
            args=(),
            exc_info=None
        )

        # Add problematic extra data that can't be JSON serialized
        record.extra = {
            "correlation_id": "test-123",
            "problematic_data": object()  # Can't be JSON serialized
        }

        formatted = self.formatter.format(record)

        # Should still work and include error message
        assert "Test message" in formatted


class TestContextFormatter:
    """Test cases for ContextFormatter."""

    def setup_method(self):
        """Set up test fixtures."""
        self.formatter = ContextFormatter()

    def test_format_with_correlation_id(self):
        """Test formatting with correlation ID."""
        record = logging.LogRecord(
            name="test_logger",
            level=logging.INFO,
            pathname="test.py",
            lineno=10,
            msg="Test message",
            args=(),
            exc_info=None
        )

        record.extra = {"correlation_id": "test-123"}

        formatted = self.formatter.format(record)

        assert "[test-123]" in formatted
        assert "Test message" in formatted

    def test_format_with_operation(self):
        """Test formatting with operation context."""
        record = logging.LogRecord(
            name="test_logger",
            level=logging.INFO,
            pathname="test.py",
            lineno=10,
            msg="Test message",
            args=(),
            exc_info=None
        )

        record.extra = {
            "correlation_id": "test-123",
            "operation": "test_operation"
        }

        formatted = self.formatter.format(record)

        assert "[test_operation]" in formatted
        assert "Test message" in formatted


class TestUtilityFunctions:
    """Test cases for utility functions."""

    def test_get_structured_logger(self):
        """Test getting structured logger."""
        logger = get_structured_logger("test_logger")

        assert isinstance(logger, StructuredLogger)
        assert logger.logger.name == "test_logger"

    def test_correlation_id_context(self):
        """Test correlation ID context management."""
        # Initially should be None
        assert get_correlation_id() is None

        # Set correlation ID
        set_correlation_id("test-123")
        assert get_correlation_id() == "test-123"

    def test_generate_correlation_id(self):
        """Test generating correlation ID."""
        correlation_id = generate_correlation_id()

        assert correlation_id is not None
        assert len(correlation_id) > 0

        # Should generate different IDs
        another_id = generate_correlation_id()
        assert correlation_id != another_id
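Finally, a short sketch of how the structured logging pieces might fit together on a request path, using only the API exercised by the tests above. The operation name, file name, and metric values are made up for illustration, and how log_operation_start interacts with the module-level correlation helpers is not shown in this commit.

from src.application.error_handling.structured_logger import (
    LogContext, get_structured_logger, generate_correlation_id, set_correlation_id
)

logger = get_structured_logger("audio_pipeline")

# Module-level helpers exist for propagating an ID across components;
# whether log_operation_start reuses it is an assumption, not shown here.
set_correlation_id(generate_correlation_id())

# log_operation_start returns a correlation ID to thread through the operation.
correlation_id = logger.log_operation_start("transcribe_audio", extra={"file": "clip.wav"})
context = LogContext(correlation_id=correlation_id, operation="transcribe_audio")

try:
    logger.log_performance_metric("response_time", 150.5, "ms", context=context)
    logger.log_operation_end("transcribe_audio", correlation_id, success=True, duration=1.5)
except Exception as exc:
    logger.error("Transcription failed", context=context, exception=exc)
    logger.log_operation_end("transcribe_audio", correlation_id, success=False)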