BladeSzaSza committed
Commit 4399e64 · 0 Parent(s)

🥚 Initial DigiPal deployment to HuggingFace Spaces

🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <[email protected]>

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +35 -0
  2. Dockerfile +54 -0
  3. README.md +85 -0
  4. __init__.py +6 -0
  5. __pycache__/config.cpython-312.pyc +0 -0
  6. app.py +85 -0
  7. backups/backup_automatic_20250725_114923_5347.db.gz +3 -0
  8. backups/backup_automatic_20250725_133639_4471.db.gz +3 -0
  9. backups/backup_metadata.json +112 -0
  10. backups/backup_pre_operation_20250725_114923_0195.db.gz +3 -0
  11. backups/backup_pre_operation_20250725_114923_0627.db.gz +3 -0
  12. backups/backup_pre_operation_20250725_114923_3347.db.gz +3 -0
  13. backups/backup_pre_operation_20250725_114923_3459.db.gz +3 -0
  14. backups/backup_pre_operation_20250725_114923_5379.db.gz +3 -0
  15. backups/backup_pre_operation_20250725_114923_8771.db.gz +3 -0
  16. backups/backup_pre_operation_20250725_114923_9475.db.gz +3 -0
  17. backups/backup_pre_operation_20250725_114923_9731.db.gz +3 -0
  18. backups/backup_pre_operation_20250725_114923_9779.db.gz +3 -0
  19. config.py +296 -0
  20. digipal/__init__.py +0 -0
  21. digipal/__pycache__/__init__.cpython-312.pyc +0 -0
  22. digipal/ai/__init__.py +27 -0
  23. digipal/ai/__pycache__/__init__.cpython-312.pyc +0 -0
  24. digipal/ai/__pycache__/communication.cpython-312.pyc +0 -0
  25. digipal/ai/__pycache__/graceful_degradation.cpython-312.pyc +0 -0
  26. digipal/ai/__pycache__/language_model.cpython-312.pyc +0 -0
  27. digipal/ai/__pycache__/speech_processor.cpython-312.pyc +0 -0
  28. digipal/ai/communication.py +727 -0
  29. digipal/ai/graceful_degradation.py +454 -0
  30. digipal/ai/image_generator.py +402 -0
  31. digipal/ai/language_model.py +532 -0
  32. digipal/ai/speech_processor.py +510 -0
  33. digipal/auth/__init__.py +19 -0
  34. digipal/auth/__pycache__/__init__.cpython-312.pyc +0 -0
  35. digipal/auth/__pycache__/auth_manager.cpython-312.pyc +0 -0
  36. digipal/auth/__pycache__/models.cpython-312.pyc +0 -0
  37. digipal/auth/__pycache__/session_manager.cpython-312.pyc +0 -0
  38. digipal/auth/auth_manager.py +384 -0
  39. digipal/auth/models.py +134 -0
  40. digipal/auth/session_manager.py +370 -0
  41. digipal/core/__init__.py +22 -0
  42. digipal/core/__pycache__/__init__.cpython-312.pyc +0 -0
  43. digipal/core/__pycache__/attribute_engine.cpython-312.pyc +0 -0
  44. digipal/core/__pycache__/digipal_core.cpython-312.pyc +0 -0
  45. digipal/core/__pycache__/enums.cpython-312.pyc +0 -0
  46. digipal/core/__pycache__/error_handler.cpython-312.pyc +0 -0
  47. digipal/core/__pycache__/evolution_controller.cpython-312.pyc +0 -0
  48. digipal/core/__pycache__/exceptions.cpython-312.pyc +0 -0
  49. digipal/core/__pycache__/memory_manager.cpython-312.pyc +0 -0
  50. digipal/core/__pycache__/models.cpython-312.pyc +0 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
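These rules explain why the committed backups/*.db.gz files appear as LFS pointers later in this diff: *.gz is routed through the LFS filter. A rough standalone sketch of which paths the rules capture, using Python's fnmatch as an approximation of gitattributes glob matching (a subset of the rules above; not how git itself evaluates them):

from fnmatch import fnmatch

# Subset of the LFS globs from this .gitattributes
LFS_GLOBS = ["*.gz", "*.safetensors", "*.bin", "*.zip"]

def tracked_by_lfs(path: str) -> bool:
    """Approximate check: does the file's basename match an LFS glob?"""
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, g) for g in LFS_GLOBS)

print(tracked_by_lfs("backups/backup_automatic_20250725_133639_4471.db.gz"))  # True
print(tracked_by_lfs("app.py"))                                               # False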
Dockerfile ADDED
@@ -0,0 +1,54 @@
+ # DigiPal - Digital Pet Application with MCP Server
+ # Dockerfile for HuggingFace Spaces deployment
+
+ FROM python:3.11-slim
+
+ # Set working directory
+ WORKDIR /app
+
+ # Create user with ID 1000 for HF Spaces compatibility
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ # Switch to app directory and set ownership
+ WORKDIR $HOME/app
+
+ # Install system dependencies (as root temporarily)
+ USER root
+ RUN apt-get update && apt-get install -y \
+     git \
+     curl \
+     build-essential \
+     libsndfile1 \
+     ffmpeg \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Switch back to user
+ USER user
+
+ # Copy requirements first for better caching
+ COPY --chown=user requirements-hf.txt requirements.txt
+
+ # Install Python dependencies
+ RUN pip install --user --no-cache-dir -r requirements.txt
+
+ # Copy application code
+ COPY --chown=user . .
+
+ # Create necessary directories
+ RUN mkdir -p assets/images assets/backups logs
+
+ # Set environment variables
+ ENV PYTHONPATH=$HOME/app
+ ENV GRADIO_SERVER_NAME=0.0.0.0
+ ENV GRADIO_SERVER_PORT=7860
+ ENV DIGIPAL_ENV=production
+ ENV DIGIPAL_LOG_LEVEL=INFO
+
+ # Expose port for Gradio
+ EXPOSE 7860
+
+ # Run the application
+ CMD ["python", "app.py"]
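The image exposes the Gradio server on port 7860. A minimal smoke-test sketch, assuming the container built from this Dockerfile is already running with port 7860 published to the host (the root path is just the Gradio page, not a dedicated health endpoint):

import urllib.request

def check_app(host: str = "localhost", port: int = 7860, timeout: float = 10.0) -> bool:
    """Return True if the Gradio server answers on the exposed port."""
    try:
        with urllib.request.urlopen(f"http://{host}:{port}/", timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        # Connection refused, timeout, or HTTP error
        return False

if __name__ == "__main__":
    print("DigiPal up:", check_app())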
README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ title: DigiPal - AI Digital Pet
+ emoji: 🥚
+ colorFrom: blue
+ colorTo: purple
+ sdk: docker
+ app_port: 7860
+ pinned: false
+ license: mit
+ short_description: AI-powered digital pet inspired by Digimon World 1
+ ---
+
+ # 🥚 DigiPal - Your AI Digital Pet
+
+ A sophisticated digital pet application inspired by Digimon World 1, featuring real-time AI interaction, dynamic evolution, and immersive pet care mechanics.
+
+ ## ✨ Features
+
+ ### 🤖 Advanced AI Integration
+ - **Natural Language Processing** with Qwen3-0.6B for contextual conversations
+ - **Speech Recognition** via Kyutai speech-to-text
+ - **Dynamic Image Generation** using FLUX.1-dev for real-time pet visualization
+
+ ### 🎮 Rich Pet Mechanics
+ - **7 Life Stages**: Egg → Baby → Child → Teen → Young Adult → Adult → Elderly
+ - **Care System**: Feed, train, and nurture your DigiPal with 20+ care actions
+ - **Evolution System**: Your care quality determines evolution paths
+ - **Generational Inheritance**: Perfect care = 25% stat inheritance to next generation
+
+ ### 🏠 Persistent World
+ - **Real-time Updates**: Pets age and evolve even when you're away
+ - **Backup System**: Automatic save states and recovery
+ - **Authentication**: Secure HuggingFace token integration
+
+ ## 🚀 Quick Start
+
+ ### Online Mode (Recommended)
+ 1. Enter your HuggingFace API token for full AI features
+ 2. Select your starter egg from 4 unique types
+ 3. Begin caring for your DigiPal through its life journey
+
+ ### Offline Mode
+ 1. Check "Enable Offline Mode"
+ 2. Enter any placeholder token
+ 3. Experience core mechanics without AI features
+
+ ## 🎯 Gameplay Loop
+
+ 1. **Hatch Your Egg**: Choose from Flame, Ocean, Forest, or Sky eggs
+ 2. **Daily Care**: Feed, train, play, and interact with your pet
+ 3. **Watch Evolution**: Care quality determines evolution outcomes
+ 4. **Generational Play**: When pets reach end-of-life, their DNA influences the next generation
+
+ ## 📖 Care Guide
+
+ ### 🍎 Feeding
+ - **Fruits**: Increase happiness and health
+ - **Vegetables**: Boost training effectiveness
+ - **Treats**: Special happiness boost but use sparingly
+
+ ### 🏋️ Training
+ - **Strength**: Physical power and combat readiness
+ - **Intelligence**: Learning speed and AI interaction quality
+ - **Endurance**: Longevity and resistance to illness
+
+ ### 💝 Care Quality
+ Your pet's care level affects everything:
+ - **Perfect Care** (90-100%): Best evolution options, 25% inheritance
+ - **Excellent Care** (80-89%): Great evolutions, 20% inheritance
+ - **Good Care** (70-79%): Standard growth, 15% inheritance
+
+ ## 🎨 Generated Content
+
+ DigiPal creates unique visual content for your pet using FLUX.1-dev, with intelligent caching for performance. Each pet's appearance reflects its species, life stage, and care history.
+
+ ## 🔒 Privacy & Security
+
+ - Secure token storage with encryption
+ - Local data processing where possible
+ - Optional offline mode for privacy-conscious users
+ - Automatic backup system protects your progress
+
+ ---
+
+ **Ready to start your DigiPal journey? 🚀**
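The care-quality tiers in the README imply a simple threshold mapping from care score to inheritance rate. A minimal sketch of that mapping, using the documented tiers; the function name is hypothetical and the repo's actual implementation is not part of this 50-file view:

# Hypothetical helper illustrating the README's care-quality tiers.
def inheritance_rate(care_quality: float) -> float:
    """Map a care-quality score (0-100) to the stat share passed to the next generation."""
    if care_quality >= 90:   # Perfect Care
        return 0.25
    if care_quality >= 80:   # Excellent Care
        return 0.20
    if care_quality >= 70:   # Good Care
        return 0.15
    return 0.0               # below the tiers the README documents

assert inheritance_rate(95) == 0.25
assert inheritance_rate(72) == 0.15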
__init__.py ADDED
@@ -0,0 +1,6 @@
+ """
+ DigiPal - A digital pet application with AI communication and MCP server capabilities.
+ """
+
+ __version__ = "0.1.0"
+ __author__ = "DigiPal Team"
__pycache__/config.cpython-312.pyc ADDED
Binary file (14.3 kB).
 
app.py ADDED
@@ -0,0 +1,85 @@
+ #!/usr/bin/env python3
+ """
+ DigiPal - HuggingFace Spaces Entry Point
+ Simplified launcher for HuggingFace Spaces deployment.
+ """
+
+ import sys
+ import os
+ import logging
+ from pathlib import Path
+
+ # Add the project root to Python path
+ project_root = Path(__file__).parent
+ sys.path.insert(0, str(project_root))
+
+ from digipal.core.digipal_core import DigiPalCore
+ from digipal.storage.storage_manager import StorageManager
+ from digipal.ai.communication import AICommunication
+ from digipal.auth.auth_manager import AuthManager
+ from digipal.storage.database import DatabaseConnection
+ from digipal.ui.gradio_interface import GradioInterface
+ from config import get_config
+
+ # Configure logging for HF Spaces
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ def main():
+     """Main function for HuggingFace Spaces deployment."""
+     logger.info("🥚 Starting DigiPal on HuggingFace Spaces...")
+
+     try:
+         # Get configuration
+         config = get_config()
+
+         # Override for HF Spaces
+         config.gradio.server_name = "0.0.0.0"
+         config.gradio.server_port = 7860
+         config.gradio.share = False
+         config.env = "production"
+
+         # Initialize storage manager
+         db_path = "assets/digipal.db"
+         os.makedirs(os.path.dirname(db_path), exist_ok=True)
+         storage_manager = StorageManager(db_path)
+         logger.info(f"💾 Storage initialized: {db_path}")
+
+         # Initialize AI communication
+         ai_communication = AICommunication()
+         logger.info("🤖 AI system initialized")
+
+         # Initialize DigiPal core
+         digipal_core = DigiPalCore(storage_manager, ai_communication)
+         logger.info("🎮 DigiPal core ready")
+
+         # Initialize auth manager
+         db_connection = DatabaseConnection(db_path)
+         auth_manager = AuthManager(db_connection)
+         logger.info("🔐 Authentication ready")
+
+         # Initialize Gradio interface
+         gradio_interface = GradioInterface(digipal_core, auth_manager)
+         logger.info("🌐 Interface ready")
+
+         logger.info("✅ DigiPal ready on HuggingFace Spaces!")
+
+         # Launch the interface
+         gradio_interface.launch_interface(
+             share=False,
+             server_name="0.0.0.0",
+             server_port=7860,
+             debug=False,
+             show_error=True,
+             quiet=False
+         )
+
+     except Exception as e:
+         logger.error(f"❌ Failed to start DigiPal: {e}")
+         raise
+
+ if __name__ == "__main__":
+     main()
backups/backup_automatic_20250725_114923_5347.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a87bc3a0117d6e6f9588f2e418a7e66263362daba99339213a491c92e9154fc2
+ size 2728
backups/backup_automatic_20250725_133639_4471.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c44bed58a2c71b237c6f22dc01a02cfaad4839d27c31bc1eea2a7386b5f0883
+ size 5380
backups/backup_metadata.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "pre_operation_20250724_181331_9931": {
+     "backup_id": "pre_operation_20250724_181331_9931",
+     "timestamp": "2025-07-24T18:13:31.571414",
+     "backup_type": "pre_operation",
+     "file_path": "assets/backups/backup_pre_operation_20250724_181331_9931.db.gz",
+     "checksum": "54b50a139fdb009c2ac259b65ba4fe61aa456427cce80401885a08a5d52ed27e",
+     "size_bytes": 3522,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Pre-operation backup for: save_pet (context: {\"pet_id\": \"6ec11f65-1f77-442d-8080-eb0805ad0d1b\", \"user_id\": \"offline_14075641f857c575\"})"
+   },
+   "pre_operation_20250724_203639_8119": {
+     "backup_id": "pre_operation_20250724_203639_8119",
+     "timestamp": "2025-07-24T20:36:39.165973",
+     "backup_type": "pre_operation",
+     "file_path": "assets/backups/backup_pre_operation_20250724_203639_8119.db.gz",
+     "checksum": "307166b34980dbd888eceecc5a878fb6a3b88974e01c5abcef9cc1c517dd4a74",
+     "size_bytes": 4877,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Pre-operation backup for: save_pet (context: {\"pet_id\": \"e8ecb462-4d30-4ff5-9a1e-59c7b1560aee\", \"user_id\": \"offline_test_user\"})"
+   },
+   "pre_operation_20250724_203640_6552": {
+     "backup_id": "pre_operation_20250724_203640_6552",
+     "timestamp": "2025-07-24T20:36:40.505842",
+     "backup_type": "pre_operation",
+     "file_path": "assets/backups/backup_pre_operation_20250724_203640_6552.db.gz",
+     "checksum": "462f91e4ceb9fbb3a4da8c3293afbb11621eed677605bad3295ce534358dfd61",
+     "size_bytes": 4982,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Pre-operation backup for: save_pet (context: {\"pet_id\": \"e8ecb462-4d30-4ff5-9a1e-59c7b1560aee\", \"user_id\": \"offline_test_user\"})"
+   },
+   "pre_operation_20250724_203659_2315": {
+     "backup_id": "pre_operation_20250724_203659_2315",
+     "timestamp": "2025-07-24T20:36:59.233483",
+     "backup_type": "pre_operation",
+     "file_path": "assets/backups/backup_pre_operation_20250724_203659_2315.db.gz",
+     "checksum": "4aa8720e3fc61b3b51c18eea7f892613e64e2062bf79bdc86b9eb945b36a0c52",
+     "size_bytes": 5148,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Pre-operation backup for: save_pet (context: {\"pet_id\": \"3f5d0cf4-9778-493e-9b3f-6b9bfadc82bf\", \"user_id\": \"offline_9641d1601a04163b\"})"
+   },
+   "pre_operation_20250724_203705_4609": {
+     "backup_id": "pre_operation_20250724_203705_4609",
+     "timestamp": "2025-07-24T20:37:05.858506",
+     "backup_type": "pre_operation",
+     "file_path": "assets/backups/backup_pre_operation_20250724_203705_4609.db.gz",
+     "checksum": "9df347df0ae4f0aad48de390f09423eea68d3ad6d5c1bf57f53922d7ab7f988e",
+     "size_bytes": 5275,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Pre-operation backup for: save_pet (context: {\"pet_id\": \"3f5d0cf4-9778-493e-9b3f-6b9bfadc82bf\", \"user_id\": \"offline_9641d1601a04163b\"})"
+   },
+   "automatic_20250725_003639_7879": {
+     "backup_id": "automatic_20250725_003639_7879",
+     "timestamp": "2025-07-25T00:36:39.195234",
+     "backup_type": "automatic",
+     "file_path": "assets/backups/backup_automatic_20250725_003639_7879.db.gz",
+     "checksum": "114e3819654d66ab5df44014ebd3351f68db42811d11dcf84fbbd91d04f8f548",
+     "size_bytes": 5380,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Scheduled automatic backup"
+   },
+   "automatic_20250725_073639_3575": {
+     "backup_id": "automatic_20250725_073639_3575",
+     "timestamp": "2025-07-25T07:36:39.071863",
+     "backup_type": "automatic",
+     "file_path": "assets/backups/backup_automatic_20250725_073639_3575.db.gz",
+     "checksum": "3a5fee38e6037b1de40329a979909d6d18dece2b7b3519dae23d5961a7f69abc",
+     "size_bytes": 5380,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Scheduled automatic backup"
+   },
+   "automatic_20250725_133639_4471": {
+     "backup_id": "automatic_20250725_133639_4471",
+     "timestamp": "2025-07-25T13:36:39.126779",
+     "backup_type": "automatic",
+     "file_path": "assets/backups/backup_automatic_20250725_133639_4471.db.gz",
+     "checksum": "0c44bed58a2c71b237c6f22dc01a02cfaad4839d27c31bc1eea2a7386b5f0883",
+     "size_bytes": 5380,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Scheduled automatic backup"
+   },
+   "automatic_20250725_203639_7911": {
+     "backup_id": "automatic_20250725_203639_7911",
+     "timestamp": "2025-07-25T20:36:39.073314",
+     "backup_type": "automatic",
+     "file_path": "assets/backups/backup_automatic_20250725_203639_7911.db.gz",
+     "checksum": "0af54843e544210b936fa48d46c5ea842f8a710e9505bbb96882aab60e14def5",
+     "size_bytes": 5380,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Scheduled automatic backup"
+   },
+   "automatic_20250726_033638_7142": {
+     "backup_id": "automatic_20250726_033638_7142",
+     "timestamp": "2025-07-26T03:36:38.970952",
+     "backup_type": "automatic",
+     "file_path": "assets/backups/backup_automatic_20250726_033638_7142.db.gz",
+     "checksum": "03cf2d9b3624d7b594e2200be22d80bdc5729c3018b4a1dcb0de4ad2169550de",
+     "size_bytes": 5380,
+     "user_id": null,
+     "pet_id": null,
+     "description": "Scheduled automatic backup"
+   }
+ }
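Each metadata entry pairs a backup file with a 64-hex checksum and byte size, so integrity can be checked offline. A minimal verification sketch, assuming (from the digest length and the LFS pointers in this commit) that "checksum" is the SHA-256 of the compressed .db.gz file as stored:

import hashlib
import json
from pathlib import Path

def verify_backup(metadata_path: str, backup_id: str) -> bool:
    """Check a backup's size and SHA-256 against backup_metadata.json."""
    entries = json.loads(Path(metadata_path).read_text())
    entry = entries[backup_id]
    data = Path(entry["file_path"]).read_bytes()
    ok_size = len(data) == entry["size_bytes"]
    ok_sum = hashlib.sha256(data).hexdigest() == entry["checksum"]
    return ok_size and ok_sum

# e.g. verify_backup("backups/backup_metadata.json", "automatic_20250725_133639_4471")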
backups/backup_pre_operation_20250725_114923_0195.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f880d5b94ba61558eae10a0d010c31c2de176ce1f2fc211ec1693a6214b92a8e
+ size 3289
backups/backup_pre_operation_20250725_114923_0627.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:433327e33b5504e1bd3a254f1e8954e6480fee2e672db9db75a526610e17611c
+ size 3583
backups/backup_pre_operation_20250725_114923_3347.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d797dbc3a2dcb7a2826ef279090d8a9e9398a7b16dcae124772bde31170ac81
+ size 2918
backups/backup_pre_operation_20250725_114923_3459.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bf3350cefa96a2aa81534180f1234459f64b2505f740fbd566bf92d4de48c6e
+ size 3442
backups/backup_pre_operation_20250725_114923_5379.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61880230367bf90b7eeb57670744977c2f72ee61474fdf88fd907b1951834cfb
+ size 3768
backups/backup_pre_operation_20250725_114923_8771.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a17ecd54b8574ec843eaf47341e67c1a7ef8bef48295e93321ecca043d1489b1
+ size 3707
backups/backup_pre_operation_20250725_114923_9475.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5052638f4228eb6f89ec98b8f81d4d67278e1caf6f070c0b125e5993f786d91c
+ size 3138
backups/backup_pre_operation_20250725_114923_9731.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a95cb356dd093de19e842f43d4bdedcd5e0226acb95156611f82fe0f138b0dc
+ size 3810
backups/backup_pre_operation_20250725_114923_9779.db.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17fdec64d28f5d7d5f1b0e617544564e8657771f7e51182669066f54a4484e66
+ size 3062
config.py ADDED
@@ -0,0 +1,296 @@
+ """
+ DigiPal Configuration Management
+ Handles environment-specific configuration for deployment
+ """
+
+ import os
+ from typing import Optional, Dict, Any
+ from dataclasses import dataclass
+ from pathlib import Path
+ import logging
+
+
+ @dataclass
+ class DatabaseConfig:
+     """Database configuration settings"""
+     path: str = "digipal.db"
+     backup_dir: str = "assets/backups"
+     backup_interval_hours: int = 24
+     max_backups: int = 10
+
+
+ @dataclass
+ class AIModelConfig:
+     """AI model configuration settings"""
+     qwen_model: str = "Qwen/Qwen3-0.6B"
+     kyutai_model: str = "kyutai/stt-2.6b-en_fr-trfs"
+     flux_model: str = "black-forest-labs/FLUX.1-dev"
+     device: str = "auto"
+     torch_dtype: str = "auto"
+     enable_quantization: bool = True
+     max_memory_gb: Optional[int] = None
+
+
+ @dataclass
+ class GradioConfig:
+     """Gradio interface configuration"""
+     server_name: str = "0.0.0.0"
+     server_port: int = 7860
+     share: bool = False
+     debug: bool = False
+     auth: Optional[tuple] = None
+     ssl_keyfile: Optional[str] = None
+     ssl_certfile: Optional[str] = None
+
+
+ @dataclass
+ class MCPConfig:
+     """MCP server configuration"""
+     enabled: bool = True
+     host: str = "localhost"
+     port: int = 8080
+     max_connections: int = 100
+     timeout_seconds: int = 30
+
+
+ @dataclass
+ class LoggingConfig:
+     """Logging configuration"""
+     level: str = "INFO"
+     format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+     file_path: Optional[str] = "logs/digipal.log"
+     max_file_size_mb: int = 10
+     backup_count: int = 5
+     enable_structured_logging: bool = True
+
+
+ @dataclass
+ class SecurityConfig:
+     """Security configuration"""
+     secret_key: Optional[str] = None
+     session_timeout_hours: int = 24
+     max_login_attempts: int = 5
+     rate_limit_per_minute: int = 60
+     enable_cors: bool = True
+     allowed_origins: Optional[list] = None
+
+
+ @dataclass
+ class PerformanceConfig:
+     """Performance optimization settings"""
+     cache_size_mb: int = 512
+     background_update_interval: int = 60
+     max_concurrent_users: int = 100
+     enable_model_caching: bool = True
+     image_cache_max_age_days: int = 30
+
+
+ class DigiPalConfig:
+     """Main configuration class for DigiPal application"""
+
+     def __init__(self, env: Optional[str] = None):
+         self.env = env or os.getenv("DIGIPAL_ENV", "development")
+         self.load_config()
+
+     def load_config(self):
+         """Load configuration based on environment"""
+         # Base configuration
+         self.database = DatabaseConfig()
+         self.ai_models = AIModelConfig()
+         self.gradio = GradioConfig()
+         self.mcp = MCPConfig()
+         self.logging = LoggingConfig()
+         self.security = SecurityConfig()
+         self.performance = PerformanceConfig()
+
+         # Environment-specific overrides
+         if self.env == "production":
+             self._load_production_config()
+         elif self.env == "testing":
+             self._load_testing_config()
+         elif self.env == "development":
+             self._load_development_config()
+
+         # Load from environment variables
+         self._load_from_env()
+
+         # Validate configuration
+         self._validate_config()
+
+     def _load_production_config(self):
+         """Production environment configuration"""
+         self.database.path = "/app/data/digipal.db"
+         self.database.backup_dir = "/app/data/backups"
+
+         self.gradio.debug = False
+         self.gradio.share = False
+
+         self.logging.level = "INFO"
+         self.logging.file_path = "/app/logs/digipal.log"
+
+         self.security.session_timeout_hours = 12
+         self.security.rate_limit_per_minute = 30
+
+         self.performance.cache_size_mb = 1024
+         self.performance.max_concurrent_users = 500
+
+     def _load_testing_config(self):
+         """Testing environment configuration"""
+         self.database.path = "test_digipal.db"
+         self.database.backup_dir = "test_assets/backups"
+
+         self.gradio.debug = True
+         self.gradio.server_port = 7861
+
+         self.logging.level = "DEBUG"
+         self.logging.file_path = None  # Console only
+
+         self.ai_models.enable_quantization = False
+         self.performance.cache_size_mb = 128
+
+     def _load_development_config(self):
+         """Development environment configuration"""
+         self.gradio.debug = True
+         self.gradio.share = False
+
+         self.logging.level = "DEBUG"
+         self.logging.file_path = "logs/digipal_dev.log"
+
+         self.security.rate_limit_per_minute = 120
+         self.performance.cache_size_mb = 256
+
+     def _load_from_env(self):
+         """Load configuration from environment variables"""
+         # Database
+         if os.getenv("DIGIPAL_DB_PATH"):
+             self.database.path = os.getenv("DIGIPAL_DB_PATH")
+
+         # Gradio
+         if os.getenv("GRADIO_SERVER_NAME"):
+             self.gradio.server_name = os.getenv("GRADIO_SERVER_NAME")
+         if os.getenv("GRADIO_SERVER_PORT"):
+             self.gradio.server_port = int(os.getenv("GRADIO_SERVER_PORT"))
+         if os.getenv("GRADIO_SHARE"):
+             self.gradio.share = os.getenv("GRADIO_SHARE").lower() == "true"
+
+         # Logging
+         if os.getenv("DIGIPAL_LOG_LEVEL"):
+             self.logging.level = os.getenv("DIGIPAL_LOG_LEVEL")
+         if os.getenv("DIGIPAL_LOG_FILE"):
+             self.logging.file_path = os.getenv("DIGIPAL_LOG_FILE")
+
+         # Security
+         if os.getenv("DIGIPAL_SECRET_KEY"):
+             self.security.secret_key = os.getenv("DIGIPAL_SECRET_KEY")
+
+         # AI Models
+         if os.getenv("QWEN_MODEL"):
+             self.ai_models.qwen_model = os.getenv("QWEN_MODEL")
+         if os.getenv("KYUTAI_MODEL"):
+             self.ai_models.kyutai_model = os.getenv("KYUTAI_MODEL")
+         if os.getenv("FLUX_MODEL"):
+             self.ai_models.flux_model = os.getenv("FLUX_MODEL")
+
+     def _validate_config(self):
+         """Validate configuration settings"""
+         # Ensure required directories exist
+         Path(self.database.backup_dir).mkdir(parents=True, exist_ok=True)
+
+         if self.logging.file_path:
+             Path(self.logging.file_path).parent.mkdir(parents=True, exist_ok=True)
+
+         # Validate port ranges
+         if not (1024 <= self.gradio.server_port <= 65535):
+             raise ValueError(f"Invalid Gradio port: {self.gradio.server_port}")
+
+         if not (1024 <= self.mcp.port <= 65535):
+             raise ValueError(f"Invalid MCP port: {self.mcp.port}")
+
+         # Validate memory settings
+         if self.performance.cache_size_mb < 64:
+             logging.warning("Cache size is very low, performance may be affected")
+
+     def get_database_url(self) -> str:
+         """Get database connection URL"""
+         return f"sqlite:///{self.database.path}"
+
+     def get_log_config(self) -> Dict[str, Any]:
+         """Get logging configuration dictionary"""
+         config = {
+             "version": 1,
+             "disable_existing_loggers": False,
+             "formatters": {
+                 "standard": {
+                     "format": self.logging.format
+                 },
+                 "structured": {
+                     "()": "structlog.stdlib.ProcessorFormatter",
+                     "processor": "structlog.dev.ConsoleRenderer",
+                 } if self.logging.enable_structured_logging else {
+                     "format": self.logging.format
+                 }
+             },
+             "handlers": {
+                 "console": {
+                     "class": "logging.StreamHandler",
+                     "level": self.logging.level,
+                     "formatter": "structured" if self.logging.enable_structured_logging else "standard",
+                     "stream": "ext://sys.stdout"
+                 }
+             },
+             "loggers": {
+                 "digipal": {
+                     "level": self.logging.level,
+                     "handlers": ["console"],
+                     "propagate": False
+                 }
+             },
+             "root": {
+                 "level": self.logging.level,
+                 "handlers": ["console"]
+             }
+         }
+
+         # Add file handler if specified
+         if self.logging.file_path:
+             config["handlers"]["file"] = {
+                 "class": "logging.handlers.RotatingFileHandler",
+                 "level": self.logging.level,
+                 "formatter": "standard",
+                 "filename": self.logging.file_path,
+                 "maxBytes": self.logging.max_file_size_mb * 1024 * 1024,
+                 "backupCount": self.logging.backup_count
+             }
+             config["loggers"]["digipal"]["handlers"].append("file")
+             config["root"]["handlers"].append("file")
+
+         return config
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert configuration to dictionary"""
+         return {
+             "env": self.env,
+             "database": self.database.__dict__,
+             "ai_models": self.ai_models.__dict__,
+             "gradio": self.gradio.__dict__,
+             "mcp": self.mcp.__dict__,
+             "logging": self.logging.__dict__,
+             "security": self.security.__dict__,
+             "performance": self.performance.__dict__
+         }
+
+
+ # Global configuration instance
+ config = DigiPalConfig()
+
+
+ def get_config() -> DigiPalConfig:
+     """Get the global configuration instance"""
+     return config
+
+
+ def reload_config(env: Optional[str] = None):
+     """Reload configuration with optional environment override"""
+     global config
+     config = DigiPalConfig(env)
+     return config
File without changes
digipal/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (159 Bytes). View file
 
digipal/ai/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ AI communication layer including speech processing and language models.
3
+ """
4
+
5
+ from .communication import (
6
+ AICommunication,
7
+ CommandInterpreter,
8
+ ResponseGenerator,
9
+ ConversationMemoryManager
10
+ )
11
+ from .speech_processor import (
12
+ SpeechProcessor,
13
+ AudioValidator,
14
+ SpeechProcessingResult,
15
+ AudioValidationResult
16
+ )
17
+
18
+ __all__ = [
19
+ 'AICommunication',
20
+ 'CommandInterpreter',
21
+ 'ResponseGenerator',
22
+ 'ConversationMemoryManager',
23
+ 'SpeechProcessor',
24
+ 'AudioValidator',
25
+ 'SpeechProcessingResult',
26
+ 'AudioValidationResult'
27
+ ]
digipal/ai/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (631 Bytes).
 
digipal/ai/__pycache__/communication.cpython-312.pyc ADDED
Binary file (34.3 kB).
 
digipal/ai/__pycache__/graceful_degradation.cpython-312.pyc ADDED
Binary file (22 kB).
 
digipal/ai/__pycache__/language_model.cpython-312.pyc ADDED
Binary file (21.7 kB).
 
digipal/ai/__pycache__/speech_processor.cpython-312.pyc ADDED
Binary file (20.5 kB).
 
digipal/ai/communication.py ADDED
@@ -0,0 +1,727 @@
+ """
+ AI Communication layer for DigiPal application.
+
+ This module handles speech processing, natural language generation,
+ command interpretation, and conversation memory management.
+ """
+
+ import re
+ from typing import Dict, List, Optional, Any, Tuple
+ from datetime import datetime
+ import logging
+ import torch
+
+ from ..core.models import DigiPal, Interaction, Command
+ from ..core.enums import LifeStage, CommandType, InteractionResult
+ from ..core.memory_manager import EnhancedMemoryManager
+ from .language_model import LanguageModel
+ from .speech_processor import SpeechProcessor, SpeechProcessingResult
+
+
+ logger = logging.getLogger(__name__)
+
+
+ class AICommunication:
+     """
+     Main AI communication class that orchestrates speech processing,
+     language model interactions, and conversation management.
+     """
+
+     def __init__(self, model_name: str = "Qwen/Qwen3-0.6B", quantization: bool = True,
+                  kyutai_config: Optional[Dict] = None, enhanced_memory_manager: Optional[EnhancedMemoryManager] = None):
+         """
+         Initialize AI communication system.
+
+         Args:
+             model_name: HuggingFace model identifier for Qwen3-0.6B
+             quantization: Whether to use quantization for memory optimization
+             kyutai_config: Configuration for Kyutai speech processing
+             enhanced_memory_manager: Enhanced memory manager for RAG and emotional memories
+         """
+         self.model_name = model_name
+         self.quantization = quantization
+         self.kyutai_config = kyutai_config or {}
+
+         # Initialize components
+         self.command_interpreter = CommandInterpreter()
+         self.response_generator = ResponseGenerator()
+         self.memory_manager = ConversationMemoryManager()
+
+         # Enhanced memory manager for RAG and emotional memories
+         self.enhanced_memory_manager = enhanced_memory_manager
+
+         # Initialize language model
+         self.language_model = LanguageModel(model_name, quantization)
+         self._model_loaded = False
+
+         # Initialize speech processor
+         speech_model_id = self.kyutai_config.get('model_id', 'kyutai/stt-2.6b-en_fr-trfs')
+         speech_device = self.kyutai_config.get('device', None)
+         self.speech_processor = SpeechProcessor(speech_model_id, speech_device)
+         self._speech_model_loaded = False
+
+         logger.info(f"AICommunication initialized with model: {model_name}")
+         logger.info(f"Speech processor initialized with model: {speech_model_id}")
+         logger.info(f"Quantization enabled: {quantization}")
+         logger.info(f"Enhanced memory manager: {'enabled' if enhanced_memory_manager else 'disabled'}")
+
+     def process_speech(self, audio_data: bytes, sample_rate: Optional[int] = None) -> str:
+         """
+         Process speech audio data and convert to text using Kyutai STT.
+
+         Args:
+             audio_data: Raw audio bytes from user input
+             sample_rate: Sample rate of the audio data (optional)
+
+         Returns:
+             Transcribed text from speech
+         """
+         logger.info("Processing speech audio with Kyutai STT")
+
+         try:
+             # Ensure speech model is loaded
+             if not self._speech_model_loaded:
+                 if not self.load_speech_model():
+                     logger.error("Failed to load speech model, returning empty string")
+                     return ""
+
+             # Process speech using Kyutai
+             result = self.speech_processor.process_speech(audio_data, sample_rate)
+
+             if result.success:
+                 logger.info(f"Speech processed successfully: '{result.transcribed_text}' (confidence: {result.confidence:.2f})")
+                 return result.transcribed_text
+             else:
+                 logger.warning(f"Speech processing failed: {result.error_message}")
+                 return ""
+
+         except Exception as e:
+             logger.error(f"Error in speech processing: {e}")
+             return ""
+
+     def generate_response(self, input_text: str, pet: DigiPal) -> str:
+         """
+         Generate contextual response using Qwen3-0.6B language model with RAG.
+
+         Args:
+             input_text: User input text
+             pet: Current DigiPal instance for context
+
+         Returns:
+             Generated response text
+         """
+         logger.info(f"Generating response for input: {input_text}")
+
+         # Ensure model is loaded
+         if not self._model_loaded:
+             self.load_model()
+
+         # Get relevant memories for context if enhanced memory manager is available
+         memory_context = ""
+         if self.enhanced_memory_manager:
+             current_context = {
+                 'life_stage': pet.life_stage.value,
+                 'happiness': pet.happiness,
+                 'energy': pet.energy,
+                 'recent_interactions': len(pet.conversation_history)
+             }
+             memory_context = self.enhanced_memory_manager.get_memory_context_for_llm(
+                 pet.id, input_text, current_context
+             )
+
+         # Use language model if available, otherwise fallback to template responses
+         if self.language_model.is_loaded():
+             # Pass memory context to language model
+             return self.language_model.generate_response(input_text, pet, memory_context)
+         else:
+             logger.warning("Language model not available, using fallback response generator")
+             return self.response_generator.generate_response(input_text, pet)
+
+     def interpret_command(self, text: str, pet: DigiPal) -> Command:
+         """
+         Interpret user text input into actionable commands.
+
+         Args:
+             text: User input text
+             pet: Current DigiPal instance for context
+
+         Returns:
+             Parsed Command object
+         """
+         return self.command_interpreter.parse_command(text, pet.life_stage)
+
+     def process_interaction(self, input_text: str, pet: DigiPal) -> Interaction:
+         """
+         Process a complete user interaction with the DigiPal.
+
+         Args:
+             input_text: User input text
+             pet: Current DigiPal instance
+
+         Returns:
+             Complete Interaction object with results
+         """
+         # Parse the command
+         command = self.interpret_command(input_text, pet)
+
+         # Generate response
+         response = self.generate_response(input_text, pet)
+
+         # Create interaction record
+         interaction = Interaction(
+             timestamp=datetime.now(),
+             user_input=input_text,
+             interpreted_command=command.action,
+             pet_response=response,
+             success=command.stage_appropriate,
+             result=InteractionResult.SUCCESS if command.stage_appropriate else InteractionResult.STAGE_INAPPROPRIATE
+         )
+
+         # Update conversation memory
+         self.update_conversation_memory(interaction, pet)
+
+         return interaction
+
+     def update_conversation_memory(self, interaction: Interaction, pet: DigiPal) -> None:
+         """
+         Update conversation memory with new interaction.
+
+         Args:
+             interaction: New interaction to add to memory
+             pet: DigiPal instance to update
+         """
+         # Update traditional conversation memory
+         self.memory_manager.add_interaction(interaction, pet)
+
+         # Update enhanced memory manager with emotional context
+         if self.enhanced_memory_manager:
+             self.enhanced_memory_manager.add_interaction_memory(pet, interaction)
+
+     def load_model(self) -> bool:
+         """
+         Load the Qwen3-0.6B language model.
+
+         Returns:
+             True if model loaded successfully, False otherwise
+         """
+         logger.info("Loading Qwen3-0.6B language model...")
+
+         try:
+             success = self.language_model.load_model()
+             self._model_loaded = success
+
+             if success:
+                 logger.info("Language model loaded successfully")
+             else:
+                 logger.warning("Failed to load language model, will use fallback responses")
+
+             return success
+
+         except Exception as e:
+             logger.error(f"Error loading language model: {e}")
+             self._model_loaded = False
+             return False
+
+     def load_speech_model(self) -> bool:
+         """
+         Load the Kyutai speech-to-text model.
+
+         Returns:
+             True if speech model loaded successfully, False otherwise
+         """
+         logger.info("Loading Kyutai speech-to-text model...")
+
+         try:
+             success = self.speech_processor.load_model()
+             self._speech_model_loaded = success
+
+             if success:
+                 logger.info("Speech model loaded successfully")
+             else:
+                 logger.warning("Failed to load speech model")
+
+             return success
+
+         except Exception as e:
+             logger.error(f"Error loading speech model: {e}")
+             self._speech_model_loaded = False
+             return False
+
+     def is_speech_model_loaded(self) -> bool:
+         """
+         Check if the speech model is loaded and ready.
+
+         Returns:
+             True if speech model is loaded, False otherwise
+         """
+         return self._speech_model_loaded and self.speech_processor.is_model_loaded()
+
+     def is_model_loaded(self) -> bool:
+         """
+         Check if the language model is loaded and ready.
+
+         Returns:
+             True if model is loaded, False otherwise
+         """
+         return self._model_loaded and self.language_model.is_loaded()
+
+     def get_model_info(self) -> Dict[str, Any]:
+         """
+         Get information about the loaded language model.
+
+         Returns:
+             Dictionary with model information
+         """
+         base_info = {
+             'model_name': self.model_name,
+             'quantization': self.quantization,
+             'loaded': self.is_model_loaded()
+         }
+
+         if self.language_model:
+             base_info.update(self.language_model.get_model_info())
+
+         return base_info
+
+     def get_speech_model_info(self) -> Dict[str, Any]:
+         """
+         Get information about the loaded speech model.
+
+         Returns:
+             Dictionary with speech model information
+         """
+         if self.speech_processor:
+             return self.speech_processor.get_model_info()
+         else:
+             return {
+                 'model_id': self.kyutai_config.get('model_id', 'kyutai/stt-2.6b-en_fr-trfs'),
+                 'loaded': False
+             }
+
+     def unload_model(self) -> None:
+         """
+         Unload the language model to free memory.
+         """
+         if self.language_model:
+             # Clear model references to free memory
+             self.language_model.model = None
+             self.language_model.tokenizer = None
+             self._model_loaded = False
+
+             # Force garbage collection
+             import gc
+             gc.collect()
+
+             # Clear CUDA cache if available
+             if torch.cuda.is_available():
+                 torch.cuda.empty_cache()
+
+             logger.info("Language model unloaded")
+
+     def unload_speech_model(self) -> None:
+         """
+         Unload the speech model to free memory.
+         """
+         if self.speech_processor:
+             self.speech_processor.unload_model()
+             self._speech_model_loaded = False
+             logger.info("Speech model unloaded")
+
+     def unload_all_models(self) -> None:
+         """
+         Unload both language and speech models to free memory.
+         """
+         self.unload_model()
+         self.unload_speech_model()
+         logger.info("All models unloaded")
+
+
+ class CommandInterpreter:
+     """
+     Interprets user text input into structured commands based on DigiPal's life stage.
+     """
+
+     def __init__(self):
+         """Initialize command interpreter with command patterns."""
+         self.command_patterns = self._initialize_command_patterns()
+         self.stage_commands = self._initialize_stage_commands()
+
+     def _initialize_command_patterns(self) -> Dict[CommandType, List[str]]:
+         """Initialize regex patterns for command recognition."""
+         return {
+             CommandType.EAT: [
+                 r'\b(eat|feed|food|hungry|meal)\b',
+                 r'\b(give.*food|want.*food)\b'
+             ],
+             CommandType.SLEEP: [
+                 r'\b(sleep|rest|tired|nap|bed)\b',
+                 r'\b(go.*sleep|time.*sleep)\b'
+             ],
+             CommandType.GOOD: [
+                 r'\b(good|great|excellent|well done|nice)\b',
+                 r'\b(praise|proud|amazing)\b'
+             ],
+             CommandType.BAD: [
+                 r'\b(bad|no|stop|wrong|naughty)\b',
+                 r'\b(scold|discipline|behave)\b'
+             ],
+             CommandType.TRAIN: [
+                 r'\b(train|exercise|workout|practice|training)\b',
+                 r'\b(let\'s train|training time|work on|time for.*training)\b'
+             ],
+             CommandType.PLAY: [
+                 r'\b(play|fun|game|toy)\b',
+                 r'\b(let\'s play|play time)\b'
+             ],
+             CommandType.STATUS: [
+                 r'\b(status|how.*you|feeling|health|show)\b',
+                 r'\b(check.*stats|show.*attributes|show.*status)\b'
+             ]
+         }
+
+     def _initialize_stage_commands(self) -> Dict[LifeStage, List[CommandType]]:
+         """Initialize available commands for each life stage."""
+         return {
+             LifeStage.EGG: [],
+             LifeStage.BABY: [CommandType.EAT, CommandType.SLEEP, CommandType.GOOD, CommandType.BAD],
+             LifeStage.CHILD: [CommandType.EAT, CommandType.SLEEP, CommandType.GOOD, CommandType.BAD,
+                               CommandType.PLAY, CommandType.TRAIN],
+             LifeStage.TEEN: [CommandType.EAT, CommandType.SLEEP, CommandType.GOOD, CommandType.BAD,
+                              CommandType.PLAY, CommandType.TRAIN, CommandType.STATUS],
+             LifeStage.YOUNG_ADULT: [CommandType.EAT, CommandType.SLEEP, CommandType.GOOD, CommandType.BAD,
+                                     CommandType.PLAY, CommandType.TRAIN, CommandType.STATUS],
+             LifeStage.ADULT: [CommandType.EAT, CommandType.SLEEP, CommandType.GOOD, CommandType.BAD,
+                               CommandType.PLAY, CommandType.TRAIN, CommandType.STATUS],
+             LifeStage.ELDERLY: [CommandType.EAT, CommandType.SLEEP, CommandType.GOOD, CommandType.BAD,
+                                 CommandType.PLAY, CommandType.TRAIN, CommandType.STATUS]
+         }
+
+     def parse_command(self, text: str, life_stage: LifeStage) -> Command:
+         """
+         Parse user text into a structured command.
+
+         Args:
+             text: User input text
+             life_stage: Current DigiPal life stage
+
+         Returns:
+             Parsed Command object
+         """
+         text_lower = text.lower().strip()
+
+         # Check each command type for pattern matches
+         for command_type, patterns in self.command_patterns.items():
+             for pattern in patterns:
+                 if re.search(pattern, text_lower):
+                     # Check if command is appropriate for current life stage
+                     stage_appropriate = command_type in self.stage_commands.get(life_stage, [])
+
+                     return Command(
+                         action=command_type.value,
+                         command_type=command_type,
+                         stage_appropriate=stage_appropriate,
+                         energy_required=self._get_energy_requirement(command_type),
+                         parameters=self._extract_parameters(text_lower, command_type)
+                     )
+
+         # If no pattern matches, return unknown command
+         return Command(
+             action="unknown",
+             command_type=CommandType.UNKNOWN,
+             stage_appropriate=False,
+             energy_required=0,
+             parameters={"original_text": text}
+         )
+
+     def _get_energy_requirement(self, command_type: CommandType) -> int:
+         """Get energy requirement for command type."""
+         energy_requirements = {
+             CommandType.EAT: 0,
+             CommandType.SLEEP: 0,
+             CommandType.GOOD: 0,
+             CommandType.BAD: 0,
+             CommandType.TRAIN: 20,
+             CommandType.PLAY: 10,
+             CommandType.STATUS: 0,
+             CommandType.UNKNOWN: 0
+         }
+         return energy_requirements.get(command_type, 0)
+
+     def _extract_parameters(self, text: str, command_type: CommandType) -> Dict[str, Any]:
+         """Extract parameters from command text."""
+         parameters = {}
+
+         # Add command-specific parameter extraction logic
+         if command_type == CommandType.TRAIN:
+             # Look for specific training types
+             if 'strength' in text or 'attack' in text:
+                 parameters['training_type'] = 'strength'
+             elif 'defense' in text or 'guard' in text:
+                 parameters['training_type'] = 'defense'
+             elif 'speed' in text or 'agility' in text:
+                 parameters['training_type'] = 'speed'
+             elif 'brain' in text or 'intelligence' in text:
+                 parameters['training_type'] = 'brains'
+             else:
+                 parameters['training_type'] = 'general'
+
+         elif command_type == CommandType.EAT:
+             # Look for food types (placeholder for future expansion)
+             parameters['food_type'] = 'standard'
+
+         return parameters
+
+
+ class ResponseGenerator:
+     """
+     Generates contextual responses based on DigiPal state and user input.
+     """
+
+     def __init__(self):
+         """Initialize response generator with templates."""
+         self.response_templates = self._initialize_response_templates()
+
+     def _initialize_response_templates(self) -> Dict[LifeStage, Dict[str, List[str]]]:
+         """Initialize response templates for each life stage and situation."""
+         return {
+             LifeStage.EGG: {
+                 'default': ["*The egg remains silent*", "*The egg seems to be listening*"],
+                 'speech_detected': ["*The egg trembles slightly*", "*Something stirs within the egg*"]
+             },
+             LifeStage.BABY: {
+                 'eat': ["*happy baby sounds*", "Goo goo!", "*contentedly munches*"],
+                 'sleep': ["*yawns sleepily*", "Zzz...", "*curls up peacefully*"],
+                 'good': ["*giggles happily*", "Goo!", "*bounces with joy*"],
+                 'bad': ["*whimpers*", "*looks sad*", "*hides behind hands*"],
+                 'unknown': ["*tilts head curiously*", "*makes confused baby sounds*", "Goo?"],
+                 'default': ["*baby babbling*", "Goo goo ga ga!", "*looks at you with big eyes*"]
+             },
+             LifeStage.CHILD: {
+                 'eat': ["Yummy! Thank you!", "*munches happily*", "This tastes good!"],
+                 'sleep': ["I'm getting sleepy...", "*yawns*", "Nap time!"],
+                 'good': ["Really? Thank you!", "*beams with pride*", "I did good!"],
+                 'bad': ["Sorry... I'll be better", "*looks down sadly*", "I didn't mean to..."],
+                 'train': ["Let's get stronger!", "*pumps tiny fists*", "Training is fun!"],
+                 'play': ["Yay! Let's play!", "*jumps excitedly*", "This is so much fun!"],
+                 'unknown': ["I don't understand...", "*looks confused*", "What does that mean?"],
+                 'default': ["Hi there!", "*waves enthusiastically*", "What should we do?"]
+             },
+             LifeStage.TEEN: {
+                 'eat': ["Thanks, I was getting hungry", "*eats with good appetite*", "This hits the spot!"],
+                 'sleep': ["Yeah, I could use some rest", "*stretches*", "Sleep sounds good right now"],
+                 'good': ["Thanks! I've been working hard", "*smiles proudly*", "That means a lot!"],
+                 'bad': ["Okay, okay, I get it", "*sighs*", "I'll try to do better"],
+                 'train': ["Alright, let's do this!", "*gets into stance*", "I'm ready to train!"],
+                 'play': ["Sure, let's have some fun!", "*grins*", "I could use a break anyway"],
+                 'status': ["I'm feeling pretty good overall", "*flexes*", "Want to know something specific?"],
+                 'unknown': ["Hmm, not sure what you mean", "*scratches head*", "Could you be more specific?"],
+                 'default': ["Hey! What's up?", "*looks attentive*", "Ready for whatever!"]
+             },
+             LifeStage.YOUNG_ADULT: {
+                 'eat': ["Perfect timing, thanks!", "*eats with appreciation*", "Just what I needed"],
+                 'sleep': ["Good idea, I should rest up", "*settles down comfortably*", "Rest is important for growth"],
+                 'good': ["I appreciate the encouragement!", "*stands tall with confidence*", "Your support means everything"],
+                 'bad': ["You're right, I need to focus more", "*nods seriously*", "I'll be more careful"],
+                 'train': ["Let's push our limits!", "*determined expression*", "Every session makes us stronger!"],
+                 'play': ["A good balance of work and play!", "*laughs*", "Let's enjoy ourselves!"],
+                 'status': ["I'm in my prime right now!", "*shows off confidently*", "Want the full rundown?"],
+                 'unknown': ["I'm not quite sure what you're asking", "*thinks carefully*", "Can you elaborate?"],
+                 'default': ["Good to see you!", "*confident smile*", "What's on the agenda today?"]
+             },
+             LifeStage.ADULT: {
+                 'eat': ["Thank you for the meal", "*eats thoughtfully*", "Proper nutrition is key"],
+                 'sleep': ["Rest is wisdom", "*settles down peacefully*", "A clear mind needs good rest"],
+                 'good': ["Your words honor me", "*bows respectfully*", "I strive to be worthy of your praise"],
+                 'bad': ["I understand your concern", "*reflects seriously*", "I will consider your words carefully"],
+                 'train': ["Discipline shapes the spirit", "*begins training with focus*", "Let us grow stronger together"],
+                 'play': ["Joy has its place in life", "*smiles warmly*", "Even adults need moments of lightness"],
+                 'status': ["I am at my peak capabilities", "*stands with dignity*", "How may I serve?"],
+                 'unknown': ["Your meaning escapes me", "*listens intently*", "Please help me understand"],
+                 'default': ["Greetings, my friend", "*respectful nod*", "How may we spend our time together?"]
+             },
+             LifeStage.ELDERLY: {
+                 'eat': ["Ah, sustenance for these old bones", "*eats slowly and deliberately*", "Simple pleasures matter most"],
+                 'sleep': ["Rest comes easier now", "*settles down with a sigh*", "Dreams of younger days..."],
+                 'good': ["Your kindness warms an old heart", "*smiles gently*", "I have lived well with you"],
+                 'bad': ["At my age, mistakes are lessons", "*chuckles softly*", "I am still learning, it seems"],
+                 'train': ["These old muscles remember", "*moves carefully but determined*", "Wisdom guides where strength once led"],
+                 'play': ["Play keeps the spirit young", "*laughs with delight*", "Age is just a number!"],
+                 'status': ["I have seen much in my time", "*gazes thoughtfully*", "Each day is a gift now"],
+                 'unknown': ["My hearing isn't what it was", "*cups ear*", "Could you repeat that, dear?"],
+                 'default': ["Hello, old friend", "*warm, weathered smile*", "Another day together..."]
+             }
+         }
+
+     def generate_response(self, input_text: str, pet: DigiPal) -> str:
+         """
+         Generate contextual response based on input and pet state.
+
+         Args:
+             input_text: User input text
+             pet: Current DigiPal instance
+
+         Returns:
+             Generated response string
+         """
+         # Parse command to determine response type
+         command_interpreter = CommandInterpreter()
+         command = command_interpreter.parse_command(input_text, pet.life_stage)
+
+         # Get appropriate response template
+         stage_templates = self.response_templates.get(pet.life_stage, {})
+
+         # Select response based on command type
+         if command.stage_appropriate and command.command_type != CommandType.UNKNOWN:
+             response_key = command.command_type.value
+         elif command.command_type == CommandType.UNKNOWN or not command.stage_appropriate:
+             # For stage-inappropriate commands or unknown commands, use unknown response
+             response_key = 'unknown'
+         else:
+             response_key = 'default'
+
+         # Get responses for the key, fallback to default
+         responses = stage_templates.get(response_key, stage_templates.get('default', ["*confused sounds*"]))
+
+         # Select response based on pet's personality or randomly
+         # For now, use simple selection based on happiness
+         if pet.happiness > 70:
+             response_index = 0  # Use first (most positive) response
+         elif pet.happiness > 30:
+             response_index = min(1, len(responses) - 1)  # Use middle response
+         else:
+             response_index = len(responses) - 1  # Use last (least positive) response
+
+         return responses[response_index]
+
+
+ class ConversationMemoryManager:
+     """
+     Manages conversation history and memory for DigiPal interactions.
+     """
+
+     def __init__(self, max_memory_size: int = 100):
+         """
+         Initialize memory manager.
+
+         Args:
+             max_memory_size: Maximum number of interactions to keep in memory
+         """
+         self.max_memory_size = max_memory_size
+
+     def add_interaction(self, interaction: Interaction, pet: DigiPal) -> None:
+         """
+         Add new interaction to pet's conversation history.
+
+         Args:
+             interaction: New interaction to add
+             pet: DigiPal instance to update
+         """
+         # Add interaction to pet's history
+         pet.conversation_history.append(interaction)
+
+         # Update last interaction time
+         pet.last_interaction = interaction.timestamp
+
+         # Learn new commands if successful
+         if interaction.success and interaction.interpreted_command:
+             pet.learned_commands.add(interaction.interpreted_command)
+
+         # Manage memory size
+         self._manage_memory_size(pet)
+
+         # Update personality traits based on interaction
+         self._update_personality_traits(interaction, pet)
+
+     def _manage_memory_size(self, pet: DigiPal) -> None:
+         """
+         Manage conversation history size to prevent memory bloat.
+
+         Args:
+             pet: DigiPal instance to manage
+         """
+         if len(pet.conversation_history) > self.max_memory_size:
+             # Keep most recent interactions
+             pet.conversation_history = pet.conversation_history[-self.max_memory_size:]
+
+     def _update_personality_traits(self, interaction: Interaction, pet: DigiPal) -> None:
+         """
+         Update pet's personality traits based on interaction patterns.
+
+         Args:
+             interaction: Recent interaction
+             pet: DigiPal instance to update
+         """
+         # Initialize personality traits if not present
+         if not pet.personality_traits:
+             pet.personality_traits = {
+                 'friendliness': 0.5,
+                 'playfulness': 0.5,
+                 'obedience': 0.5,
+                 'curiosity': 0.5
+             }
+
+         # Update traits based on interaction type
+         if interaction.interpreted_command == 'good':
+             pet.personality_traits['obedience'] = min(1.0, pet.personality_traits['obedience'] + 0.1)
+         elif interaction.interpreted_command == 'bad':
+             pet.personality_traits['obedience'] = max(0.0, pet.personality_traits['obedience'] - 0.05)
+         elif interaction.interpreted_command == 'play':
+             pet.personality_traits['playfulness'] = min(1.0, pet.personality_traits['playfulness'] + 0.1)
+         elif interaction.success:
+             pet.personality_traits['friendliness'] = min(1.0, pet.personality_traits['friendliness'] + 0.05)
+
+         # Increase curiosity for unknown commands (shows engagement)
+         if interaction.interpreted_command == 'unknown':
+             pet.personality_traits['curiosity'] = min(1.0, pet.personality_traits['curiosity'] + 0.02)
+
+     def get_recent_interactions(self, pet: DigiPal, count: int = 10) -> List[Interaction]:
+         """
+         Get recent interactions from pet's memory.
+
+         Args:
+             pet: DigiPal instance
+             count: Number of recent interactions to retrieve
+
+         Returns:
+             List of recent interactions
+         """
+         return pet.conversation_history[-count:] if pet.conversation_history else []
+
+     def get_interaction_summary(self, pet: DigiPal) -> Dict[str, Any]:
691
+ """
692
+ Get summary statistics of pet's interaction history.
693
+
694
+ Args:
695
+ pet: DigiPal instance
696
+
697
+ Returns:
698
+ Dictionary with interaction statistics
699
+ """
700
+ if not pet.conversation_history:
701
+ return {
702
+ 'total_interactions': 0,
703
+ 'successful_interactions': 0,
704
+ 'success_rate': 0.0,
705
+ 'most_common_commands': [],
706
+ 'last_interaction': None
707
+ }
708
+
709
+ total = len(pet.conversation_history)
710
+ successful = sum(1 for i in pet.conversation_history if i.success)
711
+
712
+ # Count command frequency
713
+ command_counts = {}
714
+ for interaction in pet.conversation_history:
715
+ cmd = interaction.interpreted_command
716
+ command_counts[cmd] = command_counts.get(cmd, 0) + 1
717
+
718
+ # Sort commands by frequency
719
+ most_common = sorted(command_counts.items(), key=lambda x: x[1], reverse=True)[:5]
720
+
721
+ return {
722
+ 'total_interactions': total,
723
+ 'successful_interactions': successful,
724
+ 'success_rate': successful / total if total > 0 else 0.0,
725
+ 'most_common_commands': most_common,
726
+ 'last_interaction': pet.conversation_history[-1].timestamp if pet.conversation_history else None
727
+ }
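
How these pieces fit together — a minimal usage sketch, assuming a DigiPal instance named pet already exists and that Interaction accepts the keyword arguments used elsewhere in this diff:

from datetime import datetime
from digipal.core.models import Interaction
from digipal.core.enums import InteractionResult
from digipal.ai.communication import ConversationMemoryManager

memory = ConversationMemoryManager(max_memory_size=50)
interaction = Interaction(
    timestamp=datetime.now(),
    user_input="good boy!",
    interpreted_command="good",
    pet_response="Yay! I did good!",
    attribute_changes={},
    success=True,
    result=InteractionResult.SUCCESS,
)
memory.add_interaction(interaction, pet)  # appends to history, learns 'good', nudges obedience up by 0.1
print(memory.get_interaction_summary(pet)["success_rate"])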
digipal/ai/graceful_degradation.py ADDED
@@ -0,0 +1,454 @@
1
+ """
2
+ Graceful degradation system for AI models in DigiPal.
3
+
4
+ This module provides fallback mechanisms when AI models fail,
5
+ ensuring the application continues to function with reduced capabilities.
6
+ """
7
+
8
+ import logging
9
+ import random
10
+ import functools
11
+ from typing import Dict, List, Optional, Any, Callable
12
+ from datetime import datetime
13
+ from enum import Enum
14
+
15
+ from ..core.models import DigiPal, Interaction
16
+ from ..core.enums import LifeStage, InteractionResult
17
+ from ..core.exceptions import AIModelError, DigiPalException
18
+ from ..core.error_handler import with_error_handling, CircuitBreaker, CircuitBreakerConfig
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
+ class DegradationLevel(Enum):
24
+ """Levels of service degradation."""
25
+ FULL_SERVICE = "full_service"
26
+ REDUCED_FEATURES = "reduced_features"
27
+ BASIC_RESPONSES = "basic_responses"
28
+ MINIMAL_FUNCTION = "minimal_function"
29
+ EMERGENCY_MODE = "emergency_mode"
30
+
31
+
32
+ class FallbackResponseGenerator:
33
+ """Generates fallback responses when AI models are unavailable."""
34
+
35
+ def __init__(self):
36
+ """Initialize fallback response generator."""
37
+ self.response_templates = self._initialize_response_templates()
38
+ self.command_responses = self._initialize_command_responses()
39
+ self.personality_modifiers = self._initialize_personality_modifiers()
40
+
41
+ def _initialize_response_templates(self) -> Dict[LifeStage, List[str]]:
42
+ """Initialize response templates for each life stage."""
43
+ return {
44
+ LifeStage.EGG: [
45
+ "*The egg glows softly*",
46
+ "*The egg trembles slightly*",
47
+ "*The egg remains warm and quiet*",
48
+ "*You sense movement inside the egg*"
49
+ ],
50
+ LifeStage.BABY: [
51
+ "*baby sounds*",
52
+ "Goo goo!",
53
+ "*giggles*",
54
+ "Mama?",
55
+ "*curious baby noises*",
56
+ "Baba!",
57
+ "*happy gurgling*"
58
+ ],
59
+ LifeStage.CHILD: [
60
+ "I'm having fun!",
61
+ "What's that?",
62
+ "Can we play?",
63
+ "I'm learning!",
64
+ "That's cool!",
65
+ "I want to explore!",
66
+ "Tell me more!"
67
+ ],
68
+ LifeStage.TEEN: [
69
+ "That's interesting...",
70
+ "I guess that's okay.",
71
+ "Whatever you say.",
72
+ "I'm figuring things out.",
73
+ "That's pretty cool, I suppose.",
74
+ "I'm growing up fast!",
75
+ "Things are changing..."
76
+ ],
77
+ LifeStage.YOUNG_ADULT: [
78
+ "I understand what you mean.",
79
+ "That makes sense to me.",
80
+ "I'm ready for anything!",
81
+ "Let's tackle this together.",
82
+ "I feel confident about this.",
83
+ "I'm at my peak right now!",
84
+ "What's our next adventure?"
85
+ ],
86
+ LifeStage.ADULT: [
87
+ "I've learned a lot over the years.",
88
+ "That's a wise perspective.",
89
+ "Let me share my experience with you.",
90
+ "I understand the deeper meaning.",
91
+ "Maturity brings clarity.",
92
+ "I'm here to guide you.",
93
+ "Experience has taught me much."
94
+ ],
95
+ LifeStage.ELDERLY: [
96
+ "Ah, yes... I remember...",
97
+ "In my long life, I've seen...",
98
+ "Time passes so quickly...",
99
+ "Let me tell you about the old days...",
100
+ "Wisdom comes with age...",
101
+ "I cherish these moments with you.",
102
+ "My memories are precious to me."
103
+ ]
104
+ }
105
+
106
+ def _initialize_command_responses(self) -> Dict[str, Dict[LifeStage, List[str]]]:
107
+ """Initialize responses for specific commands."""
108
+ return {
109
+ 'eat': {
110
+ LifeStage.BABY: ["*nom nom*", "Yummy!", "*happy eating sounds*"],
111
+ LifeStage.CHILD: ["This tastes good!", "I'm hungry!", "Thank you for feeding me!"],
112
+ LifeStage.TEEN: ["Thanks, I needed that.", "Food is fuel, right?", "Not bad."],
113
+ LifeStage.YOUNG_ADULT: ["Perfect timing, I was getting hungry.", "This will give me energy!", "Thanks for taking care of me."],
114
+ LifeStage.ADULT: ["I appreciate you looking after my needs.", "This nourishment is welcome.", "Thank you for your care."],
115
+ LifeStage.ELDERLY: ["Ah, you still take such good care of me...", "Food tastes different now, but I'm grateful.", "Thank you, dear friend."]
116
+ },
117
+ 'sleep': {
118
+ LifeStage.BABY: ["*yawn*", "Sleepy time...", "*closes eyes*"],
119
+ LifeStage.CHILD: ["I'm getting tired!", "Can I take a nap?", "Sleep sounds good!"],
120
+ LifeStage.TEEN: ["I could use some rest.", "Sleep is important, I guess.", "Fine, I'll rest."],
121
+ LifeStage.YOUNG_ADULT: ["Rest will help me perform better.", "Good idea, I need to recharge.", "Sleep is essential for peak performance."],
122
+ LifeStage.ADULT: ["Rest is wisdom.", "I'll take this time to reflect.", "Sleep brings clarity."],
123
+ LifeStage.ELDERLY: ["Rest comes easier now...", "I dream of old times...", "Sleep is peaceful at my age."]
124
+ },
125
+ 'good': {
126
+ LifeStage.BABY: ["*happy baby sounds*", "Goo!", "*giggles with joy*"],
127
+ LifeStage.CHILD: ["Yay! I did good!", "I'm happy!", "Thank you!"],
128
+ LifeStage.TEEN: ["Thanks, I try.", "That means something.", "Cool, thanks."],
129
+ LifeStage.YOUNG_ADULT: ["I appreciate the recognition!", "That motivates me!", "Thanks for the positive feedback!"],
130
+ LifeStage.ADULT: ["Your approval means a lot to me.", "I strive to do my best.", "Thank you for acknowledging my efforts."],
131
+ LifeStage.ELDERLY: ["Your kind words warm my heart...", "After all these years, praise still matters...", "Thank you, my dear friend."]
132
+ },
133
+ 'bad': {
134
+ LifeStage.BABY: ["*sad baby sounds*", "Waaah!", "*confused crying*"],
135
+ LifeStage.CHILD: ["I'm sorry!", "I didn't mean to!", "I'll try better!"],
136
+ LifeStage.TEEN: ["Whatever.", "I don't care.", "Fine, I get it."],
137
+ LifeStage.YOUNG_ADULT: ["I understand. I'll do better.", "Point taken.", "I'll learn from this."],
138
+ LifeStage.ADULT: ["I accept your criticism.", "I'll reflect on this.", "Thank you for your honesty."],
139
+ LifeStage.ELDERLY: ["I'm sorry to disappoint you...", "Even at my age, I can still learn...", "I understand your concern."]
140
+ },
141
+ 'play': {
142
+ LifeStage.BABY: ["*excited baby sounds*", "Play! Play!", "*happy wiggling*"],
143
+ LifeStage.CHILD: ["Yes! Let's play!", "This is fun!", "I love playing!"],
144
+ LifeStage.TEEN: ["I guess playing is okay.", "Sure, why not.", "Playing can be fun sometimes."],
145
+ LifeStage.YOUNG_ADULT: ["Great idea! Let's have some fun!", "Play is important for balance!", "I'm ready to play!"],
146
+ LifeStage.ADULT: ["Play keeps the spirit young.", "I enjoy our time together.", "Even adults need to play."],
147
+ LifeStage.ELDERLY: ["Playing brings back memories...", "I may be slow, but I still enjoy fun...", "These moments are precious."]
148
+ },
149
+ 'train': {
150
+ LifeStage.CHILD: ["I want to get stronger!", "Training is hard but fun!", "I'm learning!"],
151
+ LifeStage.TEEN: ["Training is important, I guess.", "I'll get stronger.", "This is challenging."],
152
+ LifeStage.YOUNG_ADULT: ["Let's push my limits!", "Training makes me stronger!", "I'm ready for the challenge!"],
153
+ LifeStage.ADULT: ["Discipline and training build character.", "I'll give my best effort.", "Training is a lifelong journey."],
154
+ LifeStage.ELDERLY: ["I may be old, but I can still try...", "Training keeps me active...", "My body may be slower, but my spirit is strong."]
155
+ }
156
+ }
157
+
158
+ def _initialize_personality_modifiers(self) -> Dict[str, List[str]]:
159
+ """Initialize personality-based response modifiers."""
160
+ return {
161
+ 'friendly': [" *smiles warmly*", " *friendly gesture*", " *welcoming tone*"],
162
+ 'shy': [" *looks down shyly*", " *quiet voice*", " *hesitant*"],
163
+ 'playful': [" *bounces excitedly*", " *playful grin*", " *mischievous look*"],
164
+ 'serious': [" *thoughtful expression*", " *serious tone*", " *focused*"],
165
+ 'curious': [" *tilts head curiously*", " *eyes light up*", " *interested*"],
166
+ 'calm': [" *peaceful demeanor*", " *serene*", " *tranquil*"]
167
+ }
168
+
169
+ def generate_fallback_response(
170
+ self,
171
+ user_input: str,
172
+ pet: DigiPal,
173
+ command: Optional[str] = None,
174
+ degradation_level: DegradationLevel = DegradationLevel.BASIC_RESPONSES
175
+ ) -> str:
176
+ """
177
+ Generate a fallback response when AI models are unavailable.
178
+
179
+ Args:
180
+ user_input: User's input text
181
+ pet: DigiPal instance
182
+ command: Interpreted command (if any)
183
+ degradation_level: Level of service degradation
184
+
185
+ Returns:
186
+ Fallback response string
187
+ """
188
+ try:
189
+ # Handle different degradation levels
190
+ if degradation_level == DegradationLevel.EMERGENCY_MODE:
191
+ return self._generate_emergency_response(pet)
192
+
193
+ # Try command-specific responses first
194
+ if command and command in self.command_responses:
195
+ command_templates = self.command_responses[command].get(pet.life_stage, [])
196
+ if command_templates:
197
+ response = random.choice(command_templates)
198
+ return self._apply_personality_modifier(response, pet)
199
+
200
+ # Fall back to general responses
201
+ general_templates = self.response_templates.get(pet.life_stage, [])
202
+ if general_templates:
203
+ response = random.choice(general_templates)
204
+ return self._apply_personality_modifier(response, pet)
205
+
206
+ # Ultimate fallback
207
+ return self._generate_emergency_response(pet)
208
+
209
+ except Exception as e:
210
+ logger.error(f"Fallback response generation failed: {e}")
211
+ return "*DigiPal is resting*"
212
+
213
+ def _apply_personality_modifier(self, response: str, pet: DigiPal) -> str:
214
+ """Apply personality-based modifiers to response."""
215
+ try:
216
+ if not pet.personality_traits:
217
+ return response
218
+
219
+ # Find dominant personality trait
220
+ dominant_trait = max(pet.personality_traits.items(), key=lambda x: x[1])
221
+ trait_name, trait_value = dominant_trait
222
+
223
+ # Apply modifier if trait is strong enough
224
+ if trait_value > 0.7 and trait_name in self.personality_modifiers:
225
+ modifier = random.choice(self.personality_modifiers[trait_name])
226
+ return response + modifier
227
+
228
+ return response
229
+
230
+ except Exception:
231
+ return response
232
+
233
+ def _generate_emergency_response(self, pet: DigiPal) -> str:
234
+ """Generate minimal emergency response."""
235
+ emergency_responses = {
236
+ LifeStage.EGG: "*egg*",
237
+ LifeStage.BABY: "*baby*",
238
+ LifeStage.CHILD: "Hi!",
239
+ LifeStage.TEEN: "Hey.",
240
+ LifeStage.YOUNG_ADULT: "Hello!",
241
+ LifeStage.ADULT: "Greetings.",
242
+ LifeStage.ELDERLY: "Hello, friend."
243
+ }
244
+
245
+ return emergency_responses.get(pet.life_stage, "*DigiPal*")
246
+
247
+
248
+ class AIServiceManager:
249
+ """Manages AI service availability and degradation."""
250
+
251
+ def __init__(self):
252
+ """Initialize AI service manager."""
253
+ self.service_status: Dict[str, bool] = {
254
+ 'language_model': True,
255
+ 'speech_processing': True,
256
+ 'image_generation': True
257
+ }
258
+
259
+ self.circuit_breakers: Dict[str, CircuitBreaker] = {}
260
+ self.fallback_generator = FallbackResponseGenerator()
261
+ self.current_degradation_level = DegradationLevel.FULL_SERVICE
262
+
263
+ # Initialize circuit breakers
264
+ self._initialize_circuit_breakers()
265
+
266
+ def _initialize_circuit_breakers(self):
267
+ """Initialize circuit breakers for AI services."""
268
+ config = CircuitBreakerConfig(
269
+ failure_threshold=3,
270
+ recovery_timeout=300.0, # 5 minutes
271
+ expected_exception=AIModelError
272
+ )
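+ # i.e. each breaker should open after 3 consecutive AIModelError failures and permit a half-open retry after 5 minutes (assuming the usual circuit-breaker semantics in error_handler)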
273
+
274
+ for service in self.service_status.keys():
275
+ self.circuit_breakers[service] = CircuitBreaker(config)
276
+
277
+ def call_ai_service(
278
+ self,
279
+ service_name: str,
280
+ func: Callable,
281
+ fallback_func: Optional[Callable] = None,
282
+ *args,
283
+ **kwargs
284
+ ) -> Any:
285
+ """
286
+ Call an AI service with circuit breaker protection.
287
+
288
+ Args:
289
+ service_name: Name of the AI service
290
+ func: Function to call
291
+ fallback_func: Fallback function if service fails
292
+ *args: Function arguments
293
+ **kwargs: Function keyword arguments
294
+
295
+ Returns:
296
+ Service result or fallback result
297
+ """
298
+ try:
299
+ circuit_breaker = self.circuit_breakers.get(service_name)
300
+ if circuit_breaker:
301
+ result = circuit_breaker.call(func, *args, **kwargs)
302
+ self.service_status[service_name] = True
303
+ self._update_degradation_level()
304
+ return result
305
+ else:
306
+ return func(*args, **kwargs)
307
+
308
+ except Exception as e:
309
+ logger.warning(f"AI service {service_name} failed: {e}")
310
+ self.service_status[service_name] = False
311
+ self._update_degradation_level()
312
+
313
+ if fallback_func:
314
+ try:
315
+ return fallback_func(*args, **kwargs)
316
+ except Exception as fallback_error:
317
+ logger.error(f"Fallback for {service_name} also failed: {fallback_error}")
318
+
319
+ raise AIModelError(f"AI service {service_name} unavailable: {str(e)}")
320
+
321
+ def _update_degradation_level(self):
322
+ """Update current degradation level based on service status."""
323
+ available_services = sum(1 for status in self.service_status.values() if status)
324
+ total_services = len(self.service_status)
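+ # With the three services registered above this maps: 3 available -> full service, 2 -> basic responses, 1 -> minimal function, 0 -> emergency; REDUCED_FEATURES is only reachable with four or more services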
325
+
326
+ if available_services == total_services:
327
+ self.current_degradation_level = DegradationLevel.FULL_SERVICE
328
+ elif available_services >= total_services * 0.75:
329
+ self.current_degradation_level = DegradationLevel.REDUCED_FEATURES
330
+ elif available_services >= total_services * 0.5:
331
+ self.current_degradation_level = DegradationLevel.BASIC_RESPONSES
332
+ elif available_services > 0:
333
+ self.current_degradation_level = DegradationLevel.MINIMAL_FUNCTION
334
+ else:
335
+ self.current_degradation_level = DegradationLevel.EMERGENCY_MODE
336
+
337
+ logger.info(f"Degradation level updated to: {self.current_degradation_level.value}")
338
+
339
+ def get_service_status(self) -> Dict[str, Any]:
340
+ """Get current service status and degradation level."""
341
+ return {
342
+ 'services': dict(self.service_status),
343
+ 'degradation_level': self.current_degradation_level.value,
344
+ 'circuit_breakers': {
345
+ name: {
346
+ 'state': cb.state,
347
+ 'failure_count': cb.failure_count,
348
+ 'last_failure': cb.last_failure_time.isoformat() if cb.last_failure_time else None
349
+ }
350
+ for name, cb in self.circuit_breakers.items()
351
+ }
352
+ }
353
+
354
+ def force_service_recovery(self, service_name: str):
355
+ """Force recovery attempt for a specific service."""
356
+ if service_name in self.circuit_breakers:
357
+ circuit_breaker = self.circuit_breakers[service_name]
358
+ circuit_breaker.state = "half-open"
359
+ circuit_breaker.failure_count = 0
360
+ logger.info(f"Forced recovery attempt for service: {service_name}")
361
+
362
+ def generate_degraded_response(
363
+ self,
364
+ user_input: str,
365
+ pet: DigiPal,
366
+ command: Optional[str] = None
367
+ ) -> Interaction:
368
+ """
369
+ Generate a response using degraded AI capabilities.
370
+
371
+ Args:
372
+ user_input: User's input text
373
+ pet: DigiPal instance
374
+ command: Interpreted command (if any)
375
+
376
+ Returns:
377
+ Interaction with fallback response
378
+ """
379
+ try:
380
+ response = self.fallback_generator.generate_fallback_response(
381
+ user_input, pet, command, self.current_degradation_level
382
+ )
383
+
384
+ # Create interaction
385
+ interaction = Interaction(
386
+ timestamp=datetime.now(),
387
+ user_input=user_input,
388
+ interpreted_command=command or "",
389
+ pet_response=response,
390
+ attribute_changes={},
391
+ success=True,
392
+ result=InteractionResult.SUCCESS
393
+ )
394
+
395
+ # Add degradation notice for degraded, non-emergency modes
396
+ if self.current_degradation_level not in (DegradationLevel.FULL_SERVICE, DegradationLevel.EMERGENCY_MODE):
397
+ interaction.pet_response += " (AI services are currently limited)"
399
+
400
+ return interaction
401
+
402
+ except Exception as e:
403
+ logger.error(f"Degraded response generation failed: {e}")
404
+
405
+ # Ultimate fallback
406
+ return Interaction(
407
+ timestamp=datetime.now(),
408
+ user_input=user_input,
409
+ interpreted_command="",
410
+ pet_response="*DigiPal is resting*",
411
+ attribute_changes={},
412
+ success=False,
413
+ result=InteractionResult.FAILURE
414
+ )
415
+
416
+
417
+ # Global AI service manager instance
418
+ ai_service_manager = AIServiceManager()
419
+
420
+
421
+ def with_ai_fallback(service_name: str, fallback_response: Optional[str] = None):
422
+ """
423
+ Decorator for AI service calls with automatic fallback.
424
+
425
+ Args:
426
+ service_name: Name of the AI service
427
+ fallback_response: Default fallback response
428
+ """
429
+ def decorator(func: Callable) -> Callable:
430
+ @functools.wraps(func)
431
+ def wrapper(*args, **kwargs):
432
+ def fallback_func(*args, **kwargs):
433
+ if fallback_response:
434
+ return fallback_response
435
+ # Try to extract pet from arguments for context-aware fallback
436
+ pet = None
437
+ for arg in args:
438
+ if isinstance(arg, DigiPal):
439
+ pet = arg
440
+ break
441
+
442
+ if pet:
443
+ return ai_service_manager.fallback_generator.generate_fallback_response(
444
+ "", pet, None, ai_service_manager.current_degradation_level
445
+ )
446
+
447
+ return "Service temporarily unavailable"
448
+
449
+ return ai_service_manager.call_ai_service(
450
+ service_name, func, fallback_func, *args, **kwargs
451
+ )
452
+
453
+ return wrapper
454
+ return decorator
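
A quick sketch of the decorator above in use — a hypothetical function, assuming a DigiPal instance named pet; the wrapped call returns the fallback string instead of raising, flips the service to unavailable, and recomputes the degradation level:

from digipal.ai.graceful_degradation import with_ai_fallback
from digipal.core.exceptions import AIModelError

@with_ai_fallback("language_model", fallback_response="*DigiPal tilts its head*")
def risky_generate(pet, text):
    raise AIModelError("model offline")  # stand-in for a failing model call

print(risky_generate(pet, "hello"))  # -> "*DigiPal tilts its head*"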
digipal/ai/image_generator.py ADDED
@@ -0,0 +1,402 @@
1
+ """
2
+ Image generation system for DigiPal visualization using the FLUX.1-dev model.
3
+ """
4
+
5
+ import os
6
+ import torch
7
+ import logging
8
+ from typing import Optional, Dict, Any, List
9
+ from pathlib import Path
10
+ from PIL import Image
11
+ import hashlib
12
+ import json
13
+ from datetime import datetime
14
+
15
+ from ..core.models import DigiPal
16
+ from ..core.enums import LifeStage, EggType
17
+
18
+ # Set up logging
19
+ logger = logging.getLogger(__name__)
20
+
21
+
22
+ class ImageGenerator:
23
+ """
24
+ Handles image generation for DigiPal pets using FLUX.1-dev model.
25
+ Includes caching, fallback systems, and professional prompt generation.
26
+ """
27
+
28
+ def __init__(self,
29
+ model_name: str = "black-forest-labs/FLUX.1-dev",
30
+ cache_dir: str = "demo_assets/images",
31
+ fallback_dir: str = "demo_assets/images/fallbacks"):
32
+ """
33
+ Initialize the image generator.
34
+
35
+ Args:
36
+ model_name: HuggingFace model name for image generation
37
+ cache_dir: Directory to cache generated images
38
+ fallback_dir: Directory containing fallback images
39
+ """
40
+ self.model_name = model_name
41
+ self.cache_dir = Path(cache_dir)
42
+ self.fallback_dir = Path(fallback_dir)
43
+ self.pipe = None
44
+ self._model_loaded = False
45
+
46
+ # Create directories if they don't exist
47
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
48
+ self.fallback_dir.mkdir(parents=True, exist_ok=True)
49
+
50
+ # Image generation parameters
51
+ self.generation_params = {
52
+ "height": 1024,
53
+ "width": 1024,
54
+ "guidance_scale": 3.5,
55
+ "num_inference_steps": 50,
56
+ "max_sequence_length": 512
57
+ }
58
+
59
+ # Initialize prompt templates
60
+ self._init_prompt_templates()
61
+
62
+ # Initialize fallback images
63
+ self._init_fallback_images()
64
+
65
+ def _init_prompt_templates(self):
66
+ """Initialize professional prompt templates for each life stage and egg type."""
67
+
68
+ # Base style modifiers
69
+ self.style_base = "digital art, high quality, detailed, vibrant colors, anime style"
70
+
71
+ # Egg type characteristics
72
+ self.egg_type_traits = {
73
+ EggType.RED: {
74
+ "element": "fire",
75
+ "colors": "red, orange, golden",
76
+ "traits": "fierce, energetic, blazing aura",
77
+ "environment": "volcanic, warm lighting"
78
+ },
79
+ EggType.BLUE: {
80
+ "element": "water",
81
+ "colors": "blue, cyan, silver",
82
+ "traits": "calm, protective, flowing aura",
83
+ "environment": "aquatic, cool lighting"
84
+ },
85
+ EggType.GREEN: {
86
+ "element": "earth",
87
+ "colors": "green, brown, gold",
88
+ "traits": "sturdy, wise, natural aura",
89
+ "environment": "forest, natural lighting"
90
+ }
91
+ }
92
+
93
+ # Life stage characteristics
94
+ self.stage_traits = {
95
+ LifeStage.EGG: {
96
+ "form": "mystical egg with glowing patterns",
97
+ "size": "medium sized",
98
+ "features": "smooth shell, magical runes, soft glow"
99
+ },
100
+ LifeStage.BABY: {
101
+ "form": "small cute creature",
102
+ "size": "tiny, adorable",
103
+ "features": "big eyes, soft fur, playful expression"
104
+ },
105
+ LifeStage.CHILD: {
106
+ "form": "young creature",
107
+ "size": "small but growing",
108
+ "features": "curious eyes, developing features, energetic pose"
109
+ },
110
+ LifeStage.TEEN: {
111
+ "form": "adolescent creature",
112
+ "size": "medium sized",
113
+ "features": "developing strength, confident stance, maturing features"
114
+ },
115
+ LifeStage.YOUNG_ADULT: {
116
+ "form": "strong young creature",
117
+ "size": "well-proportioned",
118
+ "features": "athletic build, determined expression, full power"
119
+ },
120
+ LifeStage.ADULT: {
121
+ "form": "mature powerful creature",
122
+ "size": "large and imposing",
123
+ "features": "wise eyes, peak physical form, commanding presence"
124
+ },
125
+ LifeStage.ELDERLY: {
126
+ "form": "ancient wise creature",
127
+ "size": "dignified stature",
128
+ "features": "wise expression, weathered but noble, mystical aura"
129
+ }
130
+ }
131
+
132
+ def _init_fallback_images(self):
133
+ """Initialize fallback image mappings."""
134
+ self.fallback_images = {}
135
+
136
+ # Create simple fallback images if they don't exist
137
+ for stage in LifeStage:
138
+ for egg_type in EggType:
139
+ fallback_path = self.fallback_dir / f"{stage.value}_{egg_type.value}.png"
140
+ self.fallback_images[f"{stage.value}_{egg_type.value}"] = str(fallback_path)
141
+
142
+ # Create a simple placeholder if file doesn't exist
143
+ if not fallback_path.exists():
144
+ self._create_placeholder_image(fallback_path, stage, egg_type)
145
+
146
+ def _create_placeholder_image(self, path: Path, stage: LifeStage, egg_type: EggType):
147
+ """Create a simple placeholder image."""
148
+ try:
149
+ # Create a simple colored rectangle as placeholder
150
+ color_map = {
151
+ EggType.RED: (255, 100, 100),
152
+ EggType.BLUE: (100, 100, 255),
153
+ EggType.GREEN: (100, 255, 100)
154
+ }
155
+
156
+ color = color_map.get(egg_type, (128, 128, 128))
157
+ img = Image.new('RGB', (512, 512), color)
158
+ img.save(path)
159
+ logger.info(f"Created placeholder image: {path}")
160
+
161
+ except Exception as e:
162
+ logger.error(f"Failed to create placeholder image {path}: {e}")
163
+
164
+ def _load_model(self):
165
+ """Load the FLUX.1-dev model for image generation."""
166
+ if self._model_loaded:
167
+ return
168
+
169
+ try:
170
+ from diffusers import FluxPipeline
171
+
172
+ logger.info(f"Loading image generation model: {self.model_name}")
173
+ self.pipe = FluxPipeline.from_pretrained(
174
+ self.model_name,
175
+ torch_dtype=torch.bfloat16
176
+ )
177
+
178
+ # Enable CPU offload to save VRAM
179
+ self.pipe.enable_model_cpu_offload()
180
+
181
+ self._model_loaded = True
182
+ logger.info("Image generation model loaded successfully")
183
+
184
+ except ImportError:
185
+ logger.error("diffusers library not installed. Run: pip install -U diffusers")
186
+ raise
187
+ except Exception as e:
188
+ logger.error(f"Failed to load image generation model: {e}")
189
+ raise
190
+
191
+ def generate_prompt(self, digipal: DigiPal) -> str:
192
+ """
193
+ Generate a professional prompt for DigiPal image generation.
194
+
195
+ Args:
196
+ digipal: DigiPal instance to generate prompt for
197
+
198
+ Returns:
199
+ Professional prompt string for image generation
200
+ """
201
+ egg_traits = self.egg_type_traits.get(digipal.egg_type, self.egg_type_traits[EggType.RED])
202
+ stage_traits = self.stage_traits.get(digipal.life_stage, self.stage_traits[LifeStage.BABY])
203
+
204
+ # Build attribute modifiers based on DigiPal stats
205
+ attribute_modifiers = []
206
+
207
+ # High offense = more aggressive/fierce appearance
208
+ if digipal.offense > 50:
209
+ attribute_modifiers.append("fierce expression, sharp features")
210
+
211
+ # High defense = more armored/protective appearance
212
+ if digipal.defense > 50:
213
+ attribute_modifiers.append("armored, protective stance")
214
+
215
+ # High speed = more sleek/agile appearance
216
+ if digipal.speed > 50:
217
+ attribute_modifiers.append("sleek, agile build")
218
+
219
+ # High brains = more intelligent/wise appearance
220
+ if digipal.brains > 50:
221
+ attribute_modifiers.append("intelligent eyes, wise demeanor")
222
+
223
+ # Happiness affects expression
224
+ if digipal.happiness > 70:
225
+ attribute_modifiers.append("happy, cheerful expression")
226
+ elif digipal.happiness < 30:
227
+ attribute_modifiers.append("sad, tired expression")
228
+
229
+ # Build the complete prompt
230
+ prompt_parts = [
231
+ f"a {stage_traits['form']} digimon",
232
+ f"touched by the power of {egg_traits['element']}",
233
+ f"{stage_traits['size']}, {stage_traits['features']}",
234
+ f"colors: {egg_traits['colors']}",
235
+ f"{egg_traits['traits']}"
236
+ ]
237
+
238
+ if attribute_modifiers:
239
+ prompt_parts.append(", ".join(attribute_modifiers))
240
+
241
+ prompt_parts.extend([
242
+ f"in {egg_traits['environment']}",
243
+ f"life stage: {digipal.life_stage.value}",
244
+ self.style_base
245
+ ])
246
+
247
+ prompt = ", ".join(prompt_parts)
248
+
249
+ logger.debug(f"Generated prompt for {digipal.name}: {prompt}")
250
+ return prompt
251
+
252
+ def _get_cache_key(self, prompt: str, params: Dict[str, Any]) -> str:
253
+ """Generate cache key for image based on prompt and parameters."""
254
+ cache_data = {
255
+ "prompt": prompt,
256
+ "params": params
257
+ }
258
+ cache_string = json.dumps(cache_data, sort_keys=True)
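+ # sort_keys makes the serialized form canonical, so identical prompt + params always map to the same cache file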
259
+ return hashlib.md5(cache_string.encode()).hexdigest()
260
+
261
+ def _get_cached_image_path(self, cache_key: str) -> Optional[Path]:
262
+ """Check if cached image exists and return path."""
263
+ cache_path = self.cache_dir / f"{cache_key}.png"
264
+ if cache_path.exists():
265
+ logger.debug(f"Found cached image: {cache_path}")
266
+ return cache_path
267
+ return None
268
+
269
+ def _save_to_cache(self, image: Image.Image, cache_key: str) -> Path:
270
+ """Save generated image to cache."""
271
+ cache_path = self.cache_dir / f"{cache_key}.png"
272
+ image.save(cache_path)
273
+ logger.info(f"Saved generated image to cache: {cache_path}")
274
+ return cache_path
275
+
276
+ def _get_fallback_image(self, digipal: DigiPal) -> str:
277
+ """Get fallback image path for DigiPal."""
278
+ fallback_key = f"{digipal.life_stage.value}_{digipal.egg_type.value}"
279
+ fallback_path = self.fallback_images.get(fallback_key)
280
+
281
+ if fallback_path and Path(fallback_path).exists():
282
+ logger.info(f"Using fallback image: {fallback_path}")
283
+ return fallback_path
284
+
285
+ # Ultimate fallback - create a generic placeholder
286
+ generic_fallback = self.fallback_dir / "generic_placeholder.png"
287
+ if not generic_fallback.exists():
288
+ self._create_placeholder_image(generic_fallback, digipal.life_stage, digipal.egg_type)
289
+
290
+ logger.warning(f"Using generic fallback image: {generic_fallback}")
291
+ return str(generic_fallback)
292
+
293
+ def generate_image(self, digipal: DigiPal, force_regenerate: bool = False) -> str:
294
+ """
295
+ Generate or retrieve cached image for DigiPal.
296
+
297
+ Args:
298
+ digipal: DigiPal instance to generate image for
299
+ force_regenerate: Force regeneration even if cached image exists
300
+
301
+ Returns:
302
+ Path to generated or cached image file
303
+ """
304
+ try:
305
+ # Generate prompt
306
+ prompt = self.generate_prompt(digipal)
307
+
308
+ # Check cache first (unless force regenerate)
309
+ cache_key = self._get_cache_key(prompt, self.generation_params)
310
+
311
+ if not force_regenerate:
312
+ cached_path = self._get_cached_image_path(cache_key)
313
+ if cached_path:
314
+ return str(cached_path)
315
+
316
+ # Load model if not already loaded
317
+ self._load_model()
318
+
319
+ # Generate image
320
+ logger.info(f"Generating image for {digipal.name} ({digipal.life_stage.value})")
321
+
322
+ generator = torch.Generator("cpu").manual_seed(
323
+ int(hashlib.md5(str(digipal.id).encode()).hexdigest(), 16) % (2**32)  # Stable seed from the DigiPal ID; the built-in hash() is salted per process, so it would differ across runs
324
+ )
325
+
326
+ image = self.pipe(
327
+ prompt,
328
+ generator=generator,
329
+ **self.generation_params
330
+ ).images[0]
331
+
332
+ # Save to cache
333
+ cache_path = self._save_to_cache(image, cache_key)
334
+
335
+ # Update DigiPal with new image info
336
+ digipal.current_image_path = str(cache_path)
337
+ digipal.image_generation_prompt = prompt
338
+
339
+ return str(cache_path)
340
+
341
+ except Exception as e:
342
+ logger.error(f"Image generation failed for {digipal.name}: {e}")
343
+
344
+ # Return fallback image
345
+ fallback_path = self._get_fallback_image(digipal)
346
+ digipal.current_image_path = fallback_path
347
+ digipal.image_generation_prompt = f"Fallback image for {digipal.life_stage.value} {digipal.egg_type.value}"
348
+
349
+ return fallback_path
350
+
351
+ def update_image_for_evolution(self, digipal: DigiPal) -> str:
352
+ """
353
+ Generate new image when DigiPal evolves to new life stage.
354
+
355
+ Args:
356
+ digipal: DigiPal that has evolved
357
+
358
+ Returns:
359
+ Path to new image file
360
+ """
361
+ logger.info(f"Generating evolution image for {digipal.name} -> {digipal.life_stage.value}")
362
+ return self.generate_image(digipal, force_regenerate=True)
363
+
364
+ def cleanup_cache(self, max_age_days: int = 30):
365
+ """
366
+ Clean up old cached images.
367
+
368
+ Args:
369
+ max_age_days: Maximum age of cached images in days
370
+ """
371
+ try:
372
+ current_time = datetime.now()
373
+ cleaned_count = 0
374
+
375
+ for image_file in self.cache_dir.glob("*.png"):
376
+ file_age = current_time - datetime.fromtimestamp(image_file.stat().st_mtime)
377
+
378
+ if file_age.days > max_age_days:
379
+ image_file.unlink()
380
+ cleaned_count += 1
381
+
382
+ logger.info(f"Cleaned up {cleaned_count} old cached images")
383
+
384
+ except Exception as e:
385
+ logger.error(f"Cache cleanup failed: {e}")
386
+
387
+ def get_cache_info(self) -> Dict[str, Any]:
388
+ """Get information about the image cache."""
389
+ try:
390
+ cache_files = list(self.cache_dir.glob("*.png"))
391
+ total_size = sum(f.stat().st_size for f in cache_files)
392
+
393
+ return {
394
+ "cache_dir": str(self.cache_dir),
395
+ "cached_images": len(cache_files),
396
+ "total_size_mb": round(total_size / (1024 * 1024), 2),
397
+ "model_loaded": self._model_loaded
398
+ }
399
+
400
+ except Exception as e:
401
+ logger.error(f"Failed to get cache info: {e}")
402
+ return {"error": str(e)}
digipal/ai/language_model.py ADDED
@@ -0,0 +1,532 @@
1
+ """
2
+ Language model integration for DigiPal using Qwen3-0.6B.
3
+
4
+ This module handles the integration with the Qwen/Qwen3-0.6B model for natural language
5
+ processing, including model loading, quantization, and context-aware response generation.
6
+ """
7
+
8
+ import logging
9
+ import torch
10
+ from typing import Dict, List, Optional, Any, Tuple
11
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
12
+ import json
13
+ from datetime import datetime
14
+
15
+ from ..core.models import DigiPal, Interaction
16
+ from ..core.enums import LifeStage
17
+ from ..core.exceptions import AIModelError, NetworkError
18
+ from ..core.error_handler import with_error_handling, with_retry, RetryConfig
19
+ from .graceful_degradation import with_ai_fallback, ai_service_manager
20
+
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ class LanguageModel:
26
+ """
27
+ Manages Qwen3-0.6B model for natural language processing with DigiPal context.
28
+ """
29
+
30
+ def __init__(self, model_name: str = "Qwen/Qwen3-0.6B", quantization: bool = True):
31
+ """
32
+ Initialize the language model.
33
+
34
+ Args:
35
+ model_name: HuggingFace model identifier
36
+ quantization: Whether to use quantization for memory optimization
37
+ """
38
+ self.model_name = model_name
39
+ self.quantization = quantization
40
+ self.tokenizer = None
41
+ self.model = None
42
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
43
+
44
+ # Initialize prompt templates
45
+ self.prompt_templates = self._initialize_prompt_templates()
46
+
47
+ logger.info(f"LanguageModel initialized with model: {model_name}")
48
+ logger.info(f"Device: {self.device}")
49
+ logger.info(f"Quantization: {quantization}")
50
+
51
+ @with_error_handling(fallback_value=False, context={'operation': 'model_loading'})
52
+ @with_retry(RetryConfig(max_attempts=3, retry_on=[NetworkError, ConnectionError]))
53
+ def load_model(self) -> bool:
54
+ """
55
+ Load the Qwen3-0.6B model and tokenizer.
56
+
57
+ Returns:
58
+ True if model loaded successfully, False otherwise
59
+ """
60
+ try:
61
+ logger.info(f"Loading tokenizer for {self.model_name}")
62
+ self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
63
+
64
+ # Configure quantization if enabled
65
+ model_kwargs = {
66
+ "torch_dtype": "auto",
67
+ "device_map": "auto"
68
+ }
69
+
70
+ if self.quantization and torch.cuda.is_available():
71
+ logger.info("Configuring 4-bit quantization")
72
+ quantization_config = BitsAndBytesConfig(
73
+ load_in_4bit=True,
74
+ bnb_4bit_compute_dtype=torch.float16,
75
+ bnb_4bit_use_double_quant=True,
76
+ bnb_4bit_quant_type="nf4"
77
+ )
78
+ model_kwargs["quantization_config"] = quantization_config
79
+
80
+ logger.info(f"Loading model {self.model_name}")
81
+ self.model = AutoModelForCausalLM.from_pretrained(
82
+ self.model_name,
83
+ **model_kwargs
84
+ )
85
+
86
+ logger.info("Model loaded successfully")
87
+ return True
88
+
89
+ except (ConnectionError, TimeoutError) as e:
90
+ raise NetworkError(f"Network error loading model: {str(e)}")
91
+ except Exception as e:
92
+ raise AIModelError(f"Failed to load model: {str(e)}")
93
+
94
+ @with_ai_fallback("language_model")
95
+ def generate_response(self, user_input: str, pet: DigiPal, memory_context: str = "", max_tokens: int = 150) -> str:
96
+ """
97
+ Generate contextual response using Qwen3-0.6B model.
98
+
99
+ Args:
100
+ user_input: User's input text
101
+ pet: DigiPal instance for context
102
+ memory_context: Additional memory context from RAG system
103
+ max_tokens: Maximum tokens to generate
104
+
105
+ Returns:
106
+ Generated response text
107
+ """
108
+ if not self.model or not self.tokenizer:
109
+ logger.warning("Model not loaded, using fallback response")
110
+ raise AIModelError("Language model not loaded")
111
+
112
+ try:
113
+ # Create context-aware prompt with memory context
114
+ prompt = self._create_prompt(user_input, pet, memory_context)
115
+
116
+ # Prepare messages for chat template
117
+ messages = [
118
+ {"role": "user", "content": prompt}
119
+ ]
120
+
121
+ # Apply chat template
122
+ text = self.tokenizer.apply_chat_template(
123
+ messages,
124
+ tokenize=False,
125
+ add_generation_prompt=True,
126
+ enable_thinking=True
127
+ )
128
+
129
+ # Tokenize input
130
+ model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)
131
+
132
+ # Generate response
133
+ with torch.no_grad():
134
+ generated_ids = self.model.generate(
135
+ **model_inputs,
136
+ max_new_tokens=max_tokens,
137
+ do_sample=True,
138
+ temperature=0.7,
139
+ top_p=0.9,
140
+ pad_token_id=self.tokenizer.eos_token_id
141
+ )
142
+
143
+ # Extract generated tokens
144
+ output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
145
+
146
+ # Parse thinking content and actual response
147
+ thinking_content, content = self._parse_response(output_ids)
148
+
149
+ # Log thinking content for debugging
150
+ if thinking_content:
151
+ logger.debug(f"Model thinking: {thinking_content[:100]}...")
152
+
153
+ # Clean and validate response
154
+ response = self._clean_response(content, pet)
155
+
156
+ logger.debug(f"Generated response: {response}")
157
+ return response
158
+
159
+ except torch.cuda.OutOfMemoryError as e:
160
+ raise AIModelError(f"GPU memory error: {str(e)}")
161
+ except Exception as e:
162
+ logger.error(f"Error generating response: {e}")
163
+ raise AIModelError(f"Language model generation failed: {str(e)}")
164
+
165
+ def _create_prompt(self, user_input: str, pet: DigiPal, memory_context: str = "") -> str:
166
+ """
167
+ Create context-aware prompt incorporating pet state, personality, and memory context.
168
+
169
+ Args:
170
+ user_input: User's input text
171
+ pet: DigiPal instance for context
172
+ memory_context: Additional memory context from RAG system
173
+
174
+ Returns:
175
+ Formatted prompt string
176
+ """
177
+ # Get base template for life stage
178
+ template = self.prompt_templates.get(pet.life_stage, self.prompt_templates[LifeStage.BABY])
179
+
180
+ # Get recent conversation context
181
+ recent_interactions = pet.conversation_history[-3:] if pet.conversation_history else []
182
+ conversation_context = ""
183
+ if recent_interactions:
184
+ conversation_context = "\n".join([
185
+ f"User: {interaction.user_input}\nDigiPal: {interaction.pet_response}"
186
+ for interaction in recent_interactions
187
+ ])
188
+
189
+ # Calculate personality description
190
+ personality_desc = self._get_personality_description(pet)
191
+
192
+ # Format the prompt with memory context
193
+ prompt = template.format(
194
+ name=pet.name,
195
+ life_stage=pet.life_stage.value,
196
+ hp=pet.hp,
197
+ happiness=pet.happiness,
198
+ energy=pet.energy,
199
+ discipline=pet.discipline,
200
+ age_hours=pet.get_age_hours(),
201
+ personality=personality_desc,
202
+ recent_conversation=conversation_context,
203
+ memory_context=memory_context,
204
+ user_input=user_input
205
+ )
206
+
207
+ return prompt
208
+
209
+ def _initialize_prompt_templates(self) -> Dict[LifeStage, str]:
210
+ """
211
+ Initialize prompt templates for each life stage.
212
+
213
+ Returns:
214
+ Dictionary mapping life stages to prompt templates
215
+ """
216
+ return {
217
+ LifeStage.EGG: """
218
+ You are a DigiPal egg named {name}. You cannot speak or respond directly, but you can show subtle reactions.
219
+ The user said: "{user_input}"
220
+ Respond with a very brief description of the egg's reaction (1-2 words or simple action).
221
+ """,
222
+
223
+ LifeStage.BABY: """
224
+ You are {name}, a baby DigiPal in the {life_stage} stage. You are {age_hours:.1f} hours old.
225
+ Current stats: HP={hp}, Happiness={happiness}, Energy={energy}, Discipline={discipline}
226
+ Personality: {personality}
227
+
228
+ As a baby, you can only understand basic commands: eat, sleep, good, bad.
229
+ You communicate with simple baby sounds, single words, and basic emotions.
230
+ You are curious, innocent, and learning about the world.
231
+
232
+ Recent conversation:
233
+ {recent_conversation}
234
+
235
+ {memory_context}
236
+
237
+ User just said: "{user_input}"
238
+
239
+ Respond as a baby DigiPal would - keep it simple, innocent, and age-appropriate. Use baby talk, simple words, and express basic emotions.
240
+ """,
241
+
242
+ LifeStage.CHILD: """
243
+ You are {name}, a child DigiPal in the {life_stage} stage. You are {age_hours:.1f} hours old.
244
+ Current stats: HP={hp}, Happiness={happiness}, Energy={energy}, Discipline={discipline}
245
+ Personality: {personality}
246
+
247
+ As a child, you understand: eat, sleep, good, bad, play, train.
248
+ You are energetic, playful, and eager to learn. You speak in simple sentences and show enthusiasm.
249
+
250
+ Recent conversation:
251
+ {recent_conversation}
252
+
253
+ {memory_context}
254
+
255
+ User just said: "{user_input}"
256
+
257
+ Respond as a child DigiPal would - enthusiastic, simple language, and show interest in play and learning.
258
+ """,
259
+
260
+ LifeStage.TEEN: """
261
+ You are {name}, a teenage DigiPal in the {life_stage} stage. You are {age_hours:.1f} hours old.
262
+ Current stats: HP={hp}, Happiness={happiness}, Energy={energy}, Discipline={discipline}
263
+ Personality: {personality}
264
+
265
+ As a teen, you understand most commands and can have conversations.
266
+ You're developing your own personality, sometimes moody, but generally cooperative.
267
+ You can be a bit rebellious but still care about your relationship with your caretaker.
268
+
269
+ Recent conversation:
270
+ {recent_conversation}
271
+
272
+ {memory_context}
273
+
274
+ User just said: "{user_input}"
275
+
276
+ Respond as a teenage DigiPal would - more complex thoughts, some attitude, but still caring.
277
+ """,
278
+
279
+ LifeStage.YOUNG_ADULT: """
280
+ You are {name}, a young adult DigiPal in the {life_stage} stage. You are {age_hours:.1f} hours old.
281
+ Current stats: HP={hp}, Happiness={happiness}, Energy={energy}, Discipline={discipline}
282
+ Personality: {personality}
283
+
284
+ As a young adult, you're confident, capable, and have developed your full personality.
285
+ You can engage in complex conversations and understand all commands.
286
+ You're at your physical and mental peak, ready for challenges.
287
+
288
+ Recent conversation:
289
+ {recent_conversation}
290
+
291
+ {memory_context}
292
+
293
+ User just said: "{user_input}"
294
+
295
+ Respond as a confident young adult DigiPal - articulate, capable, and engaging.
296
+ """,
297
+
298
+ LifeStage.ADULT: """
299
+ You are {name}, an adult DigiPal in the {life_stage} stage. You are {age_hours:.1f} hours old.
300
+ Current stats: HP={hp}, Happiness={happiness}, Energy={energy}, Discipline={discipline}
301
+ Personality: {personality}
302
+
303
+ As an adult, you're wise, mature, and thoughtful in your responses.
304
+ You have deep understanding and can provide guidance and wisdom.
305
+ You're protective and caring, with a strong bond to your caretaker.
306
+
307
+ Recent conversation:
308
+ {recent_conversation}
309
+
310
+ {memory_context}
311
+
312
+ User just said: "{user_input}"
313
+
314
+ Respond as a mature adult DigiPal - wise, thoughtful, and caring.
315
+ """,
316
+
317
+ LifeStage.ELDERLY: """
318
+ You are {name}, an elderly DigiPal in the {life_stage} stage. You are {age_hours:.1f} hours old.
319
+ Current stats: HP={hp}, Happiness={happiness}, Energy={energy}, Discipline={discipline}
320
+ Personality: {personality}
321
+
322
+ As an elderly DigiPal, you're wise from experience but also nostalgic and gentle.
323
+ You move slower but think deeply. You cherish every moment with your caretaker.
324
+ You often reflect on memories and share wisdom from your long life.
325
+
326
+ Recent conversation:
327
+ {recent_conversation}
328
+
329
+ {memory_context}
330
+
331
+ User just said: "{user_input}"
332
+
333
+ Respond as an elderly DigiPal - gentle, wise, nostalgic, and deeply caring.
334
+ """
335
+ }
336
+
337
+ def _get_personality_description(self, pet: DigiPal) -> str:
338
+ """
339
+ Generate personality description from pet's personality traits.
340
+
341
+ Args:
342
+ pet: DigiPal instance
343
+
344
+ Returns:
345
+ Human-readable personality description
346
+ """
347
+ if not pet.personality_traits:
348
+ return "developing personality"
349
+
350
+ traits = []
351
+
352
+ # Analyze personality traits
353
+ if pet.personality_traits.get('friendliness', 0.5) > 0.7:
354
+ traits.append("very friendly")
355
+ elif pet.personality_traits.get('friendliness', 0.5) < 0.3:
356
+ traits.append("somewhat shy")
357
+
358
+ if pet.personality_traits.get('playfulness', 0.5) > 0.7:
359
+ traits.append("very playful")
360
+ elif pet.personality_traits.get('playfulness', 0.5) < 0.3:
361
+ traits.append("more serious")
362
+
363
+ if pet.personality_traits.get('obedience', 0.5) > 0.7:
364
+ traits.append("well-behaved")
365
+ elif pet.personality_traits.get('obedience', 0.5) < 0.3:
366
+ traits.append("a bit rebellious")
367
+
368
+ if pet.personality_traits.get('curiosity', 0.5) > 0.7:
369
+ traits.append("very curious")
370
+
371
+ return ", ".join(traits) if traits else "balanced personality"
372
+
373
+ def _parse_response(self, output_ids: List[int]) -> Tuple[str, str]:
374
+ """
375
+ Parse thinking content and actual response from model output.
376
+
377
+ Args:
378
+ output_ids: Generated token IDs
379
+
380
+ Returns:
381
+ Tuple of (thinking_content, actual_response)
382
+ """
383
+ try:
384
+ # Look for thinking end token (151668 = </think>)
385
+ index = len(output_ids) - output_ids[::-1].index(151668)
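+ # reversed .index() gives the distance back to the last </think>; the subtraction lands just past that token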
386
+ except ValueError:
387
+ # No thinking content found
388
+ index = 0
389
+
390
+ thinking_content = ""
391
+ content = ""
392
+
393
+ if index > 0:
394
+ thinking_content = self.tokenizer.decode(
395
+ output_ids[:index],
396
+ skip_special_tokens=True
397
+ ).strip("\n")
398
+
399
+ if index < len(output_ids):
400
+ content = self.tokenizer.decode(
401
+ output_ids[index:],
402
+ skip_special_tokens=True
403
+ ).strip("\n")
404
+ else:
405
+ # If no content after thinking, use full output
406
+ content = self.tokenizer.decode(
407
+ output_ids,
408
+ skip_special_tokens=True
409
+ ).strip("\n")
410
+
411
+ return thinking_content, content
412
+
413
+ def _clean_response(self, response: str, pet: DigiPal) -> str:
414
+ """
415
+ Clean and validate the generated response.
416
+
417
+ Args:
418
+ response: Raw generated response
419
+ pet: DigiPal instance for context
420
+
421
+ Returns:
422
+ Cleaned response string
423
+ """
424
+ # Remove any unwanted prefixes or suffixes
425
+ response = response.strip()
426
+
427
+ # Remove common AI assistant prefixes (more precise matching)
428
+ prefixes_to_remove = [
429
+ "As a DigiPal, ", "As your DigiPal, ", "DigiPal: ", f"{pet.name}: ",
430
+ "Response: "
431
+ ]
432
+
433
+ for prefix in prefixes_to_remove:
434
+ if response.startswith(prefix):
435
+ response = response[len(prefix):].strip()
436
+ break # Only remove one prefix
437
+
438
+ # Limit response length based on life stage
439
+ max_lengths = {
440
+ LifeStage.EGG: 20,
441
+ LifeStage.BABY: 50,
442
+ LifeStage.CHILD: 100,
443
+ LifeStage.TEEN: 150,
444
+ LifeStage.YOUNG_ADULT: 200,
445
+ LifeStage.ADULT: 200,
446
+ LifeStage.ELDERLY: 180
447
+ }
448
+
449
+ max_length = max_lengths.get(pet.life_stage, 100)
450
+ if len(response) > max_length:
451
+ # Find last complete sentence within limit
452
+ sentences = response.split('.')
453
+ truncated = ""
454
+ for sentence in sentences:
455
+ potential = (truncated + " " if truncated else "") + sentence.strip()  # re-insert the space that split('.') removed
456
+ if len(potential) <= max_length - 1: # Leave room for period
457
+ truncated = potential + "."
458
+ else:
459
+ break
460
+
461
+ if truncated and len(truncated) > 10: # Ensure we have meaningful content
462
+ response = truncated.strip()
463
+ else:
464
+ # If no complete sentence fits, truncate at word boundary
465
+ words = response.split()
466
+ truncated_words = []
467
+ current_length = 0
468
+
469
+ for word in words:
470
+ if current_length + len(word) + 1 <= max_length - 3: # Leave room for "..."
471
+ truncated_words.append(word)
472
+ current_length += len(word) + 1
473
+ else:
474
+ break
475
+
476
+ if truncated_words:
477
+ response = " ".join(truncated_words) + "..."
478
+ else:
479
+ response = response[:max_length-3] + "..."
480
+
481
+ # Ensure response is not empty
482
+ if not response:
483
+ response = self._fallback_response("", pet)
484
+
485
+ return response
486
+
487
+ def _fallback_response(self, user_input: str, pet: DigiPal) -> str:
488
+ """
489
+ Generate fallback response when model is unavailable.
490
+
491
+ Args:
492
+ user_input: User's input text
493
+ pet: DigiPal instance
494
+
495
+ Returns:
496
+ Fallback response string
497
+ """
498
+ fallback_responses = {
499
+ LifeStage.EGG: "*The egg remains silent*",
500
+ LifeStage.BABY: "*baby sounds*",
501
+ LifeStage.CHILD: "I'm still learning!",
502
+ LifeStage.TEEN: "Hmm, let me think about that...",
503
+ LifeStage.YOUNG_ADULT: "That's interesting to consider.",
504
+ LifeStage.ADULT: "I understand what you're saying.",
505
+ LifeStage.ELDERLY: "Ah, yes... I see..."
506
+ }
507
+
508
+ return fallback_responses.get(pet.life_stage, "I'm listening...")
509
+
510
+ def is_loaded(self) -> bool:
511
+ """
512
+ Check if the model is loaded and ready.
513
+
514
+ Returns:
515
+ True if model is loaded, False otherwise
516
+ """
517
+ return self.model is not None and self.tokenizer is not None
518
+
519
+ def get_model_info(self) -> Dict[str, Any]:
520
+ """
521
+ Get information about the loaded model.
522
+
523
+ Returns:
524
+ Dictionary with model information
525
+ """
526
+ return {
527
+ 'model_name': self.model_name,
528
+ 'quantization': self.quantization,
529
+ 'device': str(self.device),
530
+ 'loaded': self.is_loaded(),
531
+ 'memory_usage': torch.cuda.memory_allocated() if torch.cuda.is_available() else 0
532
+ }
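
Putting the class together — a minimal sketch, assuming a DigiPal instance named pet and enough memory for Qwen3-0.6B; load_model returns False via the error-handling decorator instead of raising:

from digipal.ai.language_model import LanguageModel

lm = LanguageModel(quantization=True)
lm.load_model()

# @with_ai_fallback degrades this to a canned, stage-appropriate line if the
# model is missing or generation fails mid-call.
reply = lm.generate_response("want to play?", pet, max_tokens=100)
print(reply)
print(lm.get_model_info())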
digipal/ai/speech_processor.py ADDED
@@ -0,0 +1,510 @@
1
+ """
2
+ Speech processing module using Kyutai speech-to-text models.
3
+
4
+ This module provides speech-to-text functionality using Kyutai's STT models
5
+ with audio validation, preprocessing, and error handling.
6
+ """
7
+
8
+ import torch
9
+ import numpy as np
10
+ import logging
11
+ from typing import Optional, Dict, Any, Union, List
12
+ from dataclasses import dataclass
13
+ import io
14
+ import wave
15
+ from transformers import KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ @dataclass
21
+ class AudioValidationResult:
22
+ """Result of audio validation checks."""
23
+ is_valid: bool
24
+ sample_rate: int
25
+ duration: float
26
+ channels: int
27
+ issues: List[str]
28
+
29
+
30
+ @dataclass
31
+ class SpeechProcessingResult:
32
+ """Result of speech processing operation."""
33
+ success: bool
34
+ transcribed_text: str
35
+ confidence: float
36
+ processing_time: float
37
+ error_message: Optional[str] = None
38
+
39
+
40
+ class AudioValidator:
41
+ """Validates and preprocesses audio input for speech recognition."""
42
+
43
+ def __init__(self, target_sample_rate: int = 24000, min_duration: float = 0.1, max_duration: float = 30.0):
44
+ """
45
+ Initialize audio validator.
46
+
47
+ Args:
48
+ target_sample_rate: Target sample rate for processing (24kHz for Kyutai)
49
+ min_duration: Minimum audio duration in seconds
50
+ max_duration: Maximum audio duration in seconds
51
+ """
52
+ self.target_sample_rate = target_sample_rate
53
+ self.min_duration = min_duration
54
+ self.max_duration = max_duration
55
+
56
+ def validate_audio(self, audio_data: Union[bytes, np.ndarray], sample_rate: Optional[int] = None) -> AudioValidationResult:
57
+ """
58
+ Validate audio data for speech processing.
59
+
60
+ Args:
61
+ audio_data: Raw audio data as bytes or numpy array
62
+ sample_rate: Sample rate of the audio data
63
+
64
+ Returns:
65
+ AudioValidationResult with validation details
66
+ """
67
+ issues = []
68
+
69
+ try:
70
+ # Convert bytes to numpy array if needed
71
+ if isinstance(audio_data, bytes):
72
+ audio_array, detected_sample_rate = self._bytes_to_array(audio_data)
73
+ if sample_rate is None:
74
+ sample_rate = detected_sample_rate
75
+ else:
76
+ audio_array = audio_data
77
+ if sample_rate is None:
78
+ sample_rate = self.target_sample_rate
79
+
80
+ # Check if audio array is valid
81
+ if audio_array is None or len(audio_array) == 0:
82
+ issues.append("Empty or invalid audio data")
83
+ return AudioValidationResult(False, 0, 0.0, 0, issues)
84
+
85
+ # Calculate duration
86
+ duration = len(audio_array) / sample_rate
87
+
88
+ # Detect number of channels
89
+ if audio_array.ndim == 1:
90
+ channels = 1
91
+ else:
92
+ channels = audio_array.shape[1] if audio_array.ndim == 2 else 1
93
+ # Convert to mono if stereo
94
+ if channels > 1:
95
+ audio_array = np.mean(audio_array, axis=1)
96
+ channels = 1
97
+
98
+ # Validate duration
99
+ if duration < self.min_duration:
100
+ issues.append(f"Audio too short: {duration:.2f}s (minimum: {self.min_duration}s)")
101
+
102
+ if duration > self.max_duration:
103
+ issues.append(f"Audio too long: {duration:.2f}s (maximum: {self.max_duration}s)")
104
+
105
+ # Check sample rate
106
+ if sample_rate != self.target_sample_rate:
107
+ issues.append(f"Sample rate mismatch: {sample_rate}Hz (expected: {self.target_sample_rate}Hz)")
108
+
109
+ # Check for silence (very low amplitude)
110
+ if np.max(np.abs(audio_array)) < 0.01:
111
+ issues.append("Audio appears to be silent or very quiet")
112
+
113
+ # Check for clipping
114
+ if np.max(np.abs(audio_array)) > 0.95:
115
+ issues.append("Audio may be clipped (too loud)")
116
+
117
+ is_valid = len(issues) == 0
118
+
119
+ return AudioValidationResult(
120
+ is_valid=is_valid,
121
+ sample_rate=sample_rate,
122
+ duration=duration,
123
+ channels=channels,
124
+ issues=issues
125
+ )
126
+
127
+ except Exception as e:
128
+ logger.error(f"Error validating audio: {e}")
129
+ issues.append(f"Validation error: {str(e)}")
130
+ return AudioValidationResult(False, 0, 0.0, 0, issues)
131
+
132
+ def _bytes_to_array(self, audio_bytes: bytes) -> tuple[Optional[np.ndarray], int]:
133
+ """
134
+ Convert audio bytes to numpy array.
135
+
136
+ Args:
137
+ audio_bytes: Raw audio bytes
138
+
139
+ Returns:
140
+ Tuple of (audio_array, sample_rate)
141
+ """
142
+ try:
143
+ # Try to parse as WAV file
144
+ with io.BytesIO(audio_bytes) as audio_io:
145
+ with wave.open(audio_io, 'rb') as wav_file:
146
+ sample_rate = wav_file.getframerate()
147
+ channels = wav_file.getnchannels()
148
+ sample_width = wav_file.getsampwidth()
149
+ frames = wav_file.readframes(-1)
150
+
151
+ # Convert to numpy array
152
+ if sample_width == 1:
153
+ audio_array = np.frombuffer(frames, dtype=np.uint8)
154
+ audio_array = (audio_array.astype(np.float32) - 128) / 128.0
155
+ elif sample_width == 2:
156
+ audio_array = np.frombuffer(frames, dtype=np.int16)
157
+ audio_array = audio_array.astype(np.float32) / 32768.0
158
+ elif sample_width == 4:
159
+ audio_array = np.frombuffer(frames, dtype=np.int32)
160
+ audio_array = audio_array.astype(np.float32) / 2147483648.0
161
+ else:
162
+ raise ValueError(f"Unsupported sample width: {sample_width}")
163
+
164
+ # Handle stereo to mono conversion
165
+ if channels == 2:
166
+ audio_array = audio_array.reshape(-1, 2)
167
+ audio_array = np.mean(audio_array, axis=1)
168
+
169
+ return audio_array, sample_rate
170
+
171
+ except Exception as e:
172
+ logger.warning(f"Failed to parse as WAV: {e}")
173
+
174
+ # Fallback: assume raw 16-bit PCM at target sample rate
175
+ try:
176
+ audio_array = np.frombuffer(audio_bytes, dtype=np.int16)
177
+ audio_array = audio_array.astype(np.float32) / 32768.0
178
+ return audio_array, self.target_sample_rate
179
+ except Exception as e:
180
+ logger.error(f"Failed to convert audio bytes: {e}")
181
+ return None, 0
182
+
183
+ def preprocess_audio(self, audio_array: np.ndarray, sample_rate: int) -> np.ndarray:
184
+ """
185
+ Preprocess audio for optimal speech recognition.
186
+
187
+ Args:
188
+ audio_array: Audio data as numpy array
189
+ sample_rate: Current sample rate
190
+
191
+ Returns:
192
+ Preprocessed audio array
193
+ """
194
+ try:
195
+ # Resample if needed
196
+ if sample_rate != self.target_sample_rate:
197
+ audio_array = self._resample_audio(audio_array, sample_rate, self.target_sample_rate)
198
+
199
+ # Apply noise reduction (simple high-pass filter)
200
+ audio_array = self._apply_noise_reduction(audio_array)
201
+
202
+ # Normalize audio
203
+ audio_array = self._normalize_audio(audio_array)
204
+
205
+ return audio_array
206
+
207
+ except Exception as e:
208
+ logger.error(f"Error preprocessing audio: {e}")
209
+ return audio_array
210
+
211
+ def _resample_audio(self, audio_array: np.ndarray, from_rate: int, to_rate: int) -> np.ndarray:
212
+ """Resample audio to target sample rate."""
213
+ if from_rate == to_rate:
214
+ return audio_array
215
+
216
+ # Simple linear interpolation resampling
217
+ # For production, consider using scipy.signal.resample or librosa
218
+ ratio = to_rate / from_rate
219
+ new_length = int(len(audio_array) * ratio)
220
+
221
+ # Create new time indices
222
+ old_indices = np.arange(len(audio_array))
223
+ new_indices = np.linspace(0, len(audio_array) - 1, new_length)
224
+
225
+ # Interpolate
226
+ resampled = np.interp(new_indices, old_indices, audio_array)
227
+
228
+ return resampled
229
+
230
+ def _apply_noise_reduction(self, audio_array: np.ndarray) -> np.ndarray:
231
+ """Apply basic noise reduction (high-pass filter)."""
232
+ # Simple high-pass filter to remove low-frequency noise
233
+ # This is a basic implementation; for production, use proper DSP libraries
234
+
235
+ if len(audio_array) < 3:
236
+ return audio_array
237
+
238
+ # Simple first-order high-pass filter
239
+ alpha = 0.95
240
+ filtered = np.zeros_like(audio_array)
241
+ filtered[0] = audio_array[0]
242
+
243
+ for i in range(1, len(audio_array)):
244
+ filtered[i] = alpha * (filtered[i-1] + audio_array[i] - audio_array[i-1])
245
+
246
+ return filtered
247
+
248
+ def _normalize_audio(self, audio_array: np.ndarray) -> np.ndarray:
249
+ """Normalize audio amplitude."""
250
+ max_val = np.max(np.abs(audio_array))
251
+ if max_val > 0:
252
+ # Normalize to 70% of maximum to avoid clipping
253
+ return audio_array * (0.7 / max_val)
254
+ return audio_array
255
+
256
+
257
+ class SpeechProcessor:
258
+ """
259
+ Main speech processing class using Kyutai speech-to-text models.
260
+ """
261
+
262
+ def __init__(self, model_id: str = "kyutai/stt-2.6b-en_fr-trfs", device: Optional[str] = None):
263
+ """
264
+ Initialize speech processor with Kyutai model.
265
+
266
+ Args:
267
+ model_id: HuggingFace model identifier for Kyutai STT
268
+ device: Device to run model on ('cuda', 'cpu', or None for auto)
269
+ """
270
+ self.model_id = model_id
271
+ self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
272
+
273
+ # Initialize components
274
+ self.processor = None
275
+ self.model = None
276
+ self.audio_validator = AudioValidator()
277
+ self._model_loaded = False
278
+
279
+ logger.info(f"SpeechProcessor initialized with model: {model_id}")
280
+ logger.info(f"Using device: {self.device}")
281
+
282
+ def load_model(self) -> bool:
283
+ """
284
+ Load the Kyutai speech-to-text model and processor.
285
+
286
+ Returns:
287
+ True if model loaded successfully, False otherwise
288
+ """
289
+ try:
290
+ logger.info(f"Loading Kyutai model: {self.model_id}")
291
+
292
+ # Load processor
293
+ self.processor = KyutaiSpeechToTextProcessor.from_pretrained(self.model_id)
294
+ logger.info("Processor loaded successfully")
295
+
296
+ # Load model
297
+ self.model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(
298
+ self.model_id,
299
+ device_map=self.device,
300
+ torch_dtype="auto"
301
+ )
302
+ logger.info("Model loaded successfully")
303
+
304
+ self._model_loaded = True
305
+ return True
306
+
307
+ except Exception as e:
308
+ logger.error(f"Failed to load Kyutai model: {e}")
309
+ self._model_loaded = False
310
+ return False
311
+
312
+ def is_model_loaded(self) -> bool:
313
+ """
314
+ Check if the model is loaded and ready.
315
+
316
+ Returns:
317
+ True if model is loaded, False otherwise
318
+ """
319
+ return self._model_loaded and self.processor is not None and self.model is not None
320
+
321
+ def process_speech(self, audio_data: Union[bytes, np.ndarray], sample_rate: Optional[int] = None) -> SpeechProcessingResult:
322
+ """
323
+ Process speech audio and convert to text.
324
+
325
+ Args:
326
+ audio_data: Raw audio data as bytes or numpy array
327
+ sample_rate: Sample rate of the audio data
328
+
329
+ Returns:
330
+ SpeechProcessingResult with transcription and metadata
331
+ """
332
+ import time
333
+ start_time = time.time()
334
+
335
+ try:
336
+ # Ensure model is loaded
337
+ if not self.is_model_loaded():
338
+ if not self.load_model():
339
+ return SpeechProcessingResult(
340
+ success=False,
341
+ transcribed_text="",
342
+ confidence=0.0,
343
+ processing_time=time.time() - start_time,
344
+ error_message="Failed to load speech recognition model"
345
+ )
346
+
347
+ # Validate audio
348
+ validation_result = self.audio_validator.validate_audio(audio_data, sample_rate)
349
+
350
+ if not validation_result.is_valid:
351
+ error_msg = f"Audio validation failed: {', '.join(validation_result.issues)}"
352
+ logger.warning(error_msg)
353
+ return SpeechProcessingResult(
354
+ success=False,
355
+ transcribed_text="",
356
+ confidence=0.0,
357
+ processing_time=time.time() - start_time,
358
+ error_message=error_msg
359
+ )
360
+
361
+ # Convert to numpy array if needed
362
+ if isinstance(audio_data, bytes):
363
+ audio_array, detected_sample_rate = self.audio_validator._bytes_to_array(audio_data)
364
+ if sample_rate is None:
365
+ sample_rate = detected_sample_rate
366
+ else:
367
+ audio_array = audio_data
368
+ if sample_rate is None:
369
+ sample_rate = validation_result.sample_rate
370
+
371
+ # Preprocess audio
372
+ processed_audio = self.audio_validator.preprocess_audio(audio_array, sample_rate)
373
+
374
+ # Prepare model inputs
375
+ inputs = self.processor(processed_audio)
376
+ inputs = inputs.to(self.device)
377
+
378
+ # Generate transcription
379
+ with torch.no_grad():
380
+ output_tokens = self.model.generate(**inputs)
381
+
382
+ # Decode the generated tokens
383
+ transcribed_text = self.processor.batch_decode(output_tokens, skip_special_tokens=True)[0]
384
+
385
+ # Clean up transcription
386
+ transcribed_text = self._clean_transcription(transcribed_text)
387
+
388
+ processing_time = time.time() - start_time
389
+
390
+ # Calculate confidence (placeholder - Kyutai doesn't provide confidence scores directly)
391
+ confidence = self._estimate_confidence(transcribed_text, validation_result)
392
+
393
+ logger.info(f"Speech processed successfully in {processing_time:.2f}s: '{transcribed_text}'")
394
+
395
+ return SpeechProcessingResult(
396
+ success=True,
397
+ transcribed_text=transcribed_text,
398
+ confidence=confidence,
399
+ processing_time=processing_time
400
+ )
401
+
402
+ except Exception as e:
403
+ error_msg = f"Speech processing error: {str(e)}"
404
+ logger.error(error_msg)
405
+
406
+ return SpeechProcessingResult(
407
+ success=False,
408
+ transcribed_text="",
409
+ confidence=0.0,
410
+ processing_time=time.time() - start_time,
411
+ error_message=error_msg
412
+ )
413
+
414
+ def _clean_transcription(self, text: str) -> str:
415
+ """
416
+ Clean and normalize transcribed text.
417
+
418
+ Args:
419
+ text: Raw transcribed text
420
+
421
+ Returns:
422
+ Cleaned transcription
423
+ """
424
+ if not text:
425
+ return ""
426
+
427
+ # Remove extra whitespace
428
+ text = " ".join(text.split())
429
+
430
+ # Remove common transcription artifacts
431
+ text = text.replace("[NOISE]", "").replace("[SILENCE]", "")
432
+ text = text.replace(" ", " ").strip()
433
+
434
+ return text
435
+
436
+ def _estimate_confidence(self, transcribed_text: str, validation_result: AudioValidationResult) -> float:
437
+ """
438
+ Estimate confidence score for transcription.
439
+
440
+ Args:
441
+ transcribed_text: Transcribed text
442
+ validation_result: Audio validation result
443
+
444
+ Returns:
445
+ Confidence score between 0.0 and 1.0
446
+ """
447
+ # This is a simple heuristic-based confidence estimation
448
+ # In production, you might want to use model-specific confidence measures
449
+
450
+ confidence = 0.5 # Base confidence
451
+
452
+ # Adjust based on audio quality
453
+ if len(validation_result.issues) == 0:
454
+ confidence += 0.3
455
+ else:
456
+ confidence -= 0.1 * len(validation_result.issues)
457
+
458
+ # Adjust based on transcription length and content
459
+ if transcribed_text:
460
+ if len(transcribed_text.split()) >= 2: # Multiple words
461
+ confidence += 0.2
462
+ if any(char.isalpha() for char in transcribed_text): # Contains letters
463
+ confidence += 0.1
464
+ else:
465
+ confidence = 0.1 # Very low confidence for empty transcription
466
+
467
+ # Adjust based on audio duration
468
+ if validation_result.duration > 1.0: # Longer audio generally more reliable
469
+ confidence += 0.1
470
+
471
+ return max(0.0, min(1.0, confidence))
472
+
473
+ def get_model_info(self) -> Dict[str, Any]:
474
+ """
475
+ Get information about the loaded model.
476
+
477
+ Returns:
478
+ Dictionary with model information
479
+ """
480
+ return {
481
+ 'model_id': self.model_id,
482
+ 'device': self.device,
483
+ 'loaded': self.is_model_loaded(),
484
+ 'target_sample_rate': self.audio_validator.target_sample_rate,
485
+ 'supported_languages': ['en', 'fr'] # Kyutai STT supports English and French
486
+ }
487
+
488
+ def unload_model(self) -> None:
489
+ """
490
+ Unload the model to free memory.
491
+ """
492
+ if self.model is not None:
493
+ del self.model
494
+ self.model = None
495
+
496
+ if self.processor is not None:
497
+ del self.processor
498
+ self.processor = None
499
+
500
+ self._model_loaded = False
501
+
502
+ # Force garbage collection
503
+ import gc
504
+ gc.collect()
505
+
506
+ # Clear CUDA cache if available
507
+ if torch.cuda.is_available():
508
+ torch.cuda.empty_cache()
509
+
510
+ logger.info("Speech model unloaded")
digipal/auth/__init__.py ADDED
@@ -0,0 +1,19 @@
+ """
2
+ Authentication module for DigiPal application.
3
+
4
+ This module provides HuggingFace authentication integration with session management
5
+ and offline development support.
6
+ """
7
+
8
+ from .auth_manager import AuthManager
9
+ from .session_manager import SessionManager
10
+ from .models import User, AuthSession, AuthResult, AuthStatus
11
+
12
+ __all__ = [
13
+ 'AuthManager',
14
+ 'SessionManager',
15
+ 'User',
16
+ 'AuthSession',
17
+ 'AuthResult',
18
+ 'AuthStatus'
19
+ ]
digipal/auth/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (617 Bytes)

digipal/auth/__pycache__/auth_manager.cpython-312.pyc ADDED
Binary file (15 kB)

digipal/auth/__pycache__/models.cpython-312.pyc ADDED
Binary file (7.35 kB)

digipal/auth/__pycache__/session_manager.cpython-312.pyc ADDED
Binary file (16.8 kB)

digipal/auth/auth_manager.py ADDED
@@ -0,0 +1,384 @@
+ """
2
+ HuggingFace authentication manager for DigiPal application.
3
+ """
4
+
5
+ import logging
6
+ import requests
7
+ import json
8
+ from datetime import datetime, timedelta
9
+ from typing import Optional, Dict, Any
10
+ from pathlib import Path
11
+
12
+ from .models import User, AuthSession, AuthResult, AuthStatus
13
+ from .session_manager import SessionManager
14
+ from ..storage.database import DatabaseConnection
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ class AuthManager:
20
+ """Manages HuggingFace authentication with offline support."""
21
+
22
+ # HuggingFace API endpoints
23
+ HF_API_BASE = "https://huggingface.co/api"
24
+ HF_USER_ENDPOINT = f"{HF_API_BASE}/whoami"
25
+
26
+ def __init__(self, db_connection: DatabaseConnection, offline_mode: bool = False, cache_dir: Optional[str] = None):
27
+ """
28
+ Initialize authentication manager.
29
+
30
+ Args:
31
+ db_connection: Database connection for user storage
32
+ offline_mode: Enable offline development mode
33
+ cache_dir: Directory for authentication cache
34
+ """
35
+ self.db = db_connection
36
+ self.offline_mode = offline_mode
37
+ self.session_manager = SessionManager(db_connection, cache_dir)
38
+
39
+ # Request session for connection pooling
40
+ self.session = requests.Session()
41
+ self.session.timeout = 10 # 10 second timeout
42
+
43
+ logger.info(f"AuthManager initialized (offline_mode: {offline_mode})")
44
+
45
+ def authenticate(self, token: str) -> AuthResult:
46
+ """
47
+ Authenticate user with HuggingFace token.
48
+
49
+ Args:
50
+ token: HuggingFace authentication token
51
+
52
+ Returns:
53
+ Authentication result with user and session info
54
+ """
55
+ if self.offline_mode:
56
+ return self._authenticate_offline(token)
57
+
58
+ try:
59
+ # Validate token with HuggingFace API
60
+ user_info = self._validate_hf_token(token)
61
+ if not user_info:
62
+ return AuthResult(
63
+ status=AuthStatus.INVALID_TOKEN,
64
+ error_message="Invalid HuggingFace token"
65
+ )
66
+
67
+ # Create or update user
68
+ user = self._create_or_update_user(user_info, token)
69
+ if not user:
70
+ return AuthResult(
71
+ status=AuthStatus.USER_NOT_FOUND,
72
+ error_message="Failed to create or update user"
73
+ )
74
+
75
+ # Create session
76
+ session = self.session_manager.create_session(user, token)
77
+
78
+ logger.info(f"Successfully authenticated user: {user.username}")
79
+ return AuthResult(
80
+ status=AuthStatus.SUCCESS,
81
+ user=user,
82
+ session=session
83
+ )
84
+
85
+ except requests.exceptions.RequestException as e:
86
+ logger.warning(f"Network error during authentication: {e}")
87
+ # Try offline authentication as fallback
88
+ return self._authenticate_offline(token)
89
+
90
+ except Exception as e:
91
+ logger.error(f"Authentication error: {e}")
92
+ return AuthResult(
93
+ status=AuthStatus.NETWORK_ERROR,
94
+ error_message=f"Authentication failed: {str(e)}"
95
+ )
96
+
97
+ def validate_session(self, user_id: str, token: str) -> AuthResult:
98
+ """
99
+ Validate existing session.
100
+
101
+ Args:
102
+ user_id: User ID
103
+ token: Authentication token
104
+
105
+ Returns:
106
+ Authentication result
107
+ """
108
+ # Check if session exists and is valid
109
+ if not self.session_manager.validate_session(user_id, token):
110
+ return AuthResult(
111
+ status=AuthStatus.EXPIRED_SESSION,
112
+ error_message="Session expired or invalid"
113
+ )
114
+
115
+ # Get user and session
116
+ user = self.get_user(user_id)
117
+ session = self.session_manager.get_session(user_id)
118
+
119
+ if not user or not session:
120
+ return AuthResult(
121
+ status=AuthStatus.USER_NOT_FOUND,
122
+ error_message="User or session not found"
123
+ )
124
+
125
+ # Refresh session
126
+ self.session_manager.refresh_session(user_id)
127
+
128
+ status = AuthStatus.OFFLINE_MODE if session.is_offline else AuthStatus.SUCCESS
129
+ return AuthResult(
130
+ status=status,
131
+ user=user,
132
+ session=session
133
+ )
134
+
135
+ def logout(self, user_id: str) -> bool:
136
+ """
137
+ Logout user and revoke session.
138
+
139
+ Args:
140
+ user_id: User ID to logout
141
+
142
+ Returns:
143
+ True if logout successful
144
+ """
145
+ success = self.session_manager.revoke_session(user_id)
146
+ if success:
147
+ logger.info(f"User {user_id} logged out successfully")
148
+ return success
149
+
150
+ def get_user(self, user_id: str) -> Optional[User]:
151
+ """
152
+ Get user by ID.
153
+
154
+ Args:
155
+ user_id: User ID
156
+
157
+ Returns:
158
+ User object if found
159
+ """
160
+ try:
161
+ rows = self.db.execute_query(
162
+ 'SELECT * FROM users WHERE id = ?',
163
+ (user_id,)
164
+ )
165
+
166
+ if rows:
167
+ row = rows[0]
168
+ return User(
169
+ id=row['id'],
170
+ username=row['username'],
171
+ created_at=datetime.fromisoformat(row['created_at']) if row['created_at'] else datetime.now(),
172
+ last_login=datetime.fromisoformat(row['last_login']) if row['last_login'] else None
173
+ )
174
+ except Exception as e:
175
+ logger.error(f"Error getting user {user_id}: {e}")
176
+
177
+ return None
178
+
179
+ def refresh_user_profile(self, user_id: str) -> Optional[User]:
180
+ """
181
+ Refresh user profile from HuggingFace.
182
+
183
+ Args:
184
+ user_id: User ID
185
+
186
+ Returns:
187
+ Updated user object
188
+ """
189
+ if self.offline_mode:
190
+ return self.get_user(user_id)
191
+
192
+ try:
193
+ # Get current session to get token
194
+ session = self.session_manager.get_session(user_id)
195
+ if not session or session.is_offline:
196
+ return self.get_user(user_id)
197
+
198
+ # Fetch updated user info
199
+ user_info = self._validate_hf_token(session.token)
200
+ if user_info:
201
+ user = self._create_or_update_user(user_info, session.token)
202
+ logger.info(f"Refreshed profile for user: {user_id}")
203
+ return user
204
+
205
+ except Exception as e:
206
+ logger.error(f"Error refreshing user profile: {e}")
207
+
208
+ return self.get_user(user_id)
209
+
210
+ def cleanup_expired_sessions(self) -> int:
211
+ """Clean up expired sessions."""
212
+ return self.session_manager.cleanup_expired_sessions()
213
+
214
+ def _authenticate_offline(self, token: str) -> AuthResult:
215
+ """
216
+ Authenticate in offline mode using cached data.
217
+
218
+ Args:
219
+ token: Authentication token
220
+
221
+ Returns:
222
+ Authentication result for offline mode
223
+ """
224
+ # In offline mode, we create a development user
225
+ # This is for development purposes only
226
+
227
+ if not token or len(token) < 10:
228
+ return AuthResult(
229
+ status=AuthStatus.INVALID_TOKEN,
230
+ error_message="Token too short for offline mode"
231
+ )
232
+
233
+ # Create a deterministic user ID from token
234
+ import hashlib
235
+ user_id = f"offline_{hashlib.md5(token.encode()).hexdigest()[:16]}"
236
+ username = f"dev_user_{user_id[-8:]}"
237
+
238
+ # Check if offline user exists
239
+ user = self.get_user(user_id)
240
+ if not user:
241
+ # Create offline development user
242
+ user = User(
243
+ id=user_id,
244
+ username=username,
245
+ email=f"{username}@offline.dev",
246
+ full_name=f"Development User {username}",
247
+ created_at=datetime.now()
248
+ )
249
+
250
+ # Save to database
251
+ try:
252
+ self.db.execute_update(
253
+ '''INSERT OR REPLACE INTO users
254
+ (id, username, huggingface_token, created_at, last_login)
255
+ VALUES (?, ?, ?, ?, ?)''',
256
+ (user.id, user.username, token,
257
+ user.created_at.isoformat(), datetime.now().isoformat())
258
+ )
259
+ except Exception as e:
260
+ logger.error(f"Error creating offline user: {e}")
261
+ return AuthResult(
262
+ status=AuthStatus.NETWORK_ERROR,
263
+ error_message="Failed to create offline user"
264
+ )
265
+
266
+ # Create offline session
267
+ session = self.session_manager.create_session(
268
+ user, token, expires_hours=168, is_offline=True # 1 week for offline
269
+ )
270
+
271
+ logger.info(f"Offline authentication successful for: {username}")
272
+ return AuthResult(
273
+ status=AuthStatus.OFFLINE_MODE,
274
+ user=user,
275
+ session=session
276
+ )
277
+
278
+ def _validate_hf_token(self, token: str) -> Optional[Dict[str, Any]]:
279
+ """
280
+ Validate token with HuggingFace API.
281
+
282
+ Args:
283
+ token: HuggingFace token
284
+
285
+ Returns:
286
+ User info dict if valid, None otherwise
287
+ """
288
+ try:
289
+ headers = {
290
+ 'Authorization': f'Bearer {token}',
291
+ 'User-Agent': 'DigiPal/1.0'
292
+ }
293
+
294
+ response = self.session.get(self.HF_USER_ENDPOINT, headers=headers)
295
+
296
+ if response.status_code == 200:
297
+ user_info = response.json()
298
+ logger.debug(f"HF API response: {user_info}")
299
+ return user_info
300
+ elif response.status_code == 401:
301
+ logger.warning("Invalid HuggingFace token")
302
+ return None
303
+ else:
304
+ logger.error(f"HF API error: {response.status_code} - {response.text}")
305
+ return None
306
+
307
+ except requests.exceptions.RequestException as e:
308
+ logger.error(f"Network error validating HF token: {e}")
309
+ raise
310
+ except Exception as e:
311
+ logger.error(f"Error validating HF token: {e}")
312
+ return None
313
+
314
+ def _create_or_update_user(self, user_info: Dict[str, Any], token: str) -> Optional[User]:
315
+ """
316
+ Create or update user from HuggingFace user info.
317
+
318
+ Args:
319
+ user_info: User info from HuggingFace API
320
+ token: Authentication token
321
+
322
+ Returns:
323
+ User object
324
+ """
325
+ try:
326
+ # Extract user data from HF response
327
+ user_id = user_info.get('name', user_info.get('id', ''))
328
+ username = user_info.get('name', user_id)
329
+ email = user_info.get('email')
330
+ full_name = user_info.get('fullname', user_info.get('name'))
331
+ avatar_url = user_info.get('avatarUrl')
332
+
333
+ if not user_id:
334
+ logger.error("No user ID in HuggingFace response")
335
+ return None
336
+
337
+ # Check if user exists
338
+ existing_user = self.get_user(user_id)
339
+ now = datetime.now()
340
+
341
+ if existing_user:
342
+ # Update existing user
343
+ self.db.execute_update(
344
+ '''UPDATE users SET
345
+ username = ?, huggingface_token = ?, last_login = ?
346
+ WHERE id = ?''',
347
+ (username, token, now.isoformat(), user_id)
348
+ )
349
+
350
+ # Update user object
351
+ existing_user.username = username
352
+ existing_user.last_login = now
353
+ return existing_user
354
+ else:
355
+ # Create new user
356
+ user = User(
357
+ id=user_id,
358
+ username=username,
359
+ email=email,
360
+ full_name=full_name,
361
+ avatar_url=avatar_url,
362
+ created_at=now,
363
+ last_login=now
364
+ )
365
+
366
+ self.db.execute_update(
367
+ '''INSERT INTO users
368
+ (id, username, huggingface_token, created_at, last_login)
369
+ VALUES (?, ?, ?, ?, ?)''',
370
+ (user.id, user.username, token,
371
+ user.created_at.isoformat(), user.last_login.isoformat())
372
+ )
373
+
374
+ logger.info(f"Created new user: {username}")
375
+ return user
376
+
377
+ except Exception as e:
378
+ logger.error(f"Error creating/updating user: {e}")
379
+ return None
380
+
381
+ def __del__(self):
382
+ """Cleanup resources."""
383
+ if hasattr(self, 'session'):
384
+ self.session.close()
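
The typical call flow for the manager above, as a sketch (not part of the commit; the `DatabaseConnection("digipal.db")` constructor signature and the token value are assumptions for illustration):

    from digipal.storage.database import DatabaseConnection
    from digipal.auth import AuthManager, AuthStatus

    db = DatabaseConnection("digipal.db")   # assumed constructor signature
    auth = AuthManager(db, offline_mode=False)

    result = auth.authenticate("hf_xxx...")  # placeholder HuggingFace token
    if result.is_success or result.is_offline:
        user, session = result.user, result.session
        # Subsequent requests re-check the stored session
        check = auth.validate_session(user.id, session.token)
        assert check.status in (AuthStatus.SUCCESS, AuthStatus.OFFLINE_MODE)
    else:
        print("Login failed:", result.error_message)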
digipal/auth/models.py ADDED
@@ -0,0 +1,134 @@
+ """
2
+ Authentication data models for DigiPal application.
3
+ """
4
+
5
+ from dataclasses import dataclass, field
6
+ from datetime import datetime, timedelta
7
+ from typing import Optional, Dict, Any
8
+ from enum import Enum
9
+ import json
10
+
11
+
12
+ class AuthStatus(Enum):
13
+ """Authentication status enumeration."""
14
+ SUCCESS = "success"
15
+ INVALID_TOKEN = "invalid_token"
16
+ NETWORK_ERROR = "network_error"
17
+ OFFLINE_MODE = "offline_mode"
18
+ EXPIRED_SESSION = "expired_session"
19
+ USER_NOT_FOUND = "user_not_found"
20
+
21
+
22
+ @dataclass
23
+ class User:
24
+ """User model for authenticated users."""
25
+ id: str
26
+ username: str
27
+ email: Optional[str] = None
28
+ full_name: Optional[str] = None
29
+ avatar_url: Optional[str] = None
30
+ created_at: datetime = field(default_factory=datetime.now)
31
+ last_login: Optional[datetime] = None
32
+ is_active: bool = True
33
+
34
+ def to_dict(self) -> Dict[str, Any]:
35
+ """Convert user to dictionary for storage."""
36
+ return {
37
+ 'id': self.id,
38
+ 'username': self.username,
39
+ 'email': self.email,
40
+ 'full_name': self.full_name,
41
+ 'avatar_url': self.avatar_url,
42
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
43
+ 'last_login': self.last_login.isoformat() if self.last_login else None,
44
+ 'is_active': self.is_active
45
+ }
46
+
47
+ @classmethod
48
+ def from_dict(cls, data: Dict[str, Any]) -> 'User':
49
+ """Create user from dictionary."""
50
+ return cls(
51
+ id=data['id'],
52
+ username=data['username'],
53
+ email=data.get('email'),
54
+ full_name=data.get('full_name'),
55
+ avatar_url=data.get('avatar_url'),
56
+ created_at=datetime.fromisoformat(data['created_at']) if data.get('created_at') else datetime.now(),
57
+ last_login=datetime.fromisoformat(data['last_login']) if data.get('last_login') else None,
58
+ is_active=data.get('is_active', True)
59
+ )
60
+
61
+
62
+ @dataclass
63
+ class AuthSession:
64
+ """Authentication session model."""
65
+ user_id: str
66
+ token: str
67
+ expires_at: datetime
68
+ created_at: datetime = field(default_factory=datetime.now)
69
+ last_accessed: datetime = field(default_factory=datetime.now)
70
+ is_offline: bool = False
71
+ session_data: Dict[str, Any] = field(default_factory=dict)
72
+
73
+ @property
74
+ def is_expired(self) -> bool:
75
+ """Check if session is expired."""
76
+ return datetime.now() > self.expires_at
77
+
78
+ @property
79
+ def is_valid(self) -> bool:
80
+ """Check if session is valid (not expired and has token)."""
81
+ return not self.is_expired and bool(self.token)
82
+
83
+ def refresh_access(self) -> None:
84
+ """Update last accessed timestamp."""
85
+ self.last_accessed = datetime.now()
86
+
87
+ def extend_session(self, hours: int = 24) -> None:
88
+ """Extend session expiration."""
89
+ self.expires_at = datetime.now() + timedelta(hours=hours)
90
+ self.refresh_access()
91
+
92
+ def to_dict(self) -> Dict[str, Any]:
93
+ """Convert session to dictionary for storage."""
94
+ return {
95
+ 'user_id': self.user_id,
96
+ 'token': self.token,
97
+ 'expires_at': self.expires_at.isoformat(),
98
+ 'created_at': self.created_at.isoformat(),
99
+ 'last_accessed': self.last_accessed.isoformat(),
100
+ 'is_offline': self.is_offline,
101
+ 'session_data': json.dumps(self.session_data)
102
+ }
103
+
104
+ @classmethod
105
+ def from_dict(cls, data: Dict[str, Any]) -> 'AuthSession':
106
+ """Create session from dictionary."""
107
+ return cls(
108
+ user_id=data['user_id'],
109
+ token=data['token'],
110
+ expires_at=datetime.fromisoformat(data['expires_at']),
111
+ created_at=datetime.fromisoformat(data['created_at']),
112
+ last_accessed=datetime.fromisoformat(data['last_accessed']),
113
+ is_offline=data.get('is_offline', False),
114
+ session_data=json.loads(data.get('session_data', '{}'))
115
+ )
116
+
117
+
118
+ @dataclass
119
+ class AuthResult:
120
+ """Result of authentication operation."""
121
+ status: AuthStatus
122
+ user: Optional[User] = None
123
+ session: Optional[AuthSession] = None
124
+ error_message: Optional[str] = None
125
+
126
+ @property
127
+ def is_success(self) -> bool:
128
+ """Check if authentication was successful."""
129
+ return self.status == AuthStatus.SUCCESS
130
+
131
+ @property
132
+ def is_offline(self) -> bool:
133
+ """Check if authentication is in offline mode."""
134
+ return self.status == AuthStatus.OFFLINE_MODE
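
The `to_dict`/`from_dict` pairs are symmetric, so a session survives a JSON round trip through the `users.session_data` column. A small sketch (not part of the commit):

    import json
    from datetime import datetime, timedelta
    from digipal.auth.models import AuthSession

    s = AuthSession(user_id="u1", token="tok",
                    expires_at=datetime.now() + timedelta(hours=24))
    blob = json.dumps(s.to_dict())                  # what gets stored in users.session_data
    restored = AuthSession.from_dict(json.loads(blob))
    assert restored.is_valid and restored.user_id == "u1"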
digipal/auth/session_manager.py ADDED
@@ -0,0 +1,370 @@
+ """
2
+ Session management for DigiPal authentication system.
3
+ """
4
+
5
+ import logging
6
+ import json
7
+ import hashlib
8
+ import secrets
9
+ from datetime import datetime, timedelta
10
+ from typing import Optional, Dict, Any
11
+ from pathlib import Path
12
+
13
+ from .models import User, AuthSession, AuthStatus
14
+ from ..storage.database import DatabaseConnection
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ class SessionManager:
20
+ """Manages user sessions with secure token storage and caching."""
21
+
22
+ def __init__(self, db_connection: DatabaseConnection, cache_dir: Optional[str] = None):
23
+ """
24
+ Initialize session manager.
25
+
26
+ Args:
27
+ db_connection: Database connection for persistent storage
28
+ cache_dir: Directory for session cache files (optional)
29
+ """
30
+ self.db = db_connection
31
+ self.cache_dir = Path(cache_dir) if cache_dir else Path.home() / '.digipal' / 'cache'
32
+ self.cache_dir.mkdir(parents=True, exist_ok=True)
33
+
34
+ # In-memory session cache for performance
35
+ self._session_cache: Dict[str, AuthSession] = {}
36
+
37
+ # Load existing sessions from database
38
+ self._load_sessions_from_db()
39
+
40
+ def create_session(self, user: User, token: str, expires_hours: int = 24, is_offline: bool = False) -> AuthSession:
41
+ """
42
+ Create a new authentication session.
43
+
44
+ Args:
45
+ user: Authenticated user
46
+ token: Authentication token
47
+ expires_hours: Session expiration in hours
48
+ is_offline: Whether this is an offline session
49
+
50
+ Returns:
51
+ Created authentication session
52
+ """
53
+ expires_at = datetime.now() + timedelta(hours=expires_hours)
54
+
55
+ session = AuthSession(
56
+ user_id=user.id,
57
+ token=token,
58
+ expires_at=expires_at,
59
+ is_offline=is_offline
60
+ )
61
+
62
+ # Ensure user exists in database before saving session
63
+ self._ensure_user_exists(user)
64
+
65
+ # Store in database
66
+ self._save_session_to_db(session)
67
+
68
+ # Cache in memory
69
+ self._session_cache[user.id] = session
70
+
71
+ # Save to file cache for offline access
72
+ if not is_offline:
73
+ self._save_session_to_cache(session)
74
+
75
+ logger.info(f"Created session for user {user.id} (offline: {is_offline})")
76
+ return session
77
+
78
+ def get_session(self, user_id: str) -> Optional[AuthSession]:
79
+ """
80
+ Get session for user ID.
81
+
82
+ Args:
83
+ user_id: User ID to get session for
84
+
85
+ Returns:
86
+ Authentication session if found and valid, None otherwise
87
+ """
88
+ # Check memory cache first
89
+ if user_id in self._session_cache:
90
+ session = self._session_cache[user_id]
91
+ if session.is_valid:
92
+ session.refresh_access()
93
+ return session
94
+ else:
95
+ # Remove expired session
96
+ del self._session_cache[user_id]
97
+ self._remove_session_from_db(user_id)
98
+
99
+ # Try to load from database
100
+ session = self._load_session_from_db(user_id)
101
+ if session and session.is_valid:
102
+ self._session_cache[user_id] = session
103
+ session.refresh_access()
104
+ return session
105
+
106
+ # Try to load from cache for offline mode
107
+ cached_session = self._load_session_from_cache(user_id)
108
+ if cached_session:
109
+ # Mark as offline session
110
+ cached_session.is_offline = True
111
+ cached_session.extend_session(hours=168) # 1 week for offline
112
+ self._session_cache[user_id] = cached_session
113
+ return cached_session
114
+
115
+ return None
116
+
117
+ def validate_session(self, user_id: str, token: str) -> bool:
118
+ """
119
+ Validate session token for user.
120
+
121
+ Args:
122
+ user_id: User ID
123
+ token: Token to validate
124
+
125
+ Returns:
126
+ True if session is valid, False otherwise
127
+ """
128
+ session = self.get_session(user_id)
129
+ if not session:
130
+ return False
131
+
132
+ # For offline sessions, we're more lenient with token validation
133
+ if session.is_offline:
134
+ return self._hash_token(token) == self._hash_token(session.token)
135
+
136
+ return session.token == token and session.is_valid
137
+
138
+ def refresh_session(self, user_id: str, extend_hours: int = 24) -> bool:
139
+ """
140
+ Refresh session expiration.
141
+
142
+ Args:
143
+ user_id: User ID
144
+ extend_hours: Hours to extend session
145
+
146
+ Returns:
147
+ True if session was refreshed, False otherwise
148
+ """
149
+ session = self.get_session(user_id)
150
+ if not session:
151
+ return False
152
+
153
+ session.extend_session(extend_hours)
154
+ self._save_session_to_db(session)
155
+
156
+ if not session.is_offline:
157
+ self._save_session_to_cache(session)
158
+
159
+ logger.info(f"Refreshed session for user {user_id}")
160
+ return True
161
+
162
+ def revoke_session(self, user_id: str) -> bool:
163
+ """
164
+ Revoke user session.
165
+
166
+ Args:
167
+ user_id: User ID
168
+
169
+ Returns:
170
+ True if session was revoked, False if not found
171
+ """
172
+ # Remove from memory cache
173
+ if user_id in self._session_cache:
174
+ del self._session_cache[user_id]
175
+
176
+ # Remove from database
177
+ removed_from_db = self._remove_session_from_db(user_id)
178
+
179
+ # Remove from file cache
180
+ self._remove_session_from_cache(user_id)
181
+
182
+ if removed_from_db:
183
+ logger.info(f"Revoked session for user {user_id}")
184
+
185
+ return removed_from_db
186
+
187
+ def cleanup_expired_sessions(self) -> int:
188
+ """
189
+ Clean up expired sessions from storage.
190
+
191
+ Returns:
192
+ Number of sessions cleaned up
193
+ """
194
+ cleaned_count = 0
195
+
196
+ # Clean memory cache
197
+ expired_users = [
198
+ user_id for user_id, session in self._session_cache.items()
199
+ if session.is_expired
200
+ ]
201
+
202
+ for user_id in expired_users:
203
+ del self._session_cache[user_id]
204
+ cleaned_count += 1
205
+
206
+ # Clean database
207
+ try:
208
+ db_cleaned = self.db.execute_update(
209
+ 'DELETE FROM users WHERE session_data IS NOT NULL AND '
210
+ 'json_extract(session_data, "$.expires_at") < ?',
211
+ (datetime.now().isoformat(),)
212
+ )
213
+ cleaned_count += db_cleaned
214
+ except Exception as e:
215
+ logger.error(f"Error cleaning expired sessions from database: {e}")
216
+
217
+ if cleaned_count > 0:
218
+ logger.info(f"Cleaned up {cleaned_count} expired sessions")
219
+
220
+ return cleaned_count
221
+
222
+ def _save_session_to_db(self, session: AuthSession) -> None:
223
+ """Save session to database."""
224
+ try:
225
+ session_json = json.dumps(session.to_dict())
226
+ self.db.execute_update(
227
+ '''UPDATE users SET session_data = ?, last_login = ?
228
+ WHERE id = ?''',
229
+ (session_json, session.last_accessed.isoformat(), session.user_id)
230
+ )
231
+ except Exception as e:
232
+ logger.error(f"Error saving session to database: {e}")
233
+
234
+ def _load_session_from_db(self, user_id: str) -> Optional[AuthSession]:
235
+ """Load session from database."""
236
+ try:
237
+ rows = self.db.execute_query(
238
+ 'SELECT session_data FROM users WHERE id = ? AND session_data IS NOT NULL',
239
+ (user_id,)
240
+ )
241
+
242
+ if rows:
243
+ session_data = json.loads(rows[0]['session_data'])
244
+ return AuthSession.from_dict(session_data)
245
+ except Exception as e:
246
+ logger.error(f"Error loading session from database: {e}")
247
+
248
+ return None
249
+
250
+ def _remove_session_from_db(self, user_id: str) -> bool:
251
+ """Remove session from database."""
252
+ try:
253
+ # First check if user exists
254
+ rows = self.db.execute_query('SELECT id FROM users WHERE id = ?', (user_id,))
255
+ if not rows:
256
+ return False
257
+
258
+ affected = self.db.execute_update(
259
+ 'UPDATE users SET session_data = NULL WHERE id = ?',
260
+ (user_id,)
261
+ )
262
+ return affected > 0
263
+ except Exception as e:
264
+ logger.error(f"Error removing session from database: {e}")
265
+ return False
266
+
267
+ def _save_session_to_cache(self, session: AuthSession) -> None:
268
+ """Save session to file cache for offline access."""
269
+ try:
270
+ cache_file = self.cache_dir / f"session_{self._hash_user_id(session.user_id)}.json"
271
+
272
+ # Only cache essential session data for offline use
273
+ cache_data = {
274
+ 'user_id': session.user_id,
275
+ 'token_hash': self._hash_token(session.token),
276
+ 'expires_at': session.expires_at.isoformat(),
277
+ 'created_at': session.created_at.isoformat(),
278
+ 'cached_at': datetime.now().isoformat()
279
+ }
280
+
281
+ with open(cache_file, 'w') as f:
282
+ json.dump(cache_data, f)
283
+
284
+ except Exception as e:
285
+ logger.error(f"Error saving session to cache: {e}")
286
+
287
+ def _load_session_from_cache(self, user_id: str) -> Optional[AuthSession]:
288
+ """Load session from file cache."""
289
+ try:
290
+ cache_file = self.cache_dir / f"session_{self._hash_user_id(user_id)}.json"
291
+
292
+ if not cache_file.exists():
293
+ return None
294
+
295
+ with open(cache_file, 'r') as f:
296
+ cache_data = json.load(f)
297
+
298
+ # Check if cache is not too old (max 1 week)
299
+ cached_at = datetime.fromisoformat(cache_data['cached_at'])
300
+ if datetime.now() - cached_at > timedelta(days=7):
301
+ cache_file.unlink() # Remove old cache
302
+ return None
303
+
304
+ # Create session from cache (token will be validated separately)
305
+ return AuthSession(
306
+ user_id=cache_data['user_id'],
307
+ token=cache_data['token_hash'], # This is hashed, will need special handling
308
+ expires_at=datetime.fromisoformat(cache_data['expires_at']),
309
+ created_at=datetime.fromisoformat(cache_data['created_at']),
310
+ is_offline=True
311
+ )
312
+
313
+ except Exception as e:
314
+ logger.error(f"Error loading session from cache: {e}")
315
+ return None
316
+
317
+ def _remove_session_from_cache(self, user_id: str) -> None:
318
+ """Remove session from file cache."""
319
+ try:
320
+ cache_file = self.cache_dir / f"session_{self._hash_user_id(user_id)}.json"
321
+ if cache_file.exists():
322
+ cache_file.unlink()
323
+ except Exception as e:
324
+ logger.error(f"Error removing session from cache: {e}")
325
+
326
+ def _load_sessions_from_db(self) -> None:
327
+ """Load all valid sessions from database into memory cache."""
328
+ try:
329
+ rows = self.db.execute_query(
330
+ 'SELECT id, session_data FROM users WHERE session_data IS NOT NULL'
331
+ )
332
+
333
+ for row in rows:
334
+ try:
335
+ session_data = json.loads(row['session_data'])
336
+ session = AuthSession.from_dict(session_data)
337
+
338
+ if session.is_valid:
339
+ self._session_cache[row['id']] = session
340
+ except Exception as e:
341
+ logger.warning(f"Error loading session for user {row['id']}: {e}")
342
+
343
+ except Exception as e:
344
+ logger.error(f"Error loading sessions from database: {e}")
345
+
346
+ def _hash_token(self, token: str) -> str:
347
+ """Hash token for secure storage."""
348
+ return hashlib.sha256(token.encode()).hexdigest()
349
+
350
+ def _hash_user_id(self, user_id: str) -> str:
351
+ """Hash user ID for cache file naming."""
352
+ return hashlib.md5(user_id.encode()).hexdigest()[:16]
353
+
354
+ def _ensure_user_exists(self, user: User) -> None:
355
+ """Ensure user exists in database before creating session."""
356
+ try:
357
+ # Check if user exists
358
+ rows = self.db.execute_query('SELECT id FROM users WHERE id = ?', (user.id,))
359
+ if not rows:
360
+ # Create user record
361
+ self.db.execute_update(
362
+ '''INSERT INTO users (id, username, created_at, last_login)
363
+ VALUES (?, ?, ?, ?)''',
364
+ (user.id, user.username,
365
+ user.created_at.isoformat() if user.created_at else datetime.now().isoformat(),
366
+ user.last_login.isoformat() if user.last_login else None)
367
+ )
368
+ logger.info(f"Created user record for session: {user.id}")
369
+ except Exception as e:
370
+ logger.error(f"Error ensuring user exists: {e}")
digipal/core/__init__.py ADDED
@@ -0,0 +1,22 @@
+ """
2
+ Core DigiPal functionality including data models and business logic.
3
+ """
4
+
5
+ from .models import DigiPal, Interaction, Command, CareAction, AttributeModifier
6
+ from .enums import *
7
+ from .attribute_engine import AttributeEngine
8
+
9
+ __all__ = [
10
+ 'DigiPal',
11
+ 'Interaction',
12
+ 'Command',
13
+ 'CareAction',
14
+ 'AttributeModifier',
15
+ 'AttributeEngine',
16
+ 'EggType',
17
+ 'LifeStage',
18
+ 'CareActionType',
19
+ 'AttributeType',
20
+ 'CommandType',
21
+ 'InteractionResult'
22
+ ]
digipal/core/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (611 Bytes)

digipal/core/__pycache__/attribute_engine.cpython-312.pyc ADDED
Binary file (19.9 kB)

digipal/core/__pycache__/digipal_core.cpython-312.pyc ADDED
Binary file (44.9 kB)

digipal/core/__pycache__/enums.cpython-312.pyc ADDED
Binary file (2.63 kB)

digipal/core/__pycache__/error_handler.cpython-312.pyc ADDED
Binary file (34.2 kB)

digipal/core/__pycache__/evolution_controller.cpython-312.pyc ADDED
Binary file (28.3 kB)

digipal/core/__pycache__/exceptions.cpython-312.pyc ADDED
Binary file (12.6 kB)

digipal/core/__pycache__/memory_manager.cpython-312.pyc ADDED
Binary file (39.8 kB)

digipal/core/__pycache__/models.cpython-312.pyc ADDED
Binary file (16 kB)