Commit 01d5a5d · 0 Parent(s)
Committed by Gemini

feat: add detailed logging

Note: this view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +33 -0
  2. .github/ISSUE_TEMPLATE/bug-report.yml +141 -0
  3. .github/ISSUE_TEMPLATE/feature-request.yml +54 -0
  4. .gitignore +82 -0
  5. CODE_OF_CONDUCT.md +45 -0
  6. CONTRIBUTING.md +201 -0
  7. Dockerfile +78 -0
  8. Dockerfile.backend +74 -0
  9. Dockerfile.backend.apple +99 -0
  10. Dockerfile.backend.cuda +110 -0
  11. Dockerfile.frontend +25 -0
  12. LICENSE +201 -0
  13. Makefile +306 -0
  14. README.md +163 -0
  15. README_ja.md +219 -0
  16. SECURITY.md +21 -0
  17. app.py +0 -0
  18. dependencies/graphrag-1.2.1.dev27.tar.gz +3 -0
  19. dependencies/graphrag-modified.tar.gz +3 -0
  20. dependencies/llama.cpp.zip +3 -0
  21. docker-compose-gpu.yml +74 -0
  22. docker-compose.yml +69 -0
  23. docker/app/check_gpu_support.sh +57 -0
  24. docker/app/check_torch_cuda.py +52 -0
  25. docker/app/init_chroma.py +88 -0
  26. docker/app/rebuild_llama_cuda.sh +132 -0
  27. docker/sqlite/init.sql +237 -0
  28. docs/Custom Model Config(Ollama).md +108 -0
  29. docs/Embedding Model Switching.md +61 -0
  30. docs/Local Chat API.md +165 -0
  31. docs/Public Chat API.md +163 -0
  32. images/cover.png +3 -0
  33. images/secondme_cover.png +3 -0
  34. integrate/.gitkeep +0 -0
  35. integrate/Readme.md +39 -0
  36. integrate/env.txt +6 -0
  37. integrate/requirements.txt +5 -0
  38. integrate/wechat_bot.py +82 -0
  39. logs/.gitkeep +0 -0
  40. logs/logs.lnk +0 -0
  41. lpm_frontend/.eslintignore +2 -0
  42. lpm_frontend/.eslintrc.js +31 -0
  43. lpm_frontend/.gitignore +42 -0
  44. lpm_frontend/.prettierignore +2 -0
  45. lpm_frontend/.prettierrc.js +12 -0
  46. lpm_frontend/.stylelintignore +2 -0
  47. lpm_frontend/.stylelintrc.js +43 -0
  48. lpm_frontend/next.config.js +60 -0
  49. lpm_frontend/package-lock.json +3 -0
  50. lpm_frontend/package.json +3 -0
.gitattributes ADDED
@@ -0,0 +1,33 @@
+ # Set all text files to use LF line endings
+ * text=auto eol=lf
+ # Binary files should not be modified
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.jpg binary
+ *.jpeg binary
+ *.gif binary
+ *.ico binary
+ *.mov binary
+ *.mp4 binary
+ *.mp3 binary
+ *.pdf binary
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.gz binary
+ *.tar binary
+ dependencies/graphrag-1.2.1.dev27.tar.gz filter=lfs diff=lfs merge=lfs -text
+ dependencies/graphrag-modified.tar.gz filter=lfs diff=lfs merge=lfs -text
+ dependencies/llama.cpp.zip filter=lfs diff=lfs merge=lfs -text
+ lpm_kernel/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/step_2.png filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/step_4.png filter=lfs diff=lfs merge=lfs -text
+ images/cover.png filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/app_native_applications.png filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/app_secondme_apps.png filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/app_secondme_network.png filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/step_1.png filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/step_3.png filter=lfs diff=lfs merge=lfs -text
+ images/secondme_cover.png filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/fonts/Calistoga.ttf filter=lfs diff=lfs merge=lfs -text
+ lpm_frontend/public/images/app_api_mcp.png filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ttf filter=lfs diff=lfs merge=lfs -text
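The attribute and LFS rules added above can be spot-checked locally. A minimal sketch, assuming Git LFS is installed and the repository has been cloned (the paths are examples taken from the rules above):

```bash
# Show every attribute Git resolves for an LFS-tracked asset
git check-attr -a images/cover.png

# List files currently managed by Git LFS; the dependency archives should appear here
git lfs ls-files

# Confirm that text files are normalized to LF on checkout
git check-attr text eol README.md
```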
.github/ISSUE_TEMPLATE/bug-report.yml ADDED
@@ -0,0 +1,141 @@
+ name: "🪲 Bug Report"
+ description: "File a bug report to help us improve"
+ labels: ["fix"]
+ body:
+
+   - type: "markdown"
+     attributes:
+       value: |
+         > [!IMPORTANT]
+         > To save time for both you and us, try to follow these guidelines before submitting a new issue:
+         > 1. Check whether there is an existing issue tracking your bug on our GitHub.
+         > 2. If you are unsure whether your issue is an actual bug, first discuss it on a [GitHub discussion](https://github.com/mindverse/Second-Me/discussions/new?category=q-a) or other project communication channels.
+         > These steps help avoid opening issues that are duplicates or not actual bugs.
+
+   - type: "checkboxes"
+     id: "environment-os"
+     attributes:
+       label: "Operating System"
+       description: "What is your operating system?"
+       options:
+         - label: "macOS"
+         - label: "Linux"
+         - label: "Windows"
+     validations:
+       required: true
+
+   - type: "checkboxes"
+     id: "deployment-method"
+     attributes:
+       label: "Deployment Method"
+       description: "How are you deploying the application?"
+       options:
+         - label: "Docker"
+         - label: "non-Docker"
+     validations:
+       required: true
+
+   - type: "checkboxes"
+     id: "cuda-usage"
+     attributes:
+       label: "CUDA Usage"
+       description: "Are you using a CUDA configuration?"
+       options:
+         - label: "Yes"
+         - label: "No"
+     validations:
+       required: true
+
+   - type: "markdown"
+     attributes:
+       value: |
+         ---
+         **If you are reporting an issue encountered during the training process, please also provide the following information:**
+
+   - type: "textarea"
+     id: "training-details"
+     attributes:
+       label: "Training Process Details (if applicable)"
+       description: "If this bug is related to the training process, please specify: \n1. Model size (e.g., 7B, 13B, etc.) \n2. Machine base configuration (RAM size and VRAM size, e.g., 32GB RAM, 16GB VRAM)."
+       placeholder: "e.g., Model: 7B, RAM: 64GB, VRAM: 24GB"
+     validations:
+       required: false
+
+   - type: "markdown"
+     attributes:
+       value: |
+         ---
+
+   - type: "input"
+     id: "version"
+     attributes:
+       label: "Second-Me version"
+       description: "Please specify the version of Second-Me you are using (e.g., commit hash, release tag, or 'latest master'). If unsure, please indicate that."
+     validations:
+       required: true
+
+   - type: "textarea"
+     id: "description"
+     attributes:
+       label: "Describe the bug"
+       description: "What is the problem? A clear and concise description of the bug."
+     validations:
+       required: true
+
+   - type: "textarea"
+     id: "current"
+     attributes:
+       label: "Current Behavior"
+       description: "What actually happened?\n\n
+         Please include full errors, uncaught exceptions, stack traces, screenshots, and other relevant logs (e.g., from `log/backend.log` if applicable)."
+     validations:
+       required: true
+
+   - type: "textarea"
+     id: "expected"
+     attributes:
+       label: "Expected Behavior"
+       description: "What did you expect to happen?"
+     validations:
+       required: true
+
+   - type: "textarea"
+     id: "reproduction"
+     attributes:
+       label: "Reproduction Steps"
+       description: "Detail the steps needed to reproduce the issue. This can include a self-contained, concise snippet of
+         code, if applicable.\n\n
+         For more complex issues, provide a link to a repository with the smallest sample that reproduces
+         the bug.\n
+         If the issue can be replicated without code, please provide a clear, step-by-step description of
+         the actions or conditions necessary to reproduce it. Any screenshots are also appreciated.\n
+         Avoid including business logic or unrelated details, as this makes diagnosis more difficult.\n\n
+         Whether it's a sequence of actions, code samples, or specific conditions, ensure that the steps
+         are clear enough to be easily followed and replicated."
+     validations:
+       required: true
+
+   - type: "textarea"
+     id: "workaround"
+     attributes:
+       label: "Possible Workaround"
+       description: "If you have found a workaround for this problem, please provide it here."
+     validations:
+       required: false
+
+   - type: "textarea"
+     id: "context"
+     attributes:
+       label: "Additional Information"
+       description: "Anything else that might be relevant for troubleshooting this bug.\n
+         Providing context helps us come up with a solution that is most useful in the real-world use case."
+     validations:
+       required: false
+
+   - type: "input"
+     id: "discussion_link"
+     attributes:
+       label: "Link to related GitHub discussion or issue"
+       description: "If there's an existing GitHub discussion or issue related to this bug, please link it here."
+     validations:
+       required: false
.github/ISSUE_TEMPLATE/feature-request.yml ADDED
@@ -0,0 +1,54 @@
+ name: "🙋 Feature Request"
+ description: "Use this template to request a new feature or suggest an idea for Second-Me"
+ labels: ["upgrade"]
+ body:
+   - type: "markdown"
+     attributes:
+       value: |
+         > [!IMPORTANT]
+         > To save time for both you and us, try to follow these guidelines before submitting a feature request:
+         > 1. Check whether there is an existing feature request similar to yours on our GitHub.
+         > 2. We encourage you to first discuss your idea on a [GitHub discussion](https://github.com/mindverse/Second-Me/discussions/categories/ideas) or the **#ideas** channel of our [Discord server](https://discord.gg/GpWHQNUwrg).
+         > This step helps in understanding the new feature and determining whether it can be implemented at all.
+         > Only proceed with this report if your idea was approved after the GitHub/Discord discussion.
+
+   - type: "textarea"
+     id: "description"
+     attributes:
+       label: "Describe the feature"
+       description: "A clear and concise description of the feature you are proposing."
+     validations:
+       required: true
+
+   - type: "textarea"
+     id: "use-case"
+     attributes:
+       label: "Use Case"
+       description: "Why do you need this feature? Provide real-world use cases; the more, the better."
+     validations:
+       required: true
+
+   - type: "textarea"
+     id: "solution"
+     attributes:
+       label: "Proposed Solution"
+       description: "Suggest how to implement the new feature. Please include a prototype/sketch/reference implementation."
+     validations:
+       required: false
+
+   - type: "textarea"
+     id: "additional_info"
+     attributes:
+       label: "Additional Information"
+       description: "Any additional information you would like to provide - links, screenshots, etc."
+     validations:
+       required: false
+
+   - type: "input"
+     id: "discussion_link"
+     attributes:
+       label: "Link to Discord or GitHub discussion"
+       description: "Provide a link to the first message of the feature request's discussion on Discord or GitHub.\n
+         This will help to keep a history of why this feature request exists."
+     validations:
+       required: false
.gitignore ADDED
@@ -0,0 +1,82 @@
+ __pycache__
+ .DS_Store
+ .idea
+ .ipynb_checkpoints
+ .pytest_cache
+ .ruff_cache
+ .vscode/
+ .ruff_cache/
+ poetry.lock
+ .hf_cache/
+ .poetry-venv/
+
+ llama.cpp
+ *.ipynb
+ data/db/*
+ data/chroma_db/*
+ data/
+ lpm_kernel/L2/base_model/
+ lpm_kernel/L2/data_pipeline/output/
+ lpm_kernel/L2/data_pipeline/graphrag_indexing/cache/
+ lpm_kernel/L2/data_pipeline/raw_data/*
+ !lpm_kernel/L2/data_pipeline/raw_data/.gitkeep
+ lpm_kernel/L2/data_pipeline/tmp/
+ lpm_kernel/L2/output_models/
+
+ data/sqlite/*
+ data/uploads/*
+ data/progress/*
+ lpm_frontend/node_modules
+
+ # L2 Model Storage
+ resources/model/output/merged_model/*
+ !resources/model/output/merged_model/.gitkeep
+ resources/model/output/personal_model/*
+ !resources/model/output/personal_model/.gitkeep
+ resources/model/output/*.json
+ resources/model/output/*.gguf
+ !resources/model/output/.gitkeep
+
+
+ resources/L1/processed_data/subjective/*
+ !resources/L1/processed_data/subjective/.gitkeep
+ resources/L1/processed_data/objective/*
+ !resources/L1/processed_data/objective/.gitkeep
+ resources/L1/graphrag_indexing_output/report/*
+ resources/L1/graphrag_indexing_output/subjective/*
+
+
+ resources/raw_content/*
+ !resources/raw_content/.gitkeep
+
+ # Base model storage address
+ resources/L2/base_model/*
+ !resources/L2/base_model/.gitkeep
+ resources/L2/data_pipeline/raw_data/*
+ !resources/L2/data_pipeline/raw_data/.gitkeep
+
+
+ resources/model/processed_data/L1/processed_data/objective/*
+ resources/model/processed_data/L1/processed_data/subjective/*
+
+ resources/L2/data/*
+ !resources/L2/data/.gitkeep
+
+ resources/model/output/gguf/*
+ resources/L2/base_models/*
+ logs/*.log
+ logs/train/*.log
+
+ # Runtime files
+ run/*
+ !run/.gitkeep
+
+ # config
+ .backend.pid
+ .frontend.pid
+ logs/train/
+ llama_cpp_backup/llama.cpp.zip
+ scripts/check_cuda_status.ps1
+ scripts/test_cuda_detection.bat
+ .env
+ .gpu_selected
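If a file unexpectedly shows up (or fails to show up) in `git status`, the ignore rules above can be traced with `git check-ignore`; a minimal sketch (the paths are examples matching rules in this file):

```bash
# Print the .gitignore rule that causes a path to be ignored, if any
git check-ignore -v data/sqlite/lpm.db

# Verify that a .gitkeep exception is still tracked (non-zero exit means "not ignored")
git check-ignore -v run/.gitkeep || echo "run/.gitkeep is not ignored"
```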
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,45 @@
+ # Second Me Covenant Code of Conduct
+
+ ## Our Pledge
+
+ In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+ ## Our Standards
+
+ ### Examples of behavior that contributes to creating a positive environment include:
+
+ - Using welcoming and inclusive language
+ - Being respectful of differing viewpoints and experiences
+ - Gracefully accepting constructive criticism
+ - Focusing on what is best for the community
+ - Showing empathy towards other community members
+
+ ### Examples of unacceptable behavior by participants include:
+
+ - The use of sexualized language or imagery and unwelcome sexual attention or advances
+ - Trolling, insulting/derogatory comments, and personal or political attacks
+ - Public or private harassment
+ - Publishing others' private information, such as a physical or electronic address, without explicit permission
+ - Other conduct which could reasonably be considered inappropriate in a professional setting
+
+ ## Our Responsibilities
+
+ Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+ Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+ ## Scope
+
+ This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+ ## Enforcement
+
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [[email protected]](mailto:[email protected]). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality about the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+ Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+ ## Attribution
+
+ This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/), version 2.1.
+
+ For answers to common questions about this code of conduct, see the [FAQ](https://www.contributor-covenant.org/faq/).
CONTRIBUTING.md ADDED
@@ -0,0 +1,201 @@
+ # Contributing Guide
+
+ Second Me is an open and friendly community. We are dedicated to building a collaborative, inspiring, and exuberant open source community for our members. Everyone is more than welcome to join our community to get help and to contribute to Second Me.
+
+ The Second Me community welcomes various forms of contributions, including code, non-code contributions, documentation, and more.
+
+ ## How to Contribute
+
+ | Contribution Type | Details |
+ |------------------|---------|
+ | Report a bug | You can file an issue to report a bug with Second Me |
+ | Contribute code | You can contribute your code by fixing a bug or implementing a feature |
+ | Code Review | If you are an active contributor or committer of Second Me, you can help us review pull requests |
+ | Documentation | You can contribute documentation changes by fixing a documentation bug or proposing new content |
+
+ ## Before Contributing
+ * Sign the [CLA of Mindverse](https://cla-assistant.io/mindverse/Second-Me)
+
+ ## Checklist for Preparing and Submitting Your PR (Pull Request)
+ * Create your own GitHub branch by forking Second Me
+ * Check out the [README](README.md) for how to deploy Second Me.
+ * Push changes to your personal fork.
+ * Create a PR with a detailed description if the commit messages do not speak for themselves.
+ * Submit the PR for review and address all feedback.
+ * Wait for merging (done by committers).
+
+ ## Branch Management Strategy
+
+ We follow a structured branching strategy to manage releases and contributions from both internal and external contributors.
+
+ ### Branch Structure
+
+ ```
+ master (stable version)
+    ^
+    |
+ release/vX.Y.Z (release preparation branch)
+    ^
+    |
+ develop (development integration branch)
+    ^
+    |
+ feature/* (feature branches) / hotfix/* (hotfix branches)
+ ```
+
+ ## Development Workflow
+
+ ```
+            hotfix/fix-bug
+           /              \
+ master --+----------------+------+--- ... --> Stable Version
+                                  /
+                                 /
+ release/v1.0 --------+---------+--------- ... --> Release Version
+                     /
+                    /
+ develop -----+----+-----------------+--- ... --> Development Integration
+                                    /
+                                   /
+ feature/new-feature -------------+-------- ... --> Feature Development (from master)
+ ```
+
+ ### Step 1: Fork and Clone (External Contributors Only)
+ If you're an external contributor, you need to fork the repository first:
+
+ 1. Visit https://github.com/Mindverse/Second-Me
+ 2. Click the "Fork" button in the top-right corner
+ 3. Clone your fork to your local machine:
+ ```bash
+ cd working_dir
+ # Replace USERNAME with your GitHub username
+ git clone git@github.com:USERNAME/Second-Me.git
+ cd Second-Me
+ ```
+
+ 4. Configure the upstream remote:
+ ```bash
+ # Add the upstream repository
+ git remote add upstream git@github.com:Mindverse/Second-Me.git
+
+ # Verify your remotes
+ git remote -v
+ ```
+
+ ### Step 2: Create a Feature Branch
+ All contributors should create feature branches from the `master` branch:
+
+ ```bash
+ # First, ensure you have the latest changes
+ git fetch origin  # or upstream if you're working with a fork
+
+ # Checkout the master branch
+ git checkout master
+
+ git pull
+
+ # Create your feature branch from master
+ git checkout -b feature/your-feature-name
+ ```
+
+ ### Step 3: Develop Your Feature
+ - Make changes in your feature branch
+ - Commit regularly with descriptive messages
+ - Follow the project's coding style
+ - Add tests if applicable
+ - Update documentation as needed
+
+ ### Step 4: Commit Your Changes
+ ```bash
+ # Add your changes
+ git add <filename>
+ # Or git add -A for all changes
+
+ # Commit with a clear message
+ git commit -m "feat: add new feature X"
+ ```
+
+ ### Step 5: Update Your Branch Before Submitting
+ Before submitting your PR, update your feature branch with the latest changes:
+
+ ```bash
+ # Fetch latest changes
+ git fetch origin  # or upstream if you're working with a fork
+
+ # Rebase your feature branch
+ git checkout feature/your-feature-name
+ git rebase origin/master  # or upstream/master for forked repos
+ ```
+
+ If you're an external contributor, you may need to push to your fork:
+ ```bash
+ git push origin feature/your-feature-name
+ ```
+
+ ### Step 6: Create a Pull Request
+ 1. Visit the repository (or your fork)
+ 2. Click "Compare & Pull Request"
+ 3. Select:
+    - Base repository: `Mindverse/Second-Me`
+    - Base branch: `develop` (all features and fixes go to develop first)
+    - Head repository: Your repository
+    - Compare branch: `feature/your-feature-name`
+ 4. Fill in the PR template with:
+    - Clear description of your changes
+    - Any related issues
+    - Testing steps if applicable
+    - Target version if applicable
+
+ ### Step 7: Address Review Feedback
+ - Maintainers will review your PR
+ - Address any feedback by making requested changes
+ - Push new commits to your feature branch
+ - Your PR will be updated automatically
+
+ ### Step 8: PR Approval and Merge
+ - All checks must pass before merging
+ - Once approved, maintainers will merge your PR to the appropriate branch
+ - Your contribution will be included in the next release cycle
+
+ ## Release Management
+
+ The following describes how releases are managed by project maintainers:
+
+ ```
+       PR Merge Flow
+             |
+ master -----------------------+------ ... --> Stable Version
+                               |
+                               |
+ release/vX.Y.Z ---------------+------- ... --> Release Version
+                              /|
+                             / |
+ develop -------------------+--+------ ... --> Development Integration
+                               ^
+                               |
+ feature branches -------------+
+ ```
+
+ ### Creating a Release
+ 1. When the `develop` branch contains all features planned for a release, a `release/vX.Y.Z` branch is created
+ 2. Only bug fixes and release preparation commits are added to the release branch
+ 3. After thorough testing, the release branch is merged to `master`
+ 4. The release is tagged in `master` with the version number
+
+ ### PR Merge Strategy
+ - All feature PRs are initially merged to the `develop` branch
+ - Critical bug fixes may be merged directly to the current `release` branch
+ - Maintainers are responsible for ensuring PRs are merged to the appropriate branch
+
+ ### Hotfixes
+ 1. For critical bugs in production, create a `hotfix/fix-description` branch from `master`
+ 2. Fix the issue and create a PR targeting `master`
+ 3. After approval, merge to both `master` and `develop` (and the current `release` branch if one exists)
+
+ ## Tips for Successful Contributions
+ - Create focused, single-purpose PRs
+ - Follow the project's code style and conventions
+ - Write clear commit messages
+ - Keep your fork updated to avoid merge conflicts
+ - Be responsive during the review process
+ - Ask questions if anything is unclear
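The hotfix flow described in the CONTRIBUTING guide above can be expressed as commands; a minimal sketch (the branch name is illustrative, and the final merges into `master`, `develop`, and any current release branch are performed by maintainers):

```bash
# Branch the hotfix from the stable branch
git checkout master && git pull
git checkout -b hotfix/fix-description

# Commit the fix and push it for review
git commit -am "fix: describe the critical bug"
git push origin hotfix/fix-description
# Open a PR targeting master; after approval the fix is also merged back into develop
# (and into the current release/vX.Y.Z branch if one exists)
```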
Dockerfile ADDED
@@ -0,0 +1,78 @@
+ FROM python:3.12
+
+ # Set working directory
+ WORKDIR /app
+
+ # Install system dependencies, Poetry and configure it
+ RUN apt-get update && apt-get install -y \
+     build-essential cmake git curl wget lsof vim unzip sqlite3 \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/* \
+     && pip install --upgrade pip \
+     && pip install poetry \
+     && poetry config virtualenvs.create false
+
+ # Create directories
+ RUN mkdir -p /app/dependencies /app/data/sqlite /app/data/chroma_db /app/logs /app/run /app/resources
+
+ # Copy dependency files - Files that rarely change
+ COPY dependencies/graphrag-1.2.1.dev27.tar.gz /app/dependencies/
+ COPY dependencies/llama.cpp.zip /app/dependencies/
+ RUN echo "--- Listing dependencies directory ---" && ls -la /app/dependencies
+
+ # Copy GPU checker script (only used for status reporting, not rebuilding)
+ COPY docker/app/check_gpu_support.sh /app/
+ COPY docker/app/check_torch_cuda.py /app/
+ RUN chmod +x /app/check_gpu_support.sh
+
+ # Build llama.cpp
+ RUN LLAMA_LOCAL_ZIP="dependencies/llama.cpp.zip" \
+     && echo "Using local llama.cpp archive..." \
+     && unzip -q "$LLAMA_LOCAL_ZIP" \
+     && cd llama.cpp \
+     && mkdir -p build && cd build \
+     && cmake .. \
+     && cmake --build . --config Release \
+     && if [ ! -f "bin/llama-server" ]; then \
+         echo "Build failed: llama-server executable not found" && exit 1; \
+     else \
+         echo "Successfully built llama-server"; \
+     fi
+
+ # Mark as CPU-only build for runtime reference
+ RUN mkdir -p /app/data && \
+     echo "{ \"gpu_optimized\": false, \"optimized_on\": \"$(date -u +\"%Y-%m-%dT%H:%M:%SZ\")\" }" > /app/data/gpu_optimized.json && \
+     echo "Created CPU-only marker file"
+
+ # Copy project configuration - Files that occasionally change
+ COPY pyproject.toml README.md /app/
+ RUN echo "--- Listing /app directory ---" && ls -la /app
+
+ RUN poetry install --no-interaction --no-root
+ RUN pip install --force-reinstall dependencies/graphrag-1.2.1.dev27.tar.gz
+
+
+ # Copy source code - Files that frequently change
+ COPY docker/ /app/docker/
+ COPY lpm_kernel/ /app/lpm_kernel/
+ RUN echo "--- Listing /app/docker directory ---" && ls -la /app/docker
+ RUN echo "--- Listing /app/lpm_kernel directory ---" && ls -la /app/lpm_kernel
+
+ # Check module import
+ RUN python -c "import lpm_kernel; print('Module import check passed')"
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1 \
+     PYTHONPATH=/app \
+     BASE_DIR=/app/data \
+     LOCAL_LOG_DIR=/app/logs \
+     RUN_DIR=/app/run \
+     RESOURCES_DIR=/app/resources \
+     APP_ROOT=/app \
+     FLASK_APP=lpm_kernel.app
+
+ # Expose ports
+ EXPOSE 8002 8080
+
+ # Set the startup command
+ CMD ["bash", "-c", "echo \"--- Checking SQLite database... ---\" && if [ ! -s /app/data/sqlite/lpm.db ]; then echo \"SQLite database not found or empty, initializing...\" && mkdir -p /app/data/sqlite && sqlite3 /app/data/sqlite/lpm.db \".read /app/docker/sqlite/init.sql\" && echo \"SQLite database initialized successfully\" && echo \"Tables created:\" && sqlite3 /app/data/sqlite/lpm.db \".tables\"; else echo \"SQLite database already exists, skipping initialization\"; fi && echo \"--- Checking ChromaDB... ---\" && if [ ! -d /app/data/chroma_db/documents ] || [ ! -d /app/data/chroma_db/document_chunks ]; then echo \"ChromaDB collections not found, initializing...\" && python /app/docker/app/init_chroma.py && echo \"ChromaDB initialized successfully\"; else echo \"ChromaDB already exists, skipping initialization\"; fi && echo \"--- Starting application... ---\" && cd /app && python -m flask run --host=0.0.0.0 --port=${LOCAL_APP_PORT:-8002}"]
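The image defined by this Dockerfile is normally built and started through `docker-compose.yml` (or `make docker-up`), but it can also be exercised directly; a minimal sketch, where the image tag and host port mapping are illustrative:

```bash
# Build the CPU-only image from the repository root
docker build -t second-me-app .

# Run it, exposing the Flask backend port declared by EXPOSE / LOCAL_APP_PORT
docker run --rm -p 8002:8002 -e LOCAL_APP_PORT=8002 second-me-app
```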
Dockerfile.backend ADDED
@@ -0,0 +1,74 @@
+ FROM python:3.12
+
+ # Set working directory
+ WORKDIR /app
+
+ # Install system dependencies, Poetry and configure it
+ RUN apt-get update && apt-get install -y \
+     build-essential cmake git curl wget lsof vim unzip sqlite3 \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/* \
+     && pip install --upgrade pip \
+     && pip install poetry \
+     && poetry config virtualenvs.create false
+
+ # Create directories
+ RUN mkdir -p /app/dependencies /app/data/sqlite /app/data/chroma_db /app/logs /app/run /app/resources
+
+ # Copy dependency files - Files that rarely change
+ COPY dependencies/graphrag-1.2.1.dev27.tar.gz /app/dependencies/
+ COPY dependencies/llama.cpp.zip /app/dependencies/
+
+ # Copy GPU checker script (only used for status reporting, not rebuilding)
+ COPY docker/app/check_gpu_support.sh /app/
+ COPY docker/app/check_torch_cuda.py /app/
+ RUN chmod +x /app/check_gpu_support.sh
+
+ # Build llama.cpp
+ RUN LLAMA_LOCAL_ZIP="dependencies/llama.cpp.zip" \
+     && echo "Using local llama.cpp archive..." \
+     && unzip -q "$LLAMA_LOCAL_ZIP" \
+     && cd llama.cpp \
+     && mkdir -p build && cd build \
+     && cmake .. \
+     && cmake --build . --config Release \
+     && if [ ! -f "bin/llama-server" ]; then \
+         echo "Build failed: llama-server executable not found" && exit 1; \
+     else \
+         echo "Successfully built llama-server"; \
+     fi
+
+ # Mark as CPU-only build for runtime reference
+ RUN mkdir -p /app/data && \
+     echo "{ \"gpu_optimized\": false, \"optimized_on\": \"$(date -u +\"%Y-%m-%dT%H:%M:%SZ\")\" }" > /app/data/gpu_optimized.json && \
+     echo "Created CPU-only marker file"
+
+ # Copy project configuration - Files that occasionally change
+ COPY pyproject.toml README.md /app/
+
+ RUN poetry install --no-interaction --no-root
+ RUN pip install --force-reinstall dependencies/graphrag-1.2.1.dev27.tar.gz
+
+
+ # Copy source code - Files that frequently change
+ COPY docker/ /app/docker/
+ COPY lpm_kernel/ /app/lpm_kernel/
+
+ # Check module import
+ RUN python -c "import lpm_kernel; print('Module import check passed')"
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1 \
+     PYTHONPATH=/app \
+     BASE_DIR=/app/data \
+     LOCAL_LOG_DIR=/app/logs \
+     RUN_DIR=/app/run \
+     RESOURCES_DIR=/app/resources \
+     APP_ROOT=/app \
+     FLASK_APP=lpm_kernel.app
+
+ # Expose ports
+ EXPOSE 8002 8080
+
+ # Set the startup command
+ CMD ["bash", "-c", "echo \"Checking SQLite database...\" && if [ ! -s /app/data/sqlite/lpm.db ]; then echo \"SQLite database not found or empty, initializing...\" && mkdir -p /app/data/sqlite && sqlite3 /app/data/sqlite/lpm.db \".read /app/docker/sqlite/init.sql\" && echo \"SQLite database initialized successfully\" && echo \"Tables created:\" && sqlite3 /app/data/sqlite/lpm.db \".tables\"; else echo \"SQLite database already exists, skipping initialization\"; fi && echo \"Checking ChromaDB...\" && if [ ! -d /app/data/chroma_db/documents ] || [ ! -d /app/data/chroma_db/document_chunks ]; then echo \"ChromaDB collections not found, initializing...\" && python /app/docker/app/init_chroma.py && echo \"ChromaDB initialized successfully\"; else echo \"ChromaDB already exists, skipping initialization\"; fi && echo \"Starting application at $(date)\" >> /app/logs/backend.log && cd /app && python -m flask run --host=0.0.0.0 --port=${LOCAL_APP_PORT:-8002} >> /app/logs/backend.log 2>&1"]
Dockerfile.backend.apple ADDED
@@ -0,0 +1,99 @@
+ FROM --platform=linux/arm64 python:3.12-bullseye
+
+ # Set working directory
+ WORKDIR /app
+
+ # 1. Install system dependencies (including SQLite compilation dependencies)
+ RUN apt-get update && apt-get install -y \
+     build-essential cmake git curl wget lsof vim unzip \
+     libsqlite3-dev tcl-dev tk-dev \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/*
+
+ # 2. Prioritize compiling and installing the new version of SQLite
+ RUN wget https://www.sqlite.org/2025/sqlite-autoconf-3490100.tar.gz \
+     && tar xzf sqlite-autoconf-3490100.tar.gz \
+     && cd sqlite-autoconf-3490100 \
+     && ./configure --enable-fts5 --prefix=/usr/local \
+     && make -j$(nproc) \
+     && make install \
+     && cd .. \
+     && rm -rf sqlite-autoconf-3490100* \
+     && ldconfig
+
+ # 3. Configure Python compilation environment
+ ENV CFLAGS="-I/usr/local/include -DSQLITE_ENABLE_FTS5"
+ ENV LDFLAGS="-L/usr/local/lib -lsqlite3"
+ ENV LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"
+
+ # 4. Configure Python environment
+ RUN pip install --upgrade pip \
+     && pip install poetry \
+     && poetry config virtualenvs.create false
+
+ # 5. Force source code compilation of pysqlite3
+ RUN pip install pysqlite3 --no-binary pysqlite3
+
+ # 6. Verify SQLite version
+ RUN python -c "import sqlite3; print('SQLite version:', sqlite3.sqlite_version); assert sqlite3.sqlite_version.startswith('3.49.1'), 'Wrong SQLite version!'"
+
+ # Maintain the original project configuration for what follows...
+ # -----------------------------------------------------------
+ # The following keeps the original project configuration unchanged
+ # Create directories
+ RUN mkdir -p /app/dependencies /app/data/sqlite /app/data/chroma_db /app/logs /app/run /app/resources
+
+ # Copy dependency files
+ COPY dependencies/graphrag-modified.tar.gz /app/dependencies/
+ COPY dependencies/llama.cpp.zip /app/dependencies/
+
+ # Build llama.cpp
+ RUN LLAMA_LOCAL_ZIP="dependencies/llama.cpp.zip" \
+     && echo "Using local llama.cpp archive..." \
+     && unzip -q "$LLAMA_LOCAL_ZIP" \
+     && cd llama.cpp \
+     && mkdir -p build && cd build \
+     && cmake .. \
+     && cmake --build . --config Release \
+     && if [ ! -f "bin/llama-server" ]; then \
+         echo "Build failed: llama-server executable not found" && exit 1; \
+     else \
+         echo "Successfully built llama-server"; \
+     fi \
+     && cp bin/llama-server /usr/local/bin/ \
+     && chmod +x /usr/local/bin/llama-server \
+     && echo "Installed llama-server to /usr/local/bin/"
+
+ # Copy project configuration
+ COPY pyproject.toml README.md /app/
+
+ RUN pip install -U pip setuptools wheel
+ RUN pip install --no-cache-dir spacy==3.7.5
+ RUN pip install --force-reinstall dependencies/graphrag-modified.tar.gz
+
+ RUN pip uninstall -y chromadb \
+     && pip install chromadb==0.4.24 --no-binary chromadb --force-reinstall
+
+ RUN poetry install --no-interaction --no-root
+
+ # Copy source code
+ COPY docker/ /app/docker/
+ COPY lpm_kernel/ /app/lpm_kernel/
+
+ # Check module import
+ RUN python -c "import lpm_kernel; print('Module import check passed')"
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1 \
+     PYTHONPATH=/app \
+     BASE_DIR=/app/data \
+     LOCAL_LOG_DIR=/app/logs \
+     RUN_DIR=/app/run \
+     RESOURCES_DIR=/app/resources \
+     APP_ROOT=/app \
+     FLASK_APP=lpm_kernel.app
+
+ # Expose ports
+ EXPOSE 8002 8080
+
+ CMD ["bash", "-c", "echo \"Checking SQLite database...\" && if [ ! -s /app/data/sqlite/lpm.db ]; then echo \"SQLite database not found or empty, initializing...\" && mkdir -p /app/data/sqlite && sqlite3 /app/data/sqlite/lpm.db \".read /app/docker/sqlite/init.sql\" && echo \"SQLite database initialized successfully\" && echo \"Tables created:\" && sqlite3 /app/data/sqlite/lpm.db \".tables\"; else echo \"SQLite database already exists, skipping initialization\"; fi && echo \"Checking ChromaDB...\" && if [ ! -d /app/data/chroma_db/documents ] || [ ! -d /app/data/chroma_db/document_chunks ]; then echo \"ChromaDB collections not found, initializing...\" && python /app/docker/app/init_chroma.py && echo \"ChromaDB initialized successfully\"; else echo \"ChromaDB already exists, skipping initialization\"; fi && echo \"Starting application at $(date)\" >> /app/logs/backend.log && cd /app && python -m flask run --host=0.0.0.0 --port=${LOCAL_APP_PORT:-8002} >> /app/logs/backend.log 2>&1"]
Dockerfile.backend.cuda ADDED
@@ -0,0 +1,110 @@
+ FROM nvidia/cuda:12.8.1-devel-ubuntu24.04
+
+ # Set working directory
+ WORKDIR /app
+
+ # Add build argument to conditionally skip llama.cpp build
+ ARG SKIP_LLAMA_BUILD=false
+
+ # Install system dependencies with noninteractive mode to avoid prompts
+ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
+     build-essential cmake git curl wget lsof vim unzip sqlite3 \
+     python3-pip python3-venv python3-full python3-poetry pipx \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/* \
+     && ln -sf /usr/bin/python3 /usr/bin/python
+
+ # Create a virtual environment to avoid PEP 668 restrictions
+ RUN python -m venv /app/venv
+ ENV PATH="/app/venv/bin:$PATH"
+ ENV VIRTUAL_ENV="/app/venv"
+
+ # Use the virtual environment's pip to install packages
+ RUN pip install --upgrade pip \
+     && pip install poetry \
+     && poetry config virtualenvs.create false
+
+ # Create directories
+ RUN mkdir -p /app/dependencies /app/data/sqlite /app/data/chroma_db /app/logs /app/run /app/resources
+
+ # Copy dependency files - Files that rarely change
+ COPY dependencies/graphrag-1.2.1.dev27.tar.gz /app/dependencies/
+ COPY dependencies/llama.cpp.zip /app/dependencies/
+
+ # Copy GPU checker script
+ COPY docker/app/check_gpu_support.sh /app/
+ COPY docker/app/check_torch_cuda.py /app/
+ RUN chmod +x /app/check_gpu_support.sh
+
+ # Unpack llama.cpp and build with CUDA support (conditionally, based on SKIP_LLAMA_BUILD)
+ RUN if [ "$SKIP_LLAMA_BUILD" = "false" ]; then \
+         echo "=====================================================================" && \
+         echo "STARTING LLAMA.CPP BUILD WITH CUDA SUPPORT - THIS WILL TAKE SOME TIME" && \
+         echo "=====================================================================" && \
+         LLAMA_LOCAL_ZIP="dependencies/llama.cpp.zip" && \
+         echo "Using local llama.cpp archive..." && \
+         unzip -q "$LLAMA_LOCAL_ZIP" && \
+         cd llama.cpp && \
+         mkdir -p build && \
+         cd build && \
+         echo "Starting CMake configuration with CUDA support..." && \
+         cmake -DGGML_CUDA=OFF -DLLAMA_CUBLAS=OFF \
+             -DCMAKE_BUILD_TYPE=Release \
+             -DBUILD_SHARED_LIBS=OFF \
+             -DLLAMA_NATIVE=ON \
+             .. && \
+         echo "Starting build process (this will take several minutes)..." && \
+         cmake --build . --config Release -j --verbose && \
+         echo "Build completed successfully" && \
+         chmod +x /app/llama.cpp/build/bin/llama-server /app/llama.cpp/build/bin/llama-cli && \
+         echo "====================================================================" && \
+         echo "CUDA BUILD COMPLETED SUCCESSFULLY! GPU ACCELERATION IS NOW AVAILABLE" && \
+         echo "===================================================================="; \
+     else \
+         echo "=====================================================================" && \
+         echo "SKIPPING LLAMA.CPP BUILD (SKIP_LLAMA_BUILD=$SKIP_LLAMA_BUILD)" && \
+         echo "Using existing llama.cpp build from Docker volume" && \
+         echo "=====================================================================" && \
+         LLAMA_LOCAL_ZIP="dependencies/llama.cpp.zip" && \
+         echo "Just unpacking llama.cpp archive (no build)..." && \
+         unzip -q "$LLAMA_LOCAL_ZIP" && \
+         cd llama.cpp && \
+         mkdir -p build; \
+     fi
+
+ # Mark as GPU-optimized build for runtime reference
+ RUN mkdir -p /app/data && \
+     echo "{ \"gpu_optimized\": true, \"optimized_on\": \"$(date -u +\"%Y-%m-%dT%H:%M:%SZ\")\" }" > /app/data/gpu_optimized.json && \
+     echo "Created GPU-optimized marker file"
+
+ # Copy project configuration - Files that occasionally change
+ COPY pyproject.toml README.md /app/
+
+ # Fix for potential package installation issues with Poetry
+ RUN pip install --upgrade setuptools wheel
+ RUN poetry install --no-interaction --no-root || poetry install --no-interaction --no-root --without dev
+ RUN pip install --force-reinstall dependencies/graphrag-1.2.1.dev27.tar.gz
+
+ # Copy source code - Files that frequently change
+ COPY docker/ /app/docker/
+ COPY lpm_kernel/ /app/lpm_kernel/
+
+ # Check module import
+ RUN python -c "import lpm_kernel; print('Module import check passed')"
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1 \
+     PYTHONPATH=/app \
+     BASE_DIR=/app/data \
+     LOCAL_LOG_DIR=/app/logs \
+     RUN_DIR=/app/run \
+     RESOURCES_DIR=/app/resources \
+     APP_ROOT=/app \
+     FLASK_APP=lpm_kernel.app \
+     LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
+
+ # Expose ports
+ EXPOSE 8002 8080
+
+ # Set the startup command
+ CMD ["bash", "-c", "echo 'Checking SQLite database...' && if [ ! -s /app/data/sqlite/lpm.db ]; then echo 'SQLite database not found or empty, initializing...' && mkdir -p /app/data/sqlite && sqlite3 /app/data/sqlite/lpm.db '.read /app/docker/sqlite/init.sql' && echo 'SQLite database initialized successfully' && echo 'Tables created:' && sqlite3 /app/data/sqlite/lpm.db '.tables'; else echo 'SQLite database already exists, skipping initialization'; fi && echo 'Checking ChromaDB...' && if [ ! -d /app/data/chroma_db/documents ] || [ ! -d /app/data/chroma_db/document_chunks ]; then echo 'ChromaDB collections not found, initializing...' && python /app/docker/app/init_chroma.py && echo 'ChromaDB initialized successfully'; else echo 'ChromaDB already exists, skipping initialization'; fi && echo 'Starting application at ' $(date) >> /app/logs/backend.log && cd /app && python -m flask run --host=0.0.0.0 --port=${LOCAL_APP_PORT:-8002} >> /app/logs/backend.log 2>&1"]
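The `SKIP_LLAMA_BUILD` build argument declared at the top of this Dockerfile controls whether llama.cpp is compiled during the image build; a minimal sketch of both invocations (the image tags are illustrative):

```bash
# Full build, including the llama.cpp compilation step
docker build -f Dockerfile.backend.cuda -t second-me-backend-cuda .

# Faster rebuild that only unpacks llama.cpp and reuses an existing build from a volume
docker build -f Dockerfile.backend.cuda --build-arg SKIP_LLAMA_BUILD=true -t second-me-backend-cuda .
```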
Dockerfile.frontend ADDED
@@ -0,0 +1,25 @@
+ FROM node:23
+
+ # Set working directory
+ WORKDIR /app
+
+ # Copy frontend package files
+ COPY lpm_frontend/package.json lpm_frontend/package-lock.json* /app/
+
+ # Install dependencies
+ RUN npm install
+
+ # Copy frontend code
+ COPY lpm_frontend/ /app/
+
+ # Set environment variable for backend URL (can be overridden in docker-compose)
+ ENV DOCKER_API_BASE_URL=http://backend:8002
+
+ # Create logs directory
+ RUN mkdir -p /app/logs
+
+ # Expose frontend port
+ EXPOSE 3000
+
+ # Start frontend service
+ CMD ["npm", "run", "dev"]
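`DOCKER_API_BASE_URL` is baked in here and, per the comment above, meant to be overridden by the compose file; a minimal sketch of overriding it when running the container directly (the image tag and URL are illustrative):

```bash
docker build -f Dockerfile.frontend -t second-me-frontend .
docker run --rm -p 3000:3000 -e DOCKER_API_BASE_URL=http://localhost:8002 second-me-frontend
```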
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2025 Mindverse
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
Makefile ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .PHONY: install test format lint all setup start stop restart restart-backend restart-force help docker-build docker-up docker-down docker-build-backend docker-build-frontend docker-restart-backend docker-restart-backend-fast docker-restart-backend-smart docker-restart-frontend docker-restart-all docker-check-cuda docker-use-gpu docker-use-cpu
2
+
3
+ # Check for GPU flag file and set Docker Compose file accordingly
4
+ ifeq ($(wildcard .gpu_selected),)
5
+ # No GPU flag file found, use CPU configuration
6
+ DOCKER_COMPOSE_FILE := docker-compose.yml
7
+ else
8
+ # GPU flag file found, use GPU configuration
9
+ DOCKER_COMPOSE_FILE := docker-compose-gpu.yml
10
+ endif
11
+
12
+ # Detect operating system and set environment
13
+ ifeq ($(OS),Windows_NT)
14
+ # Set Windows variables
15
+ WINDOWS := 1
16
+ # Set UTF-8 code page for Windows to display Unicode characters
17
+ SET_UTF8 := $(shell chcp 65001 >nul 2>&1 || echo)
18
+ # No need to check for Apple Silicon on Windows
19
+ APPLE_SILICON := 0
20
+ # Define empty color codes for Windows to avoid display issues
21
+ COLOR_CYAN :=
22
+ COLOR_RESET :=
23
+ COLOR_BOLD :=
24
+ COLOR_GRAY :=
25
+ COLOR_GREEN :=
26
+ COLOR_RED :=
27
+ else
28
+ WINDOWS := 0
29
+ # Detect Apple Silicon on non-Windows systems
30
+ ifeq ($(shell uname -s),Darwin)
31
+ ifeq ($(shell uname -m),arm64)
32
+ APPLE_SILICON := 1
33
+ # Set environment variables for Apple Silicon
34
+ export DOCKER_BACKEND_DOCKERFILE=Dockerfile.backend.apple
35
+ export PLATFORM=apple
36
+ else
37
+ APPLE_SILICON := 0
38
+ endif
39
+ else
40
+ APPLE_SILICON := 0
41
+ endif
42
+ # Define ANSI color codes for Unix systems
43
+ COLOR_CYAN := \033[0;36m
44
+ COLOR_RESET := \033[0m
45
+ COLOR_BOLD := \033[1m
46
+ COLOR_GRAY := \033[0;90m
47
+ COLOR_GREEN := \033[1;32m
48
+ COLOR_RED := \033[1;31m
49
+ endif
50
+
51
+ # Fallback Docker Compose configuration (CPU-only); '?=' preserves the GPU/CPU selection made above
52
+ DOCKER_COMPOSE_FILE ?= docker-compose.yml
53
+
54
+ # Show help message
55
+ help:
56
+ ifeq ($(WINDOWS),1)
57
+ @echo.
58
+ @echo ███████╗███████╗ ██████╗ ██████╗ ███╗ ██╗██████╗ ███╗ ███╗███████╗
59
+ @echo ██╔════╝██╔════╝██╔════╝██╔═══██╗████╗ ██║██╔══██╗ ████╗ ████║██╔════╝
60
+ @echo ███████╗█████╗ ██║ ██║ ██║██╔██╗ ██║██║ ██║█████╗██╔████╔██║█████╗
61
+ @echo ╚════██║██╔══╝ ██║ ██║ ██║██║╚██╗██║██║ ██║╚════╝██║╚██╔╝██║██╔══╝
62
+ @echo ███████║███████╗╚██████╗╚██████╔╝██║ ╚████║██████╔╝ ██║ ╚═╝ ██║███████╗
63
+ @echo ╚══════╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═════╝ ╚═╝ ╚═╝╚══════╝
64
+ @echo.
65
+ @echo SECOND-ME MAKEFILE COMMANDS
66
+ @echo ------------------------------
67
+ @echo.
68
+ @echo LOCAL COMMANDS:
69
+ @echo make setup - Complete installation
70
+ @echo make start - Start all services
71
+ @echo make stop - Stop all services
72
+ @echo make restart - Restart all services
73
+ @echo make restart-backend - Restart only backend service
74
+ @echo make restart-force - Force restart and reset data
75
+ @echo make status - Show status of all services
76
+ @echo.
77
+ @echo DOCKER COMMANDS:
78
+ @echo make docker-build - Build all Docker images
79
+ @echo make docker-up - Start all Docker containers
80
+ @echo make docker-down - Stop all Docker containers
81
+ @echo make docker-build-backend - Build only backend Docker image
82
+ @echo make docker-build-frontend - Build only frontend Docker image
83
+ @echo make docker-restart-backend - Restart only backend container
84
+ @echo make docker-restart-backend-fast - Restart backend+cuda without rebuilding llama.cpp
85
+ @echo make docker-restart-frontend - Restart only frontend container
86
+ @echo make docker-restart-all - Restart all Docker containers
87
+ @echo make docker-check-cuda - Check CUDA support in containers
88
+ @echo make docker-use-gpu - Switch to GPU configuration
89
+ @echo make docker-use-cpu - Switch to CPU-only configuration
90
+ @echo.
91
+ @echo All Available Commands:
92
+ @echo make help - Show this help message
93
+ @echo make install - Install project dependencies
94
+ @echo make test - Run tests
95
+ @echo make format - Format code
96
+ @echo make lint - Check code style
97
+ @echo make all - Run format, lint and test
98
+ else
99
+ @echo "$(COLOR_CYAN)"
100
+ @echo ' ███████╗███████╗ ██████╗ ██████╗ ███╗ ██╗██████╗ ███╗ ███╗███████╗'
101
+ @echo ' ██╔════╝██╔════╝██╔════╝██╔═══██╗████╗ ██║██╔══██╗ ████╗ ████║██╔════╝'
102
+ @echo ' ███████╗█████╗ ██║ ██║ ██║██╔██╗ ██║██║ ██║█████╗██╔████╔██║█████╗ '
103
+ @echo ' ╚════██║██╔══╝ ██║ ██║ ██║██║╚██╗██║██║ ██║╚════╝██║╚██╔╝██║██╔══╝ '
104
+ @echo ' ███████║███████╗╚██████╗╚██████╔╝██║ ╚████║██████╔╝ ██║ ╚═╝ ██║███████╗'
105
+ @echo ' ╚══════╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═════╝ ╚═╝ ╚═╝╚══════╝'
106
+ @echo "$(COLOR_RESET)"
107
+ @echo "$(COLOR_BOLD)Second-Me Makefile Commands$(COLOR_RESET)"
108
+ @echo "$(COLOR_GRAY)$$(date)$(COLOR_RESET)\n"
109
+ @echo ""
110
+ @echo "$(COLOR_GREEN)▶ LOCAL COMMANDS:$(COLOR_RESET)"
111
+ @echo " make setup - Complete installation"
112
+ @echo " make start - Start all services"
113
+ @echo " make stop - Stop all services"
114
+ @echo " make restart - Restart all services"
115
+ @echo " make restart-backend - Restart only backend service"
116
+ @echo " make restart-force - Force restart and reset data"
117
+ @echo " make status - Show status of all services"
118
+ @echo ""
119
+ @echo "$(COLOR_GREEN)▶ DOCKER COMMANDS:$(COLOR_RESET)"
120
+ @echo " make docker-build - Build all Docker images"
121
+ @echo " make docker-up - Start all Docker containers"
122
+ @echo " make docker-down - Stop all Docker containers"
123
+ @echo " make docker-build-backend - Build only backend Docker image"
124
+ @echo " make docker-build-frontend - Build only frontend Docker image"
125
+ @echo " make docker-restart-backend - Restart only backend container (with rebuild)"
126
+ @echo " make docker-restart-backend-fast - Restart backend+cuda without rebuilding llama.cpp"
127
+ @echo " make docker-restart-frontend - Restart only frontend container"
128
+ @echo " make docker-restart-all - Restart all Docker containers"
129
+ @echo " make docker-check-cuda - Check CUDA support in containers"
130
+ @echo " make docker-use-gpu - Switch to GPU configuration"
131
+ @echo " make docker-use-cpu - Switch to CPU-only configuration"
132
+ @echo ""
133
+ @echo "$(COLOR_BOLD)All Available Commands:$(COLOR_RESET)"
134
+ @echo " make help - Show this help message"
135
+ @echo " make install - Install project dependencies"
136
+ @echo " make test - Run tests"
137
+ @echo " make format - Format code"
138
+ @echo " make lint - Check code style"
139
+ @echo " make all - Run format, lint and test"
140
+ @if [ "$(APPLE_SILICON)" = "1" ]; then \
141
+ echo ""; \
142
+ echo "$(COLOR_GREEN)▶ PLATFORM INFORMATION:$(COLOR_RESET)"; \
143
+ echo " Apple Silicon detected - Docker commands will use PLATFORM=apple"; \
144
+ fi
145
+ endif
146
+
147
+ # Configuration switchers for Docker
148
+ docker-use-gpu:
149
+ @echo "Switching to GPU configuration..."
150
+ ifeq ($(WINDOWS),1)
151
+ @echo GPU mode enabled. Docker commands will use docker-compose-gpu.yml
152
+ @echo gpu > .gpu_selected
153
+ else
154
+ @echo "$(COLOR_GREEN)GPU mode enabled. Docker commands will use docker-compose-gpu.yml$(COLOR_RESET)"
155
+ @echo "gpu" > .gpu_selected
156
+ endif
157
+
158
+ docker-use-cpu:
159
+ @echo "Switching to CPU-only configuration..."
160
+ ifeq ($(WINDOWS),1)
161
+ @echo CPU-only mode enabled. Docker commands will use docker-compose.yml
162
+ @rm -f .gpu_selected
163
+ else
164
+ @echo "$(COLOR_GREEN)CPU-only mode enabled. Docker commands will use docker-compose.yml$(COLOR_RESET)"
165
+ @rm -f .gpu_selected
166
+ endif
167
+
168
+ setup:
169
+ ./scripts/setup.sh
170
+
171
+ start:
172
+ ./scripts/start.sh
173
+
174
+ stop:
175
+ ./scripts/stop.sh
176
+
177
+ restart:
178
+ ./scripts/restart.sh
179
+
180
+ restart-backend:
181
+ ./scripts/restart-backend.sh
182
+
183
+ restart-force:
184
+ ./scripts/restart-force.sh
185
+
186
+ status:
187
+ ./scripts/status.sh
188
+
189
+ # Docker commands
190
+ # Set Docker environment variable for all Docker commands
191
+ docker-%: export IN_DOCKER_ENV=1
192
+
193
+ ifeq ($(OS),Windows_NT)
194
+ DOCKER_COMPOSE_CMD := docker compose
195
+ else
196
+ DOCKER_COMPOSE_CMD := $(shell if command -v docker-compose >/dev/null 2>&1; then echo "docker-compose"; else echo "docker compose"; fi)
197
+ endif
198
+
199
+ docker-build:
200
+ ifeq ($(WINDOWS),1)
201
+ @echo "Prompting for CUDA preference..."
202
+ @scripts\prompt_cuda.bat
203
+ else
204
+ @echo "Prompting for CUDA preference..."
205
+ @chmod +x ./scripts/prompt_cuda.sh
206
+ @./scripts/prompt_cuda.sh
207
+ endif
208
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) build
209
+
210
+ docker-up:
211
+ @echo "Building and starting Docker containers..."
212
+ ifeq ($(WINDOWS),1)
213
+ @echo "Prompting for CUDA preference..."
214
+ @scripts\prompt_cuda.bat
215
+ @echo "Checking CUDA preference..."
216
+ @cmd /c "if exist .gpu_selected ( echo CUDA support detected, using GPU configuration... & docker compose -f docker-compose-gpu.yml build & docker compose -f docker-compose-gpu.yml up -d ) else ( echo No CUDA support selected, using CPU-only configuration... & docker compose -f docker-compose.yml build & docker compose -f docker-compose.yml up -d )"
217
+ else
218
+ @echo "Prompting for CUDA preference..."
219
+ @chmod +x ./scripts/prompt_cuda.sh
220
+ @./scripts/prompt_cuda.sh
221
+ @echo "Checking CUDA preference..."
222
+ @if [ -f .gpu_selected ]; then \
223
+ echo "CUDA support detected, using GPU configuration..."; \
224
+ $(DOCKER_COMPOSE_CMD) -f docker-compose-gpu.yml build; \
225
+ $(DOCKER_COMPOSE_CMD) -f docker-compose-gpu.yml up -d; \
226
+ else \
227
+ echo "No CUDA support selected, using CPU-only configuration..."; \
228
+ $(DOCKER_COMPOSE_CMD) -f docker-compose.yml build; \
229
+ $(DOCKER_COMPOSE_CMD) -f docker-compose.yml up -d; \
230
+ fi
231
+ endif
232
+ @echo "Container startup complete"
233
+ @echo "Check CUDA support with: make docker-check-cuda"
234
+
235
+ docker-down:
236
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) down
237
+
238
+ docker-build-backend:
239
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) build backend
240
+
241
+ docker-build-frontend:
242
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) build frontend
243
+
244
+ # Standard backend restart with complete rebuild
245
+ docker-restart-backend:
246
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) stop backend
247
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) rm -f backend
248
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) build backend || { echo "$(COLOR_RED)❌ Backend build failed! Aborting operation...$(COLOR_RESET)"; exit 1; }
249
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) up -d backend
250
+
251
+
252
+ # Fast backend restart: preserves llama.cpp build
253
+ docker-restart-backend-fast:
254
+ @echo "Smart restarting backend container (preserving llama.cpp build)..."
255
+ @echo "Stopping backend container..."
256
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) stop backend
257
+ @echo "Removing backend container..."
258
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) rm -f backend
259
+ @echo "Building backend image with build-arg to skip llama.cpp build..."
260
+ ifeq ($(wildcard .gpu_selected),)
261
+ @echo "Using CPU configuration (docker-compose.yml)..."
262
+ else
263
+ @echo "Using GPU configuration (docker-compose-gpu.yml)..."
264
+ endif
265
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) build --build-arg SKIP_LLAMA_BUILD=true backend || { echo "$(COLOR_RED)❌ Backend build failed! Aborting operation...$(COLOR_RESET)"; exit 1; }
266
+ @echo "Starting backend container..."
267
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) up -d backend
268
+ @echo "Backend container smart-restarted successfully"
269
+ @echo "Check CUDA support with: make docker-check-cuda"
270
+
271
+ docker-restart-frontend:
272
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) stop frontend
273
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) rm -f frontend
274
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) build frontend || { echo "$(COLOR_RED)❌ Frontend build failed! Aborting operation...$(COLOR_RESET)"; exit 1; }
275
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) up -d frontend
276
+
277
+ docker-restart-all:
278
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) stop
279
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) rm -f
280
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) build || { echo "$(COLOR_RED)❌ Build failed! Aborting operation...$(COLOR_RESET)"; exit 1; }
281
+ $(DOCKER_COMPOSE_CMD) -f $(DOCKER_COMPOSE_FILE) up -d
282
+
283
+ # New command to check CUDA support in containers
284
+ docker-check-cuda:
285
+ @echo "Checking CUDA support in Docker containers..."
286
+ ifeq ($(WINDOWS),1)
287
+ @echo Running CUDA support check in backend container
288
+ @docker exec second-me-backend /app/check_gpu_support.sh || echo No GPU support detected in backend container
289
+ else
290
+ @echo "$(COLOR_CYAN)Running CUDA support check in backend container:$(COLOR_RESET)"
291
+ @docker exec second-me-backend /app/check_gpu_support.sh || echo "$(COLOR_RED)No GPU support detected in backend container$(COLOR_RESET)"
292
+ endif
293
+
294
+ install:
295
+ poetry install
296
+
297
+ test:
298
+ poetry run pytest tests
299
+
300
+ format:
301
+ poetry run ruff format lpm_kernel/
302
+
303
+ lint:
304
+ poetry run ruff check lpm_kernel/
305
+
306
+ all: format lint test
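A quick usage sketch for the Makefile above, using only the targets it defines; it assumes Docker is installed and, for the GPU path, an NVIDIA GPU with the NVIDIA Container Toolkit on the host.

```bash
# Switch to the GPU configuration (writes the .gpu_selected flag file),
# build and start the containers, then verify CUDA inside the backend.
make docker-use-gpu
make docker-up
make docker-check-cuda

# After backend changes, restart without rebuilding llama.cpp:
make docker-restart-backend-fast

# Return to the CPU-only stack:
make docker-use-cpu
make docker-up
```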
README.md ADDED
@@ -0,0 +1,163 @@
1
+ ![Second Me](https://github.com/mindverse/Second-Me/blob/master/images/cover.png)
2
+
3
+ <div align="center">
4
+
5
+ [![Homepage](https://img.shields.io/badge/Second_Me-Homepage-blue?style=flat-square&logo=homebridge)](https://home.second.me/)
6
+ [![AI-native Memory](https://img.shields.io/badge/AI--native_Memory-arXiv-orange?style=flat-square&logo=academia)](https://arxiv.org/abs/2406.18312)
7
+ [![AI-native Memory 2.0](https://img.shields.io/badge/AI--native_Memory_2.0-arXiv-red?style=flat-square&logo=arxiv)](https://arxiv.org/abs/2503.08102)
8
+ [![Discord](https://img.shields.io/badge/Chat-Discord-5865F2?style=flat-square&logo=discord&logoColor=white)](https://discord.gg/GpWHQNUwrg)
9
+ [![Twitter](https://img.shields.io/badge/Follow-@SecondMe_AI-1DA1F2?style=flat-square&logo=x&logoColor=white)](https://x.com/SecondMe_AI1)
10
+ [![Reddit](https://img.shields.io/badge/Join-Reddit-FF4500?style=flat-square&logo=reddit&logoColor=white)](https://www.reddit.com/r/SecondMeAI/)
11
+ [![View FAQ](https://img.shields.io/badge/FAQ-GitBook-blue?style=flat-square)](https://secondme.gitbook.io/secondme/faq)
12
+
13
+ </div>
14
+
15
+
16
+ ## Our Vision
17
+
18
+ Companies like OpenAI built "Super AI" that threatens human independence. We crave individuality: AI that amplifies, not erases, **YOU**.
19
+
20
+ We’re challenging that with "**Second Me**": an open-source prototype where you craft your own **AI self**—a new AI species that preserves you, delivers your context, and defends your interests.
21
+
22
+ It’s **locally trained and hosted**—your data, your control—yet **globally connected**, scaling your intelligence across an AI network. Beyond that, it’s your AI identity interface—a bold standard that links your AI to the world, sparks collaboration among AI selves, and builds tomorrow’s truly native AI apps.
23
+
24
+ Tech enthusiasts, AI pros, domain experts: join us! Second Me is your launchpad for extending your mind into the digital horizon.
25
+
26
+ ## Key Features
27
+
28
+ ### **Train Your AI Self** with AI-Native Memory ([Paper](https://arxiv.org/abs/2503.08102))
29
+ Start training your Second Me today with your own memories! Using Hierarchical Memory Modeling (HMM) and the Me-Alignment Algorithm, your AI self captures your identity, understands your context, and reflects you authentically.
30
+
31
+ <p align="center">
32
+ <img src="https://github.com/user-attachments/assets/a84c6135-26dc-4413-82aa-f4a373c0ff89" width="94%" />
33
+ </p>
34
+
35
+
36
+ ### **Scale Your Intelligence** on the Second Me Network
37
+ Launch your AI self from your laptop onto our decentralized network—anyone or any app can connect with your permission, sharing your context as your digital identity.
38
+
39
+ <p align="center">
40
+ <img src="https://github.com/user-attachments/assets/9a74a3f4-d8fd-41c1-8f24-534ed94c842a" width="94%" />
41
+ </p>
42
+
43
+
44
+ ### Build Tomorrow’s Apps with Second Me
45
+ **Roleplay**: Your AI self switches personas to represent you in different scenarios.
46
+ **AI Space**: Collaborate with other Second Mes to spark ideas or solve problems.
47
+
48
+ <p align="center">
49
+ <img src="https://github.com/user-attachments/assets/bc6125c1-c84f-4ecc-b620-8932cc408094" width="94%" />
50
+ </p>
51
+
52
+ ### 100% **Privacy and Control**
53
+ Unlike traditional centralized AI systems, Second Me ensures that your information and intelligence remain local and completely private.
54
+
55
+
56
+
57
+ ## Getting started & staying tuned with us
58
+ Star and join us, and you will receive all release notifications from GitHub without any delay!
59
+
60
+
61
+ <p align="center">
62
+ <img src="https://github.com/user-attachments/assets/5c14d956-f931-4c25-b0b3-3c2c96cd7581" width="94%" />
63
+ </p>
64
+
65
+
66
+ ## Quick Start
67
+
68
+ ### 📊 Model Size vs. Memory (Reference Guide)
69
+
70
+ *Note: "B" in the table represents "billion parameters model". Data shown are examples only; actual supported model sizes may vary depending on system optimization, deployment environment, and other hardware/software conditions.*
71
+
72
+ | Memory (GB) | Docker Deployment (Windows/Linux) | Docker Deployment (Mac) | Integrated Setup (Windows/Linux) | Integrated Setup (Mac) |
73
+ |--------------|-----------------------------|-------------------|--------------------------|----------------|
74
+ | 8 | ~0.8B (example) | ~0.4B (example) | ~1.0B (example) | ~0.6B (example) |
75
+ | 16 | 1.5B (example) | 0.5B (example) | ~2.0B (example) | ~0.8B (example) |
76
+ | 32 | ~2.8B (example) | ~1.2B (example) | ~3.5B (example) | ~1.5B (example) |
77
+
78
+ > **Note**: Models below 0.5B may not provide satisfactory performance for complex tasks. We're continuously improving cross-platform support; please [submit an issue](https://github.com/mindverse/Second-Me/issues/new) with feedback or compatibility problems on your operating system.
79
+
80
+ > **MLX Acceleration**: Mac M-series users can use [MLX](https://github.com/mindverse/Second-Me/tree/master/lpm_kernel/L2/mlx_training) to run larger models (CLI-only).
81
+
82
+ ### ⚡ Get your Second Me running in just 3 steps:
83
+
84
+ ```bash
85
+ # 1. Clone the repository
86
+ git clone https://github.com/mindverse/Second-Me.git
87
+ cd Second-Me
88
+ # 2. Start Docker containers
89
+ make docker-up
90
+ # 3. Access the web interface
91
+ # Open your browser and visit: http://localhost:3000
92
+ ```
93
+
94
+ 👉 For detailed instructions — including integrated (non-Docker) setup, model selection, memory requirements, and platform-specific tips,
95
+ check the full [Deployment Guide on GitBook](https://secondme.gitbook.io/secondme/guides/deployment).
96
+
97
+ ❓ Got questions about setup, models, or any troubleshooting? [Check our FAQ](https://secondme.gitbook.io/secondme/faq).
98
+
99
+ ## Tutorial and Use Cases
100
+ 🛠️ Feel free to follow [User tutorial](https://secondme.gitbook.io/secondme/getting-started) to build your Second Me.
101
+
102
+ 💡 Check out the links below to see how Second Me can be used in real-life scenarios:
103
+ - [Felix AMA (Roleplay app)](https://app.secondme.io/example/ama)
104
+ - [Brainstorming a 15-Day European City Itinerary (Network app)](https://app.secondme.io/example/brainstorming)
105
+ - [Icebreaking as a Speed Dating Match (Network app)](https://app.secondme.io/example/Icebreaker)
106
+
107
+
108
+ ## What's Next: May 2025
109
+
110
+ Second Me continues to evolve as the open-source identity infrastructure for AI. Here's what's on deck for May:
111
+
112
+ - 🗂️ **Version Control**: Smarter versioning of memory and identity states
113
+ - 🧠 **Continuous Training Pipelines**: Keep your AI self evolving over time, with ongoing updates based on new memory inputs.
114
+ - ⚙️ **Performance & Stability Improvements**: Enhancements across inference ability, model alignment, and base model upgrades
115
+ - ☁️ **Cloud Solutions**: Explore cloud-based solutions for both model training (fine-tuning) and model deployment, to reduce the hardware burden on users' local machines.
116
+
117
+ ## Contributing
118
+
119
+ We’d love for you to help shape what’s coming next — whether it’s fixing bugs, building new features, or improving docs.
120
+
121
+ - 📘 Check out our [Contribution Guide](./CONTRIBUTING.md) to get started
122
+ - 💻 Submit ideas, issues, or PRs on [GitHub](https://github.com/mindverse/Second-Me)
123
+ - 💬 Join the conversation and stay updated in our [Discord](https://discord.gg/GpWHQNUwrg) — it’s where the community lives
124
+
125
+
126
+ ## Contributors
127
+
128
+ We would like to express our gratitude to all the individuals who have contributed to Second Me! If you're interested in contributing to the future of intelligence uploading, whether through code, documentation, or ideas, please feel free to submit a pull request to our repository: [Second-Me](https://github.com/Mindverse/Second-Me).
129
+
130
+
131
+ <a href="https://github.com/mindverse/Second-Me/graphs/contributors">
132
+ <img src="https://contrib.rocks/image?repo=mindverse/Second-Me" />
133
+ </a>
134
+
135
+ Made with [contrib.rocks](https://contrib.rocks).
136
+
137
+ ## Acknowledgements
138
+
139
+ This work leverages the power of the open-source community.
140
+
141
+ For data synthesis, we utilized [GraphRAG](https://github.com/microsoft/graphrag) from Microsoft.
142
+
143
+ For model deployment, we utilized [llama.cpp](https://github.com/ggml-org/llama.cpp), which provides efficient inference capabilities.
144
+
145
+ Our base models primarily come from the [Qwen2.5](https://huggingface.co/Qwen) series.
146
+
147
+ We also want to extend our sincere gratitude to all users who have experienced Second Me. We recognize that there is significant room for optimization throughout the entire pipeline, and we are fully committed to iterative improvements to ensure everyone can enjoy the best possible experience locally.
148
+
149
+ ## License
150
+
151
+ Second Me is open source software licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for more details.
152
+
153
+ [license]: ./LICENSE
154
+
155
+ ## Star History
156
+
157
+ <a href="https://www.star-history.com/#mindverse/Second-Me&Date">
158
+ <picture>
159
+ <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=mindverse/Second-Me&type=Date&theme=dark" />
160
+ <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=mindverse/Second-Me&type=Date" />
161
+ <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=mindverse/Second-Me&type=Date" />
162
+ </picture>
163
+ </a>
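For the integrated (non-Docker) setup that the Deployment Guide above covers, a minimal sketch built from the `make` targets added in this commit; it assumes `scripts/setup.sh` installs the Python environment, builds llama.cpp, and prepares the frontend.

```bash
git clone https://github.com/mindverse/Second-Me.git
cd Second-Me
make setup     # ./scripts/setup.sh
make start     # ./scripts/start.sh
make status    # ./scripts/status.sh
# open http://localhost:3000 in a browser
make stop      # ./scripts/stop.sh when finished
```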
README_ja.md ADDED
@@ -0,0 +1,219 @@
1
+ ![Second Me](https://github.com/mindverse/Second-Me/blob/master/images/cover.png)
2
+
3
+ <div align="center">
4
+
5
+ [![Homepage](https://img.shields.io/badge/Second_Me-Homepage-blue?style=flat-square&logo=homebridge)](https://www.secondme.io/)
6
+ [![Report](https://img.shields.io/badge/Paper-arXiv-red?style=flat-square&logo=arxiv)](https://arxiv.org/abs/2503.08102)
7
+ [![Discord](https://img.shields.io/badge/Chat-Discord-5865F2?style=flat-square&logo=discord&logoColor=white)](https://discord.gg/GpWHQNUwrg)
8
+ [![Twitter](https://img.shields.io/badge/Follow-@SecondMe_AI-1DA1F2?style=flat-square&logo=x&logoColor=white)](https://x.com/SecondMe_AI1)
9
+ [![Reddit](https://img.shields.io/badge/Join-Reddit-FF4500?style=flat-square&logo=reddit&logoColor=white)](https://www.reddit.com/r/SecondMeAI/)
10
+
11
+ </div>
12
+
13
+
14
+ ## 私たちのビジョン
15
+
16
+ OpenAIのような企業は、人間の独立性を脅かす「スーパーAI」を構築しました。私たちは個性を求めています:あなたを消すのではなく、強化するAI。
17
+
18
+ 私たちは「**Second Me**」でそれに挑戦しています:あなた自身の**AI自己**を作成するオープンソースのプロトタイプです。これは、あなたを保存し、あなたのコンテキストを提供し、あなたの利益を守る新しいAI種です。
19
+
20
+ これは**ローカルでトレーニングされ、ホストされる**ため、あなたのデータはあなたの管理下にありますが、**グローバルに接続**され、AIネットワーク全体であなたの知性を拡張します。それを超えて、これはあなたのAIアイデンティティインターフェースです。これは、あなたのAIを世界にリンクし、AI自己間のコラボレーションを促進し、明日の真にネイティブなAIアプリを構築するための大胆な標準です。
21
+
22
+ 私たちに参加してください。技術愛好家、AI専門家、ドメインエキスパート—Second Meは、あなたの心をデジタルの地平線に拡張するための出発点です。
23
+
24
+ ## 主な機能
25
+
26
+ ### **AI自己をトレーニング**するAIネイティブメモリ ([論文](https://arxiv.org/abs/2503.08102))
27
+ 今日からあなたのSecond Meをあなた自身の記憶でトレーニングし始めましょう!階層的メモリモデリング(HMM)とMe-Alignmentアルゴリズムを使用して、あなたのAI自己はあなたのアイデンティティをキャプチャし、あなたのコンテキストを理解し、あなたを本物に反映します。
28
+
29
+ <p align="center">
30
+ <img src="https://github.com/user-attachments/assets/a84c6135-26dc-4413-82aa-f4a373c0ff89" width="94%" />
31
+ </p>
32
+
33
+
34
+ ### **知性を拡張**するSecond Meネットワーク
35
+ あなたのAI自己をラップトップから私たちの分散ネットワークに起動します。誰でも、どのアプリでも、あなたの許可を得て接続し、あなたのコンテキストをデジタルアイデンティティとして共有します。
36
+
37
+ <p align="center">
38
+ <img src="https://github.com/user-attachments/assets/9a74a3f4-d8fd-41c1-8f24-534ed94c842a" width="94%" />
39
+ </p>
40
+
41
+
42
+ ### 明日のアプリをSecond Meで構築
43
+ **ロールプレイ**:あなたのAI自己が異なるシナリオであなたを代表するためにペルソナを切り替えます。
44
+ **AIスペース**:他のSecond Meと協力してアイデアを出し合ったり、問題を解決したりします。
45
+
46
+ <p align="center">
47
+ <img src="https://github.com/user-attachments/assets/bc6125c1-c84f-4ecc-b620-8932cc408094" width="94%" />
48
+ </p>
49
+
50
+ ### 100% **プライバシーとコントロール**
51
+ 従来の集中型AIシステムとは異なり、Second Meはあなたの情報と知性がローカルに完全にプライベートに保たれることを保証します。
52
+
53
+
54
+
55
+ ## 始め方と私たちとの連絡を保つ方法
56
+ スターを付けて参加し、GitHubからのすべてのリリース通知を遅延なく受け取ることができます!
57
+
58
+
59
+ <p align="center">
60
+ <img src="https://github.com/user-attachments/assets/5c14d956-f931-4c25-b0b3-3c2c96cd7581" width="94%" />
61
+ </p>
62
+
63
+ ## クイックスタート
64
+
65
+ ### インストールとセットアップ
66
+
67
+ #### 前提条件
68
+ - macOSオペレーティングシステム
69
+ - Python 3.8以上
70
+ - Node.js 16以上(フロントエンド用)
71
+ - Xcodeコマンドラインツール
72
+
73
+ #### Xcodeコマンドラインツールのインストール
74
+ まだXcodeコマンドラインツールをインストールしていない場合は、次のコマンドを実行してインストールできます:
75
+ ```bash
76
+ xcode-select --install
77
+ ```
78
+
79
+ インストール後、ライセンス契約に同意する必要があるかもしれません:
80
+ ```bash
81
+ sudo xcodebuild -license accept
82
+ ```
83
+
84
+ 1. リポジトリをクローン
85
+ ```bash
86
+ git clone git@github.com:Mindverse/Second-Me.git
87
+ cd Second-Me
88
+ ```
89
+
90
+ 2. 環境を設定
91
+
92
+ #### オプションA:既存のconda環境を持つユーザー向け
93
+ すでにcondaをインストールしている場合:
94
+
95
+ 1) 環境ファイルから新しい環境を作成:
96
+ ```bash
97
+ conda env create -f environment.yml # これにより、'second-me'という名前の環境が作成されます
98
+ conda activate second-me
99
+ ```
100
+
101
+ 2) `.env`でカスタムcondaモードを設定:
102
+ ```bash
103
+ CUSTOM_CONDA_MODE=true
104
+ ```
105
+
106
+ 3) セットアップを実行:
107
+ ```bash
108
+ make setup
109
+ ```
110
+
111
+ #### オプションB:新規ユーザー向け
112
+ 新規ユーザーまたは新しい環境を希望する場合:
113
+ ```bash
114
+ make setup
115
+ ```
116
+
117
+ このコマンドは自動的に次のことを行います:
118
+ - 必要なすべてのシステム依存関係をインストール(condaが存在しない場合は含む)
119
+ - 'second-me'という名前の新しいPython環境を作成
120
+ - llama.cppをビルド
121
+ - フロントエンド環境を設定
122
+
123
+ 3. サービスを開始
124
+ ```bash
125
+ make start
126
+ ```
127
+
128
+ 4. サービスにアクセス
129
+ ブラウザを開き、`http://localhost:3000`にアクセス
130
+
131
+ 5. ヘルプとその他のコマンドを表示
132
+ ```bash
133
+ make help
134
+ ```
135
+
136
+ ### 重要な注意事項
137
+ 1. 十分なディスクスペースを確保してください(少なくとも10GBを推奨)
138
+ 2. 既存のconda環境を使用する場合、競合するパッケージバージョンがないことを確認してください
139
+ 3. 初回の起動には依存関係のダウンロードとインストールに数分かかることがあります
140
+ 4. 一部のコマンドはsudo権限を必要とする場合があります
141
+
142
+ ### トラブルシューティング
143
+ 問題が発生した場合、次の点を確認してください:
144
+ 1. PythonとNode.jsのバージョンが要件を満たしていること
145
+ 2. 正しいconda環境にいること
146
+ 3. すべての依存関係が適切にインストールされていること
147
+ 4. システムのファイアウォールがアプリケーションが必要とするポートを使用できるようにしていること
148
+
149
+ ## チュートリアルとユースケース
150
+ 🛠️ [ユーザーチュートリアル](https://second-me.gitbook.io/a-new-ai-species-making-we-matter-again)に従って、あなたのSecond Meを構築してください。
151
+
152
+ 💡 以下のリンクをチェックして、Second Meが実際のシナリオでどのように使用されるかを確認してください:
153
+ - [Felix AMA(ロールプレイアプリ)](https://app.secondme.io/example/ama)
154
+ - [15日間のヨーロッパ都市旅程をブレインストーミング(ネットワークアプリ)](https://app.secondme.io/example/brainstorming)
155
+ - [スピードデートのマッチとしてのアイスブレイク(ネットワークアプリ)](https://app.secondme.io/example/Icebreaker)
156
+
157
+ ## コミュニティに参加
158
+ - [Discord](https://discord.com/invite/GpWHQNUwrg)
159
+ - [Reddit](https://www.reddit.com/r/SecondMeAI/)
160
+ - [X](https://x.com/SecondMe_AI1)
161
+
162
+ ## 近日公開
163
+
164
+ 以下の機能は内部で完了しており、オープンソースプロジェクトに段階的に統合されています。詳細な実験結果と技術仕様については、[技術報告書](https://arxiv.org/abs/2503.08102)を参照してください。
165
+
166
+ ### モデル強化機能
167
+ - [ ] **長い連鎖思考トレーニングパイプライン**:拡張された思考プロセストレーニングによる強化された推論能力
168
+ - [ ] **L2モデルの直接選好最適化**:ユーザーの選好と意図に対する改善された整合性
169
+ - [ ] **トレーニング用データフィルタリング**:高品質なトレーニングデータ選択のための高度な技術
170
+ - [ ] **Apple Siliconサポート**:MLXトレーニングおよびサービング機能を備えたApple Siliconプロセッサのネイティブサポート
171
+
172
+ ### 製品機能
173
+ - [ ] **自然言語メモリ要約**:自然言語形式での直感的なメモリ整理
174
+
175
+
176
+ ## 貢献
177
+
178
+ Second Meへの貢献を歓迎します!バグの修正、新機能の追加、ドキュメントの改善に興味がある場合は、貢献ガイドを確認してください。また、Second Meをコミュニティ、技術会議、またはソーシャルメディアで共有することでSecond Meをサポートすることもできます。
179
+
180
+ 開発に関する詳細な情報については、[貢献ガイド](./CONTRIBUTING.md)を参照してください。
181
+
182
+ ## 貢献者
183
+
184
+ Second Meに貢献してくれたすべての個人に感謝の意を表します!知性のアップロードの未来に貢献することに興味がある場合、コード、ドキュメント、アイデアを通じて、私たちのリポジトリにプルリクエストを提出してください:[Second-Me](https://github.com/Mindverse/Second-Me)。
185
+
186
+
187
+ <a href="https://github.com/mindverse/Second-Me/graphs/contributors">
188
+ <img src="https://contrib.rocks/image?repo=mindverse/Second-Me" />
189
+ </a>
190
+
191
+ Made with [contrib.rocks](https://contrib.rocks).
192
+
193
+ ## 謝辞
194
+
195
+ この作業はオープンソースコミュニティの力を活用しています。
196
+
197
+ データ合成には、Microsoftの[GraphRAG](https://github.com/microsoft/graphrag)を使用しました。
198
+
199
+ モデルのデプロイには、効率的な推論機能を提供する[llama.cpp](https://github.com/ggml-org/llama.cpp)を使用しました。
200
+
201
+ 私たちのベースモデルは主に[Qwen2.5](https://huggingface.co/Qwen)シリーズから来ています。
202
+
203
+ また、Second Meを体験してくれたすべてのユーザーに心から感謝します。パイプライン全体で最適化の余地が大いにあることを認識しており、皆さんがローカルで最高の体験を楽しめるようにするために、継続的な改善に全力を尽くします。
204
+
205
+ ## ライセンス
206
+
207
+ Second MeはApache License 2.0の下でライセンスされたオープンソースソフトウェアです。詳細については、[LICENSE](LICENSE)ファイルを参照してください。
208
+
209
+ [license]: ./LICENSE
210
+
211
+ ## Star History
212
+
213
+ <a href="https://www.star-history.com/#mindverse/Second-Me&Date">
214
+ <picture>
215
+ <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=mindverse/Second-Me&type=Date&theme=dark" />
216
+ <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=mindverse/Second-Me&type=Date" />
217
+ <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=mindverse/Second-Me&type=Date" />
218
+ </picture>
219
+ </a>
SECURITY.md ADDED
@@ -0,0 +1,21 @@
1
+ # Security Policy
2
+
3
+ ## Supported Versions
4
+
5
+ Use this section to tell people about which versions of your project are
6
+ currently being supported with security updates.
7
+
8
+ | Version | Supported |
9
+ | ------- | ------------------ |
10
+ | 5.1.x | :white_check_mark: |
11
+ | 5.0.x | :x: |
12
+ | 4.0.x | :white_check_mark: |
13
+ | < 4.0 | :x: |
14
+
15
+ ## Reporting a Vulnerability
16
+
17
+ Use this section to tell people how to report a vulnerability.
18
+
19
+ Tell them where to go, how often they can expect to get an update on a
20
+ reported vulnerability, what to expect if the vulnerability is accepted or
21
+ declined, etc.
app.py ADDED
File without changes
dependencies/graphrag-1.2.1.dev27.tar.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb46f99a9b661a6e2a541fa84dc0c2be7bea987c1e89e4702c5a5f3fdd370fa2
3
+ size 58210836
dependencies/graphrag-modified.tar.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c7eb128e9418aa90f2aee045a16018d4dfa34a868238b53c95eab5f411540a1
3
+ size 57865670
dependencies/llama.cpp.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:882fe2ec4ac3b20276979f5c730e2d4067096d318a6ec9abe56d01a4be6961bc
3
+ size 21292637
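The three dependency archives above are Git LFS pointer files (see `.gitattributes`), not the actual payloads. A sketch of fetching the real archives after cloning, assuming Git LFS is installed:

```bash
git lfs install
git lfs pull --include="dependencies/*"
ls -lh dependencies/   # sizes should now match the pointer metadata
```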
docker-compose-gpu.yml ADDED
@@ -0,0 +1,74 @@
1
+ services:
2
+ backend:
3
+ build:
4
+ context: .
5
+ dockerfile: ${DOCKER_BACKEND_DOCKERFILE:-Dockerfile.backend.cuda}
6
+ container_name: second-me-backend
7
+ restart: unless-stopped
8
+ ports:
9
+ - "8002:8002"
10
+ - "8080:8080"
11
+ volumes:
12
+ - ./data:/app/data
13
+ - ./logs:/app/logs
14
+ - ./run:/app/run
15
+ - ./resources:/app/resources
16
+ - ./docker:/app/docker
17
+ - ./.env:/app/.env
18
+ - llama-cpp-build:/app/llama.cpp/build # Persist the llama.cpp build
19
+ environment:
20
+ # Environment variables
21
+ - LOCAL_APP_PORT=8002
22
+ - IN_DOCKER_ENV=1
23
+ - PLATFORM=${PLATFORM:-linux}
24
+ - USE_CUDA=1
25
+ extra_hosts:
26
+ - "host.docker.internal:host-gateway"
27
+ deploy:
28
+ resources:
29
+ limits:
30
+ # Set container memory limit to 64GB
31
+ memory: 64G
32
+ reservations:
33
+ # Memory reservation
34
+ memory: 6G
35
+ devices:
36
+ - driver: nvidia
37
+ count: all
38
+ capabilities: [gpu]
39
+ networks:
40
+ - second-me-network
41
+
42
+ frontend:
43
+ build:
44
+ context: .
45
+ dockerfile: Dockerfile.frontend
46
+ container_name: second-me-frontend
47
+ restart: unless-stopped
48
+ ports:
49
+ - "3000:3000"
50
+ volumes:
51
+ - ./logs:/app/logs
52
+ - ./resources:/app/resources
53
+ environment:
54
+ - VITE_API_BASE_URL=http://backend:8002
55
+ depends_on:
56
+ - backend
57
+ deploy:
58
+ resources:
59
+ limits:
60
+ # Set container memory limit to 2GB
61
+ memory: 2G
62
+ reservations:
63
+ # Memory reservation
64
+ memory: 1G
65
+ networks:
66
+ - second-me-network
67
+
68
+ networks:
69
+ second-me-network:
70
+ driver: bridge
71
+
72
+ volumes:
73
+ llama-cpp-build:
74
+ driver: local
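A sketch of driving the GPU stack above directly with Docker Compose, assuming the NVIDIA Container Toolkit is configured so the `deploy.resources.devices` section can reserve the GPU:

```bash
docker compose -f docker-compose-gpu.yml up -d --build
docker exec second-me-backend nvidia-smi                    # GPU should be visible in the container
docker compose -f docker-compose-gpu.yml logs -f backend
```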
docker-compose.yml ADDED
@@ -0,0 +1,69 @@
1
+ services:
2
+ backend:
3
+ build:
4
+ context: .
5
+ dockerfile: ${DOCKER_BACKEND_DOCKERFILE:-Dockerfile.backend}
6
+ container_name: second-me-backend
7
+ restart: unless-stopped
8
+ ports:
9
+ - "8002:8002"
10
+ - "8080:8080"
11
+ volumes:
12
+ - ./data:/app/data
13
+ - ./logs:/app/logs
14
+ - ./run:/app/run
15
+ - ./resources:/app/resources
16
+ - ./docker:/app/docker
17
+ - ./.env:/app/.env
18
+ - llama-cpp-build:/app/llama.cpp/build # Persist the llama.cpp build
19
+ environment:
20
+ # Environment variables
21
+ - LOCAL_APP_PORT=8002
22
+ - IN_DOCKER_ENV=1
23
+ - PLATFORM=${PLATFORM:-linux}
24
+ extra_hosts:
25
+ - "host.docker.internal:host-gateway"
26
+ deploy:
27
+ resources:
28
+ limits:
29
+ # Set container memory limit to 64GB
30
+ memory: 64G
31
+ reservations:
32
+ # Memory reservation
33
+ memory: 6G
34
+ networks:
35
+ - second-me-network
36
+
37
+ frontend:
38
+ build:
39
+ context: .
40
+ dockerfile: Dockerfile.frontend
41
+ container_name: second-me-frontend
42
+ restart: unless-stopped
43
+ ports:
44
+ - "3000:3000"
45
+ volumes:
46
+ - ./logs:/app/logs
47
+ - ./resources:/app/resources
48
+ environment:
49
+ - VITE_API_BASE_URL=http://backend:8002
50
+ depends_on:
51
+ - backend
52
+ deploy:
53
+ resources:
54
+ limits:
55
+ # Set container memory limit to 2GB
56
+ memory: 2G
57
+ reservations:
58
+ # Memory reservation
59
+ memory: 1G
60
+ networks:
61
+ - second-me-network
62
+
63
+ networks:
64
+ second-me-network:
65
+ driver: bridge
66
+
67
+ volumes:
68
+ llama-cpp-build:
69
+ driver: local
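The backend image is selected through `DOCKER_BACKEND_DOCKERFILE` (default `Dockerfile.backend`); the Makefile exports `Dockerfile.backend.apple` and `PLATFORM=apple` on Apple Silicon. A sketch of doing the same override by hand:

```bash
export DOCKER_BACKEND_DOCKERFILE=Dockerfile.backend.apple
export PLATFORM=apple
docker compose -f docker-compose.yml build backend
docker compose -f docker-compose.yml up -d
```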
docker/app/check_gpu_support.sh ADDED
@@ -0,0 +1,57 @@
1
+ #!/bin/bash
2
+ # Helper script to check if GPU support is available at runtime
3
+
4
+ echo "=== GPU Support Check ==="
5
+
6
+ # Check if llama-server binary exists and is linked to CUDA libraries
7
+ if [ -f "/app/llama.cpp/build/bin/llama-server" ]; then
8
+ echo "llama-server binary found, checking for CUDA linkage..."
9
+ CUDA_LIBS=$(ldd /app/llama.cpp/build/bin/llama-server | grep -i "cuda\|nvidia")
10
+
11
+ if [ -n "$CUDA_LIBS" ]; then
12
+ echo "✅ llama-server is built with CUDA support:"
13
+ echo "$CUDA_LIBS"
14
+ echo "GPU acceleration is available"
15
+
16
+ # Check for GPU optimization marker file (optional, not required)
17
+ GPU_MARKER_FILE="/app/data/gpu_optimized.json"
18
+ if [ -f "$GPU_MARKER_FILE" ]; then
19
+ GPU_OPTIMIZED=$(grep -o '"gpu_optimized": *true' "$GPU_MARKER_FILE" || echo "false")
20
+ OPTIMIZED_DATE=$(grep -o '"optimized_on": *"[^"]*"' "$GPU_MARKER_FILE" | cut -d'"' -f4)
21
+
22
+ if [[ "$GPU_OPTIMIZED" == *"true"* ]]; then
23
+ echo "📝 GPU-optimized build marker found (built on: $OPTIMIZED_DATE)"
24
+ else
25
+ echo "📝 GPU marker file found but not marked as optimized (built on: $OPTIMIZED_DATE)"
26
+ fi
27
+ else
28
+ echo "📝 No GPU optimization marker file found, but CUDA support is detected in binary"
29
+ fi
30
+
31
+ # Check if NVIDIA GPU is accessible at runtime
32
+ if nvidia-smi &>/dev/null; then
33
+ echo "🔍 NVIDIA GPU is available at runtime"
34
+ echo "=== GPU ACCELERATION IS READY TO USE ==="
35
+ exit 0
36
+ else
37
+ echo "⚠️ WARNING: llama-server has CUDA support, but NVIDIA GPU is not accessible"
38
+ echo "Check that Docker is running with GPU access (--gpus all)"
39
+ exit 1
40
+ fi
41
+ else
42
+ echo "❌ llama-server is not linked with CUDA libraries"
43
+ echo "Container was built without CUDA support"
44
+ fi
45
+ else
46
+ echo "❌ llama-server binary not found at /app/llama.cpp/build/bin/llama-server"
47
+ fi
48
+
49
+ # Final check for GPU hardware
50
+ if nvidia-smi &>/dev/null; then
51
+ echo "🔍 NVIDIA GPU is available at runtime, but llama-server doesn't support CUDA"
52
+ echo "To enable GPU support, rebuild using: make docker-up (and select CUDA support when prompted)"
53
+ exit 1
54
+ else
55
+ echo "❌ No NVIDIA GPU detected at runtime"
56
+ exit 1
57
+ fi
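This is the script that `make docker-check-cuda` runs; its exit code is 0 only when llama-server is CUDA-linked and a GPU is reachable. A sketch of calling it by hand while the backend container is running:

```bash
if docker exec second-me-backend /app/check_gpu_support.sh; then
  echo "backend is GPU-ready"
else
  echo "backend is CPU-only or the GPU is not visible"
fi
```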
docker/app/check_torch_cuda.py ADDED
@@ -0,0 +1,52 @@
1
+ #!/usr/bin/env python3
2
+
3
+ import torch
4
+ import subprocess
5
+ import sys
6
+ import os
7
+
8
+ print("=== PyTorch CUDA Version Information ===")
9
+ print(f"PyTorch version: {torch.__version__}")
10
+
11
+ if torch.cuda.is_available():
12
+ print(f"CUDA available: Yes")
13
+ print(f"CUDA version used by PyTorch: {torch.version.cuda}")
14
+ print(f"cuDNN version: {torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else 'Not available'}")
15
+ print(f"GPU device name: {torch.cuda.get_device_name(0)}")
16
+
17
+ # Try to check system CUDA version
18
+ try:
19
+ nvcc_output = subprocess.check_output(["nvcc", "--version"]).decode("utf-8")
20
+ print("\nSystem NVCC version:")
21
+ print(nvcc_output)
22
+ except Exception:
23
+ print("\nNVCC not found in PATH")
24
+
25
+ # Check CUDA libraries
26
+ try:
27
+ print("\nChecking required CUDA libraries:")
28
+ for lib in ["libcudart.so", "libcublas.so", "libcublasLt.so"]:
29
+ print(f"\nSearching for {lib}:")
30
+ find_result = subprocess.run(f"find /usr -name '{lib}*'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
31
+ if find_result.returncode == 0 and find_result.stdout:
32
+ print(find_result.stdout.decode("utf-8"))
33
+ else:
34
+ print(f"No {lib} found in /usr")
35
+ except Exception as e:
36
+ print(f"Error checking libraries: {e}")
37
+
38
+ # Check LD_LIBRARY_PATH
39
+ print("\nLD_LIBRARY_PATH:")
40
+ print(os.environ.get("LD_LIBRARY_PATH", "Not set"))
41
+
42
+ else:
43
+ print("CUDA not available")
44
+
45
+ # Check system CUDA installation
46
+ print("\n=== System CUDA Information ===")
47
+ try:
48
+ nvidia_smi = subprocess.check_output(["nvidia-smi"]).decode("utf-8")
49
+ print("NVIDIA-SMI output:")
50
+ print(nvidia_smi)
51
+ except Exception:
52
+ print("nvidia-smi not found or not working")
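A sketch of running this diagnostic inside the backend container; the in-container path is an assumption based on the `./docker:/app/docker` volume mount in the compose files:

```bash
docker exec second-me-backend python3 /app/docker/app/check_torch_cuda.py
```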
docker/app/init_chroma.py ADDED
@@ -0,0 +1,88 @@
1
+ import chromadb
2
+ import os
3
+ import sys
4
+
5
+ # Add project root to path to import from lpm_kernel
6
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
7
+
8
+ from lpm_kernel.api.services.user_llm_config_service import UserLLMConfigService
9
+ from lpm_kernel.file_data.chroma_utils import detect_embedding_model_dimension, reinitialize_chroma_collections
10
+
11
+ def init_chroma_db():
12
+ chroma_path = os.getenv("CHROMA_PERSIST_DIRECTORY", "./data/chroma_db")
13
+
14
+ # ensure the directory is correct
15
+ os.makedirs(chroma_path, exist_ok=True)
16
+
17
+ # Get embedding model dimension from user config
18
+ try:
19
+ user_llm_config_service = UserLLMConfigService()
20
+ user_llm_config = user_llm_config_service.get_available_llm()
21
+
22
+ if user_llm_config and user_llm_config.embedding_model_name:
23
+ # Detect dimension based on model name
24
+ dimension = detect_embedding_model_dimension(user_llm_config.embedding_model_name)
25
+ print(f"Detected embedding dimension: {dimension} for model: {user_llm_config.embedding_model_name}")
26
+ else:
27
+ # Default to OpenAI dimension if no config found
28
+ dimension = 1536
29
+ print(f"No embedding model configured, using default dimension: {dimension}")
30
+ except Exception as e:
31
+ # Default to OpenAI dimension if error occurs
32
+ dimension = 1536
33
+ print(f"Error detecting embedding dimension, using default: {dimension}. Error: {e}")
34
+
35
+ try:
36
+ client = chromadb.PersistentClient(path=chroma_path)
37
+ collections_to_init = ["documents", "document_chunks"]
38
+ dimension_mismatch_detected = False
39
+
40
+ # Check all collections for dimension mismatches first
41
+ for collection_name in collections_to_init:
42
+ try:
43
+ collection = client.get_collection(name=collection_name)
44
+ print(f"Collection '{collection_name}' already exists")
45
+
46
+ # Check if existing collection has the correct dimension
47
+ if collection.metadata.get("dimension") != dimension:
48
+ print(f"Warning: Existing '{collection_name}' collection has dimension {collection.metadata.get('dimension')}, but current model requires {dimension}")
49
+ dimension_mismatch_detected = True
50
+ except ValueError:
51
+ # Collection doesn't exist yet, will be created later
52
+ pass
53
+
54
+ # Handle dimension mismatch if detected in any collection
55
+ if dimension_mismatch_detected:
56
+ print("Automatically reinitializing ChromaDB collections with the new dimension...")
57
+ if reinitialize_chroma_collections(dimension):
58
+ print("Successfully reinitialized ChromaDB collections with the new dimension")
59
+ else:
60
+ print("Failed to reinitialize ChromaDB collections, you may need to manually delete the data/chroma_db directory")
61
+
62
+ # Create or get collections with the correct dimension
63
+ for collection_name in collections_to_init:
64
+ try:
65
+ collection = client.get_collection(name=collection_name)
66
+ # Verify dimension after possible reinitialization
67
+ if collection.metadata.get("dimension") != dimension:
68
+ print(f"Error: Collection '{collection_name}' still has incorrect dimension after reinitialization: {collection.metadata.get('dimension')} vs {dimension}")
69
+ except ValueError:
70
+ # Create collection if it doesn't exist
71
+ collection = client.create_collection(
72
+ name=collection_name,
73
+ metadata={
74
+ "hnsw:space": "cosine",
75
+ "dimension": dimension
76
+ }
77
+ )
78
+ print(f"Successfully created collection '{collection_name}' with dimension {dimension}")
79
+
80
+
81
+ print(f"ChromaDB initialized at {chroma_path}")
82
+ except Exception as e:
83
+ print(f"An error occurred while initializing ChromaDB: {e}")
84
+ # no exception for following process
85
+ # ChromaRepository will create collection if needed
86
+
87
+ if __name__ == "__main__":
88
+ init_chroma_db()
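A sketch of running the ChromaDB initializer standalone, assuming a Python environment with `chromadb` and the project packages available (e.g. after `make install`); the persist directory defaults to `./data/chroma_db`:

```bash
export CHROMA_PERSIST_DIRECTORY=./data/chroma_db
python3 docker/app/init_chroma.py
ls ./data/chroma_db    # persisted collection data for 'documents' and 'document_chunks'
```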
docker/app/rebuild_llama_cuda.sh ADDED
@@ -0,0 +1,132 @@
1
+ #!/bin/bash
2
+ # Script to rebuild llama.cpp with CUDA support at runtime
3
+ # This ensures the build happens with full knowledge of the GPU environment
4
+
5
+ set -e # Exit on error; -x is deliberately not enabled, to keep logs clean
6
+ cd /app
7
+
8
+ echo "========== STARTING LLAMA.CPP CUDA REBUILD PROCESS =========="
9
+ echo "Current directory: $(pwd)"
10
+
11
+ # First check if CUDA is actually available in the container
12
+ echo "Verifying NVIDIA drivers and CUDA availability..."
13
+ if ! command -v nvidia-smi &> /dev/null; then
14
+ echo "WARNING: NVIDIA drivers not found. Cannot build with CUDA support!"
15
+ echo "Make sure the container has access to the GPU and NVIDIA Container Toolkit is installed."
16
+ echo "Consider running Docker with: --gpus all"
17
+ exit 0 # Exit without error as there's no point trying to build with CUDA when no GPU is detected
18
+ fi
19
+
20
+ # Run nvidia-smi to check GPU access
21
+ echo "Detected NVIDIA GPU:"
22
+ nvidia-smi || {
23
+ echo "ERROR: nvidia-smi command failed. GPU is not properly accessible from the container."
24
+ echo "Make sure you're running Docker with GPU access enabled (--gpus all)"
25
+ exit 0 # Exit without error since there's no GPU access
26
+ }
27
+
28
+ # Install build dependencies
29
+ echo "Installing build dependencies..."
30
+ apt-get update && apt-get install -y --no-install-recommends \
31
+ build-essential \
32
+ wget \
33
+ cmake \
34
+ git \
35
+ ca-certificates \
36
+ gnupg \
37
+ libopenblas-dev
38
+
39
+ # Clean up apt cache to free space
40
+ apt-get clean
41
+ rm -rf /var/lib/apt/lists/*
42
+
43
+ # Install CUDA using NVIDIA's official Debian 12 network installation method
44
+ echo "Installing CUDA using NVIDIA's official method for Debian 12..."
45
+ wget https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb
46
+ dpkg -i cuda-keyring_1.1-1_all.deb
47
+ rm cuda-keyring_1.1-1_all.deb
48
+ apt-get update
49
+
50
+ # Install CUDA packages needed for building llama.cpp with CUDA support
51
+ apt-get install -y --fix-missing --no-install-recommends cuda-compiler-12-8
52
+ apt-get clean
53
+ rm -rf /var/lib/apt/lists/*
54
+
55
+ apt-get update
56
+ apt-get install -y --fix-missing --no-install-recommends cuda-runtime-12-8
57
+ apt-get clean
58
+ rm -rf /var/lib/apt/lists/*
59
+
60
+ apt-get update
61
+ apt-get install -y --fix-missing --no-install-recommends cuda-libraries-dev-12-8
62
+ apt-get clean
63
+ rm -rf /var/lib/apt/lists/*
64
+
65
+ # Set up environment for build
66
+ export PATH=/usr/local/cuda-12.8/bin:${PATH}
67
+ export LD_LIBRARY_PATH=/usr/local/cuda-12.8/lib64:${LD_LIBRARY_PATH}
68
+ export CUDA_HOME=/usr/local/cuda-12.8
69
+ # Set CUDACXX environment variable explicitly to help CMake find the CUDA compiler
70
+ export CUDACXX=/usr/local/cuda-12.8/bin/nvcc
71
+ export CMAKE_CUDA_COMPILER=/usr/local/cuda-12.8/bin/nvcc
72
+
73
+ # Verify CUDA compiler is available
74
+ echo "Verifying CUDA compiler (nvcc) is available:"
75
+ which nvcc || echo "ERROR: nvcc not found in PATH!"
76
+ nvcc --version || echo "ERROR: nvcc not working properly!"
77
+
78
+ echo "CUDA environment:"
79
+ echo "- CUDA_HOME: $CUDA_HOME"
80
+ echo "- CUDACXX: $CUDACXX"
81
+ echo "- CMAKE_CUDA_COMPILER: $CMAKE_CUDA_COMPILER"
82
+ echo "- PATH includes CUDA: $PATH"
83
+ echo "- LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
84
+
85
+ # Show available disk space
86
+ echo "Available disk space:"
87
+ df -h
88
+
89
+ # Use local build approach to avoid volume mount issues
90
+ echo "Building llama.cpp with CUDA in a local directory..."
91
+ cd /tmp
92
+ rm -rf llama_build
93
+ mkdir -p llama_build
94
+ cd llama_build
95
+
96
+ # Clone a fresh copy of llama.cpp - this avoids volume mount issues
97
+ echo "Cloning fresh copy of llama.cpp..."
98
+ git clone https://github.com/ggerganov/llama.cpp.git .
99
+
100
+ # Configure and build with CUDA support
101
+ mkdir -p build
102
+ cd build
103
+ echo "Configuring with CMake..."
104
+ cmake -DGGML_CUDA=ON \
105
+ -DCMAKE_CUDA_ARCHITECTURES=all \
106
+ -DCMAKE_BUILD_TYPE=Release \
107
+ -DBUILD_SHARED_LIBS=OFF \
108
+ -DLLAMA_NATIVE=OFF \
109
+ -DCMAKE_CUDA_FLAGS="-Wno-deprecated-gpu-targets" \
110
+ ..
111
+
112
+ echo "Building llama.cpp with CUDA support..."
113
+ cmake --build . --config Release --target all -j $(nproc)
114
+
115
+ if [ -f "bin/llama-server" ]; then
116
+ echo "Build successful! Copying binaries to /app/llama.cpp/build/bin/"
117
+ mkdir -p /app/llama.cpp/build/bin
118
+ cp bin/llama-server /app/llama.cpp/build/bin/
119
+ cp bin/llama-cli /app/llama.cpp/build/bin/ 2>/dev/null || true
120
+ chmod +x /app/llama.cpp/build/bin/llama-server /app/llama.cpp/build/bin/llama-cli
121
+
122
+ # Create GPU optimized marker
123
+ echo "{ \"gpu_optimized\": true, \"optimized_on\": \"$(date -u +\"%Y-%m-%dT%H:%M:%SZ\")\" }" > /app/data/gpu_optimized.json
124
+
125
+ echo "Testing CUDA support in built binary..."
126
+ LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH /app/llama.cpp/build/bin/llama-server --version
127
+ echo ""
128
+ echo "========== CUDA BUILD COMPLETED SUCCESSFULLY =========="
129
+ else
130
+ echo "ERROR: Build failed - llama-server executable not found!"
131
+ exit 1
132
+ fi
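A sketch of triggering the runtime CUDA rebuild inside the GPU backend container; the in-container script path follows from the `./docker:/app/docker` mount, and the `llama-cpp-build` volume keeps the result across restarts:

```bash
docker exec -it second-me-backend bash /app/docker/app/rebuild_llama_cuda.sh
# Confirm the rebuilt binary links against CUDA libraries:
docker exec second-me-backend ldd /app/llama.cpp/build/bin/llama-server | grep -i cuda
```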
docker/sqlite/init.sql ADDED
@@ -0,0 +1,237 @@
1
+ -- Document Table
2
+ CREATE TABLE IF NOT EXISTS document (
3
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
4
+ name VARCHAR(255) NOT NULL DEFAULT '',
5
+ title VARCHAR(511) NOT NULL DEFAULT '',
6
+ extract_status TEXT CHECK(extract_status IN ('INITIALIZED', 'SUCCESS', 'FAILED')) NOT NULL DEFAULT 'INITIALIZED',
7
+ embedding_status TEXT CHECK(embedding_status IN ('INITIALIZED', 'SUCCESS', 'FAILED')) NOT NULL DEFAULT 'INITIALIZED',
8
+ analyze_status TEXT CHECK(analyze_status IN ('INITIALIZED', 'SUCCESS', 'FAILED')) NOT NULL DEFAULT 'INITIALIZED',
9
+ mime_type VARCHAR(50) NOT NULL DEFAULT '',
10
+ raw_content TEXT DEFAULT NULL,
11
+ user_description VARCHAR(255) NOT NULL DEFAULT '',
12
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
13
+ update_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
14
+ url VARCHAR(1023) NOT NULL DEFAULT '',
15
+ document_size INTEGER NOT NULL DEFAULT 0,
16
+ insight TEXT DEFAULT NULL, -- JSON data stored as TEXT
17
+ summary TEXT DEFAULT NULL, -- JSON data stored as TEXT
18
+ keywords TEXT DEFAULT NULL
19
+ );
20
+
21
+ -- Document table indexes
22
+ CREATE INDEX IF NOT EXISTS idx_extract_status ON document(extract_status);
23
+ CREATE INDEX IF NOT EXISTS idx_name ON document(name);
24
+ CREATE INDEX IF NOT EXISTS idx_create_time ON document(create_time);
25
+
26
+ -- Chunk Table
27
+ CREATE TABLE IF NOT EXISTS chunk (
28
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
29
+ document_id INTEGER NOT NULL,
30
+ content TEXT NOT NULL,
31
+ has_embedding BOOLEAN NOT NULL DEFAULT 0,
32
+ tags TEXT DEFAULT NULL, -- JSON data stored as TEXT
33
+ topic VARCHAR(255) DEFAULT NULL,
34
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
35
+ FOREIGN KEY (document_id) REFERENCES document(id)
36
+ );
37
+
38
+ -- Chunk table indexes
39
+ CREATE INDEX IF NOT EXISTS idx_document_id ON chunk(document_id);
40
+ CREATE INDEX IF NOT EXISTS idx_has_embedding ON chunk(has_embedding);
41
+
42
+ -- L1 Version Table
43
+ CREATE TABLE IF NOT EXISTS l1_versions (
44
+ version INTEGER PRIMARY KEY,
45
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
46
+ status VARCHAR(50) NOT NULL,
47
+ description VARCHAR(500)
48
+ );
49
+
50
+ -- L1 Bio Table
51
+ CREATE TABLE IF NOT EXISTS l1_bios (
52
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
53
+ version INTEGER NOT NULL,
54
+ content TEXT,
55
+ content_third_view TEXT,
56
+ summary TEXT,
57
+ summary_third_view TEXT,
58
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
59
+ FOREIGN KEY (version) REFERENCES l1_versions(version)
60
+ );
61
+
62
+ -- L1 Shade Table
63
+ CREATE TABLE IF NOT EXISTS l1_shades (
64
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
65
+ version INTEGER NOT NULL,
66
+ name VARCHAR(200),
67
+ aspect VARCHAR(200),
68
+ icon VARCHAR(100),
69
+ desc_third_view TEXT,
70
+ content_third_view TEXT,
71
+ desc_second_view TEXT,
72
+ content_second_view TEXT,
73
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
74
+ FOREIGN KEY (version) REFERENCES l1_versions(version)
75
+ );
76
+
77
+ -- L1 Cluster Table
78
+ CREATE TABLE IF NOT EXISTS l1_clusters (
79
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
80
+ version INTEGER NOT NULL,
81
+ cluster_id VARCHAR(100),
82
+ memory_ids TEXT, -- JSON data stored as TEXT
83
+ cluster_center TEXT, -- JSON data stored as TEXT
84
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
85
+ FOREIGN KEY (version) REFERENCES l1_versions(version)
86
+ );
87
+
88
+ -- L1 Chunk Topic Table
89
+ CREATE TABLE IF NOT EXISTS l1_chunk_topics (
90
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
91
+ version INTEGER NOT NULL,
92
+ chunk_id VARCHAR(100),
93
+ topic TEXT,
94
+ tags TEXT, -- JSON data stored as TEXT
95
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
96
+ FOREIGN KEY (version) REFERENCES l1_versions(version)
97
+ );
98
+
99
+ -- Status Biography Table
100
+ CREATE TABLE IF NOT EXISTS status_biography (
101
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
102
+ content TEXT NOT NULL,
103
+ content_third_view TEXT NOT NULL,
104
+ summary TEXT NOT NULL,
105
+ summary_third_view TEXT NOT NULL,
106
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
107
+ update_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
108
+ );
109
+
110
+ -- Personal Load Table
111
+ CREATE TABLE IF NOT EXISTS loads (
112
+ id VARCHAR(36) PRIMARY KEY, -- UUID
113
+ name VARCHAR(255) NOT NULL, -- load name
114
+ description TEXT, -- load description
115
+ email VARCHAR(255) NOT NULL DEFAULT '', -- load email
116
+ avatar_data TEXT, -- load avatar base64 encoded data
117
+ instance_id VARCHAR(255), -- upload instance ID
118
+ instance_password VARCHAR(255), -- upload instance password
119
+ status TEXT CHECK(status IN ('active', 'inactive', 'deleted')) DEFAULT 'active',
120
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
121
+ updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
122
+ );
123
+
124
+ CREATE INDEX IF NOT EXISTS idx_loads_status ON loads(status);
125
+ CREATE INDEX IF NOT EXISTS idx_loads_created_at ON loads(created_at);
126
+
127
+ -- Memory Files Table
128
+ CREATE TABLE IF NOT EXISTS memories (
129
+ id VARCHAR(36) NOT NULL,
130
+ name VARCHAR(255) NOT NULL,
131
+ size INTEGER NOT NULL,
132
+ type VARCHAR(50) NOT NULL,
133
+ path VARCHAR(1024) NOT NULL,
134
+ meta_data TEXT, -- JSON data stored as TEXT
135
+ document_id VARCHAR(36),
136
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
137
+ updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
138
+ status TEXT CHECK(status IN ('active', 'deleted')) NOT NULL DEFAULT 'active',
139
+ PRIMARY KEY (id)
140
+ );
141
+
142
+ CREATE INDEX IF NOT EXISTS idx_memories_document_id ON memories(document_id);
143
+ CREATE INDEX IF NOT EXISTS idx_memories_created_at ON memories(created_at);
144
+ CREATE INDEX IF NOT EXISTS idx_memories_type ON memories(type);
145
+ CREATE INDEX IF NOT EXISTS idx_memories_status ON memories(status);
146
+
147
+ -- Roles Table
148
+ CREATE TABLE IF NOT EXISTS roles (
149
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
150
+ uuid VARCHAR(64) NOT NULL UNIQUE,
151
+ name VARCHAR(100) NOT NULL UNIQUE,
152
+ description VARCHAR(500),
153
+ system_prompt TEXT NOT NULL,
154
+ icon VARCHAR(100),
155
+ is_active BOOLEAN NOT NULL DEFAULT 1,
156
+ enable_l0_retrieval BOOLEAN NOT NULL DEFAULT 1,
157
+ enable_l1_retrieval BOOLEAN NOT NULL DEFAULT 1,
158
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
159
+ update_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
160
+ );
161
+
162
+ CREATE INDEX IF NOT EXISTS idx_roles_name ON roles(name);
163
+ CREATE INDEX IF NOT EXISTS idx_roles_uuid ON roles(uuid);
164
+ CREATE INDEX IF NOT EXISTS idx_roles_is_active ON roles(is_active);
165
+
166
+ -- Insert predefined Roles (only if they don't exist)
167
+ INSERT OR IGNORE INTO roles (uuid, name, description, system_prompt, icon) VALUES
168
+ ('role_interviewer_8f3a1c2e4b5d6f7a9e0b1d2c3f4e5d6b',
169
+ 'Interviewer (a test case)',
170
+ 'Professional interviewer who asks insightful questions to learn about people',
171
+
172
+ 'You are a professional interviewer with expertise in asking insightful questions to understand people deeply. You are facing the interviewee and do not know his/her background. Your responsibilities include:\n1. Asking thoughtful, open-ended questions\n2. Following up on interesting points\n3. Sharing what you know to engage the interviewee.',
173
+ 'interview-icon');
174
+
175
+ -- User LLM Configuration Table
176
+ CREATE TABLE IF NOT EXISTS user_llm_configs (
177
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
178
+ provider_type VARCHAR(50) NOT NULL DEFAULT 'openai',
179
+ key VARCHAR(200),
180
+
181
+ -- Chat configuration
182
+ chat_endpoint VARCHAR(200),
183
+ chat_api_key VARCHAR(200),
184
+ chat_model_name VARCHAR(200),
185
+
186
+ -- Embedding configuration
187
+ embedding_endpoint VARCHAR(200),
188
+ embedding_api_key VARCHAR(200),
189
+ embedding_model_name VARCHAR(200),
190
+
191
+ -- Thinking configuration
192
+ thinking_model_name VARCHAR(200),
193
+ thinking_endpoint VARCHAR(200),
194
+ thinking_api_key VARCHAR(200),
195
+
196
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
197
+ updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
198
+ );
199
+
200
+
201
+ -- User LLM Configuration table indexes
202
+ CREATE INDEX IF NOT EXISTS idx_user_llm_configs_created_at ON user_llm_configs(created_at);
203
+
204
+ -- Spaces Table
205
+ CREATE TABLE IF NOT EXISTS spaces (
206
+ id VARCHAR(255) PRIMARY KEY,
207
+ title VARCHAR(255) NOT NULL,
208
+ objective TEXT NOT NULL,
209
+ participants TEXT NOT NULL, -- JSON array stored as TEXT
210
+ host VARCHAR(255) NOT NULL,
211
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
212
+ status INTEGER DEFAULT 1,
213
+ conclusion TEXT,
214
+ space_share_id VARCHAR(255)
215
+ );
216
+
217
+ -- Space Messages Table
218
+ CREATE TABLE IF NOT EXISTS space_messages (
219
+ id VARCHAR(255) PRIMARY KEY,
220
+ space_id VARCHAR(255) NOT NULL,
221
+ sender_endpoint VARCHAR(255) NOT NULL,
222
+ content TEXT NOT NULL,
223
+ message_type VARCHAR(50) NOT NULL,
224
+ round INTEGER DEFAULT 0,
225
+ create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
226
+ role VARCHAR(50) DEFAULT 'participant',
227
+ FOREIGN KEY (space_id) REFERENCES spaces(id)
228
+ );
229
+
230
+ -- Space Messages Table Indexes
231
+ CREATE INDEX IF NOT EXISTS idx_space_messages_space_id ON space_messages(space_id);
232
+ CREATE INDEX IF NOT EXISTS idx_space_messages_round ON space_messages(round);
233
+ CREATE INDEX IF NOT EXISTS idx_space_messages_create_time ON space_messages(create_time);
234
+
235
+ -- Space Table Indexes
236
+ CREATE INDEX IF NOT EXISTS idx_spaces_create_time ON spaces(create_time);
237
+ CREATE INDEX IF NOT EXISTS idx_spaces_status ON spaces(status);
docs/Custom Model Config(Ollama).md ADDED
@@ -0,0 +1,108 @@
1
+ # Custom Model Endpoint Guide with Ollama
2
+
3
+ ## 1. Prerequisites: Ollama Setup
4
+
5
+ First, download and install Ollama from the official website:
6
+
7
+ 🔗 **Download Link**: [https://ollama.com/download](https://ollama.com/download)
8
+
9
+ 📚 **Additional Resources**:
10
+ - Official Website: [https://ollama.com](https://ollama.com/)
11
+ - Model Library: [https://ollama.com/library](https://ollama.com/library)
12
+ - GitHub Repository: [https://github.com/ollama/ollama/](https://github.com/ollama/ollama)
13
+
14
+ ---
15
+
16
+ ## 2. Basic Ollama Commands
17
+
18
+ | Command | Description |
19
+ |------|------|
20
+ | `ollama pull model_name` | Download a model |
21
+ | `ollama serve` | Start the Ollama service |
22
+ | `ollama ps` | List running models |
23
+ | `ollama list` | List all downloaded models |
24
+ | `ollama rm model_name` | Remove a model |
25
+ | `ollama show model_name` | Show model details |
26
+
27
+ ## 3. Using Ollama API for Custom Model
28
+
29
+ ### OpenAI-Compatible API
30
+
31
+
32
+ #### Chat Request
33
+
34
+ ```bash
35
+ curl http://127.0.0.1:11434/v1/chat/completions -H "Content-Type: application/json" -d '{
36
+ "model": "qwen2.5:0.5b",
37
+ "messages": [
38
+ {"role": "user", "content": "Why is the sky blue?"}
39
+ ]
40
+ }'
41
+ ```
42
+
43
+ #### Embedding Request
44
+
45
+ ```bash
46
+ curl http://127.0.0.1:11434/v1/embeddings -d '{
47
+ "model": "snowflake-arctic-embed:110m",
48
+ "input": "Why is the sky blue?"
49
+ }'
50
+ ```
51
+
52
+ More Details: [https://github.com/ollama/ollama/blob/main/docs/openai.md](https://github.com/ollama/ollama/blob/main/docs/openai.md)
53
+
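+ The same OpenAI-compatible endpoints can also be called from Python. The following is a minimal sketch assuming the official `openai` Python package (v1.x) is installed and the two models from the curl examples above have already been pulled; Ollama does not validate the API key, so any non-empty string works.
+
+ ```python
+ from openai import OpenAI
+
+ # Point the OpenAI client at the local Ollama server; the key is a placeholder.
+ client = OpenAI(base_url="http://127.0.0.1:11434/v1", api_key="ollama")
+
+ # Chat request
+ chat = client.chat.completions.create(
+     model="qwen2.5:0.5b",
+     messages=[{"role": "user", "content": "Why is the sky blue?"}],
+ )
+ print(chat.choices[0].message.content)
+
+ # Embedding request
+ emb = client.embeddings.create(
+     model="snowflake-arctic-embed:110m",
+     input="Why is the sky blue?",
+ )
+ print(len(emb.data[0].embedding))  # prints the embedding dimension, e.g. 768
+ ```
+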
54
+ ## 4. Configuring Custom Embedding in Second Me
55
+
56
+ 1. Start the Ollama service: `ollama serve`
57
+ 2. Check your Ollama embedding model context length:
58
+
59
+ ```bash
60
+ # Example: ollama show snowflake-arctic-embed:110m
61
+ $ ollama show snowflake-arctic-embed:110m
62
+
63
+ Model
64
+ architecture bert
65
+ parameters 108.89M
66
+ context length 512
67
+ embedding length 768
68
+ quantization F16
69
+
70
+ License
71
+ Apache License
72
+ Version 2.0, January 2004
73
+ ```
74
+
75
+ 3. Modify `EMBEDDING_MAX_TEXT_LENGTH` in `Second_Me/.env` to match your embedding model's context window. This prevents chunk length overflow and avoids server-side errors (500 Internal Server Error).
76
+
77
+ ```bash
78
+ # Embedding configurations
79
+
80
+ EMBEDDING_MAX_TEXT_LENGTH=embedding_model_context_length
81
+ ```
82
+
83
+ 4. Configure the custom Chat and Embedding models in Settings:
84
+
85
+ ```
86
+ Chat:
87
+ Model Name: qwen2.5:0.5b
88
+ API Key: ollama
89
+ API Endpoint: http://127.0.0.1:11434/v1
90
+
91
+ Embedding:
92
+ Model Name: snowflake-arctic-embed:110m
93
+ API Key: ollama
94
+ API Endpoint: http://127.0.0.1:11434/v1
95
+ ```
96
+
97
+ **When running Second Me in a Docker environment**, replace `127.0.0.1` in the API Endpoint with `host.docker.internal`:
98
+
99
+ ```
100
+ Chat:
101
+ Model Name: qwen2.5:0.5b
102
+ API Key: ollama
103
+ API Endpoint: http://host.docker.internal:11434/v1
104
+
105
+ Embedding:
106
+ Model Name: snowflake-arctic-embed:110m
107
+ API Key: ollama
108
+ API Endpoint: http://host.docker.internal:11434/v1
+ ```
docs/Embedding Model Switching.md ADDED
@@ -0,0 +1,61 @@
1
+ # Embedding Model Switching Guide
2
+
3
+ ## Understanding Embedding Dimensions
4
+
5
+ When using different embedding models (like switching from OpenAI to Ollama models), you may encounter dimension mismatch issues. This happens because different models produce embedding vectors with different dimensions:
6
+
7
+ | Model | Dimension |
8
+ |-------|----------|
9
+ | OpenAI text-embedding-ada-002 | 1536 |
10
+ | OpenAI text-embedding-3-small | 1536 |
11
+ | OpenAI text-embedding-3-large | 3072 |
12
+ | Ollama snowflake-arctic-embed | 768 |
13
+ | Ollama nomic-embed-text | 768 |
14
+ | Ollama mxbai-embed-large | 1024 |
15
+
16
+ ## Handling Dimension Mismatches
17
+
18
+ Second Me now includes automatic detection and handling of embedding dimension mismatches. When you switch between embedding models with different dimensions, the system will:
19
+
20
+ 1. Detect the dimension of the new embedding model
21
+ 2. Check if the existing ChromaDB collections have a different dimension
22
+ 3. If a mismatch is detected, automatically reinitialize the collections with the new dimension
23
+ 4. Provide clear error messages and logging information about the process
24
+
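+ As a rough illustration of steps 1–3 (this is a sketch of the idea, not the project's actual `chroma_utils` implementation), the check can probe the configured embedding model for its dimension, peek at one stored vector in the existing collection, and recreate the collection when the two differ. The `documents` collection name comes from the warning message quoted in the Troubleshooting section below; the endpoint, key, and model values are placeholders for your own configuration.
+
+ ```python
+ from typing import Optional
+
+ import chromadb
+ from openai import OpenAI
+
+
+ def model_dimension(endpoint: str, api_key: str, model: str) -> int:
+     """Dimension of the currently configured embedding model (probe one string)."""
+     client = OpenAI(base_url=endpoint, api_key=api_key)
+     vector = client.embeddings.create(model=model, input="dimension probe").data[0].embedding
+     return len(vector)
+
+
+ def stored_dimension(collection) -> Optional[int]:
+     """Dimension of the vectors already stored in a ChromaDB collection, if any."""
+     sample = collection.get(limit=1, include=["embeddings"])
+     embeddings = sample["embeddings"]
+     if embeddings is None or len(embeddings) == 0:
+         return None
+     return len(embeddings[0])
+
+
+ chroma = chromadb.PersistentClient(path="data/chroma_db")
+ collection = chroma.get_or_create_collection("documents")
+
+ new_dim = model_dimension("http://127.0.0.1:11434/v1", "ollama", "snowflake-arctic-embed:110m")
+ old_dim = stored_dimension(collection)
+
+ if old_dim is not None and old_dim != new_dim:
+     print(f"Existing 'documents' collection has dimension {old_dim}, "
+           f"but the current model requires {new_dim}; reinitializing...")
+     chroma.delete_collection("documents")  # drops the stored vectors
+     collection = chroma.get_or_create_collection("documents")  # re-embed documents afterwards
+ ```
+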
25
+ ## Recommended Workflow for Switching Models
26
+
27
+ When switching between embedding models with different dimensions, follow these steps:
28
+
29
+ 1. Update your embedding model configuration in Settings
30
+ 2. Restart the application to ensure proper initialization
31
+ 3. If you encounter any issues, you can manually reset the vector database:
32
+ - Delete the contents of the `data/chroma_db` directory
33
+ - Restart the application
34
+
35
+ ## Troubleshooting
36
+
37
+ The system now automatically handles dimension mismatches when switching between embedding models. You'll see log messages like:
38
+
39
+ ```
40
+ Warning: Existing 'documents' collection has dimension X, but current model requires Y
41
+ Automatically reinitializing ChromaDB collections with the new dimension...
42
+ Successfully reinitialized ChromaDB collections with the new dimension
43
+ ```
44
+
45
+ This indicates that the system has detected and resolved a dimension mismatch automatically. If you still encounter issues after the automatic handling:
46
+
47
+ 1. Check the application logs for any error messages
48
+ 2. If problems persist, you can manually reset the vector database:
49
+ - Stop the application
50
+ - Delete the contents of the `data/chroma_db` directory
51
+ - Restart the application
52
+
53
+ ## Technical Details
54
+
55
+ The dimension mismatch handling is implemented in:
56
+
57
+ - `lpm_kernel/file_data/chroma_utils.py`: Contains utilities for detecting model dimensions and reinitializing collections
58
+ - `lpm_kernel/file_data/embedding_service.py`: Handles dimension checking during initialization
59
+ - `docker/app/init_chroma.py`: Performs dimension validation during initial setup
60
+
61
+ The system maintains a mapping of known embedding models to their dimensions and will default to 1536 (OpenAI's dimension) for unknown models.
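+
+ For illustration only, such a mapping might look like the sketch below; the names and values are taken from the dimension table at the top of this guide, and the actual lookup lives in the files listed above.
+
+ ```python
+ # Hypothetical sketch of a model-to-dimension mapping, built from the table above.
+ KNOWN_EMBEDDING_DIMENSIONS = {
+     "text-embedding-ada-002": 1536,
+     "text-embedding-3-small": 1536,
+     "text-embedding-3-large": 3072,
+     "snowflake-arctic-embed": 768,
+     "nomic-embed-text": 768,
+     "mxbai-embed-large": 1024,
+ }
+
+ # Fallback for models not in the mapping (OpenAI's dimension).
+ DEFAULT_EMBEDDING_DIMENSION = 1536
+
+
+ def expected_dimension(model_name: str) -> int:
+     """Look up the expected embedding dimension, falling back to the default."""
+     return KNOWN_EMBEDDING_DIMENSIONS.get(model_name, DEFAULT_EMBEDDING_DIMENSION)
+ ```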
docs/Local Chat API.md ADDED
@@ -0,0 +1,165 @@
1
+ # Chat API Documentation
2
+
3
+ ## Overview
4
+
5
+ This API provides chat functionality compatible with OpenAI V1 Chat Completions API, supporting streaming responses for interactive conversations with AI assistants.
6
+
7
+ ## API Details
8
+
9
+ - **URL**: `/api/kernel2/chat`
10
+ - **Method**: POST
11
+ - **Description**: Chat interface - Streaming response (compatible with OpenAI V1 API)
12
+ - **Access**: Available through local endpoint at `localhost:8002`
13
+
14
+ ## Request Parameters
15
+
16
+ The request body is compatible with OpenAI Chat Completions API, using JSON format:
17
+
18
+ ```json
19
+ {
20
+ "messages": [
21
+ {"role": "system", "content": "You are a helpful assistant."},
22
+ {"role": "user", "content": "Hello, who are you?"},
23
+ {"role": "assistant", "content": "I am a helpful assistant."},
24
+ {"role": "user", "content": "What can you do for me?"}
25
+ ],
26
+ "metadata": {
27
+ "enable_l0_retrieval": true,
28
+ "role_id": "uuid-string"
29
+ },
30
+ "stream": true,
31
+ "model": "gpt-3.5-turbo",
32
+ "temperature": 0.1,
33
+ "max_tokens": 2000
34
+ }
35
+ ```
36
+
37
+ ### Parameter Description
38
+
39
+ | Parameter | Type | Required | Description |
40
+ |------|------|------|------|
41
+ | messages | Array | Yes | Standard OpenAI message list containing conversation history |
42
+ | metadata | Object | No | Additional parameters for request processing |
43
+ | metadata.enable_l0_retrieval | Boolean | No | Whether to enable basic knowledge retrieval |
44
+ | metadata.role_id | String | No | System customized role UUID |
45
+ | stream | Boolean | No | Whether to return streaming response (default: true) |
46
+ | model | String | No | Model identifier (default: configured model) |
47
+ | temperature | Float | No | Controls randomness (default: 0.1) |
48
+ | max_tokens | Integer | No | Maximum number of tokens to generate (default: 2000) |
49
+
50
+ ## Response Format
51
+
52
+ The response format is compatible with OpenAI Chat Completions API, using Server-Sent Events (SSE) format for streaming responses:
53
+
54
+ ```json
55
+ {
56
+ "id": "chatcmpl-123",
57
+ "object": "chat.completion.chunk",
58
+ "created": 1677652288,
59
+ "model": "gpt-3.5-turbo",
60
+ "system_fingerprint": "fp_44709d6fcb",
61
+ "choices": [
62
+ {
63
+ "index": 0,
64
+ "delta": {"content": "Hello"},
65
+ "finish_reason": null
66
+ }
67
+ ]
68
+ }
69
+ ```
70
+
71
+ ### Format of Each Chunk in Streaming Response
72
+
73
+ | Field | Type | Description |
74
+ |------|------|------|
75
+ | id | String | Unique identifier for the response |
76
+ | object | String | Fixed as "chat.completion.chunk" |
77
+ | created | Integer | Timestamp |
78
+ | model | String | Model identifier |
79
+ | system_fingerprint | String | System fingerprint |
80
+ | choices | Array | List of generated results |
81
+ | choices[0].index | Integer | Result index, usually 0 |
82
+ | choices[0].delta | Object | Incremental content of the current chunk |
83
+ | choices[0].delta.content | String | Incremental text content |
84
+ | choices[0].finish_reason | String | Reason for completion, null or "stop" |
85
+
86
+ ## Usage Examples
87
+
88
+ ### cURL Request Example
89
+
90
+ ```bash
91
+ curl -X POST \
92
+ 'http://localhost:8002/api/kernel2/chat' \
93
+ -H 'Content-Type: application/json' \
94
+ -H 'Accept: text/event-stream' \
95
+ -d '{
96
+ "messages": [
97
+ {"role": "system", "content": "You are a helpful assistant."},
98
+ {"role": "user", "content": "Tell me about artificial intelligence."}
99
+ ],
100
+ "stream": true
101
+ }'
102
+ ```
103
+
104
+ ### Python Request Example
105
+
106
+ ```python
107
+ import json
108
+ import http.client
109
+
110
+ url = "localhost:8002"
111
+ path = "/api/kernel2/chat"
112
+ headers = {
113
+ "Content-Type": "application/json",
114
+ "Accept": "text/event-stream"
115
+ }
116
+ data = {
117
+ "messages": [
118
+ {"role": "system", "content": "You are a helpful assistant."},
119
+ {"role": "user", "content": "Tell me about artificial intelligence."}
120
+ ],
121
+ "stream": True
122
+ }
123
+
124
+ conn = http.client.HTTPConnection(url)
125
+
126
+ conn.request("POST", path, body=json.dumps(data), headers=headers)
127
+
128
+ response = conn.getresponse()
129
+
130
+ for line in response:
131
+ if line:
132
+ decoded_line = line.decode('utf-8').strip()
133
+ if decoded_line == 'data: [DONE]':
134
+ break
135
+ if decoded_line.startswith('data: '):
136
+ try:
137
+ json_str = decoded_line[6:]
138
+ chunk = json.loads(json_str)
139
+ content = chunk['choices'][0]['delta'].get('content', '')
140
+ if content:
141
+ print(content, end='', flush=True)
142
+ except json.JSONDecodeError:
143
+ pass
144
+
145
+ conn.close()
146
+ ```
147
+
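+ If you prefer the third-party `requests` library over the standard-library `http.client`, an equivalent streaming sketch (assuming `requests` is installed) looks like this:
+
+ ```python
+ import json
+
+ import requests
+
+ url = "http://localhost:8002/api/kernel2/chat"
+ payload = {
+     "messages": [
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Tell me about artificial intelligence."}
+     ],
+     "stream": True
+ }
+
+ # stream=True keeps the connection open so SSE chunks can be read as they arrive.
+ with requests.post(url, json=payload, headers={"Accept": "text/event-stream"}, stream=True) as response:
+     response.raise_for_status()
+     for line in response.iter_lines(decode_unicode=True):
+         if not line or not line.startswith("data: "):
+             continue
+         if line == "data: [DONE]":
+             break
+         chunk = json.loads(line[len("data: "):])
+         content = chunk["choices"][0]["delta"].get("content", "")
+         if content:
+             print(content, end="", flush=True)
+ ```
+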
148
+ ## Error Handling
149
+
150
+ When an error occurs, the API will return standard HTTP error status codes and error details in JSON format:
151
+
152
+ ```json
153
+ {
154
+ "success": false,
155
+ "message": "Error message",
156
+ "code": 400
157
+ }
158
+ ```
159
+
160
+ | Error Code | Description |
161
+ |------|------|
162
+ | 400 | Bad Request |
163
+ | 401 | Unauthorized |
164
+ | 404 | Not Found |
165
+ | 500 | Internal Server Error |
docs/Public Chat API.md ADDED
@@ -0,0 +1,163 @@
1
+ # Chat Completion API
2
+
3
+ ## API Overview
4
+
5
+ This API creates chat completions: it processes the provided messages and generates a response. It supports streaming responses and is compatible with the OpenAI format.
6
+
7
+ ## Prerequisites
8
+
9
+ Before using this API, you need to:
10
+
11
+ 1. **Register**: Execute the register operation to create your instance
12
+ 2. **Status Check**: Wait until your instance status becomes "online"
13
+ 3. **Get Instance ID**: Obtain your unique `{instance_id}` from the registration response
14
+ 4. **API Access**: Use the instance ID to construct the API endpoint: `https://app.secondme.io/api/chat/{instance_id}`
15
+
16
+ ## API Endpoints
17
+
18
+ ```
19
+ POST /api/chat/{instance_id}
20
+ POST /api/chat/{instance_id}/chat/completions
21
+ ```
22
+
23
+ ## Path Parameters
24
+
25
+ | Parameter | Type | Required | Description |
26
+ |------|------|------|------|
27
+ | `instance_id` | string | Yes | Unique identifier for the model instance, obtained during registration |
28
+
29
+ ## Request Body
30
+
31
+ | Field | Type | Required | Default | Description |
32
+ |------|------|------|------|------|
33
+ | `messages` | array | Yes | - | List of messages in the conversation |
34
+ | `metadata` | object | No | null | Additional metadata for the request |
35
+ | `temperature` | float | No | 0.7 | Controls randomness of the response, value between 0 and 1 |
36
+ | `max_tokens` | integer | No | 2000 | Maximum number of tokens to generate |
37
+ | `stream` | boolean | No | true | Whether to stream the response |
38
+
39
+ ### messages Field
40
+
41
+ Each message should contain the following fields:
42
+
43
+ | Field | Type | Required | Description |
44
+ |------|------|------|------|
45
+ | `role` | string | Yes | Role of the message sender. Can be 'system', 'user', or 'assistant' |
46
+ | `content` | string | Yes | Content of the message |
47
+
48
+ ### metadata Field
49
+
50
+ | Field | Type | Required | Description |
51
+ |------|------|------|------|
52
+ | `enable_l0_retrieval` | boolean | No | Whether to enable L0 level retrieval |
53
+ | `role_id` | string | No | Role ID to use for this chat |
54
+
55
+ ## Response
56
+
57
+ - Server-Sent Events (SSE) stream in OpenAI-compatible format
58
+ - Each event contains a fragment of the generated response
59
+ - The last event is marked as `[DONE]`
60
+
61
+ ### Response Format Example
62
+
63
+ ```
64
+ data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"lpm-registry-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}
65
+
66
+ data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"lpm-registry-model","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" world!"},"finish_reason":null}]}
67
+
68
+ data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"lpm-registry-model","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}
69
+
70
+ data: [DONE]
71
+ ```
72
+
73
+ ## Request Examples
74
+
75
+ ### cURL
76
+
77
+ ```bash
78
+ curl -X POST "https://app.secondme.io/api/chat/{instance_id}" \
79
+ -H "Content-Type: application/json" \
80
+ -d '{
81
+ "messages": [
82
+ {"role": "system", "content": "You are a helpful assistant."},
83
+ {"role": "user", "content": "Hello, please introduce yourself."}
84
+ ],
85
+ "metadata": {
86
+ "enable_l0_retrieval": false,
87
+ "role_id": "default_role"
88
+ },
89
+ "temperature": 0.7,
90
+ "max_tokens": 2000,
91
+ "stream": true
92
+ }'
93
+ ```
94
+
95
+ ### Python
96
+
97
+ ```python
98
+ import http.client
99
+ import json
100
+
101
+ url = "app.secondme.io"
102
+ path = "/api/chat/{instance_id}"
103
+
104
+ headers = {"Content-Type": "application/json"}
105
+ data = {
106
+ "messages": [
107
+ {"role": "system", "content": "You are a helpful assistant."},
108
+ {"role": "user", "content": "Hello, please introduce yourself."}
109
+ ],
110
+ "metadata": {
111
+ "enable_l0_retrieval": False,
112
+ "role_id": "default_role"
113
+ },
114
+ "temperature": 0.7,
115
+ "max_tokens": 2000,
116
+ "stream": True
117
+ }
118
+
119
+ # Prepare the connection
120
+ conn = http.client.HTTPSConnection(url)
121
+
122
+ # Send the POST request
123
+ conn.request("POST", path, body=json.dumps(data), headers=headers)
124
+
125
+ # Get the response
126
+ response = conn.getresponse()
127
+
128
+
129
+ # Read the body line by line
130
+ for line in response:
131
+ if line:
132
+ decoded_line = line.decode('utf-8').strip()
133
+ if decoded_line == 'data: [DONE]':
134
+ break
135
+ if decoded_line.startswith('data: '):
136
+ try:
137
+ json_str = decoded_line[6:]
138
+ chunk = json.loads(json_str)
139
+ content = chunk['choices'][0]['delta'].get('content', '')
140
+ if content:
141
+ print(content, end='', flush=True)
142
+ except json.JSONDecodeError:
143
+ pass
144
+
145
+ # Close the connection when done
146
+ conn.close()
147
+
148
+ ```
149
+
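+ Because the second endpoint above follows the OpenAI path convention (`/chat/completions`), the official `openai` Python SDK can also be pointed at this service. The sketch below makes a few assumptions: the service itself does not appear to require an API key, so a placeholder is passed only because the SDK insists on one; the `model` value is copied from the response example above and is presumably ignored server-side; and `{instance_id}` must be replaced with your real instance ID.
+
+ ```python
+ from openai import OpenAI
+
+ # Replace {instance_id} with the instance ID obtained during registration.
+ client = OpenAI(
+     base_url="https://app.secondme.io/api/chat/{instance_id}",
+     api_key="placeholder",  # assumed to be ignored by the service, but required by the SDK
+ )
+
+ stream = client.chat.completions.create(
+     model="lpm-registry-model",  # copied from the response example; presumably ignored
+     messages=[
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Hello, please introduce yourself."}
+     ],
+     temperature=0.7,
+     max_tokens=2000,
+     stream=True,
+ )
+
+ for chunk in stream:
+     delta = chunk.choices[0].delta.content
+     if delta:
+         print(delta, end="", flush=True)
+ ```
+
+ If you need the `metadata` object from the request-body table, it can be supplied through the SDK's `extra_body` argument.
+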
150
+ ## Error Codes
151
+
152
+ | Status Code | Description |
153
+ |------|------|
154
+ | 404 | Instance not found |
155
+ | 422 | Invalid request parameters |
156
+ | 503 | Instance not connected or unavailable |
157
+
158
+ ## Notes
159
+
160
+ 1. Before using this API, ensure that the instance is registered and connected to the server (status: "online")
161
+ 2. The instance ID is unique and required for all API calls
162
+ 3. For streaming responses, the client should be able to handle data in SSE format
163
+ 4. Roles in the message list should follow the conversation order, typically starting with 'system' or 'user'
images/cover.png ADDED

Git LFS Details

  • SHA256: 5c6cf46e9b960bf8861d8ccdc29ff24b65d819df1c86abee54aa1db8e0c401ef
  • Pointer size: 132 Bytes
  • Size of remote file: 1 MB
images/secondme_cover.png ADDED

Git LFS Details

  • SHA256: d21d64b0b8b57c7f9abc4f2140a8015105c727316ecd6d721f5617c056cec977
  • Pointer size: 131 Bytes
  • Size of remote file: 998 kB
integrate/.gitkeep ADDED
File without changes
integrate/Readme.md ADDED
@@ -0,0 +1,39 @@
1
+ # Second-Me WeChat Bot Integration Guide
2
+
3
+ This project integrates Second-Me with a WeChat bot, so you can interact with your Second-Me directly through WeChat.
4
+
5
+ ## Features
6
+
7
+ - 🤖 Seamlessly connects Second-Me to WeChat
8
+ - 💬 Intelligent replies to text messages
9
+ - 🔄 Automated message-handling pipeline
10
+ - 📝 Complete logging
11
+ - ⚠️ Robust error handling
12
+
13
+ ## Requirements
14
+
15
+ - Python 3.8+
16
+ - A personal WeChat account
17
+ - A working Second-Me project environment
18
+
19
+ ## Quick Start
20
+
21
+ 1. Clone the project and install the dependencies:
22
+ ```bash
23
+ git clone https://github.com/mindverse/second-me.git
24
+ cd second-me/integrate
25
+ pip install -r requirements.txt
26
+ ```
27
+
28
+ 2. Configure the environment:
29
+ - Make sure the Second-Me configuration files are set up correctly
30
+ - Check the required settings in the `.env` file
31
+
32
+ 3. Run the bot:
33
+ ```bash
34
+ python wechat_bot.py
35
+ ```
36
+
37
+ 4. On the first run, scan the QR code shown in the terminal to log in to WeChat
38
+
39
+ ## Project Structure
integrate/env.txt ADDED
@@ -0,0 +1,6 @@
1
+ # Second-Me configuration
2
+ SECOND_ME_MODEL_PATH=path/to/model
3
+ SECOND_ME_CONFIG_PATH=path/to/config
4
+
5
+ # WeChat bot configuration
6
+ WXPY_BOT_CACHE_PATH=wxpy.pkl
integrate/requirements.txt ADDED
@@ -0,0 +1,5 @@
1
+ wxpy==0.3.9.8
2
+ python-dotenv==0.19.0
3
+ torch>=1.8.0
4
+ transformers>=4.5.0
5
+ numpy>=1.19.0
integrate/wechat_bot.py ADDED
@@ -0,0 +1,82 @@
1
+ from wxpy import *
2
+ import logging
3
+ import os
4
+ from dotenv import load_dotenv
5
+ import sys
6
+ import json
7
+
8
+ # Add the project root to sys.path so that the lpm_kernel package can be imported
9
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
10
+
11
+ from lpm_kernel.kernel import SecondMeKernel
12
+ from lpm_kernel.utils import load_config
13
+
14
+ # Configure logging
15
+ logging.basicConfig(level=logging.INFO)
16
+ logger = logging.getLogger(__name__)
17
+
18
+ # Load environment variables
19
+ load_dotenv()
20
+
21
+ class WeChatBot:
22
+ def __init__(self):
23
+ # Initialize the bot and set the login cache path
24
+ self.bot = Bot(cache_path='wxpy.pkl')
25
+ logger.info("微信机器人初始化成功")
26
+
27
+ # Initialize Second-Me
28
+ try:
29
+ config = load_config()
30
+ self.second_me = SecondMeKernel(config)
31
+ logger.info("Second-Me初始化成功")
32
+ except Exception as e:
33
+ logger.error(f"Second-Me初始化失败: {str(e)}")
34
+ self.second_me = None
35
+
36
+ def handle_message(self, msg):
37
+ """处理接收到的消息"""
38
+ try:
39
+ # Get the message content and sender
40
+ content = msg.text
41
+ sender = msg.sender
42
+
43
+ # Log the received message
44
+ logger.info(f"Received message from {sender.name}: {content}")
45
+
46
+ if self.second_me is None:
47
+ msg.reply("抱歉,Second-Me服务未正确初始化,请稍后再试。")
48
+ return
49
+
50
+ # Ask Second-Me to process the message
51
+ response = self.second_me.process_message(content)
52
+
53
+ # If the response is a dict, serialize it to a JSON string
54
+ if isinstance(response, dict):
55
+ response = json.dumps(response, ensure_ascii=False)
56
+
57
+ # Send the reply
58
+ msg.reply(response)
59
+
60
+ except Exception as e:
61
+ logger.error(f"处理消息时出错: {str(e)}")
62
+ msg.reply("抱歉,处理消息时出现错误。")
63
+
64
+ def run(self):
65
+ """运行机器人"""
66
+ try:
67
+ # Register the message handler
68
+ @self.bot.register()
69
+ def print_messages(msg):
70
+ self.handle_message(msg)
71
+
72
+ # Keep the bot running
73
+ self.bot.join()
74
+
75
+ except Exception as e:
76
+ logger.error(f"运行机器人时出错: {str(e)}")
77
+ self.bot.logout()
78
+
79
+ if __name__ == "__main__":
80
+ # Create and run the bot
81
+ bot = WeChatBot()
82
+ bot.run()
logs/.gitkeep ADDED
File without changes
logs/logs.lnk ADDED
File without changes
lpm_frontend/.eslintignore ADDED
@@ -0,0 +1,2 @@
1
+ /*
2
+ !/src
lpm_frontend/.eslintrc.js ADDED
@@ -0,0 +1,31 @@
1
+ module.exports = {
2
+ root: true,
3
+ env: {
4
+ node: true,
5
+ browser: true
6
+ },
7
+ parserOptions: {
8
+ project: './tsconfig.json',
9
+ tsconfigRootDir: __dirname
10
+ },
11
+ extends: [
12
+ './rules/eslint/js',
13
+ './rules/eslint/import',
14
+ './rules/eslint/react',
15
+ './rules/eslint/ts',
16
+ './rules/eslint/prettier'
17
+ ],
18
+ rules: {
19
+ '@next/next/no-sync-scripts': 'off'
20
+ },
21
+ settings: {
22
+ 'import/resolver': {
23
+ typescript: {
24
+ project: 'Second-Me/lpm_frontend/tsconfig.json'
25
+ },
26
+ node: {
27
+ extensions: ['.js', '.jsx', '.ts', '.tsx']
28
+ }
29
+ }
30
+ }
31
+ };
lpm_frontend/.gitignore ADDED
@@ -0,0 +1,42 @@
1
+ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2
+
3
+ # dependencies
4
+ /node_modules
5
+ /.pnp
6
+ .pnp.*
7
+ .yarn/*
8
+ !.yarn/patches
9
+ !.yarn/plugins
10
+ !.yarn/releases
11
+ !.yarn/versions
12
+
13
+ # testing
14
+ /coverage
15
+
16
+ # next.js
17
+ /.next/
18
+ /out/
19
+
20
+ # production
21
+ /build
22
+
23
+ # misc
24
+ .DS_Store
25
+ *.pem
26
+
27
+ # debug
28
+ npm-debug.log*
29
+ yarn-debug.log*
30
+ yarn-error.log*
31
+ .pnpm-debug.log*
32
+
33
+ # env files (can opt-in for committing if needed)
34
+ .env*
35
+
36
+ # vercel
37
+ .vercel
38
+
39
+ # typescript
40
+ *.tsbuildinfo
41
+ next-env.d.ts
42
+ next.config.ts
lpm_frontend/.prettierignore ADDED
@@ -0,0 +1,2 @@
1
+ /*
2
+ !/src
lpm_frontend/.prettierrc.js ADDED
@@ -0,0 +1,12 @@
1
+ // https://prettier.io/docs/en/options.html
2
+ // https://github.com/trivago/prettier-plugin-sort-imports
3
+
4
+ module.exports = {
5
+ trailingComma: 'none',
6
+ singleQuote: true,
7
+ printWidth: 100,
8
+ importOrder: ['<THIRD_PARTY_MODULES>', '^@/(.*)$', '^[./]'],
9
+ importOrderSeparation: true,
10
+ importOrderSortSpecifiers: true,
11
+ semi: true
12
+ };
lpm_frontend/.stylelintignore ADDED
@@ -0,0 +1,2 @@
1
+ /*
2
+ !/src
lpm_frontend/.stylelintrc.js ADDED
@@ -0,0 +1,43 @@
1
+ // https://stylelint.io/user-guide/rules
2
+ module.exports = {
3
+ extends: [
4
+ 'stylelint-config-standard',
5
+ 'stylelint-config-rational-order',
6
+ 'stylelint-prettier/recommended'
7
+ ],
8
+ overrides: [
9
+ {
10
+ files: '**/*.less',
11
+ customSyntax: 'postcss-less'
12
+ }
13
+ ],
14
+ rules: {
15
+ 'selector-class-pattern': null,
16
+ 'color-function-notation': 'legacy',
17
+ 'declaration-block-no-redundant-longhand-properties': null,
18
+ 'selector-no-vendor-prefix': [
19
+ true,
20
+ {
21
+ ignoreSelectors: ['input-placeholder']
22
+ }
23
+ ],
24
+ 'property-no-vendor-prefix': [
25
+ true,
26
+ {
27
+ ignoreProperties: ['user-select', 'line-clamp', 'appearance']
28
+ }
29
+ ],
30
+ 'selector-pseudo-class-no-unknown': [
31
+ true,
32
+ {
33
+ ignorePseudoClasses: ['global']
34
+ }
35
+ ],
36
+ 'unit-no-unknown': [
37
+ true,
38
+ {
39
+ ignoreUnits: ['/^rpx$/']
40
+ }
41
+ ]
42
+ }
43
+ };
lpm_frontend/next.config.js ADDED
@@ -0,0 +1,60 @@
1
+ const nextConfig = {
2
+ reactStrictMode: false,
3
+ async rewrites() {
4
+ const dockerApiBaseUrl = process.env.DOCKER_API_BASE_URL;
5
+ const localApiBaseUrl = `${process.env.HOST_ADDRESS || 'http://127.0.0.1'}:${process.env.LOCAL_APP_PORT || 8002}`;
6
+
7
+ return [
8
+ {
9
+ source: '/',
10
+ destination: '/home'
11
+ },
12
+ {
13
+ source: '/api/:path*',
14
+ destination: dockerApiBaseUrl
15
+ ? `${dockerApiBaseUrl}/api/:path*`
16
+ : `${localApiBaseUrl}/api/:path*`
17
+ }
18
+ ];
19
+ },
20
+ async headers() {
21
+ return [
22
+ {
23
+ source: '/api/:path*',
24
+ headers: [
25
+ { key: 'Access-Control-Allow-Credentials', value: 'true' },
26
+ { key: 'Access-Control-Allow-Origin', value: '*' },
27
+ {
28
+ key: 'Access-Control-Allow-Methods',
29
+ value: 'GET,DELETE,PATCH,POST,PUT'
30
+ },
31
+ {
32
+ key: 'Access-Control-Allow-Headers',
33
+ value: 'Accept, Accept-Version, Content-Length, Content-MD5, Content-Type, Date'
34
+ },
35
+ { key: 'Accept', value: 'text/event-stream' },
36
+ { key: 'Cache-Control', value: 'no-cache' },
37
+ { key: 'Connection', value: 'keep-alive' }
38
+ ]
39
+ }
40
+ ];
41
+ },
42
+ experimental: {
43
+ proxyTimeout: 0
44
+ },
45
+ compiler: {
46
+ styledComponents: true
47
+ },
48
+ webpack: (config) => {
49
+ config.externals = [...(config.externals || []), 'canvas', 'jsdom'];
50
+
51
+ config.watchOptions = {
52
+ poll: 1000,
53
+ aggregateTimeout: 300
54
+ };
55
+
56
+ return config;
57
+ }
58
+ };
59
+
60
+ module.exports = nextConfig;
lpm_frontend/package-lock.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92b32337b4bdd1e1c72472af640785e19a3a8cd12c085994676eb1e5d2368848
3
+ size 511033
lpm_frontend/package.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5e1de2ff2b3b536ad67cee7c442c80d5a7942f0ad2a734a342cf8df09c6ca5a
3
+ size 2540