vansin committed
Commit 458ecfb
0 Parent(s):

feat: update

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .DS_Store +0 -0
  2. .dockerignore +12 -0
  3. .env.example +6 -0
  4. .gitattributes +4 -0
  5. .gitignore +165 -0
  6. .pre-commit-config.yaml +46 -0
  7. .pylintrc +428 -0
  8. Dockerfile +27 -0
  9. LICENSE +201 -0
  10. README.md +171 -0
  11. README_zh-CN.md +140 -0
  12. app.py +318 -0
  13. assets/logo.svg +24 -0
  14. assets/mindsearch_openset.png +3 -0
  15. assets/teaser.gif +3 -0
  16. backend_example.py +37 -0
  17. docker/README.md +125 -0
  18. docker/README_zh-CN.md +125 -0
  19. docker/msdl/__init__.py +0 -0
  20. docker/msdl/__main__.py +107 -0
  21. docker/msdl/config.py +57 -0
  22. docker/msdl/docker_manager.py +175 -0
  23. docker/msdl/i18n.py +64 -0
  24. docker/msdl/templates/backend/cloud_llm.dockerfile +25 -0
  25. docker/msdl/templates/backend/local_llm.dockerfile +30 -0
  26. docker/msdl/templates/docker-compose.yaml +62 -0
  27. docker/msdl/templates/frontend/react.dockerfile +35 -0
  28. docker/msdl/translations/en.yaml +77 -0
  29. docker/msdl/translations/zh_CN.yaml +77 -0
  30. docker/msdl/user_interaction.py +253 -0
  31. docker/msdl/utils.py +257 -0
  32. docker/setup.py +24 -0
  33. frontend/React/.gitignore +25 -0
  34. frontend/React/.prettierignore +7 -0
  35. frontend/React/.prettierrc.json +7 -0
  36. frontend/React/README.md +184 -0
  37. frontend/React/README_zh-CN.md +135 -0
  38. frontend/React/index.html +14 -0
  39. frontend/React/package-lock.json +0 -0
  40. frontend/React/package.json +55 -0
  41. frontend/React/src/App.module.less +52 -0
  42. frontend/React/src/App.tsx +25 -0
  43. frontend/React/src/assets/background.png +3 -0
  44. frontend/React/src/assets/fold-icon.svg +3 -0
  45. frontend/React/src/assets/logo.svg +24 -0
  46. frontend/React/src/assets/pack-up.svg +4 -0
  47. frontend/React/src/assets/sendIcon.svg +4 -0
  48. frontend/React/src/assets/show-right-icon.png +3 -0
  49. frontend/React/src/assets/unflod-icon.svg +3 -0
  50. frontend/React/src/global.d.ts +1 -0
.DS_Store ADDED
Binary file (6.15 kB).
 
.dockerignore ADDED
@@ -0,0 +1,12 @@
+ **/node_modules
+ **/dist
+ **/.git
+ **/.gitignore
+ **/.vscode
+ **/README.md
+ **/LICENSE
+ **/.env
+ **/npm-debug.log
+ **/yarn-debug.log
+ **/yarn-error.log
+ **/.pnpm-debug.log
.env.example ADDED
@@ -0,0 +1,6 @@
+ OPENAI_API_KEY=
+ OPENAI_API_BASE=
+ OPENAI_MODEL=
+ SILICON_API_KEY=
+ SILICON_MODEL=
+ InternLM_API_KEY=
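These placeholders are read from the process environment at runtime. A minimal sketch of loading them with `python-dotenv` (the variable names come from this file; the loader itself and the `python-dotenv` usage are illustrative assumptions, not code from this commit):

```python
import os

from dotenv import load_dotenv  # pip install python-dotenv

# Copy KEY=VALUE pairs from .env into os.environ without overwriting existing values.
load_dotenv()

openai_api_key = os.environ.get("OPENAI_API_KEY", "")
openai_api_base = os.environ.get("OPENAI_API_BASE", "")
openai_model = os.environ.get("OPENAI_MODEL", "")
silicon_api_key = os.environ.get("SILICON_API_KEY", "")
silicon_model = os.environ.get("SILICON_MODEL", "")
internlm_api_key = os.environ.get("InternLM_API_KEY", "")

if not any([openai_api_key, silicon_api_key, internlm_api_key]):
    raise RuntimeError("Fill in .env (or export the variables) before starting the backend.")
```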
.gitattributes ADDED
@@ -0,0 +1,4 @@
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,165 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[ciod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ .env
+ temp
.pre-commit-config.yaml ADDED
@@ -0,0 +1,46 @@
+ exclude: ^(tests/data|scripts|frontend/React)/
+ repos:
+   - repo: https://github.com/PyCQA/flake8
+     rev: 7.0.0
+     hooks:
+       - id: flake8
+         args: ["--max-line-length=120"]
+   - repo: https://github.com/PyCQA/isort
+     rev: 5.13.2
+     hooks:
+       - id: isort
+   - repo: https://github.com/pre-commit/mirrors-yapf
+     rev: v0.32.0
+     hooks:
+       - id: yapf
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.5.0
+     hooks:
+       - id: trailing-whitespace
+       - id: check-yaml
+       - id: end-of-file-fixer
+       - id: requirements-txt-fixer
+       - id: double-quote-string-fixer
+       - id: check-merge-conflict
+       - id: fix-encoding-pragma
+         args: ["--remove"]
+       - id: mixed-line-ending
+         args: ["--fix=lf"]
+   - repo: https://github.com/executablebooks/mdformat
+     rev: 0.7.17
+     hooks:
+       - id: mdformat
+         args: ["--number"]
+         additional_dependencies:
+           - mdformat-openmmlab
+           - mdformat_frontmatter
+           - linkify-it-py
+   - repo: https://github.com/codespell-project/codespell
+     rev: v2.2.6
+     hooks:
+       - id: codespell
+   - repo: https://github.com/asottile/pyupgrade
+     rev: v3.15.0
+     hooks:
+       - id: pyupgrade
+         args: ["--py36-plus"]
.pylintrc ADDED
@@ -0,0 +1,428 @@
+ # This Pylint rcfile contains a best-effort configuration to uphold the
+ # best-practices and style described in the Google Python style guide:
+ # https://google.github.io/styleguide/pyguide.html
+ #
+ # Its canonical open-source location is:
+ # https://google.github.io/styleguide/pylintrc
+
+ [MASTER]
+
+ # Files or directories to be skipped. They should be base names, not paths.
+ ignore=third_party,storage
+
+ # Files or directories matching the regex patterns are skipped. The regex
+ # matches against base names, not paths.
+ ignore-patterns=
+
+ # Pickle collected data for later comparisons.
+ persistent=no
+
+ # List of plugins (as comma separated values of python modules names) to load,
+ # usually to register additional checkers.
+ load-plugins=
+
+ # Use multiple processes to speed up Pylint.
+ jobs=4
+
+ # Allow loading of arbitrary C extensions. Extensions are imported into the
+ # active Python interpreter and may run arbitrary code.
+ unsafe-load-any-extension=no
+
+
+ [MESSAGES CONTROL]
+
+ # Only show warnings with the listed confidence levels. Leave empty to show
+ # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+ confidence=
+
+ # Enable the message, report, category or checker with the given id(s). You can
+ # either give multiple identifier separated by comma (,) or put this option
+ # multiple time (only on the command line, not in the configuration file where
+ # it should appear only once). See also the "--disable" option for examples.
+ #enable=
+
+ # Disable the message, report, category or checker with the given id(s). You
+ # can either give multiple identifiers separated by comma (,) or put this
+ # option multiple times (only on the command line, not in the configuration
+ # file where it should appear only once).You can also use "--disable=all" to
+ # disable everything first and then reenable specific checks. For example, if
+ # you want to run only the similarities checker, you can use "--disable=all
+ # --enable=similarities". If you want to run only the classes checker, but have
+ # no Warning level messages displayed, use"--disable=all --enable=classes
+ # --disable=W"
+ disable=abstract-method,
+         apply-builtin,
+         arguments-differ,
+         attribute-defined-outside-init,
+         backtick,
+         bad-option-value,
+         basestring-builtin,
+         buffer-builtin,
+         c-extension-no-member,
+         consider-using-enumerate,
+         cmp-builtin,
+         cmp-method,
+         coerce-builtin,
+         coerce-method,
+         delslice-method,
+         div-method,
+         duplicate-code,
+         eq-without-hash,
+         execfile-builtin,
+         file-builtin,
+         filter-builtin-not-iterating,
+         fixme,
+         getslice-method,
+         global-statement,
+         hex-method,
+         idiv-method,
+         implicit-str-concat,
+         import-error,
+         import-self,
+         import-star-module-level,
+         inconsistent-return-statements,
+         input-builtin,
+         intern-builtin,
+         invalid-str-codec,
+         locally-disabled,
+         long-builtin,
+         long-suffix,
+         map-builtin-not-iterating,
+         misplaced-comparison-constant,
+         missing-function-docstring,
+         metaclass-assignment,
+         next-method-called,
+         next-method-defined,
+         no-absolute-import,
+         no-else-break,
+         no-else-continue,
+         no-else-raise,
+         no-else-return,
+         no-init,  # added
+         no-member,
+         no-name-in-module,
+         no-self-use,
+         nonzero-method,
+         oct-method,
+         old-division,
+         old-ne-operator,
+         old-octal-literal,
+         old-raise-syntax,
+         parameter-unpacking,
+         print-statement,
+         raising-string,
+         range-builtin-not-iterating,
+         raw_input-builtin,
+         rdiv-method,
+         reduce-builtin,
+         relative-import,
+         reload-builtin,
+         round-builtin,
+         setslice-method,
+         signature-differs,
+         standarderror-builtin,
+         suppressed-message,
+         sys-max-int,
+         too-few-public-methods,
+         too-many-ancestors,
+         too-many-arguments,
+         too-many-boolean-expressions,
+         too-many-branches,
+         too-many-instance-attributes,
+         too-many-locals,
+         too-many-nested-blocks,
+         too-many-public-methods,
+         too-many-return-statements,
+         too-many-statements,
+         trailing-newlines,
+         unichr-builtin,
+         unicode-builtin,
+         unnecessary-pass,
+         unpacking-in-except,
+         useless-else-on-loop,
+         useless-object-inheritance,
+         useless-suppression,
+         using-cmp-argument,
+         wrong-import-order,
+         xrange-builtin,
+         zip-builtin-not-iterating,
+
+
+ [REPORTS]
+
+ # Set the output format. Available formats are text, parseable, colorized, msvs
+ # (visual studio) and html. You can also give a reporter class, eg
+ # mypackage.mymodule.MyReporterClass.
+ output-format=colorized
+
+ # Tells whether to display a full report or only the messages
+ reports=no
+
+ # Python expression which should return a note less than 10 (10 is the highest
+ # note). You have access to the variables errors warning, statement which
+ # respectively contain the number of errors / warnings messages and the total
+ # number of statements analyzed. This is used by the global evaluation report
+ # (RP0004).
+ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+ # Template used to display messages. This is a python new-style format string
+ # used to format the message information. See doc for all details
+ #msg-template=
+
+
+ [BASIC]
+
+ # Good variable names which should always be accepted, separated by a comma
+ good-names=main,_
+
+ # Bad variable names which should always be refused, separated by a comma
+ bad-names=
+
+ # Colon-delimited sets of names that determine each other's naming style when
+ # the name regexes allow several styles.
+ name-group=
+
+ # Include a hint for the correct naming format with invalid-name
+ include-naming-hint=no
+
+ # List of decorators that produce properties, such as abc.abstractproperty. Add
+ # to this list to register other decorators that produce valid properties.
+ property-classes=abc.abstractproperty,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl
+
+ # Regular expression matching correct function names
+ function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
+
+ # Regular expression matching correct variable names
+ variable-rgx=^[a-z][a-z0-9_]*$
+
+ # Regular expression matching correct constant names
+ const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
+
+ # Regular expression matching correct attribute names
+ attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
+
+ # Regular expression matching correct argument names
+ argument-rgx=^[a-z][a-z0-9_]*$
+
+ # Regular expression matching correct class attribute names
+ class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
+
+ # Regular expression matching correct inline iteration names
+ inlinevar-rgx=^[a-z][a-z0-9_]*$
+
+ # Regular expression matching correct class names
+ class-rgx=^_?[A-Z][a-zA-Z0-9]*$
+
+ # Regular expression matching correct module names
+ module-rgx=^(_?[a-z][a-z0-9_]*|__init__)$
+
+ # Regular expression matching correct method names
+ method-rgx=(?x)^(?:(?P<exempt>_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
+
+ # Regular expression which should only match function or class names that do
+ # not require a docstring.
+ no-docstring-rgx=(__.*__|main|test.*|.*test|.*Test)$
+
+ # Minimum line length for functions/classes that require docstrings, shorter
+ # ones are exempt.
+ docstring-min-length=10
+
+
+ [TYPECHECK]
+
+ # List of decorators that produce context managers, such as
+ # contextlib.contextmanager. Add to this list to register other decorators that
+ # produce valid context managers.
+ contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager
+
+ # Tells whether missing members accessed in mixin class should be ignored. A
+ # mixin class is detected if its name ends with "mixin" (case insensitive).
+ ignore-mixin-members=yes
+
+ # List of module names for which member attributes should not be checked
+ # (useful for modules/projects where namespaces are manipulated during runtime
+ # and thus existing member attributes cannot be deduced by static analysis. It
+ # supports qualified module names, as well as Unix pattern matching.
+ ignored-modules=
+
+ # List of class names for which member attributes should not be checked (useful
+ # for classes with dynamically set attributes). This supports the use of
+ # qualified names.
+ ignored-classes=optparse.Values,thread._local,_thread._local
+
+ # List of members which are set dynamically and missed by pylint inference
+ # system, and so shouldn't trigger E1101 when accessed. Python regular
+ # expressions are accepted.
+ generated-members=
+
+
+ [FORMAT]
+
+ # Maximum number of characters on a single line.
+ max-line-length=120
+
+ # TODO(https://github.com/PyCQA/pylint/issues/3352): Direct pylint to exempt
+ # lines made too long by directives to pytype.
+
+ # Regexp for a line that is allowed to be longer than the limit.
+ ignore-long-lines=(?x)(
+   ^\s*(\#\ )?<?https?://\S+>?$|
+   ^\s*(from\s+\S+\s+)?import\s+.+$)
+
+ # Allow the body of an if to be on the same line as the test if there is no
+ # else.
+ single-line-if-stmt=yes
+
+ # Maximum number of lines in a module
+ max-module-lines=99999
+
+ # String used as indentation unit. The internal Google style guide mandates 2
+ # spaces. Google's externaly-published style guide says 4, consistent with
+ # PEP 8. Here, we use 2 spaces, for conformity with many open-sourced Google
+ # projects (like TensorFlow).
+ indent-string=' '
+
+ # Number of spaces of indent required inside a hanging or continued line.
+ indent-after-paren=4
+
+ # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+ expected-line-ending-format=
+
+
+ [MISCELLANEOUS]
+
+ # List of note tags to take in consideration, separated by a comma.
+ notes=TODO
+
+
+ [STRING]
+
+ # This flag controls whether inconsistent-quotes generates a warning when the
+ # character used as a quote delimiter is used inconsistently within a module.
+ check-quote-consistency=yes
+
+
+ [VARIABLES]
+
+ # Tells whether we should check for unused import in __init__ files.
+ init-import=no
+
+ # A regular expression matching the name of dummy variables (i.e. expectedly
+ # not used).
+ dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
+
+ # List of additional names supposed to be defined in builtins. Remember that
+ # you should avoid to define new builtins when possible.
+ additional-builtins=
+
+ # List of strings which can identify a callback function by name. A callback
+ # name must start or end with one of those strings.
+ callbacks=cb_,_cb
+
+ # List of qualified module names which can have objects that can redefine
+ # builtins.
+ redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools
+
+
+ [LOGGING]
+
+ # Logging modules to check that the string format arguments are in logging
+ # function parameter format
+ logging-modules=logging,absl.logging,tensorflow.io.logging
+
+
+ [SIMILARITIES]
+
+ # Minimum lines number of a similarity.
+ min-similarity-lines=4
+
+ # Ignore comments when computing similarities.
+ ignore-comments=yes
+
+ # Ignore docstrings when computing similarities.
+ ignore-docstrings=yes
+
+ # Ignore imports when computing similarities.
+ ignore-imports=no
+
+
+ [SPELLING]
+
+ # Spelling dictionary name. Available dictionaries: none. To make it working
+ # install python-enchant package.
+ spelling-dict=
+
+ # List of comma separated words that should not be checked.
+ spelling-ignore-words=
+
+ # A path to a file that contains private dictionary; one word per line.
+ spelling-private-dict-file=
+
+ # Tells whether to store unknown words to indicated private dictionary in
+ # --spelling-private-dict-file option instead of raising a message.
+ spelling-store-unknown-words=no
+
+
+ [IMPORTS]
+
+ # Deprecated modules which should not be used, separated by a comma
+ deprecated-modules=regsub,
+                    TERMIOS,
+                    Bastion,
+                    rexec,
+                    sets
+
+ # Create a graph of every (i.e. internal and external) dependencies in the
+ # given file (report RP0402 must not be disabled)
+ import-graph=
+
+ # Create a graph of external dependencies in the given file (report RP0402 must
+ # not be disabled)
+ ext-import-graph=
+
+ # Create a graph of internal dependencies in the given file (report RP0402 must
+ # not be disabled)
+ int-import-graph=
+
+ # Force import order to recognize a module as part of the standard
+ # compatibility libraries.
+ known-standard-library=
+
+ # Force import order to recognize a module as part of a third party library.
+ known-third-party=enchant, absl
+
+ # Analyse import fallback blocks. This can be used to support both Python 2 and
+ # 3 compatible code, which means that the block might have code that exists
+ # only in one or another interpreter, leading to false positives when analysed.
+ analyse-fallback-blocks=no
+
+
+ [CLASSES]
+
+ # List of method names used to declare (i.e. assign) instance attributes.
+ defining-attr-methods=__init__,
+                       __new__,
+                       setUp
+
+ # List of member names, which should be excluded from the protected access
+ # warning.
+ exclude-protected=_asdict,
+                   _fields,
+                   _replace,
+                   _source,
+                   _make
+
+ # List of valid names for the first argument in a class method.
+ valid-classmethod-first-arg=cls,
+                             class_
+
+ # List of valid names for the first argument in a metaclass class method.
+ valid-metaclass-classmethod-first-arg=mcs
+
+
+ [EXCEPTIONS]
+
+ # Exceptions that will emit a warning when being caught. Defaults to
+ # "Exception"
+ overgeneral-exceptions=builtins.BaseException,
+                        builtins.Exception
Dockerfile ADDED
@@ -0,0 +1,27 @@
+ FROM continuumio/miniconda3
+
+ ARG OPENAI_API_KEY
+ ENV OPENAI_API_KEY=${OPENAI_API_KEY}
+
+ ARG BING_API_KEY
+ ENV BING_API_KEY=${BING_API_KEY}
+
+ # Set environment variables
+ ENV PATH=/opt/conda/bin:$PATH
+
+ # Clone the git repository
+ RUN git clone https://github.com/InternLM/MindSearch.git /app
+
+ WORKDIR /app
+
+ # Create the fastapi conda environment and install the dependencies
+ RUN conda create --name fastapi python=3.10 -y && \
+     conda run -n fastapi pip install -r requirements.txt && \
+     conda clean --all -f -y
+
+ # Expose the default FastAPI port
+ EXPOSE 8000
+
+ # Start the FastAPI service
+ ENTRYPOINT ["conda", "run", "--no-capture-output", "-n", "fastapi"]
+ CMD ["python3", "-m", "mindsearch.app", "--asy", "--host", "0.0.0.0", "--port", "8002"]
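The two `ARG`s above must be supplied at build time, and the service started by `CMD` listens on port 8002 inside the container. A hedged sketch of building and running the image from Python (the image tag and port mapping are illustrative assumptions, not part of this commit):

```python
import os
import subprocess

IMAGE = "mindsearch-backend"  # illustrative tag

# Pass the API keys declared as ARG in the Dockerfile above.
subprocess.run(
    [
        "docker", "build",
        "--build-arg", f"OPENAI_API_KEY={os.environ.get('OPENAI_API_KEY', '')}",
        "--build-arg", f"BING_API_KEY={os.environ.get('BING_API_KEY', '')}",
        "-t", IMAGE, ".",
    ],
    check=True,
)

# The CMD runs mindsearch.app on port 8002, so publish that port on the host.
subprocess.run(["docker", "run", "--rm", "-p", "8002:8002", IMAGE], check=True)
```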
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2024 Shanghai AI Laboratory.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
README.md ADDED
@@ -0,0 +1,171 @@
+
+
+ <div id="top"></div>
+
+ <div align="center">
+
+ <img src="assets/logo.svg" style="width: 50%; height: auto;">
+
+ [📃 Paper](https://arxiv.org/abs/2407.20183) | [💻 Demo](https://internlm-chat.intern-ai.org.cn/)
+
+ English | [简体中文](README_zh-CN.md)
+
+ <https://github.com/user-attachments/assets/44ffe4b9-be26-4b93-a77b-02fed16e33fe>
+
+ </div>
+ </p>
+
+ ## ✨ MindSearch: Mimicking Human Minds Elicits Deep AI Searcher
+
+ ## 📅 Changelog
+
+ - 2024/11/05: 🥳 MindSearch is now deployed on Puyu! 👉 [Try it](https://internlm-chat.intern-ai.org.cn/) 👈
+ - Refactored the agent module based on [Lagent v0.5](https://github.com/InternLM/lagent) for better concurrency performance.
+ - Improved the UI to reflect simultaneous multi-query search.
+
+
+ ## ⚽️ Build Your Own MindSearch
+
+ ### Step 1: Install Dependencies
+
+ ```bash
+ git clone https://github.com/InternLM/MindSearch
+ cd MindSearch
+ pip install -r requirements.txt
+ ```
+
+ ### Step 2: Set Up Environment Variables
+
+ Before setting up the API, you need to configure environment variables. Rename the `.env.example` file to `.env` and fill in the required values.
+
+ ```bash
+ mv .env.example .env
+ # Open .env and add your keys and model configurations
+ ```
+
+ ### Step 3: Set Up the MindSearch API
+
+ Start the FastAPI server.
+
+ ```bash
+ python -m mindsearch.app --lang en --model_format internlm_silicon --search_engine DuckDuckGoSearch --asy
+ ```
+
+ - `--lang`: language of the model, `en` for English and `cn` for Chinese.
+ - `--model_format`: format of the model.
+   - `internlm_server` for InternLM2.5-7b-chat served locally. (InternLM2.5-7b-chat is better optimized for Chinese.)
+   - `gpt4` for GPT4.
+   If you want to use other models, please modify [models](./mindsearch/agent/models.py).
+ - `--search_engine`: search engine.
+   - `DuckDuckGoSearch` for the DuckDuckGo search engine.
+   - `BingSearch` for the Bing search engine.
+   - `BraveSearch` for the Brave web search API.
+   - `GoogleSearch` for the Google Serper web search API.
+   - `TencentSearch` for the Tencent search API.
+
+   Please set your web search engine API key as the `WEB_SEARCH_API_KEY` environment variable, unless you are using `DuckDuckGoSearch` (no key required) or `TencentSearch` (which instead requires a secret id as `TENCENT_SEARCH_SECRET_ID` and a secret key as `TENCENT_SEARCH_SECRET_KEY`).
+ - `--asy`: deploy asynchronous agents.
+
+ ### Step 4: Set Up the MindSearch Frontend
+
+ The following frontend interfaces are provided:
+
+ - React
+
+ First, configure the backend URL for the Vite proxy.
+
+ ```bash
+ HOST="127.0.0.1" # modify as you need
+ PORT=8002
+ sed -i -r "s/target:\s*\"\"/target: \"${HOST}:${PORT}\"/" frontend/React/vite.config.ts
+ ```
+
+ ```bash
+ # Install Node.js and npm
+ # for Ubuntu
+ sudo apt install nodejs npm
+
+ # for Windows
+ # download from https://nodejs.org/zh-cn/download/prebuilt-installer
+
+ # Install dependencies
+
+ cd frontend/React
+ npm install
+ npm start
+ ```
+
+ Details can be found in [React](./frontend/React/README.md).
+
+ - Gradio
+
+ ```bash
+ python frontend/mindsearch_gradio.py
+ ```
+
+ - Streamlit
+
+ ```bash
+ streamlit run frontend/mindsearch_streamlit.py
+ ```
+
+ ## 🌐 Change Web Search API
+
+ To use a different type of web search API, modify the `searcher_type` attribute in the `searcher_cfg` located in `mindsearch/agent/__init__.py`. Currently supported web search APIs include:
+
+ - `GoogleSearch`
+ - `DuckDuckGoSearch`
+ - `BraveSearch`
+ - `BingSearch`
+ - `TencentSearch`
+
+ For example, to change to the Brave Search API, you would configure it as follows:
+
+ ```python
+ BingBrowser(
+     searcher_type='BraveSearch',
+     topk=2,
+     api_key=os.environ.get('BRAVE_API_KEY', 'YOUR BRAVE API')
+ )
+ ```
+
+ ## 🐞 Using the Backend Without Frontend
+
+ For users who prefer to interact with the backend directly, use the `backend_example.py` script. This script demonstrates how to send a query to the backend and process the response.
+
+ ```bash
+ python backend_example.py
+ ```
+
+ Make sure you have set up the environment variables and the backend is running before executing the script.
+
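The `backend_example.py` script itself is not among the 50 files shown in this view, but the request it performs can be approximated from the calls made in `app.py`: POST the query to `/solve` and read the server-sent `data:` lines as they stream back. A minimal sketch under those assumptions:

```python
import json

import requests

# Endpoint and payload mirror app.py; adjust host/port to your deployment.
url = "http://localhost:8002/solve"
payload = {"inputs": "What are the top 10 e-commerce websites?"}

with requests.post(url, json=payload, timeout=60, stream=True) as resp:
    chunk = None
    for line in resp.iter_lines(decode_unicode=True):
        if not line or line.startswith(": ping"):
            continue  # skip blank lines and keep-alive pings
        if line.startswith("data: "):
            line = line[len("data: "):]
        chunk = json.loads(line)
        # Each chunk names the graph node currently being expanded.
        print("current node:", chunk.get("current_node"))

# The last chunk carries the aggregated response object.
if chunk is not None:
    print(json.dumps(chunk["response"], ensure_ascii=False)[:500])
```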
+ ## 🐞 Debug Locally
+
+ ```bash
+ python -m mindsearch.terminal
+ ```
+
+ ## 📝 License
+
+ This project is released under the [Apache 2.0 license](LICENSE).
+
+ ## Citation
+
+ If you find this project useful in your research, please consider citing:
+
+ ```
+ @article{chen2024mindsearch,
+   title={MindSearch: Mimicking Human Minds Elicits Deep AI Searcher},
+   author={Chen, Zehui and Liu, Kuikun and Wang, Qiuchen and Liu, Jiangning and Zhang, Wenwei and Chen, Kai and Zhao, Feng},
+   journal={arXiv preprint arXiv:2407.20183},
+   year={2024}
+ }
+ ```
+
+ ## Our Projects
+
+ Explore our additional research on large language models, focusing on LLM agents.
+
+ - [Lagent](https://github.com/InternLM/lagent): A lightweight framework for building LLM-based agents
+ - [AgentFLAN](https://github.com/InternLM/Agent-FLAN): An innovative approach for constructing and training with high-quality agent datasets (ACL 2024 Findings)
+ - [T-Eval](https://github.com/open-compass/T-Eval): A fine-grained tool utilization evaluation benchmark (ACL 2024)
README_zh-CN.md ADDED
@@ -0,0 +1,140 @@
+ <div id="top"></div>
+
+ <div align="center">
+
+ <img src="assets/logo.svg" style="width: 50%; height: auto;">
+
+ [📃 Paper](https://arxiv.org/abs/2407.20183) | [💻 浦语入口](https://internlm-chat.intern-ai.org.cn/)
+
+ [English](README.md) | 简体中文
+
+ <https://github.com/user-attachments/assets/b4312e9c-5b40-43e5-8c69-929c373e4965>
+
+ </div>
+ </p>
+
+ ## ✨ MindSearch: Mimicking Human Minds Elicits Deep AI Searcher
+
+ MindSearch 是一个开源的 AI 搜索引擎框架,具有与 Perplexity.ai Pro 相同的性能。您可以轻松部署它来构建您自己的搜索引擎,可以使用闭源 LLM(如 GPT、Claude)或开源 LLM([InternLM2.5 系列模型](https://huggingface.co/internlm/internlm2_5-7b-chat)经过专门优化,能够在 MindSearch 框架中提供卓越的性能;其他开源模型没做过具体测试)。其拥有以下特性:
+
+ - 🤔 **任何想知道的问题**:MindSearch 通过搜索解决你在生活中遇到的各种问题
+ - 📚 **深度知识探索**:MindSearch 通过数百网页的浏览,提供更广泛、深层次的答案
+ - 🔍 **透明的解决方案路径**:MindSearch 提供了思考路径、搜索关键词等完整的内容,提高回复的可信度和可用性。
+ - 💻 **多种用户界面**:为用户提供各种接口,包括 React、Gradio、Streamlit 和本地调试。根据需要选择任意类型。
+ - 🧠 **动态图构建过程**:MindSearch 将用户查询分解为图中的子问题节点,并根据 WebSearcher 的搜索结果逐步扩展图。
+
+ <div align="center">
+
+ <img src="assets/teaser.gif">
+
+ </div>
+
+ ## ⚡️ MindSearch VS 其他 AI 搜索引擎
+
+ 在深度、广度和生成响应的准确性三个方面,对 ChatGPT-Web、Perplexity.ai(Pro)和 MindSearch 的表现进行比较。评估结果基于 100 个由人类专家精心设计的现实问题,并由 5 位专家进行评分\*。
+
+ <div align="center">
+ <img src="assets/mindsearch_openset.png" width="90%">
+ </div>
+ * 所有实验均在 2024 年 7 月 7 日之前完成。
+
+ ## ⚽️ 构建您自己的 MindSearch
+
+ ### 步骤1: 依赖安装
+
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ ### 步骤2: 启动 MindSearch API
+
+ 启动 FastAPI 服务器
+
+ ```bash
+ python -m mindsearch.app --lang en --model_format internlm_server --search_engine DuckDuckGoSearch
+ ```
+
+ - `--lang`: 模型的语言,`en` 为英语,`cn` 为中文。
+ - `--model_format`: 模型的格式。
+   - `internlm_server` 为 InternLM2.5-7b-chat 本地服务器。
+   - `gpt4` 为 GPT4。
+   如果您想使用其他模型,请修改 [models](./mindsearch/agent/models.py)
+ - `--search_engine`: 搜索引擎。
+   - `DuckDuckGoSearch` 为 DuckDuckGo 搜索引擎。
+   - `BingSearch` 为 Bing 搜索引擎。
+   - `BraveSearch` 为 Brave 搜索引擎。
+   - `GoogleSearch` 为 Google Serper 搜索引擎。
+   - `TencentSearch` 为 Tencent 搜索引擎。
+
+ 请将 DuckDuckGo 和 Tencent 以外的网页搜索引擎 API 密钥设置为 `WEB_SEARCH_API_KEY` 环境变量。如果使用 DuckDuckGo,则无需设置;如果使用 Tencent,请设置 `TENCENT_SEARCH_SECRET_ID` 和 `TENCENT_SEARCH_SECRET_KEY`。
+
+ ### 步骤3: 启动 MindSearch 前端
+
+ 提供以下几种前端界面:
+
+ - React
+
+ 首先配置 Vite 的 API 代理,指定实际后端 URL
+
+ ```bash
+ HOST="127.0.0.1"
+ PORT=8002
+ sed -i -r "s/target:\s*\"\"/target: \"${HOST}:${PORT}\"/" frontend/React/vite.config.ts
+ ```
+
+ ```bash
+ # 安装 Node.js 和 npm
+ # 对于 Ubuntu
+ sudo apt install nodejs npm
+ # 对于 Windows
+ # 从 https://nodejs.org/zh-cn/download/prebuilt-installer 下载
+
+ cd frontend/React
+ npm install
+ npm start
+ ```
+
+ 更多细节请参考 [React](./frontend/React/README.md)
+
+ - Gradio
+
+ ```bash
+ python frontend/mindsearch_gradio.py
+ ```
+
+ - Streamlit
+
+ ```bash
+ streamlit run frontend/mindsearch_streamlit.py
+ ```
+
+ ## 🐞 本地调试
+
+ ```bash
+ python mindsearch/terminal.py
+ ```
+
+ ## 📝 许可证
+
+ 该项目按照 [Apache 2.0 许可证](LICENSE) 发行。
+
+ ## 学术引用
+
+ 如果此项目对您的研究有帮助,请参考如下方式进行引用:
+
+ ```
+ @article{chen2024mindsearch,
+   title={MindSearch: Mimicking Human Minds Elicits Deep AI Searcher},
+   author={Chen, Zehui and Liu, Kuikun and Wang, Qiuchen and Liu, Jiangning and Zhang, Wenwei and Chen, Kai and Zhao, Feng},
+   journal={arXiv preprint arXiv:2407.20183},
+   year={2024}
+ }
+ ```
+
+ ## 相关项目
+
+ 关注我们其他在大语言模型上的一些探索,主要为 LLM 智能体方向。
+
+ - [Lagent](https://github.com/InternLM/lagent): 一个轻便简洁的大语言模型智能体框架
+ - [AgentFLAN](https://github.com/InternLM/Agent-FLAN): 一套构建高质量智能体语料和训练模型的方法 (ACL 2024 Findings)
+ - [T-Eval](https://github.com/open-compass/T-Eval): 一个细粒度评估 LLM 调用工具能力的评测集 (ACL 2024)
app.py ADDED
@@ -0,0 +1,318 @@
+ import json
+ import mimetypes
+ import os
+ import sys
+ import tempfile
+
+ import gradio as gr
+ import requests
+
+ sys.path.insert(0, os.path.dirname(__file__))
+
+ import schemdraw
+ from frontend.gradio_agentchatbot.agentchatbot import AgentChatbot
+ from frontend.gradio_agentchatbot.utils import ChatFileMessage, ChatMessage, ThoughtMetadata
+ from lagent.schema import AgentStatusCode
+ from schemdraw import flow
+
+
+ import os
+
+ os.system("pip install tenacity")
+ os.system("python -m mindsearch.app --lang en --model_format internlm_silicon --search_engine DuckDuckGoSearch &")
+
+
+ print('MindSearch is running on http://')
+
+ PLANNER_HISTORY = []
+ SEARCHER_HISTORY = []
+
+
+ def create_search_graph(adjacency_list: dict):
+     import matplotlib.pyplot as plt
+
+     plt.rcParams["font.sans-serif"] = ["SimHei"]
+
+     with schemdraw.Drawing(fontsize=10, unit=1) as graph:
+         node_pos, nodes, edges = {}, {}, []
+         if "root" in adjacency_list:
+             queue, layer, response_level = ["root"], 0, 0
+             while queue:
+                 layer_len = len(queue)
+                 for i in range(layer_len):
+                     node_name = queue.pop(0)
+                     node_pos[node_name] = (layer * 5, -i * 3)
+                     for item in adjacency_list[node_name]:
+                         if item["name"] == "response":
+                             response_level = max(response_level, (layer + 1) * 5)
+                         else:
+                             queue.append(item["name"])
+                             edges.append((node_name, item["name"]))
+                 layer += 1
+             for node_name, (x, y) in node_pos.items():
+                 if node_name == "root":
+                     node = flow.Terminal().label(node_name).at((x, y)).color("pink")
+                 else:
+                     node = flow.RoundBox(w=3.5, h=1.75).label(node_name).at((x, y)).color("teal")
+                 nodes[node_name] = node
+             if response_level:
+                 response_node = (
+                     flow.Terminal().label("response").at((response_level, 0)).color("orange")
+                 )
+                 nodes["response"] = response_node
+             for start, end in edges:
+                 flow.Arc3(arrow="->").linestyle("--" if end == "response" else "-").at(
+                     nodes[start].E
+                 ).to(nodes[end].W).color("grey" if end == "response" else "lightblue")
+     return graph
+
+
+ def draw_search_graph(adjacency_list: dict, suffix=".png", dpi=360) -> str:
+     g = create_search_graph(adjacency_list)
+     path = tempfile.mktemp(suffix=suffix)
+     g.save(path, dpi=dpi)
+     return path
+
+
+ def rst_mem():
+     """Reset the chatbot memory."""
+     if PLANNER_HISTORY:
+         PLANNER_HISTORY.clear()
+     return [], [], 0
+
+
+ def format_response(gr_history, message, response, idx=-1):
+     if idx < 0:
+         idx = len(gr_history) + idx
+     if message["stream_state"] == AgentStatusCode.STREAM_ING:
+         gr_history[idx].content = response
+     elif message["stream_state"] == AgentStatusCode.CODING:
+         if gr_history[idx].thought_metadata.tool_name is None:
+             gr_history[idx].content = gr_history[idx].content.split("<|action_start|>")[0]
+             gr_history.insert(
+                 idx + 1,
+                 ChatMessage(
+                     role="assistant",
+                     content=response,
+                     thought_metadata=ThoughtMetadata(tool_name="🖥️ Code Interpreter"),
+                 ),
+             )
+         else:
+             gr_history[idx].content = response
+     elif message["stream_state"] == AgentStatusCode.PLUGIN_START:
+         if isinstance(response, dict):
+             response = json.dumps(response, ensure_ascii=False, indent=4)
+         if gr_history[idx].thought_metadata.tool_name is None:
+             gr_history[idx].content = gr_history[idx].content.split("<|action_start|>")[0]
+             gr_history.insert(
+                 idx + 1,
+                 ChatMessage(
+                     role="assistant",
+                     content="```json\n" + response,
+                     thought_metadata=ThoughtMetadata(tool_name="🌐 Web Browser"),
+                 ),
+             )
+         else:
+             gr_history[idx].content = "```json\n" + response
+     elif message["stream_state"] == AgentStatusCode.PLUGIN_END and isinstance(response, dict):
+         gr_history[idx].content = (
+             f"```json\n{json.dumps(response, ensure_ascii=False, indent=4)}\n```"
+         )
+     elif message["stream_state"] in [AgentStatusCode.CODE_RETURN, AgentStatusCode.PLUGIN_RETURN]:
+         try:
+             content = json.loads(message["content"])
+         except json.decoder.JSONDecodeError:
+             content = message["content"]
+         if gr_history[idx].thought_metadata.tool_name:
+             gr_history.insert(
+                 idx + 1,
+                 ChatMessage(
+                     role="assistant",
+                     content=(
+                         content
+                         if isinstance(content, str)
+                         else f"\n```json\n{json.dumps(content, ensure_ascii=False, indent=4)}\n```\n"
+                     ),
+                     thought_metadata=ThoughtMetadata(tool_name="Execution"),
+                 ),
+             )
+             gr_history.insert(idx + 2, ChatMessage(role="assistant", content=""))
+
+
+ def predict(history_planner, history_searcher, node_cnt):
+
+     def streaming(raw_response):
+         for chunk in raw_response.iter_lines(
+             chunk_size=8192, decode_unicode=False, delimiter=b"\n"
+         ):
+             if chunk:
+                 decoded = chunk.decode("utf-8")
+                 if decoded == "\r":
+                     continue
+                 if decoded[:6] == "data: ":
+                     decoded = decoded[6:]
+                 elif decoded.startswith(": ping - "):
+                     continue
+                 response = json.loads(decoded)
+                 yield (
+                     response["current_node"],
+                     (
+                         response["response"]["formatted"]["node"][response["current_node"]]
+                         if response["current_node"]
+                         else response["response"]
+                     ),
+                     response["response"]["formatted"]["adjacency_list"],
+                 )
+
+     global PLANNER_HISTORY
+     PLANNER_HISTORY.extend(history_planner[-3:])
+     search_graph_msg = history_planner[-1]
+
+     url = "http://localhost:8002/solve"
+     data = {"inputs": PLANNER_HISTORY[-3].content}
+     raw_response = requests.post(url, json=data, timeout=60, stream=True)
+
+     node_id2msg_idx = {}
+     for resp in streaming(raw_response):
+         node_name, agent_message, adjacency_list = resp
+         dedup_nodes = set(adjacency_list) | {
+             val["name"] for vals in adjacency_list.values() for val in vals
+         }
+         if dedup_nodes and len(dedup_nodes) != node_cnt:
+             node_cnt = len(dedup_nodes)
+             graph_path = draw_search_graph(adjacency_list)
+             search_graph_msg.file.path = graph_path
+             search_graph_msg.file.mime_type = mimetypes.guess_type(graph_path)[0]
+         if node_name:
+             if node_name in ["root", "response"]:
+                 continue
+             node_id = f'【{node_name}】{agent_message["content"]}'
+             agent_message = agent_message["response"]
+             response = (
+                 agent_message["formatted"]["action"]
+                 if agent_message["stream_state"]
+                 in [AgentStatusCode.PLUGIN_START, AgentStatusCode.PLUGIN_END]
+                 else agent_message["formatted"] and agent_message["formatted"].get("thought")
+             )
+             if node_id not in node_id2msg_idx:
+                 node_id2msg_idx[node_id] = len(history_searcher) + 1
+                 history_searcher.append(ChatMessage(role="user", content=node_id))
+                 history_searcher.append(ChatMessage(role="assistant", content=""))
+             offset = len(history_searcher)
+             format_response(history_searcher, agent_message, response, node_id2msg_idx[node_id])
+             flag, incr = False, len(history_searcher) - offset
+             for key, value in node_id2msg_idx.items():
+                 if flag or key == node_id:
+                     node_id2msg_idx[key] = value + incr
+                     if not flag:
+                         flag = True
+             yield history_planner, history_searcher, node_cnt
+         else:
+             response = (
+                 agent_message["formatted"]["action"]
+                 if agent_message["stream_state"]
+                 in [AgentStatusCode.CODING, AgentStatusCode.CODE_END]
+                 else agent_message["formatted"] and agent_message["formatted"].get("thought")
+             )
+             format_response(history_planner, agent_message, response, -2)
+             if agent_message["stream_state"] == AgentStatusCode.END:
+                 PLANNER_HISTORY = history_planner
+             yield history_planner, history_searcher, node_cnt
+     return history_planner, history_searcher, node_cnt
+
+
+ with gr.Blocks(css=os.path.join(os.path.dirname(__file__), "css", "gradio_front.css")) as demo:
+     with gr.Column(elem_classes="chat-box"):
+         gr.HTML("""<h1 align="center">MindSearch Gradio Demo</h1>""")
+         gr.HTML(
+             """<p style="text-align: center; font-family: Arial, sans-serif;">
+             MindSearch is an open-source AI Search Engine Framework with Perplexity.ai Pro performance.
+             You can deploy your own Perplexity.ai-style search engine using either
+             closed-source LLMs (GPT, Claude)
+             or open-source LLMs (InternLM2.5-7b-chat).</p> """
+         )
+         gr.HTML(
+             """
+             <div style="text-align: center; font-size: 16px;">
+                 <a href="https://github.com/InternLM/MindSearch" style="margin-right: 15px;
+                 text-decoration: none; color: #4A90E2;" target="_blank">🔗 GitHub</a>
+                 <a href="https://arxiv.org/abs/2407.20183" style="margin-right: 15px;
+                 text-decoration: none; color: #4A90E2;" target="_blank">📄 Arxiv</a>
+                 <a href="https://huggingface.co/papers/2407.20183" style="margin-right:
+                 15px; text-decoration: none; color: #4A90E2;" target="_blank">📚 Hugging Face Papers</a>
+                 <a href="https://huggingface.co/spaces/internlm/MindSearch"
+                 style="text-decoration: none; color: #4A90E2;" target="_blank">🤗 Hugging Face Demo</a>
+             </div>"""
+         )
+         gr.HTML(
+             """
+             <h1 align='right'><img
+             src=
+             'https://raw.githubusercontent.com/InternLM/MindSearch/98fd84d566fe9e3adc5028727f72f2944098fd05/assets/logo.svg'
+             alt='MindSearch Logo1' class="logo" width="200"></h1> """
+         )
+     node_count = gr.State(0)
+     with gr.Row():
+         planner = AgentChatbot(
+             label="planner",
+             height=600,
+             show_label=True,
+             show_copy_button=True,
+             bubble_full_width=False,
+             render_markdown=True,
+             elem_classes="chatbot-container",
+         )
+         searcher = AgentChatbot(
+             label="searcher",
+             height=600,
+             show_label=True,
+             show_copy_button=True,
+             bubble_full_width=False,
+             render_markdown=True,
+             elem_classes="chatbot-container",
+         )
+     with gr.Row(elem_classes="chat-box"):
+         # Text input area
+         user_input = gr.Textbox(
+             show_label=False,
+             placeholder="Type your message...",
+             lines=1,
+             container=False,
+             elem_classes="editor",
+             scale=4,
+         )
+         # Buttons (now in the same Row)
+         submitBtn = gr.Button("submit", variant="primary", elem_classes="toolbarButton", scale=1)
+         clearBtn = gr.Button("clear", variant="secondary", elem_classes="toolbarButton", scale=1)
+     with gr.Row(elem_classes="examples-container"):
+         examples_component = gr.Examples(
+             [
+                 ["Find legal precedents in contract law."],
+                 ["What are the top 10 e-commerce websites?"],
+                 ["Generate a report on global climate change."],
+             ],
+             inputs=user_input,
+             label="Try these examples:",
+         )
+
+     def user(query, history):
+         history.append(ChatMessage(role="user", content=query))
+         history.append(ChatMessage(role="assistant", content=""))
+         graph_path = draw_search_graph({"root": []})
+         history.append(
+             ChatFileMessage(
+                 role="assistant",
+                 file=gr.FileData(path=graph_path, mime_type=mimetypes.guess_type(graph_path)[0]),
+             )
+         )
+         return "", history
+
+     submitBtn.click(user, [user_input, planner], [user_input, planner], queue=False).then(
+         predict,
+         [planner, searcher, node_count],
+         [planner, searcher, node_count],
+     )
+     clearBtn.click(rst_mem, None, [planner, searcher, node_count], queue=False)
+
+ demo.queue()
+ demo.launch(server_name="127.0.0.1", inbrowser=True, share=False)
assets/logo.svg ADDED
assets/mindsearch_openset.png ADDED

Git LFS Details

  • SHA256: f40b4523db6ab851573642b438503c7c644ba989fd3c6a83d05bd32c0aa558f2
  • Pointer size: 131 Bytes
  • Size of remote file: 118 kB
assets/teaser.gif ADDED

Git LFS Details

  • SHA256: c0e6cd23ade8a36c7e94b6c204d0cf74a5e5a5baa8200571555fcc9183f28612
  • Pointer size: 132 Bytes
  • Size of remote file: 3.28 MB
backend_example.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ import requests
4
+
5
+ # Define the backend URL
6
+ url = "http://localhost:8002/solve"
7
+ headers = {"Content-Type": "application/json"}
8
+
9
+
10
+ # Function to send a query to the backend and get the response
11
+ def get_response(query):
12
+ # Prepare the input data
13
+ data = {"inputs": query}
14
+
15
+ # Send the request to the backend
16
+ response = requests.post(url, headers=headers, data=json.dumps(data), timeout=20, stream=True)
17
+
18
+ # Process the streaming response
19
+ for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\n"):
20
+ if chunk:
21
+ decoded = chunk.decode("utf-8")
22
+ if decoded == "\r":
23
+ continue
24
+ if decoded[:6] == "data: ":
25
+ decoded = decoded[6:]
26
+ elif decoded.startswith(": ping - "):
27
+ continue
28
+ response_data = json.loads(decoded)
29
+ agent_return = response_data["response"]
30
+ node_name = response_data["current_node"]
31
+ print(f"Node: {node_name}, Response: {agent_return['response']}")
32
+
33
+
34
+ # Example usage
35
+ if __name__ == "__main__":
36
+ query = "What is the weather like today in New York?"
37
+ get_response(query)
docker/README.md ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MSDL (MindSearch Docker Launcher) User Guide
2
+
3
+ English | [简体中文](README_zh-CN.md)
4
+
5
+ ## Introduction
6
+
7
+ MSDL (MindSearch Docker Launcher) is a command-line tool designed to simplify the deployment process of MindSearch. It helps users configure and launch the Docker environment for MindSearch through an interactive interface, reducing the complexity of deployment. MSDL primarily serves as a scaffold for deploying containers and does not involve optimization of MindSearch's core logic.
8
+
9
+ ## Prerequisites
10
+
11
+ - Python 3.7 or higher
12
+ - Docker with Docker Compose (recent Docker releases bundle Compose v2)
13
+ - Git (for cloning the repository)
14
+ - Stable internet connection
15
+ - Sufficient disk space (required space varies depending on the selected deployment option)
16
+
17
+ ## Installation Steps
18
+
19
+ 1. Clone the MindSearch repository:
20
+ ```bash
21
+ git clone https://github.com/InternLM/MindSearch.git # If you have already cloned the repository, you can skip this step.
22
+ cd MindSearch/docker
23
+ ```
24
+
25
+ 2. Install MSDL:
26
+ ```bash
27
+ pip install -e .
28
+ ```
29
+
30
+ ## Usage
31
+
32
+ After installation, you can run the MSDL command from any directory:
33
+
34
+ ```bash
35
+ msdl
36
+ ```
37
+
38
+ Follow the interactive prompts for configuration:
39
+ - Choose the language for the Agent (Chinese or English; this only affects the language of prompts).
40
+ - Select the model deployment type (local model or cloud model).
41
+ - Choose the model format:
42
+ - Currently, only `internlm_silicon` works properly for cloud models.
43
+ - For local models, only `internlm_server` has passed tests and runs correctly.
44
+ - Enter the necessary API keys (e.g., SILICON_API_KEY).
45
+
46
+ MSDL will automatically perform the following actions:
47
+ - Copy and configure the necessary Dockerfile and docker-compose.yaml files.
48
+ - Build Docker images.
49
+ - Launch Docker containers.
50
+
51
+ ## Deployment Options Comparison
52
+
53
+ ### Cloud Model Deployment (Recommended)
54
+
55
+ **Advantages**:
56
+ - Lightweight deployment with minimal disk usage (frontend around 510MB, backend around 839MB).
57
+ - No need for high-performance hardware.
58
+ - Easy to deploy and maintain.
59
+ - You can freely use the `internlm/internlm2_5-7b-chat` model via SiliconCloud.
60
+ - High concurrency, fast inference speed.
61
+
62
+ **Instructions**:
63
+ - Select the "Cloud Model" option.
64
+ - Choose "internlm_silicon" as the model format.
65
+ - Enter the SiliconCloud API Key (register at https://cloud.siliconflow.cn/ to obtain it).
66
+
67
+ **Important Notes**:
68
+ - The `internlm/internlm2_5-7b-chat` model is freely accessible on SiliconCloud.
69
+ - MindSearch has no financial relationship with SiliconCloud; this service is recommended solely because it provides valuable resources to the open-source community.
70
+
71
+ ### Local Model Deployment
72
+
73
+ **Features**:
74
+ - Uses the `openmmlab/lmdeploy` image.
75
+ - Based on the PyTorch environment.
76
+ - Requires significant disk space (backend container 15GB+, model 15GB+, totaling 30GB+).
77
+ - Requires a powerful GPU (12GB or more of VRAM recommended).
78
+
79
+ **Instructions**:
80
+ - Select the "Local Model" option.
81
+ - Choose "internlm_server" as the model format.
82
+
83
+ **Relevant Links**:
84
+ - lmdeploy image: https://hub.docker.com/r/openmmlab/lmdeploy/tags
85
+ - InternLM2.5 project: https://huggingface.co/internlm/internlm2_5-7b-chat
86
+
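+ Before choosing local deployment, it helps to confirm that Docker can actually see your GPU. The commands below are only a sanity check and assume an NVIDIA GPU with the NVIDIA Container Toolkit installed; the CUDA image tag is just an example, so pick one that matches your driver:
+
+ ```bash
+ # Host-side check: is the driver working and the GPU visible?
+ nvidia-smi
+ # Container-side check: can Docker containers access the GPU?
+ docker run --rm --gpus all nvidia/cuda:12.1.0-base-ubuntu22.04 nvidia-smi
+ ```
+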
87
+ ## Notes
88
+
89
+ - Currently, only the `internlm_silicon` format works properly for cloud models, and only the `internlm_server` format has passed tests for local models.
90
+ - The language selection only affects the language of the Agent's prompts and does not change the language of the React frontend.
91
+ - The first run might take a long time to download necessary model files and Docker images.
92
+ - When using cloud models, ensure a stable network connection.
93
+
94
+ ## Troubleshooting
95
+
96
+ 1. Ensure the Docker service is running.
97
+ 2. Check if there is sufficient disk space.
98
+ 3. Ensure all necessary environment variables are set correctly.
99
+ 4. Check if the network connection is stable.
100
+ 5. Verify the validity of API keys (e.g., for cloud models).
101
+
102
+ If problems persist, check the Issues section in the MindSearch GitHub repository or submit a new issue.
103
+
104
+ ## Privacy and Security
105
+
106
+ MSDL is a locally executed tool and does not transmit any API keys or sensitive information. All configuration information is stored in the `msdl/temp/.env` file, used only to simplify the deployment process.
107
+
108
+ ## Updating MSDL
109
+
110
+ To update MSDL to the latest version, follow these steps:
111
+
112
+ 1. Navigate to the MindSearch directory.
113
+ 2. Pull the latest code:
114
+ ```bash
115
+ git pull origin main
116
+ ```
117
+ 3. Reinstall MSDL:
118
+ ```bash
119
+ cd docker
120
+ pip install -e .
121
+ ```
122
+
123
+ ## Conclusion
124
+
125
+ If you have any questions or suggestions, feel free to submit an issue on GitHub or contact us directly. Thank you for using MindSearch and MSDL!
docker/README_zh-CN.md ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MSDL (MindSearch Docker Launcher) 使用指南
2
+
3
+ [English](README.md) | 简体中文
4
+
5
+ ## 简介
6
+
7
+ MSDL (MindSearch Docker Launcher) 是一个专为简化 MindSearch 部署过程而设计的命令行工具。它通过交互式界面帮助用户轻松配置和启动 MindSearch 的 Docker 环境,降低了部署的复杂性。MSDL 主要作为部署容器的脚手架,不涉及 MindSearch 核心逻辑的优化。
8
+
9
+ ## 环境要求
10
+
11
+ - Python 3.7 或更高版本
12
+ - Docker (需包含 Docker Compose,新版本的 Docker 通常已集成)
13
+ - Git (用于克隆仓库)
14
+ - 稳定的网络连接
15
+ - 充足的磁盘空间(根据选择的部署方案,所需空间有所不同)
16
+
17
+ ## 安装步骤
18
+
19
+ 1. 克隆 MindSearch 仓库:
20
+ ```bash
21
+ git clone https://github.com/InternLM/MindSearch.git # 已经克隆过的,可以忽略执行此步骤
22
+ cd MindSearch/docker
23
+ ```
24
+
25
+ 2. 安装 MSDL:
26
+ ```bash
27
+ pip install -e .
28
+ ```
29
+
30
+ ## 使用方法
31
+
32
+ 安装完成后,您可以在任意目录下运行 MSDL 命令:
33
+
34
+ ```bash
35
+ msdl
36
+ ```
37
+
38
+ 按照交互式提示进行配置:
39
+ - 选择 Agent 使用的语言(中文或英文,仅影响 Agent 的提示词语言)
40
+ - 选择模型部署类型(本地模型或云端模型)
41
+ - 选择模型格式
42
+ - 云端模型目前只有 internlm_silicon 能够正常运行
43
+ - 本地模型目前只有 internlm_server 通过测试,能正常运行
44
+ - 输入必要的 API 密钥(如 SILICON_API_KEY)
45
+
46
+ MSDL 将自动执行以下操作:
47
+ - 复制并配置必要的 Dockerfile 和 docker-compose.yaml 文件
48
+ - 构建 Docker 镜像
49
+ - 启动 Docker 容器
50
+
51
+ ## 部署方案比较
52
+
53
+ ### 云端模型部署(推荐)
54
+
55
+ **优势**:
56
+ - 轻量级部署,磁盘占用小(前端约 510MB,后端约 839MB)
57
+ - 无需高性能硬件
58
+ - 部署和维护简单
59
+ - 使用 SiliconCloud 可免费调用 internlm/internlm2_5-7b-chat 模型
60
+ - 高并发量,推理速度快
61
+
62
+ **使用说明**:
63
+ - 选择"云端模型"选项
64
+ - 选择 "internlm_silicon" 作为模型格式
65
+ - 输入 SiliconCloud API Key(需在 https://cloud.siliconflow.cn/ 注册获取)
66
+
67
+ **重要说明**:
68
+ - internlm/internlm2_5-7b-chat 模型在 SiliconCloud 上可以免费调用,但 API Key 仍需妥善保管好。
69
+ - MindSearch 项目与 SiliconCloud 并无利益关系,只是使用它能更好地体验 MindSearch 的效果,感谢 SiliconCloud 为开源社区所做的贡献。
70
+
71
+ ### 本地模型部署
72
+
73
+ **特点**:
74
+ - 使用 openmmlab/lmdeploy 镜像
75
+ - 基于 PyTorch 环境
76
+ - 需要大量磁盘空间(后端容器 15GB+,模型 15GB+,总计 30GB 以上)
77
+ - 需要强大的 GPU(建议 12GB 或以上显存)
78
+
79
+ **使用说明**:
80
+ - 选择"本地模型"选项
81
+ - 选择 "internlm_server" 作为模型格式
82
+
83
+ **相关链接**:
84
+ - lmdeploy 镜像: https://hub.docker.com/r/openmmlab/lmdeploy/tags
85
+ - InternLM2.5 项目: https://huggingface.co/internlm/internlm2_5-7b-chat
86
+
87
+ ## 注意事项
88
+
89
+ - 云端模型目前只有 internlm_silicon 格式能够正常运行,本地模型只有 internlm_server 格式通过测试能正常运行。
90
+ - 选择语言只会影响 Agent 的提示词语言,不会改变 React 前端的界面语言。
91
+ - 首次运行可能需要较长时间来下载必要的模型文件和 Docker 镜像。
92
+ - 使用云端模型时,请确保网络连接稳定。
93
+
94
+ ## 故障排除
95
+
96
+ 1. 确保 Docker 服务正在运行。
97
+ 2. 检查是否有足够的磁盘空间。
98
+ 3. 确保所有必要的环境变量已正确设置。
99
+ 4. 检查网络连接是否正常。
100
+ 5. 验证 API Key 是否有效(如使用云端模型)。
101
+
102
+ 如果问题持续,请查看 MindSearch 的 GitHub 仓库中的 Issues 部分,或提交新的 Issue。
103
+
104
+ ## 隐私和安全
105
+
106
+ MSDL 是纯本地执行的工具,不会上报任何 API Key 或其他敏感信息。所有配置信息存储在 `msdl/temp/.env` 文件中,仅用于简化部署过程。
107
+
108
+ ## 更新 MSDL
109
+
110
+ 要更新 MSDL 到最新版本,请执行以下步骤:
111
+
112
+ 1. 进入 MindSearch 目录
113
+ 2. 拉取最新的代码:
114
+ ```bash
115
+ git pull origin main
116
+ ```
117
+ 3. 重新安装 MSDL:
118
+ ```bash
119
+ cd docker
120
+ pip install -e .
121
+ ```
122
+
123
+ ## 结语
124
+
125
+ 如有任何问题或建议,欢迎在 GitHub 上提交 Issue 或直接联系我们。感谢您使用 MindSearch 和 MSDL!
docker/msdl/__init__.py ADDED
File without changes
docker/msdl/__main__.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # msdl/__main__.py
2
+ import signal
3
+ import sys
4
+ import argparse
5
+ import os
6
+ from pathlib import Path
7
+
8
+ from msdl.config import (
9
+ BACKEND_DOCKERFILE_DIR,
10
+ FRONTEND_DOCKERFILE_DIR,
11
+ PACKAGE_DIR,
12
+ PROJECT_ROOT,
13
+ REACT_DOCKERFILE,
14
+ TEMP_DIR,
15
+ TEMPLATE_FILES,
16
+ )
17
+ from msdl.docker_manager import (
18
+ check_docker_install,
19
+ run_docker_compose,
20
+ stop_and_remove_containers,
21
+ update_docker_compose_paths,
22
+ )
23
+ from msdl.i18n import (
24
+ setup_i18n,
25
+ t,
26
+ )
27
+ from msdl.utils import (
28
+ copy_templates_to_temp,
29
+ copy_backend_dockerfile,
30
+ copy_frontend_dockerfile,
31
+ modify_docker_compose,
32
+ )
33
+ from msdl.user_interaction import get_user_choices
34
+
35
+
36
+ def signal_handler(signum, frame):
37
+ print(t("TERMINATION_SIGNAL"))
38
+ stop_and_remove_containers()
39
+ sys.exit(0)
40
+
41
+
42
+ def parse_args():
43
+ parser = argparse.ArgumentParser(description=t("CLI_DESCRIPTION"))
44
+ parser.add_argument('--language', '-l',
45
+ help=t("LANGUAGE_HELP"),
46
+ choices=["en", "zh_CN"],
47
+ default=None)
48
+ parser.add_argument('--config-language', action='store_true',
49
+ help=t("CONFIG_LANGUAGE_HELP"))
50
+ return parser.parse_args()
51
+
52
+
53
+ def main():
54
+ # Setup signal handler
55
+ signal.signal(signal.SIGINT, signal_handler)
56
+ signal.signal(signal.SIGTERM, signal_handler)
57
+
58
+ # Initialize i18n
59
+ setup_i18n()
60
+
61
+ # Parse command line arguments
62
+ args = parse_args()
63
+ if args.language:
64
+ # set_language(args.language)
65
+ # Reinitialize i18n with new language
66
+ setup_i18n()
67
+
68
+ try:
69
+ # Check if TEMP_DIR exists, if not, create it
70
+ if not TEMP_DIR.exists():
71
+ TEMP_DIR.mkdir(parents=True, exist_ok=True)
72
+ print(t("TEMP_DIR_CREATED", dir=str(TEMP_DIR)))
73
+
74
+ check_docker_install()
75
+
76
+ # Get user choices using the new module
77
+ backend_language, model, model_format, search_engine = get_user_choices()
78
+
79
+ # Copy template files
80
+ copy_templates_to_temp(TEMPLATE_FILES)
81
+
82
+ # Copy Dockerfiles
83
+ copy_backend_dockerfile(model)
84
+ copy_frontend_dockerfile()
85
+
86
+ # Update paths in docker-compose.yml
87
+ update_docker_compose_paths()
88
+
89
+ # Modify docker-compose.yml based on user choices
90
+ modify_docker_compose(model, backend_language, model_format, search_engine)
91
+
92
+ stop_and_remove_containers()
93
+ run_docker_compose()
94
+
95
+ print(t("DOCKER_LAUNCHER_COMPLETE"))
96
+ except KeyboardInterrupt:
97
+ print(t("KEYBOARD_INTERRUPT"))
98
+ # stop_and_remove_containers()
99
+ sys.exit(0)
100
+ except Exception as e:
101
+ print(t("UNEXPECTED_ERROR", error=str(e)))
102
+ # stop_and_remove_containers()
103
+ sys.exit(1)
104
+
105
+
106
+ if __name__ == "__main__":
107
+ main()
docker/msdl/config.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # msdl/config.py
2
+
3
+ from pathlib import Path
4
+
5
+
6
+ class FileSystemManager:
7
+
8
+ @staticmethod
9
+ def ensure_dir(dir_path):
10
+ """Ensure the directory exists, create if it doesn't"""
11
+ path = Path(dir_path)
12
+ if not path.exists():
13
+ path.mkdir(parents=True, exist_ok=True)
14
+ return path
15
+
16
+ @staticmethod
17
+ def ensure_file(file_path, default_content=""):
18
+ """Ensure the file exists, create if it doesn't"""
19
+ path = Path(file_path)
20
+ if not path.parent.exists():
21
+ FileSystemManager.ensure_dir(path.parent)
22
+ if not path.exists():
23
+ with open(path, "w") as f:
24
+ f.write(default_content)
25
+ return path
26
+
27
+
28
+ # Get the directory where the script is located
29
+ PACKAGE_DIR = Path(__file__).resolve().parent
30
+
31
+ # Get the root directory of the MindSearch project
32
+ PROJECT_ROOT = PACKAGE_DIR.parent.parent
33
+
34
+ # Get the temp directory path, which is actually the working directory for executing the docker compose up command
35
+ TEMP_DIR = FileSystemManager.ensure_dir(PACKAGE_DIR / "temp")
36
+
37
+ # Configuration file name list
38
+ TEMPLATE_FILES = ["docker-compose.yaml"]
39
+
40
+ # Backend Dockerfile directory
41
+ BACKEND_DOCKERFILE_DIR = "backend"
42
+
43
+ # Backend Dockerfile name
44
+ CLOUD_LLM_DOCKERFILE = "cloud_llm.dockerfile"
45
+ LOCAL_LLM_DOCKERFILE = "local_llm.dockerfile"
46
+
47
+ # Frontend Dockerfile directory
48
+ FRONTEND_DOCKERFILE_DIR = "frontend"
49
+
50
+ # Frontend Dockerfile name
51
+ REACT_DOCKERFILE = "react.dockerfile"
52
+
53
+ # i18n translations directory
54
+ TRANSLATIONS_DIR = FileSystemManager.ensure_dir(PACKAGE_DIR / "translations")
55
+
56
+ # Get the path of the .env file
57
+ ENV_FILE_PATH = FileSystemManager.ensure_file(TEMP_DIR / ".env")
docker/msdl/docker_manager.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # msdl/docker_manager.py
2
+
3
+ import os
4
+ import subprocess
5
+ import sys
6
+ from functools import lru_cache
7
+
8
+ import yaml
9
+ from msdl.config import PROJECT_ROOT, TEMP_DIR
10
+ from msdl.i18n import t
11
+
12
+
13
+ @lru_cache(maxsize=1)
14
+ def get_docker_command():
15
+ try:
16
+ subprocess.run(
17
+ ["docker", "compose", "version"], check=True, capture_output=True
18
+ )
19
+ return ["docker", "compose"]
20
+ except subprocess.CalledProcessError:
21
+ try:
22
+ subprocess.run(
23
+ ["docker-compose", "--version"], check=True, capture_output=True
24
+ )
25
+ return ["docker-compose"]
26
+ except subprocess.CalledProcessError:
27
+ print(t("DOCKER_COMPOSE_NOT_FOUND"))
28
+ sys.exit(1)
29
+
30
+
31
+ @lru_cache(maxsize=1)
32
+ def check_docker_install():
33
+ try:
34
+ subprocess.run(["docker", "--version"], check=True, capture_output=True)
35
+ docker_compose_cmd = get_docker_command()
36
+ subprocess.run(
37
+ docker_compose_cmd + ["version"], check=True, capture_output=True
38
+ )
39
+ print(t("DOCKER_INSTALLED"))
40
+ return True
41
+ except subprocess.CalledProcessError as e:
42
+ print(t("DOCKER_INSTALL_ERROR", error=str(e)))
43
+ return False
44
+ except FileNotFoundError:
45
+ print(t("DOCKER_NOT_FOUND"))
46
+ return False
47
+
48
+
49
+ def stop_and_remove_containers():
50
+ docker_compose_cmd = get_docker_command()
51
+ compose_file = os.path.join(TEMP_DIR, "docker-compose.yaml")
52
+
53
+ # Read the docker-compose.yaml file
54
+ try:
55
+ with open(compose_file, "r") as file:
56
+ compose_config = yaml.safe_load(file)
57
+ except Exception as e:
58
+ print(t("COMPOSE_FILE_READ_ERROR", error=str(e)))
59
+ return
60
+
61
+ # Get project name and service names
62
+ project_name = compose_config.get("name", "mindsearch")
63
+ service_names = list(compose_config.get("services", {}).keys())
64
+
65
+ # Use only the project name as the container prefix
66
+ container_prefix = f"{project_name}_"
67
+
68
+ try:
69
+ # 1. Try to stop containers using the current docker-compose.yaml
70
+ subprocess.run(
71
+ docker_compose_cmd + ["-f", compose_file, "down", "-v", "--remove-orphans"],
72
+ check=True,
73
+ )
74
+ except subprocess.CalledProcessError:
75
+ print(t("CURRENT_COMPOSE_STOP_FAILED"))
76
+
77
+ # 2. Attempt to clean up potentially existing containers, regardless of the success of the previous step
78
+ try:
79
+ # List all containers (including stopped ones)
80
+ result = subprocess.run(
81
+ ["docker", "ps", "-a", "--format", "{{.Names}}"],
82
+ check=True,
83
+ capture_output=True,
84
+ text=True,
85
+ )
86
+ all_containers = result.stdout.splitlines()
87
+
88
+ # 3. Filter out containers belonging to our project
89
+ project_containers = [
90
+ c
91
+ for c in all_containers
92
+ if c.startswith(container_prefix)
93
+ or any(c == f"{project_name}-{service}" for service in service_names)
94
+ ]
95
+
96
+ if project_containers:
97
+ # 4. Force stop and remove these containers
98
+ for container in project_containers:
99
+ try:
100
+ subprocess.run(["docker", "stop", container], check=True)
101
+ subprocess.run(["docker", "rm", "-f", container], check=True)
102
+ print(t("CONTAINER_STOPPED_AND_REMOVED", container=container))
103
+ except subprocess.CalledProcessError as e:
104
+ print(t("CONTAINER_STOP_ERROR", container=container, error=str(e)))
105
+
106
+ # 5. Clean up potentially leftover networks
107
+ try:
108
+ subprocess.run(["docker", "network", "prune", "-f"], check=True)
109
+ print(t("NETWORKS_PRUNED"))
110
+ except subprocess.CalledProcessError as e:
111
+ print(t("NETWORK_PRUNE_ERROR", error=str(e)))
112
+
113
+ except subprocess.CalledProcessError as e:
114
+ print(t("DOCKER_LIST_ERROR", error=str(e)))
115
+
116
+ print(t("CONTAINERS_STOPPED_AND_REMOVED"))
117
+
118
+
119
+ def run_docker_compose():
120
+ docker_compose_cmd = get_docker_command()
121
+ try:
122
+ print(t("STARTING_CONTAINERS_WITH_BUILD"))
123
+ subprocess.run(
124
+ docker_compose_cmd
125
+ + [
126
+ "-f",
127
+ os.path.join(TEMP_DIR, "docker-compose.yaml"),
128
+ "--env-file",
129
+ os.path.join(TEMP_DIR, ".env"),
130
+ "up",
131
+ "-d",
132
+ "--build",
133
+ ],
134
+ check=True,
135
+ )
136
+ print(t("CONTAINERS_STARTED"))
137
+ except subprocess.CalledProcessError as e:
138
+ print(t("DOCKER_ERROR", error=str(e)))
139
+ print(t("DOCKER_OUTPUT"))
140
+ print(e.output.decode() if e.output else "No output")
141
+ stop_and_remove_containers()
142
+ sys.exit(1)
143
+
144
+
145
+ def update_docker_compose_paths(project_root=PROJECT_ROOT):
146
+ docker_compose_path = os.path.join(TEMP_DIR, "docker-compose.yaml")
147
+ with open(docker_compose_path, "r") as file:
148
+ compose_data = yaml.safe_load(file)
149
+ for service in compose_data["services"].values():
150
+ if "build" in service:
151
+ if "context" in service["build"]:
152
+ if service["build"]["context"] == "..":
153
+ service["build"]["context"] = project_root
154
+ else:
155
+ service["build"]["context"] = os.path.join(
156
+ project_root, service["build"]["context"]
157
+ )
158
+ if "dockerfile" in service["build"]:
159
+ dockerfile_name = os.path.basename(service["build"]["dockerfile"])
160
+ service["build"]["dockerfile"] = os.path.join(TEMP_DIR, dockerfile_name)
161
+ with open(docker_compose_path, "w") as file:
162
+ yaml.dump(compose_data, file)
163
+ print(t("PATHS_UPDATED"))
164
+
165
+
166
+ def main():
167
+ if check_docker_install():
168
+ update_docker_compose_paths()
169
+ run_docker_compose()
170
+ else:
171
+ sys.exit(1)
172
+
173
+
174
+ if __name__ == "__main__":
175
+ main()
docker/msdl/i18n.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # msdl/i18n.py
2
+
3
+ import os
4
+ import i18n
5
+ import locale
6
+ from dotenv import load_dotenv, set_key, find_dotenv
7
+ from msdl.config import TRANSLATIONS_DIR, ENV_FILE_PATH
8
+ from pathlib import Path
9
+
10
+ # Load environment variables at module level
11
+ load_dotenv(ENV_FILE_PATH)
12
+
13
+ def get_env_variable(var_name, default=None):
14
+ return os.getenv(var_name, default)
15
+
16
+ def set_env_variable(var_name, value):
17
+ dotenv_file = find_dotenv(ENV_FILE_PATH)
18
+ set_key(dotenv_file, var_name, value)
19
+ # Reload environment variables after setting
20
+ os.environ[var_name] = value
21
+
22
+ def get_system_language():
23
+ try:
24
+ return locale.getlocale()[0].split("_")[0]
25
+ except:
26
+ return "en"
27
+
28
+ def get_available_languages():
29
+ """Get list of available language codes from translation files"""
30
+ translations_path = Path(TRANSLATIONS_DIR)
31
+ if not translations_path.exists():
32
+ return ["en"]
33
+ return [f.stem for f in translations_path.glob("*.yaml")]
34
+
35
+ def set_language(language_code):
36
+ """Set the interaction language and persist it to .env file"""
37
+ available_langs = get_available_languages()
38
+ if language_code not in available_langs:
39
+ print(f"Warning: Language '{language_code}' not available. Using 'en' instead.")
40
+ language_code = "en"
41
+
42
+ set_env_variable("LAUNCHER_INTERACTION_LANGUAGE", language_code)
43
+ i18n.set("locale", language_code)
44
+
45
+
46
+ def setup_i18n():
47
+ # Initialize i18n settings
48
+ i18n.load_path.append(TRANSLATIONS_DIR)
49
+ i18n.set("filename_format", "{locale}.{format}")
50
+ i18n.set("file_format", "yaml")
51
+
52
+ # Get language from environment
53
+ env_language = get_env_variable("LAUNCHER_INTERACTION_LANGUAGE")
54
+ if not env_language:
55
+ # If no language is set, use English as default without saving to .env
56
+ env_language = "en"
57
+
58
+ # Force reload translations
59
+ i18n.set("locale", None) # Clear current locale
60
+ i18n.set("locale", env_language) # Set new locale
61
+
62
+
63
+ def t(key, **kwargs):
64
+ return i18n.t(key, **kwargs)
docker/msdl/templates/backend/cloud_llm.dockerfile ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use Python 3.11.9 as the base image
2
+ FROM python:3.11.9-slim
3
+
4
+ # Set the working directory
5
+ WORKDIR /root
6
+
7
+ # Install Git
8
+ RUN apt-get update && apt-get install -y git && apt-get clean && rm -rf /var/lib/apt/lists/*
9
+
10
+ # Install specified dependency packages
11
+ RUN pip install --no-cache-dir \
12
+ duckduckgo_search==5.3.1b1 \
13
+ einops \
14
+ fastapi \
15
+ janus \
16
+ pyvis \
17
+ sse-starlette \
18
+ termcolor \
19
+ uvicorn \
20
+ griffe==0.48.0 \
21
+ python-dotenv \
22
+ lagent==0.5.0rc1
23
+
24
+ # Copy the mindsearch folder to the /root directory of the container
25
+ COPY mindsearch /root/mindsearch
docker/msdl/templates/backend/local_llm.dockerfile ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use openmmlab/lmdeploy:latest-cu12 as the base image
2
+ # Note: Before using this Dockerfile, you should visit https://hub.docker.com/r/openmmlab/lmdeploy/tags
3
+ # to select a base image that's compatible with your specific GPU architecture.
4
+ # The 'latest-cu12' tag is used here as an example, but you should choose the most
5
+ # appropriate tag for your setup (e.g., cu11 for CUDA 11, cu12 for CUDA 12, etc.)
6
+ FROM openmmlab/lmdeploy:latest-cu12
7
+
8
+ # Set the working directory
9
+ WORKDIR /root
10
+
11
+ # Install Git
12
+ RUN apt-get update && apt-get install -y git && apt-get clean && rm -rf /var/lib/apt/lists/*
13
+
14
+ # Install specified dependency packages
15
+ # Note: lmdeploy dependency is already included in the base image, no need to reinstall
16
+ RUN pip install --no-cache-dir \
17
+ duckduckgo_search==5.3.1b1 \
18
+ einops \
19
+ fastapi \
20
+ janus \
21
+ pyvis \
22
+ sse-starlette \
23
+ termcolor \
24
+ uvicorn \
25
+ griffe==0.48.0 \
26
+ python-dotenv \
27
+ lagent==0.5.0rc1
28
+
29
+ # Copy the mindsearch folder to the /root directory of the container
30
+ COPY mindsearch /root/mindsearch
docker/msdl/templates/docker-compose.yaml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ services:
2
+ backend:
3
+ container_name: mindsearch-backend
4
+ build:
5
+ context: .
6
+ dockerfile: backend.dockerfile
7
+ image: mindsearch/backend:latest
8
+ restart: unless-stopped
9
+ # Uncomment the following line to force using local build
10
+ # pull: never
11
+ ports:
12
+ - "8002:8002"
13
+ environment:
14
+ - PYTHONUNBUFFERED=1
15
+ # - OPENAI_API_KEY=${OPENAI_API_KEY:-}
16
+ - OPENAI_API_BASE=${OPENAI_API_BASE:-https://api.openai.com/v1}
17
+ # - QWEN_API_KEY=${QWEN_API_KEY:-}
18
+ # - SILICON_API_KEY=${SILICON_API_KEY:-}
19
+ command: python -m mindsearch.app --lang ${LANG:-cn} --model_format ${MODEL_FORMAT:-internlm_server}
20
+ volumes:
21
+ - /root/.cache:/root/.cache
22
+ deploy:
23
+ resources:
24
+ reservations:
25
+ devices:
26
+ - driver: nvidia
27
+ count: 1
28
+ capabilities: [gpu]
29
+ # GPU support explanation:
30
+ # The current configuration has been tested with NVIDIA GPUs. If you use other types of GPUs, you may need to adjust the configuration.
31
+ # For AMD GPUs, you can try using the ROCm driver by modifying the configuration as follows:
32
+ # deploy:
33
+ # resources:
34
+ # reservations:
35
+ # devices:
36
+ # - driver: amd
37
+ # count: 1
38
+ # capabilities: [gpu]
39
+ #
40
+ # For other GPU types, you may need to consult the respective Docker GPU support documentation.
41
+ # In theory, any GPU supported by PyTorch should be configurable here.
42
+ # If you encounter issues, try the following steps:
43
+ # 1. Ensure the correct GPU drivers are installed on the host
44
+ # 2. Check if your Docker version supports your GPU type
45
+ # 3. Install necessary GPU-related libraries in the Dockerfile
46
+ # 4. Adjust the deploy configuration here to match your GPU type
47
+ #
48
+ # Note: After changing GPU configuration, you may need to rebuild the image.
49
+
50
+ frontend:
51
+ container_name: mindsearch-frontend
52
+ build:
53
+ context: .
54
+ dockerfile: frontend.dockerfile
55
+ image: mindsearch/frontend:latest
56
+ restart: unless-stopped
57
+ # Uncomment the following line to force using local build
58
+ # pull: never
59
+ ports:
60
+ - "8080:8080"
61
+ depends_on:
62
+ - backend
docker/msdl/templates/frontend/react.dockerfile ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use Node.js 18 as the base image
2
+ FROM node:18-alpine
3
+
4
+ # Set the working directory
5
+ WORKDIR /app
6
+
7
+ # Copy package files first to leverage Docker cache
8
+ COPY frontend/React/package*.json ./
9
+
10
+ # Install dependencies
11
+ RUN npm install
12
+
13
+ # Copy source code after npm install to prevent unnecessary reinstalls
14
+ COPY frontend/React/ ./
15
+
16
+ # Modify vite.config.ts for Docker environment
17
+ # Because we use Docker Compose to manage the backend and frontend services, we can use the service name as the hostname
18
+ RUN sed -i '/server: {/,/},/c\
19
+ server: {\
20
+ host: "0.0.0.0",\
21
+ port: 8080,\
22
+ proxy: {\
23
+ "/solve": {\
24
+ target: "http://backend:8002",\
25
+ changeOrigin: true,\
26
+ },\
27
+ // "/solve": {\
28
+ // target: "https://mindsearch.openxlab.org.cn",\
29
+ // changeOrigin: true,\
30
+ // },\
31
+ },\
32
+ },' vite.config.ts
33
+
34
+ # Start the development server
35
+ CMD ["npm", "start"]
docker/msdl/translations/en.yaml ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ en:
2
+ SCRIPT_DIR: "Script directory: %{dir}"
3
+ PROJECT_ROOT: "Project root directory: %{dir}"
4
+ TEMP_DIR: "Temporary directory: %{dir}"
5
+ DOCKER_LAUNCHER_START: "Starting Docker launcher process"
6
+ DOCKER_LAUNCHER_COMPLETE: "Docker launcher process completed"
7
+ DIR_CREATED: "Directory created: %{dir}"
8
+ FILE_COPIED: "Copied %{file} to the temp directory"
9
+ FILE_NOT_FOUND: "Error: %{file} not found in the templates directory"
10
+ CONTAINERS_STOPPED: "Existing containers and volumes stopped and removed"
11
+ CONTAINER_STOP_ERROR: "Error stopping and removing containers (this may be normal if there were no running containers): %{error}"
12
+ BUILDING_IMAGES: "Starting to build Docker images..."
13
+ IMAGES_BUILT: "Docker images built successfully"
14
+ STARTING_CONTAINERS: "Starting Docker containers..."
15
+ STARTING_CONTAINERS_WITH_BUILD: "Starting to build and start Docker containers..."
16
+ CONTAINERS_STARTED: "Docker containers started successfully"
17
+ DOCKER_ERROR: "Error while building or starting Docker containers: %{error}"
18
+ DOCKER_OUTPUT: "Docker command output:"
19
+ DOCKER_INSTALLED: "Docker and Docker Compose installed correctly"
20
+ DOCKER_INSTALL_ERROR: "Error: Docker or Docker Compose may not be installed correctly: %{error}"
21
+ DOCKER_NOT_FOUND: "Error: Docker or Docker Compose command not found. Please ensure they are correctly installed and added to the PATH."
22
+ DOCKER_COMPOSE_NOT_FOUND: "Error: Docker Compose command not found. Please ensure it is correctly installed and added to the PATH."
23
+ PATHS_UPDATED: "Paths updated in docker-compose.yaml"
24
+ COMPOSE_FILE_CONTENT: "docker-compose.yaml file content:"
25
+ COMPOSE_FILE_NOT_FOUND: "Error: %{file} file not found"
26
+ COMPOSE_FILE_READ_ERROR: "Error reading docker-compose.yaml file: %{error}"
27
+ TERMINATION_SIGNAL: "Termination signal caught. Exiting gracefully..."
28
+ KEYBOARD_INTERRUPT: "Keyboard interrupt caught. Exiting gracefully..."
29
+ UNEXPECTED_ERROR: "An unexpected error occurred: %{error}"
30
+ BACKEND_LANGUAGE_CHOICE: "Select MindSearch backend language (default is cn)"
31
+ CHINESE: "Chinese (cn)"
32
+ ENGLISH: "English (en)"
33
+ MODEL_DEPLOYMENT_TYPE: "Select model deployment type:"
34
+ CLOUD_MODEL: "Cloud model"
35
+ LOCAL_MODEL: "Local model"
36
+ MODEL_FORMAT_CHOICE: "Select model format:"
37
+ CONFIRM_USE_EXISTING_API_KEY: "Do you want to use the existing %{ENV_VAR_NAME} API key?"
38
+ CONFIRM_OVERWRITE_EXISTING_API_KEY: "Do you want to overwrite the existing %{ENV_VAR_NAME} API key?"
39
+ PLEASE_INPUT_NEW_API_KEY: "Please enter a new %{ENV_VAR_NAME} API key:"
40
+ PLEASE_INPUT_NEW_API_KEY_FROM_ZERO: "Please enter a new %{ENV_VAR_NAME} API key:"
41
+ INVALID_API_KEY_FORMAT: "Invalid API key format"
42
+ RETRY_API_KEY_INPUT: "Retry API key input"
43
+ API_KEY_INPUT_CANCELLED: "API key input cancelled"
44
+ UNKNOWN_API_KEY_TYPE: "Unknown API key type: %{KEY_TYPE}"
45
+ UNKNOWN_MODEL_FORMAT: "Unknown model format: %{MODEL_FORMAT}"
46
+ INVALID_API_KEY: "Invalid API key: %{KEY_TYPE}"
47
+ API_KEY_SAVED: "API key for %{ENV_VAR_NAME} saved"
48
+ UNKNOWN_DOCKERFILE: "Unknown Dockerfile: %{dockerfile}"
49
+ UNKNOWN_MODEL_TYPE: "Unknown model type: %{model_type}"
50
+ BACKEND_DOCKERFILE_COPIED: "Backend Dockerfile copied from %{source_path} to %{dest_path}"
51
+ FRONTEND_DOCKERFILE_COPIED: "Frontend Dockerfile copied from %{source_path} to %{dest_path}"
52
+ TEMP_DIR_CREATED: "Temporary directory created at %{dir}"
53
+ CURRENT_COMPOSE_STOP_FAILED: "Failed to stop containers using the current docker-compose.yaml"
54
+ CONTAINER_STOPPED_AND_REMOVED: "Container %{container} stopped and removed"
55
+ NETWORKS_PRUNED: "Corresponding Docker networks pruned"
56
+ NETWORK_PRUNE_ERROR: "Error pruning corresponding Docker networks: %{error}"
57
+ DOCKER_LIST_ERROR: "Error listing Docker containers: %{error}"
58
+ CONTAINERS_STOPPED_AND_REMOVED: "Docker containers stopped and removed"
59
+ CLI_DESCRIPTION: "MindSearch Docker Launcher - A tool to manage MindSearch docker containers"
60
+ LANGUAGE_HELP: "Set the msdl tool interface language (e.g. en, zh_CN)"
61
+ CONFIG_LANGUAGE_HELP: "Show language configuration prompt"
62
+ LANGUAGE_NOT_AVAILABLE: "Warning: Language '%{lang}' not available. Using English instead."
63
+ SELECT_INTERFACE_LANGUAGE: "Select msdl tool interface language"
64
+ SELECT_BACKEND_LANGUAGE: "Select MindSearch backend language (default is cn)"
65
+ LANGUAGE_CHANGED_RESTARTING: "Language changed, restarting msdl..."
66
+ SELECT_SEARCH_ENGINE: "Select search engine:"
67
+ NO_API_KEY_NEEDED: "No API key needed"
68
+ API_KEY_REQUIRED: "API key required"
69
+ SEARCH_ENGINE_GOOGLE: "Google Search"
70
+ SEARCH_ENGINE_BING: "Bing Search"
71
+ SEARCH_ENGINE_DUCKDUCKGO: "DuckDuckGo Search"
72
+ SEARCH_ENGINE_BRAVE: "Brave Search"
73
+ SEARCH_ENGINE_TENCENT: "Tencent Search"
74
+ TENCENT_ID_REQUIRED: "Please enter your Tencent Search Secret ID"
75
+ TENCENT_KEY_REQUIRED: "Please enter your Tencent Search Secret Key"
76
+ WEB_SEARCH_KEY_REQUIRED: "Please enter your Web Search API Key"
77
+ SEARCH_ENGINE_CONFIGURED: "Search engine %{engine} configured successfully"
docker/msdl/translations/zh_CN.yaml ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ zh_CN:
2
+ SCRIPT_DIR: "脚本目录:%{dir}"
3
+ PROJECT_ROOT: "项目根目录:%{dir}"
4
+ TEMP_DIR: "临时目录:%{dir}"
5
+ DOCKER_LAUNCHER_START: "开始 Docker 启动器流程"
6
+ DOCKER_LAUNCHER_COMPLETE: "Docker 启动器流程完成"
7
+ DIR_CREATED: "创建目录:%{dir}"
8
+ FILE_COPIED: "已复制 %{file} 到 temp 目录"
9
+ FILE_NOT_FOUND: "错误:%{file} 在 templates 目录中不存在"
10
+ CONTAINERS_STOPPED: "已停止并删除现有容器和卷"
11
+ CONTAINER_STOP_ERROR: "停止和删除容器时出错(这可能是正常的,如果没有正在运行的容器):%{error}"
12
+ BUILDING_IMAGES: "开始构建Docker镜像..."
13
+ IMAGES_BUILT: "Docker镜像构建成功"
14
+ STARTING_CONTAINERS: "开始启动Docker容器..."
15
+ STARTING_CONTAINERS_WITH_BUILD: "开始构建并启动Docker容器..."
16
+ CONTAINERS_STARTED: "Docker 容器已成功启动"
17
+ DOCKER_ERROR: "构建或启动 Docker 容器时出错:%{error}"
18
+ DOCKER_OUTPUT: "Docker 命令输出:"
19
+ DOCKER_INSTALLED: "Docker 和 Docker Compose 安装正确"
20
+ DOCKER_INSTALL_ERROR: "错误:Docker 或 Docker Compose 可能没有正确安装:%{error}"
21
+ DOCKER_NOT_FOUND: "错误:Docker 或 Docker Compose 命令未找到。请确保它们已正确安装并添加到PATH中。"
22
+ DOCKER_COMPOSE_NOT_FOUND: "错误:Docker Compose 命令未找到。请确保它已正确安装并添加到PATH中。"
23
+ PATHS_UPDATED: "已更新 docker-compose.yaml 中的路径"
24
+ COMPOSE_FILE_CONTENT: "docker-compose.yaml 文件内容:"
25
+ COMPOSE_FILE_NOT_FOUND: "错误:%{file} 文件不存在"
26
+ COMPOSE_FILE_READ_ERROR: "读取 docker-compose.yaml 文件时出错:%{error}"
27
+ TERMINATION_SIGNAL: "捕获到终止信号。正在优雅地退出..."
28
+ KEYBOARD_INTERRUPT: "捕获到键盘中断。正在优雅地退出..."
29
+ UNEXPECTED_ERROR: "发生未预期的错误:%{error}"
30
+ BACKEND_LANGUAGE_CHOICE: "选择 MindSearch 后端语言(默认为中文)"
31
+ SELECT_INTERFACE_LANGUAGE: "选择 msdl 工具界面语言"
32
+ SELECT_BACKEND_LANGUAGE: "选择 MindSearch 后端语言(默认为中文)"
33
+ CHINESE: "中文 (cn)"
34
+ ENGLISH: "英文 (en)"
35
+ MODEL_DEPLOYMENT_TYPE: "选择模型部署类型:"
36
+ CLOUD_MODEL: "云端模型"
37
+ LOCAL_MODEL: "本地模型"
38
+ MODEL_FORMAT_CHOICE: "选择模型格式:"
39
+ CONFIRM_USE_EXISTING_API_KEY: "是否使用现有的 %{ENV_VAR_NAME} API 密钥?"
40
+ CONFIRM_OVERWRITE_EXISTING_API_KEY: "是否覆盖现有的 %{ENV_VAR_NAME} API 密钥?"
41
+ PLEASE_INPUT_NEW_API_KEY: "请输入新的 %{ENV_VAR_NAME} API 密钥:"
42
+ PLEASE_INPUT_NEW_API_KEY_FROM_ZERO: "请输入新的 %{ENV_VAR_NAME} API 密钥:"
43
+ INVALID_API_KEY_FORMAT: "无效的 API 密钥格式"
44
+ RETRY_API_KEY_INPUT: "重试 API 密钥输入"
45
+ API_KEY_INPUT_CANCELLED: "API 密钥输入已取消"
46
+ UNKNOWN_API_KEY_TYPE: "未知的 API 密钥类型:%{KEY_TYPE}"
47
+ UNKNOWN_MODEL_FORMAT: "未知的模型格式:%{MODEL_FORMAT}"
48
+ INVALID_API_KEY: "无效的 API 密钥:%{KEY_TYPE}"
49
+ API_KEY_SAVED: "%{ENV_VAR_NAME} 的 API 密钥已保存"
50
+ UNKNOWN_DOCKERFILE: "未知的 Dockerfile:%{dockerfile}"
51
+ UNKNOWN_MODEL_TYPE: "未知的模型类型:%{model_type}"
52
+ BACKEND_DOCKERFILE_COPIED: "后端 Dockerfile 已经从 %{source_path} 复制为 %{dest_path}"
53
+ FRONTEND_DOCKERFILE_COPIED: "前端 Dockerfile 已经从 %{source_path} 复制为 %{dest_path}"
54
+ TEMP_DIR_CREATED: "已在 %{dir} 创建临时目录"
55
+ CURRENT_COMPOSE_STOP_FAILED: "当前的容器停止失败"
56
+ CONTAINER_STOPPED_AND_REMOVED: "容器 %{container} 已停止并删除"
57
+ NETWORKS_PRUNED: "已清理对应的Docker网络"
58
+ NETWORK_PRUNE_ERROR: "清理对应的Docker网络时出错:%{error}"
59
+ DOCKER_LIST_ERROR: "列出 Docker 容器时出错:%{error}"
60
+ CONTAINERS_STOPPED_AND_REMOVED: "已停止并删除 Docker 容器"
61
+ CLI_DESCRIPTION: "MindSearch Docker 启动器 - 用于管理 MindSearch docker 容器的工具"
62
+ LANGUAGE_HELP: "设置 msdl 工具界面语言(例如:en, zh_CN)"
63
+ CONFIG_LANGUAGE_HELP: "显示语言配置提示"
64
+ LANGUAGE_NOT_AVAILABLE: "警告:语言'%{lang}'不可用。使用英语作为替代。"
65
+ LANGUAGE_CHANGED_RESTARTING: "语言已更改,正在重启 msdl..."
66
+ SELECT_SEARCH_ENGINE: "选择搜索引擎:"
67
+ NO_API_KEY_NEEDED: "无需 API 密钥"
68
+ API_KEY_REQUIRED: "需要 API 密钥"
69
+ SEARCH_ENGINE_DUCKDUCKGO: "DuckDuckGo 搜索"
70
+ SEARCH_ENGINE_BING: "必应搜索"
71
+ SEARCH_ENGINE_BRAVE: "Brave 搜索"
72
+ SEARCH_ENGINE_GOOGLE: "Google 搜索"
73
+ SEARCH_ENGINE_TENCENT: "腾讯搜索"
74
+ TENCENT_ID_REQUIRED: "请输入您的腾讯搜索 Secret ID"
75
+ TENCENT_KEY_REQUIRED: "请输入您的腾讯搜索 Secret Key"
76
+ WEB_SEARCH_KEY_REQUIRED: "请输入您的网页搜索 API 密钥"
77
+ SEARCH_ENGINE_CONFIGURED: "搜索引擎 %{engine} 配置成功"
docker/msdl/user_interaction.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from InquirerPy import inquirer
2
+ import sys
3
+ import os
4
+ from pathlib import Path
5
+
6
+ from msdl.config import (
7
+ CLOUD_LLM_DOCKERFILE,
8
+ LOCAL_LLM_DOCKERFILE,
9
+ )
10
+ from msdl.i18n import (
11
+ t,
12
+ get_available_languages,
13
+ set_language,
14
+ get_env_variable,
15
+ )
16
+ from msdl.utils import (
17
+ clean_api_key,
18
+ get_model_formats,
19
+ get_existing_api_key,
20
+ save_api_key_to_env,
21
+ validate_api_key,
22
+ )
23
+
24
+ SEARCH_ENGINES = {
25
+ "DuckDuckGoSearch": {
26
+ "name": "DuckDuckGo",
27
+ "key": "DUCKDUCKGO",
28
+ "requires_key": False,
29
+ "env_var": None
30
+ },
31
+ "BingSearch": {
32
+ "name": "Bing",
33
+ "key": "BING",
34
+ "requires_key": True,
35
+ "env_var": "BING_SEARCH_API_KEY"
36
+ },
37
+ "BraveSearch": {
38
+ "name": "Brave",
39
+ "key": "BRAVE",
40
+ "requires_key": True,
41
+ "env_var": "BRAVE_SEARCH_API_KEY"
42
+ },
43
+ "GoogleSearch": {
44
+ "name": "Google Serper",
45
+ "key": "GOOGLE",
46
+ "requires_key": True,
47
+ "env_var": "GOOGLE_SERPER_API_KEY"
48
+ },
49
+ "TencentSearch": {
50
+ "name": "Tencent",
51
+ "key": "TENCENT",
52
+ "requires_key": True,
53
+ "env_vars": ["TENCENT_SEARCH_SECRET_ID", "TENCENT_SEARCH_SECRET_KEY"]
54
+ }
55
+ }
56
+
57
+ def get_language_choice():
58
+ """Get user's language preference"""
59
+ def _get_language_options():
60
+ available_langs = get_available_languages()
61
+ lang_choices = {
62
+ "en": "English",
63
+ "zh_CN": "中文"
64
+ }
65
+ return [{"name": f"{lang_choices.get(lang, lang)}", "value": lang} for lang in available_langs]
66
+
67
+ current_lang = get_env_variable("LAUNCHER_INTERACTION_LANGUAGE")
68
+ if not current_lang:
69
+ lang_options = _get_language_options()
70
+ language = inquirer.select(
71
+ message=t("SELECT_INTERFACE_LANGUAGE"),
72
+ choices=lang_options,
73
+ default="en"
74
+ ).execute()
75
+
76
+ if language:
77
+ set_language(language)
78
+ sys.stdout.flush()
79
+ restart_program()
80
+
81
+ def get_backend_language():
82
+ """Get user's backend language preference"""
83
+ return inquirer.select(
84
+ message=t("SELECT_BACKEND_LANGUAGE"),
85
+ choices=[
86
+ {"name": t("CHINESE"), "value": "cn"},
87
+ {"name": t("ENGLISH"), "value": "en"},
88
+ ],
89
+ default="cn",
90
+ ).execute()
91
+
92
+ def get_model_choice():
93
+ """Get user's model deployment type preference"""
94
+ model_deployment_type = [
95
+ {
96
+ "name": t("CLOUD_MODEL"),
97
+ "value": CLOUD_LLM_DOCKERFILE
98
+ },
99
+ {
100
+ "name": t("LOCAL_MODEL"),
101
+ "value": LOCAL_LLM_DOCKERFILE
102
+ },
103
+ ]
104
+
105
+ return inquirer.select(
106
+ message=t("MODEL_DEPLOYMENT_TYPE"),
107
+ choices=model_deployment_type,
108
+ ).execute()
109
+
110
+ def get_model_format(model):
111
+ """Get user's model format preference"""
112
+ model_formats = get_model_formats(model)
113
+ return inquirer.select(
114
+ message=t("MODEL_FORMAT_CHOICE"),
115
+ choices=[{
116
+ "name": format,
117
+ "value": format
118
+ } for format in model_formats],
119
+ ).execute()
120
+
121
+ def _handle_api_key_input(env_var_name, message=None):
122
+ """Handle API key input and validation for a given environment variable"""
123
+ if message is None:
124
+ message = t("PLEASE_INPUT_NEW_API_KEY", ENV_VAR_NAME=env_var_name)
125
+ print(message)
126
+
127
+ while True:
128
+ api_key = inquirer.secret(
129
+ message=t("PLEASE_INPUT_NEW_API_KEY_FROM_ZERO", ENV_VAR_NAME=env_var_name)
130
+ ).execute()
131
+ cleaned_api_key = clean_api_key(api_key)
132
+
133
+ try:
134
+ save_api_key_to_env(env_var_name, cleaned_api_key, t)
135
+ break
136
+ except ValueError as e:
137
+ print(str(e))
138
+ retry = inquirer.confirm(
139
+ message=t("RETRY_API_KEY_INPUT"), default=True
140
+ ).execute()
141
+ if not retry:
142
+ print(t("API_KEY_INPUT_CANCELLED"))
143
+ sys.exit(1)
144
+
145
+ def handle_api_key_input(model, model_format):
146
+ """Handle API key input and validation"""
147
+ if model != CLOUD_LLM_DOCKERFILE:
148
+ return
149
+
150
+ env_var_name = {
151
+ "internlm_silicon": "SILICON_API_KEY",
152
+ "gpt4": "OPENAI_API_KEY",
153
+ "qwen": "QWEN_API_KEY",
154
+ }.get(model_format)
155
+
156
+ existing_api_key = get_existing_api_key(env_var_name)
157
+
158
+ if existing_api_key:
159
+ use_existing = inquirer.confirm(
160
+ message=t("CONFIRM_USE_EXISTING_API_KEY", ENV_VAR_NAME=env_var_name),
161
+ default=True,
162
+ ).execute()
163
+
164
+ if use_existing:
165
+ return
166
+
167
+ print(t("CONFIRM_OVERWRITE_EXISTING_API_KEY", ENV_VAR_NAME=env_var_name))
168
+
169
+ # Delegate to the shared input helper so an invalid key can be re-entered in a loop
+ _handle_api_key_input(env_var_name)
181
+
182
+ def get_search_engine():
183
+ """Get user's preferred search engine and handle API key if needed"""
184
+ search_engine = inquirer.select(
185
+ message=t("SELECT_SEARCH_ENGINE"),
186
+ choices=[{
187
+ "name": f"{t(f'SEARCH_ENGINE_{info["key"]}')} ({t('NO_API_KEY_NEEDED') if not info['requires_key'] else t('API_KEY_REQUIRED')})",
188
+ "value": engine
189
+ } for engine, info in SEARCH_ENGINES.items()],
190
+ ).execute()
191
+
192
+ engine_info = SEARCH_ENGINES[search_engine]
193
+
194
+ if engine_info['requires_key']:
195
+ if search_engine == "TencentSearch":
196
+ # Handle Tencent's special case with two keys
197
+ for env_var in engine_info['env_vars']:
198
+ is_id = "ID" in env_var
199
+ message = t("TENCENT_ID_REQUIRED") if is_id else t("TENCENT_KEY_REQUIRED")
200
+ existing_key = get_existing_api_key(env_var)
201
+ if existing_key:
202
+ use_existing = inquirer.confirm(
203
+ message=t("CONFIRM_USE_EXISTING_API_KEY", ENV_VAR_NAME=env_var),
204
+ default=True,
205
+ ).execute()
206
+ if not use_existing:
207
+ _handle_api_key_input(env_var, message)
208
+ else:
209
+ _handle_api_key_input(env_var, message)
210
+ else:
211
+ # Handle standard case with single WEB_SEARCH_API_KEY
212
+ env_var = engine_info['env_var']
213
+ existing_key = get_existing_api_key(env_var)
214
+ if existing_key:
215
+ use_existing = inquirer.confirm(
216
+ message=t("CONFIRM_USE_EXISTING_API_KEY", ENV_VAR_NAME=env_var),
217
+ default=True,
218
+ ).execute()
219
+ if not use_existing:
220
+ _handle_api_key_input(env_var, t("WEB_SEARCH_KEY_REQUIRED"))
221
+ else:
222
+ _handle_api_key_input(env_var, t("WEB_SEARCH_KEY_REQUIRED"))
223
+
224
+ print(t("SEARCH_ENGINE_CONFIGURED", engine=engine_info['name']))
225
+ return search_engine
226
+
227
+ def restart_program():
228
+ """Restart the current program with the same arguments"""
229
+ print(t("LANGUAGE_CHANGED_RESTARTING"))
230
+ python = sys.executable
231
+ os.execl(python, python, *sys.argv)
232
+
233
+ def get_user_choices():
234
+ """Get all user choices in a single function"""
235
+ # Get language preference
236
+ get_language_choice()
237
+
238
+ # Get backend language
239
+ backend_language = get_backend_language()
240
+
241
+ # Get model choice
242
+ model = get_model_choice()
243
+
244
+ # Get model format
245
+ model_format = get_model_format(model)
246
+
247
+ # Get search engine choice
248
+ search_engine = get_search_engine()
249
+
250
+ # Handle API key if needed
251
+ handle_api_key_input(model, model_format)
252
+
253
+ return backend_language, model, model_format, search_engine
docker/msdl/utils.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # msdl/utils.py
2
+
3
+ import os
4
+ import re
5
+ import shutil
6
+ import sys
7
+ import yaml
8
+ from functools import lru_cache
9
+ from pathlib import Path
10
+ from msdl.config import (
11
+ BACKEND_DOCKERFILE_DIR,
12
+ CLOUD_LLM_DOCKERFILE,
13
+ FRONTEND_DOCKERFILE_DIR,
14
+ LOCAL_LLM_DOCKERFILE,
15
+ PACKAGE_DIR,
16
+ REACT_DOCKERFILE,
17
+ TEMP_DIR,
18
+ ENV_FILE_PATH,
19
+ )
20
+ from msdl.i18n import t
21
+
22
+
23
+ @lru_cache(maxsize=None)
24
+ def get_env_variable(var_name, default=None):
25
+ if ENV_FILE_PATH.exists():
26
+ with ENV_FILE_PATH.open("r") as env_file:
27
+ for line in env_file:
28
+ if line.startswith(f"{var_name}="):
29
+ return line.strip().split("=", 1)[1]
30
+ return os.getenv(var_name, default)
31
+
32
+
33
+ @lru_cache(maxsize=None)
34
+ def get_existing_api_key(env_var_name):
35
+ env_vars = read_env_file()
36
+ return env_vars.get(env_var_name)
37
+
38
+
39
+ @lru_cache(maxsize=None)
40
+ def read_env_file():
41
+ env_vars = {}
42
+ if ENV_FILE_PATH.exists():
43
+ with ENV_FILE_PATH.open("r") as env_file:
44
+ for line in env_file:
45
+ if "=" in line and not line.strip().startswith("#"):
46
+ key, value = line.strip().split("=", 1)
47
+ env_vars[key] = value.strip('"').strip("'")
48
+ return env_vars
49
+
50
+
51
+ def clean_api_key(api_key):
52
+ cleaned_key = api_key.strip()
53
+ cleaned_key = re.sub(r"\s+", "", cleaned_key)
54
+ return cleaned_key
55
+
56
+
57
+ @lru_cache(maxsize=None)
58
+ def validate_api_key(api_key, key_type, t):
59
+ basic_pattern = r"^sk-[A-Za-z0-9]+$"
60
+ web_search_pattern = r"^[A-Za-z0-9_\-\.]+$"
61
+ tencent_pattern = r"^[A-Za-z0-9]+$"
62
+
63
+ validation_rules = {
64
+ # Model API Keys
65
+ "SILICON_API_KEY": basic_pattern,
66
+ "OPENAI_API_KEY": basic_pattern,
67
+ "QWEN_API_KEY": basic_pattern,
68
+ # Search Engine API Keys
69
+ "BING_SEARCH_API_KEY": web_search_pattern,
70
+ "BRAVE_SEARCH_API_KEY": web_search_pattern,
71
+ "GOOGLE_SERPER_API_KEY": web_search_pattern,
72
+ "TENCENT_SEARCH_SECRET_ID": tencent_pattern,
73
+ "TENCENT_SEARCH_SECRET_KEY": tencent_pattern,
74
+ # Legacy support
75
+ "WEB_SEARCH_API_KEY": web_search_pattern,
76
+ }
77
+
78
+ if key_type not in validation_rules:
79
+ raise ValueError(t("UNKNOWN_API_KEY_TYPE", KEY_TYPE=key_type))
80
+
81
+ pattern = validation_rules[key_type]
82
+ return re.match(pattern, api_key) is not None
83
+
84
+
85
+ def save_api_key_to_env(key_type, api_key, t):
86
+ """Save API key to .env file
87
+
88
+ Args:
89
+ key_type: Environment variable name or model format
90
+ api_key: API key value
91
+ t: Translation function
92
+ """
93
+ # Convert model format to env var name if needed
94
+ env_var_name = {
95
+ "internlm_silicon": "SILICON_API_KEY",
96
+ "gpt4": "OPENAI_API_KEY",
97
+ "qwen": "QWEN_API_KEY",
98
+ }.get(key_type, key_type) # If not a model format, use key_type directly
99
+
100
+ if not validate_api_key(api_key, env_var_name, t):
101
+ raise ValueError(t("INVALID_API_KEY", KEY_TYPE=env_var_name))
102
+
103
+ env_vars = read_env_file()
104
+ env_vars[env_var_name] = api_key
105
+
106
+ with ENV_FILE_PATH.open("w") as env_file:
107
+ for key, value in env_vars.items():
108
+ env_file.write(f"{key}={value}\n")
109
+
110
+ print(t("API_KEY_SAVED", ENV_VAR_NAME=env_var_name))
111
+
112
+
113
+ def ensure_directory(path):
114
+ path = Path(path)
115
+ if not path.exists():
116
+ path.mkdir(parents=True, exist_ok=True)
117
+ print(t("DIR_CREATED", dir=path))
118
+
119
+
120
+ def copy_templates_to_temp(template_files):
121
+ template_dir = PACKAGE_DIR / "templates"
122
+
123
+ ensure_directory(TEMP_DIR)
124
+
125
+ for filename in template_files:
126
+ src = template_dir / filename
127
+ dst = TEMP_DIR / filename
128
+ if src.exists():
129
+ shutil.copy2(src, dst)
130
+ print(t("FILE_COPIED", file=filename))
131
+ else:
132
+ print(t("FILE_NOT_FOUND", file=filename))
133
+ sys.exit(1)
134
+
135
+
136
+ def modify_docker_compose(model_type, backend_language, model_format, search_engine):
137
+ """Modify docker-compose.yaml based on user choices"""
138
+ docker_compose_path = os.path.join(TEMP_DIR, "docker-compose.yaml")
139
+ with open(docker_compose_path, "r") as file:
140
+ compose_data = yaml.safe_load(file)
141
+
142
+ # Set the name of the project
143
+ compose_data["name"] = "mindsearch"
144
+
145
+ # Configure backend service
146
+ backend_service = compose_data["services"]["backend"]
147
+
148
+ # Set environment variables
149
+ if "environment" not in backend_service:
150
+ backend_service["environment"] = []
151
+
152
+ # Add or update environment variables
153
+ env_vars = {
154
+ "LANG": backend_language,
155
+ "MODEL_FORMAT": model_format,
156
+ "SEARCH_ENGINE": search_engine
157
+ }
158
+
159
+ # Ensure .env file is included
160
+ if "env_file" not in backend_service:
161
+ backend_service["env_file"] = [".env"]
162
+ elif ".env" not in backend_service["env_file"]:
163
+ backend_service["env_file"].append(".env")
164
+
165
+ # Set command with all parameters
166
+ command = f"python -m mindsearch.app --lang {backend_language} --model_format {model_format} --search_engine {search_engine}"
167
+ backend_service["command"] = command
168
+
169
+ # Convert environment variables to docker-compose format
170
+ backend_service["environment"] = [
171
+ f"{key}={value}" for key, value in env_vars.items()
172
+ ]
173
+
174
+ # Configure based on model type
175
+ if model_type == CLOUD_LLM_DOCKERFILE:
176
+ if "deploy" in backend_service:
177
+ del backend_service["deploy"]
178
+ # Remove volumes for cloud deployment
179
+ if "volumes" in backend_service:
180
+ del backend_service["volumes"]
181
+ elif model_type == LOCAL_LLM_DOCKERFILE:
182
+ # Add GPU configuration for local deployment
183
+ if "deploy" not in backend_service:
184
+ backend_service["deploy"] = {
185
+ "resources": {
186
+ "reservations": {
187
+ "devices": [
188
+ {"driver": "nvidia", "count": 1, "capabilities": ["gpu"]}
189
+ ]
190
+ }
191
+ }
192
+ }
193
+ # Add volume for cache in local deployment
194
+ backend_service["volumes"] = ["/root/.cache:/root/.cache"]
195
+ else:
196
+ raise ValueError(t("UNKNOWN_DOCKERFILE", dockerfile=model_type))
197
+
198
+ # Save the modified docker-compose.yaml
199
+ with open(docker_compose_path, "w") as file:
200
+ yaml.dump(compose_data, file)
201
+
202
+ print(
203
+ t(
204
+ "docker_compose_updated",
205
+ mode=(t("CLOUD") if model_type == CLOUD_LLM_DOCKERFILE else t("LOCAL")),
206
+ format=model_format,
207
+ )
208
+ )
209
+
210
+
211
+ def get_model_formats(model_type):
212
+ if model_type == CLOUD_LLM_DOCKERFILE:
213
+ return ["internlm_silicon", "qwen", "gpt4"]
214
+ elif model_type == LOCAL_LLM_DOCKERFILE:
215
+ return ["internlm_server", "internlm_client", "internlm_hf"]
216
+ else:
217
+ raise ValueError(t("UNKNOWN_MODEL_TYPE", model_type=model_type))
218
+
219
+
220
+ def copy_backend_dockerfile(choice):
221
+ """Copy backend Dockerfile to temp directory based on user choice"""
222
+ source_file = Path(BACKEND_DOCKERFILE_DIR) / choice
223
+ dest_file = "backend.dockerfile"
224
+ source_path = PACKAGE_DIR / "templates" / source_file
225
+ dest_path = TEMP_DIR / dest_file
226
+
227
+ if not source_path.exists():
228
+ raise FileNotFoundError(t("FILE_NOT_FOUND", file=source_file))
229
+
230
+ dest_path.parent.mkdir(parents=True, exist_ok=True)
231
+ dest_path.write_text(source_path.read_text())
232
+ print(
233
+ t(
234
+ "BACKEND_DOCKERFILE_COPIED",
235
+ source_path=str(source_path),
236
+ dest_path=str(dest_path),
237
+ ))
238
+
239
+
240
+ def copy_frontend_dockerfile():
241
+ """Copy frontend Dockerfile to temp directory"""
242
+ source_file = Path(FRONTEND_DOCKERFILE_DIR) / REACT_DOCKERFILE
243
+ dest_file = "frontend.dockerfile"
244
+ source_path = PACKAGE_DIR / "templates" / source_file
245
+ dest_path = TEMP_DIR / dest_file
246
+
247
+ if not source_path.exists():
248
+ raise FileNotFoundError(t("FILE_NOT_FOUND", file=source_file))
249
+
250
+ dest_path.parent.mkdir(parents=True, exist_ok=True)
251
+ dest_path.write_text(source_path.read_text())
252
+ print(
253
+ t(
254
+ "FRONTEND_DOCKERFILE_COPIED",
255
+ source_path=str(source_path),
256
+ dest_path=str(dest_path),
257
+ ))
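Taken together, the helpers in this module stage a temporary Docker build context: the compose template is copied into the temp directory, the backend and frontend Dockerfiles are selected, and docker-compose.yaml is rewritten for the chosen model and search engine. The sketch below shows one way they might be chained for a cloud-LLM deployment; the `msdl.config` import and the `"en"`/`"DuckDuckGoSearch"` values are illustrative assumptions, not the project's actual entry point (the real launcher in `msdl.__main__` collects these choices interactively and loads the i18n strings first).

```python
# Illustrative wiring of the helpers above. The msdl.config import and the
# constant name are assumptions; the real launcher (msdl.__main__) gathers
# these choices interactively and initialises translations before calling them.
from msdl.config import CLOUD_LLM_DOCKERFILE
from msdl.utils import (
    copy_templates_to_temp,
    copy_backend_dockerfile,
    copy_frontend_dockerfile,
    get_model_formats,
    modify_docker_compose,
)


def prepare_cloud_build_context():
    # Copy the shared compose template into the temporary build directory.
    copy_templates_to_temp(["docker-compose.yaml"])
    # Select the backend Dockerfile for a cloud-hosted LLM plus the React frontend.
    copy_backend_dockerfile(CLOUD_LLM_DOCKERFILE)
    copy_frontend_dockerfile()
    # Rewrite docker-compose.yaml for an English UI and the "gpt4" model format;
    # "DuckDuckGoSearch" is only an example search-engine value.
    model_format = get_model_formats(CLOUD_LLM_DOCKERFILE)[-1]  # "gpt4"
    modify_docker_compose(CLOUD_LLM_DOCKERFILE, "en", model_format, "DuckDuckGoSearch")
```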
docker/setup.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from setuptools import find_packages, setup
2
+
3
+ setup(
4
+ name="msdl",
5
+ version="0.1.1",
6
+ description="MindSearch Docker Launcher",
7
+ packages=find_packages(),
8
+ python_requires=">=3.7",
9
+ install_requires=[
10
+ "pyyaml>=6.0",
11
+ "python-i18n>=0.3.9",
12
+ "inquirerpy>=0.3.4",
13
+ "python-dotenv>=0.19.1",
14
+ ],
15
+ entry_points={
16
+ "console_scripts": [
17
+ "msdl=msdl.__main__:main",
18
+ ],
19
+ },
20
+ include_package_data=True,
21
+ package_data={
22
+ "msdl": ["translations/*.yaml", "templates/*"],
23
+ },
24
+ )
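With this setup.py, the launcher can be installed in editable mode from the docker/ directory (for example `pip install -e .`); the `console_scripts` entry point then exposes a plain `msdl` command that invokes `msdl.__main__:main`.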
frontend/React/.gitignore ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
3
+
4
+ # dependencies
5
+ /node_modules
6
+ /.pnp
7
+ .pnp.js
8
+
9
+ # testing
10
+ /coverage
11
+
12
+ # production
13
+ /build
14
+
15
+ # misc
16
+ .DS_Store
17
+ .env.local
18
+ .env.development.local
19
+ .env.test.local
20
+ .env.production.local
21
+
22
+ npm-debug.log*
23
+ yarn-debug.log*
24
+ yarn-error.log*
25
+
frontend/React/.prettierignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ dist
2
+ deploy
3
+ values
4
+ node_modules
5
+ .gitignore
6
+ .prettierignore
7
+ .husky
frontend/React/.prettierrc.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "printWidth": 120,
3
+ "tabWidth": 4,
4
+ "singleQuote": true,
5
+ "quoteProps": "as-needed",
6
+ "bracketSpacing": true
7
+ }
frontend/React/README.md ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Notice
2
+ - If you leave the page (i.e., the page becomes hidden) and come back, the SSE connection will reconnect.
3
+ - The project requires Node.js version >= 18.0.0.
4
+
5
+ # Prepare your frontend development environment
6
+ [Node.js](https://nodejs.org/en)® is a free, open-source, cross-platform JavaScript runtime environment that lets developers create servers, web apps, command line tools and scripts.
7
+
8
+ # Node.js Installation Guide (Windows, Linux, macOS)
9
+ ## Windows Installation
10
+ - Step 1: Download Node.js
11
+
12
+ 1. Open your web browser and visit the [Node.js official website](https://nodejs.org/en).
13
+
14
+ 2. Navigate to the "Downloads" section.
15
+
16
+ 3. Select the desired version (LTS recommended for long-term stability). As of August 2024, the latest LTS version might be v20.x.x.
17
+
18
+ 4. Click on the "Windows Installer (.msi)" link to download the installation package.
19
+
20
+ - Step 2: Install Node.js
21
+
22
+ 1. Double-click the downloaded .msi file to start the installation wizard.
23
+
24
+ 2. Click "Next" to proceed.
25
+
26
+ 3. Read and accept the license agreement by checking the "I accept the terms in the License Agreement" box.
27
+
28
+ 4. Click "Next" again and select the installation directory. It's recommended to change the default location to avoid installing in the C drive.
29
+
30
+ 5. Continue clicking "Next" to use the default settings until you reach the "Install" button.
31
+
32
+ 6. Click "Install" to start the installation process.
33
+
34
+ 7. Wait for the installation to complete and click "Finish" to exit the installation wizard.
35
+
36
+ - Step 3: Verify Installation
37
+ 1. Open the Command Prompt (cmd) by pressing `Win + R`, typing `cmd`, and pressing Enter.
38
+ 2. Type `node -v` and press Enter. You should see the installed Node.js version displayed.
39
+ 3. Type `npm -v` and press Enter to verify the installed npm version. npm is the package manager that comes bundled with Node.js.
40
+
41
+ - Step 4: Configure npm Global Path (Optional)
42
+ If you want to change the default global installation path for npm, follow these steps:
43
+
44
+ 1. Open the Command Prompt (cmd) as an administrator.
45
+
46
+ 2. Navigate to your Node.js installation directory (e.g., C:\Program Files\nodejs).
47
+
48
+ 3. Create two new folders named node_global and node_cache.
49
+
50
+ 4. Run the following commands to set the new paths:
51
+
52
+ ```bash
53
+ npm config set prefix "C:\Program Files\nodejs\node_global"
54
+ npm config set cache "C:\Program Files\nodejs\node_cache"
55
+ ```
56
+
57
+ 5. Open the Environment Variables settings in the System Properties.
58
+ 6. Add `C:\Program Files\nodejs\node_global` to the `PATH` variable under User Variables.
59
+ 7. Optionally, create a new system variable named `NODE_PATH` and set its value to `C:\Program Files\nodejs\node_global\node_modules`.
60
+
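You can confirm the new locations are in effect by running `npm config get prefix` and `npm config get cache`.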
61
+ ## Linux Installation
62
+ - Step 1: Update Your System
63
+ Before installing Node.js, ensure your Linux system is up-to-date:
64
+
65
+ ```bash
66
+ sudo apt-get update
67
+ sudo apt-get upgrade
68
+ ```
69
+
70
+ - Step 2: Install Dependencies
71
+ Node.js requires certain dependencies to function properly:
72
+
73
+ ```bash
74
+ sudo apt-get install build-essential libssl-dev
75
+ ```
76
+
77
+ - Step 3: Download and Install Node.js
78
+ You can build Node.js from source, or use a tool such as `curl` or `wget` to fetch a setup script and install a pre-built package through your distribution's package manager. For simplicity, this guide assumes you're using a package manager.
79
+
80
+ 1. Navigate to the Node.js download page for package managers.
81
+ Follow the instructions for your Linux distribution. For example, on Ubuntu, you can use:
82
+
83
+ ```bash
84
+ curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
85
+ sudo apt-get install -y nodejs
86
+ ```
87
+
88
+ Replace 20.x with the desired version number if you don't want the latest version.
89
+
90
+ - Step 4: Verify Installation
91
+ 1. Open a terminal.
92
+ 2. Type `node -v` and press Enter to check the Node.js version.
93
+ 3. Type `npm -v` and press Enter to verify the npm version.
94
+
95
+
96
+ ## Installing Node.js on macOS
97
+
98
+ Installing Node.js on macOS is a straightforward process that can be accomplished using the official installer from the Node.js website or through package managers like Homebrew. This guide will cover both methods.
99
+
100
+ ### Method 1: Using the Official Installer
101
+ - Visit the Node.js Website
102
+ - Open your web browser and navigate to https://nodejs.org/.
103
+ - Download the Installer
104
+ - Scroll down to the "Downloads" section.
105
+ - Click on the "macOS Installer" button to download the .pkg file. Ensure you download the latest version, which as of August 2024, might be v20.x.x or higher.
106
+ - Install Node.js
107
+ - Once the download is complete, locate the .pkg file in your Downloads folder.
108
+ - Double-click the file to start the installation process.
109
+ - Follow the on-screen instructions. Typically, you'll need to agree to the license agreement, select an installation location (the default is usually fine), and click "Continue" or "Install" until the installation is complete.
110
+ - Verify the Installation
111
+ - Open the Terminal application by going to "Finder" > "Applications" > "Utilities" > "Terminal" or using Spotlight Search (press `Cmd + Space` and type "Terminal").
112
+ - Type `node -v` and press Enter. This command should display the installed version of Node.js.
113
+ - Type `npm -v` and press Enter to verify that npm, the Node.js package manager, is also installed.
114
+
115
+ ### Method 2: Using Homebrew
116
+ If you prefer to use a package manager, Homebrew is a popular choice for macOS.
117
+
118
+ - Install Homebrew (if not already installed)
119
+
120
+ - Open the Terminal.
121
+
122
+ - Copy and paste the following command into the Terminal and press Enter:
123
+ ```bash
124
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
125
+ ```
126
+
127
+ - Follow the on-screen instructions to complete the Homebrew installation.
128
+
129
+ - Install Node.js with Homebrew
130
+ - Once Homebrew is installed, update your package list by running brew update in the Terminal.
131
+ - To install Node.js, run the following command in the Terminal:
132
+ ```bash
133
+ brew install node
134
+ ```
135
+ - Homebrew will download and install the latest version of Node.js and npm.
136
+ - Verify the Installation
137
+ - As with the official installer method, you can verify the installation by typing node -v and npm -v in the Terminal and pressing Enter.
138
+
139
+ ### Additional Configuration (Optional)
140
+ - Configure npm's Global Installation Path (if desired):
141
+ - You may want to change the default location where globally installed npm packages are stored. Follow the steps outlined in the Node.js documentation or search for guides online to configure this.
142
+ - Switch to a Different Node.js Version (if needed):
143
+ - If you need to switch between multiple Node.js versions, consider using a version manager like nvm (Node Version Manager). Follow the instructions on the nvm GitHub page to install and use it.
144
+
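With nvm installed, switching is typically just a matter of running `nvm install 20` followed by `nvm use 20` (substitute the version you need).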
145
+
146
+ By following these steps, you should be able to successfully install Node.js on your system. Remember to keep your Node.js and npm versions up-to-date to take advantage of the latest features and security updates.
147
+
148
+ If your environment is already prepared, you can continue with the installation and setup instructions below.
149
+
150
+ # Installation and Setup Instructions
151
+
152
+ ## Installation
153
+ ```
154
+ npm install
155
+ ```
156
+
157
+ ## Start Server
158
+ ```
159
+ npm start
160
+ ```
161
+
162
+ ## Visit Server
163
+ ```
164
+ http://localhost:8080
165
+ ```
166
+
167
+ pay attention to the real port in your terminal.maybe it won`t be 8080.
168
+
169
+ # Config
170
+ ## How to modify the request URL
171
+
172
+ - Open the file `vite.config.ts` and modify the proxy target, for example:
173
+
174
+ ```
175
+ server: {
176
+ port: 8080,
177
+ proxy: {
178
+ "/solve": {
179
+ target: "{HOST}:{PORT}",
180
+ changeOrigin: true,
181
+ }
182
+ }
183
+ }
184
+ ```
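Here `{HOST}:{PORT}` is a placeholder for the address of your MindSearch backend; for a backend started locally this might be something like `http://127.0.0.1:8002`, but use whichever host and port your backend actually reports on startup.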
frontend/React/README_zh-CN.md ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Notice
2
+ 问题回答过程中离开页面后再回到页面,会导致sse重连!
3
+ # 开始
4
+ ## 请使用大于18.0.0的node版本
5
+ ## 准备node.js开发环境
6
+ Node.js 是一个基于 Chrome V8 引擎的 JavaScript 运行环境,允许你在服务器端运行 JavaScript。以下是在 Windows、Linux 和 macOS 上安装 Node.js 的详细步骤。
7
+
8
+ ### 在 Windows 上安装 Node.js
9
+ - 步骤 1: 访问 Node.js 官网
10
+
11
+ 打开浏览器,访问 [Node.js](https://nodejs.org/zh-cn/download/prebuilt-installer) 官方网站。
12
+
13
+ - 步骤 2: 下载 Node.js 安装包
14
+
15
+ 选择你需要的nodejs版本,设备的类型,点击下载,示例如下图:
16
+ ![windows install](./windows-.png)
17
+
18
+ - 步骤 3: 安装 Node.js
19
+
20
+ 双击下载的安装包开始安装。
21
+
22
+ 跟随安装向导的指示进行安装。在安装过程中,你可以选择安装位置、是否将 Node.js 添加到系统 PATH 环境变量等选项。推荐选择“添加到 PATH”以便在任何地方都能通过命令行访问 Node.js。
23
+ 安装完成后,点击“Finish”结束安装。
24
+
25
+ - 步骤 4: 验证安装
26
+
27
+ 打开命令提示符(CMD)或 PowerShell。
28
+ 输入 node -v 并回车,如果系统返回了 Node.js 的版本号,说明安装成功。
29
+ 接着,输入 npm -v 并回车,npm 是 Node.js 的包管理器,如果返回了版本号,表示 npm 也已正确安装。
30
+
31
+ ### 在 Linux 上安装 Node.js
32
+ 注意: 由于 Linux 发行版众多,以下以 Ubuntu 为例说明,其他发行版(如 CentOS、Debian 等)的安装方式可能略有不同,可自行查询对应的安装办法。
33
+
34
+ - 步骤 1: 更新你的包管理器
35
+
36
+ 打开终端。
37
+
38
+ 输入 sudo apt update 并回车,以更新 Ubuntu 的包索引。
39
+
40
+ - 步骤 2: 安装 Node.js
41
+
42
+ 对于 Ubuntu 18.04 及更高版本,Node.js 可以直接从 Ubuntu 的仓库中安装。
43
+ 输入 sudo apt install nodejs npm 并回车。
44
+ 对于旧版本的 Ubuntu 或需要安装特定版本的 Node.js,你可能需要使用如 NodeSource 这样的第三方仓库。
45
+
46
+ - 步骤 3: 验证安装
47
+
48
+ 在终端中,输入 node -v 和 npm -v 来验证 Node.js 和 npm 是否已正确安装。
49
+
50
+ ### 在 macOS 上安装 Node.js
51
+
52
+ #### 下载安装
53
+ - 步骤 1: 访问 Node.js 官网
54
+
55
+ 打开浏览器,访问 Node.js 官方网站。
56
+
57
+ - 步骤 2: 下载 Node.js 安装包
58
+
59
+ 在首页找到 macOS 对应的安装包(通常是 .pkg 文件),点击下载。
60
+
61
+ - 步骤 3: 安装 Node.js
62
+
63
+ 找到下载的 .pkg 文件,双击打开。
64
+ 跟随安装向导的指示进行安装。
65
+ 安装完成后,点击“Close”结束安装。
66
+
67
+ - 步骤 4: 验证安装
68
+
69
+ 打开终端。
70
+
71
+ 输入 node -v 和 npm -v 来验证 Node.js 和 npm 是否已正确安装。
72
+
73
+ #### 使用HomeBrew安装
74
+ 前提条件:确保你的macOS上已经安装了Homebrew。如果尚未安装,可以通过以下命令进行安装(以终端操作为例):
75
+ ```
76
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
77
+ ```
78
+ 按照提示输入密码以确认安装。安装过程中,可能需要你同意许可协议等。
79
+
80
+ - 打开终端:
81
+ 在macOS上找到并打开“终端”应用程序。
82
+
83
+ - 使用Homebrew安装Node.js:
84
+ 在终端中输入以下命令来安装最新版本的Node.js
85
+ ```
86
+ brew install node
87
+ ```
88
+ Homebrew会自动下载Node.js的安装包,并处理相关的依赖项和安装过程。你需要等待一段时间,直到安装完成。
89
+
90
+ - 验证安装:
91
+ 安装完成后,你可以通过输入以下命令来验证Node.js是否成功安装:
92
+ ```
93
+ node -v
94
+ ```
95
+ 如果终端输出了Node.js的版本号,那么表示安装成功。同时,你也可以通过输入npm -v来验证npm(Node.js的包管理器)是否也成功安装。
96
+
97
+ 完成以上步骤后,你应该能在你的 Windows、Linux 或 macOS 系统上成功安装并运行 Node.js。
98
+
99
+ ### 更多
100
+ 如需了解更多,可参照:https://nodejs.org/en
101
+
102
+ 如环境已经准备好,跳转下一步
103
+
104
+ ## 安装依赖
105
+ 进入前端项目根目录
106
+ ```
107
+ npm install
108
+ ```
109
+
110
+ ## 启动
111
+ ```
112
+ npm start
113
+ ```
114
+
115
+ 启动成功后,界面将出现可访问的本地url
116
+
117
+ ## 配置
118
+ ### 接口请求配置
119
+
120
+ - 在vite.config.ts中配置proxy,示例如下:
121
+
122
+ ```
123
+ server: {
124
+ port: 8080,
125
+ proxy: {
126
+ "/solve": {
127
+ target: "{HOST}:{PORT}",
128
+ changeOrigin: true,
129
+ }
130
+ }
131
+ }
132
+ ```
133
+
134
+ ## 知悉
135
+ - 前端服务基于react开发,如需了解react相关知识,可参考:https://react.dev/
frontend/React/index.html ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <link rel="icon" type="image/svg+xml" href="" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title></title>
8
+ </head>
9
+
10
+ <body>
11
+ <div id="root"></div>
12
+ <script type="module" src="/src/index.tsx"></script>
13
+ </body>
14
+ </html>
frontend/React/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
frontend/React/package.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "test-react-flow",
3
+ "private": true,
4
+ "version": "0.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "start": "vite --host --mode dev",
8
+ "start:dev": "vite --host --mode dev",
9
+ "start:staging": "vite --host --mode staging",
10
+ "start:prod": "vite --host --mode production",
11
+ "build": "tsc && vite build",
12
+ "build:dev": "tsc && vite build --mode dev",
13
+ "build:staging": "tsc && vite build --mode staging",
14
+ "build:prod": "tsc && vite build --mode production",
15
+ "preview": "vite preview",
16
+ "prettier": "prettier --write ."
17
+ },
18
+ "devDependencies": {
19
+ "@babel/plugin-proposal-optional-chaining": "^7.21.0",
20
+ "@types/classnames": "^2.3.1",
21
+ "@types/js-cookie": "^3.0.3",
22
+ "@types/node": "^18.15.11",
23
+ "@types/react": "^18.0.28",
24
+ "@types/react-dom": "^18.0.11",
25
+ "@vitejs/plugin-legacy": "^4.0.2",
26
+ "@vitejs/plugin-react": "^3.1.0",
27
+ "husky": "^9.0.11",
28
+ "less": "^4.1.3",
29
+ "lint-staged": "^15.2.7",
30
+ "prettier": "^3.0.0",
31
+ "react": "^18.2.0",
32
+ "react-dom": "^18.2.0",
33
+ "terser": "^5.16.9",
34
+ "typescript": "^4.9.3",
35
+ "vite": "^4.2.1",
36
+ "vite-babel-plugin": "^0.0.2"
37
+ },
38
+ "dependencies": {
39
+ "@antv/x6": "^2.18.1",
40
+ "@microsoft/fetch-event-source": "^2.0.1",
41
+ "antd": "^5.18.3",
42
+ "axios": "^1.3.5",
43
+ "classnames": "^2.5.1",
44
+ "elkjs": "^0.9.3",
45
+ "js-cookie": "^3.0.1",
46
+ "react-markdown": "^9.0.1",
47
+ "react-router": "^6.11.2",
48
+ "react-router-dom": "^6.11.2",
49
+ "reactflow": "^11.11.3",
50
+ "rehype-raw": "^7.0.0"
51
+ },
52
+ "lint-staged": {
53
+ "**/*.{ts, tsx, less, module.less, json, md, .html}": "prettier --write ."
54
+ }
55
+ }
frontend/React/src/App.module.less ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .app {
2
+ height: 100%;
3
+ display: flex;
4
+ justify-content: space-between;
5
+ background: url(./assets/background.png) rgb(247, 248, 255);
6
+ background-size: cover;
7
+ overflow: hidden;
8
+ }
9
+
10
+ .content {
11
+ padding: 64px 0 16px 0;
12
+ width: 100%;
13
+ height: 100%;
14
+ box-sizing: border-box;
15
+ }
16
+
17
+ .header {
18
+ position: fixed;
19
+ padding: 16px 32px;
20
+ width: 100%;
21
+ display: flex;
22
+ align-items: center;
23
+ box-sizing: border-box;
24
+
25
+ &-nav {
26
+ flex: 1;
27
+
28
+ img {
29
+ height: 40px;
30
+ }
31
+
32
+ a {
33
+ display: inline-block;
34
+ text-decoration: none;
35
+ color: black;
36
+
37
+ &:not(:first-of-type) {
38
+ margin-left: 40px;
39
+ }
40
+
41
+ &.active {
42
+ font-weight: bold;
43
+ }
44
+ }
45
+ }
46
+
47
+ &-opt {
48
+ flex-shrink: 0;
49
+ display: flex;
50
+ align-items: center;
51
+ }
52
+ }
frontend/React/src/App.tsx ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import style from "./App.module.less";
2
+
3
+ import { BrowserRouter } from "react-router-dom";
4
+ import RouterRoutes from "@/routes/routes";
5
+ import Logo from "@/assets/logo.svg";
6
+
7
+ function App() {
8
+ return (
9
+ <BrowserRouter>
10
+ <div className={style.app} id="app">
11
+ <div className={style.header}>
12
+ <div className={style.headerNav}>
13
+ <img src={Logo} />
14
+ </div>
15
+ </div>
16
+
17
+ <div className={style.content}>
18
+ <RouterRoutes />
19
+ </div>
20
+ </div>
21
+ </BrowserRouter>
22
+ );
23
+ }
24
+
25
+ export default App;
frontend/React/src/assets/background.png ADDED

Git LFS Details

  • SHA256: 95159880fac7f096323aac3e3d34a2c3aacb144dc01ad9bd0712fe88b06ec043
  • Pointer size: 130 Bytes
  • Size of remote file: 36.9 kB
frontend/React/src/assets/fold-icon.svg ADDED
frontend/React/src/assets/logo.svg ADDED
frontend/React/src/assets/pack-up.svg ADDED
frontend/React/src/assets/sendIcon.svg ADDED
frontend/React/src/assets/show-right-icon.png ADDED

Git LFS Details

  • SHA256: 12c14da51cd99c6b8e1a99d8f299c64a918e521f0b3749b2972608988e028785
  • Pointer size: 129 Bytes
  • Size of remote file: 8.87 kB
frontend/React/src/assets/unflod-icon.svg ADDED
frontend/React/src/global.d.ts ADDED
@@ -0,0 +1 @@
 
 
1
+ declare module 'event-source-polyfill';