github-actions[bot] committed
Commit ed8833e · 0 Parent(s):

Sync from https://github.com/felladrin/MiniSearch

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.

Files changed (50)
  1. .dockerignore +25 -0
  2. .editorconfig +7 -0
  3. .env.example +33 -0
  4. .github/hf-space-config.yml +11 -0
  5. .github/workflows/ai-review.yml +138 -0
  6. .github/workflows/deploy-to-hugging-face.yml +20 -0
  7. .github/workflows/on-pull-request-to-main.yml +9 -0
  8. .github/workflows/on-push-to-main.yml +7 -0
  9. .github/workflows/publish-docker-image.yml +39 -0
  10. .github/workflows/reusable-test-lint-ping.yml +26 -0
  11. .gitignore +7 -0
  12. .husky/pre-commit +1 -0
  13. .npmrc +1 -0
  14. Dockerfile +95 -0
  15. README.md +127 -0
  16. biome.json +34 -0
  17. client/components/AiResponse/AiModelDownloadAllowanceContent.tsx +62 -0
  18. client/components/AiResponse/AiResponseContent.tsx +214 -0
  19. client/components/AiResponse/AiResponseSection.tsx +88 -0
  20. client/components/AiResponse/ChatHeader.tsx +33 -0
  21. client/components/AiResponse/ChatInputArea.tsx +89 -0
  22. client/components/AiResponse/ChatInterface.tsx +236 -0
  23. client/components/AiResponse/CopyIconButton.tsx +32 -0
  24. client/components/AiResponse/EnableAiResponsePrompt.tsx +80 -0
  25. client/components/AiResponse/ExpandableLink.tsx +123 -0
  26. client/components/AiResponse/FormattedMarkdown.tsx +41 -0
  27. client/components/AiResponse/LoadingModelContent.tsx +40 -0
  28. client/components/AiResponse/MarkdownRenderer.tsx +93 -0
  29. client/components/AiResponse/MessageList.tsx +59 -0
  30. client/components/AiResponse/PreparingContent.tsx +33 -0
  31. client/components/AiResponse/ReasoningSection.tsx +71 -0
  32. client/components/AiResponse/WebLlmModelSelect.tsx +81 -0
  33. client/components/AiResponse/WllamaModelSelect.tsx +42 -0
  34. client/components/AiResponse/hooks/useReasoningContent.ts +42 -0
  35. client/components/App/App.tsx +100 -0
  36. client/components/Logs/LogsModal.tsx +136 -0
  37. client/components/Logs/ShowLogsButton.tsx +42 -0
  38. client/components/Pages/AccessPage.tsx +70 -0
  39. client/components/Pages/Main/MainPage.tsx +81 -0
  40. client/components/Pages/Main/Menu/AISettings/AISettingsForm.tsx +208 -0
  41. client/components/Pages/Main/Menu/AISettings/components/AIParameterSlider.tsx +17 -0
  42. client/components/Pages/Main/Menu/AISettings/components/BrowserSettings.tsx +60 -0
  43. client/components/Pages/Main/Menu/AISettings/components/HordeSettings.tsx +44 -0
  44. client/components/Pages/Main/Menu/AISettings/components/OpenAISettings.tsx +57 -0
  45. client/components/Pages/Main/Menu/AISettings/components/SystemPromptInput.tsx +102 -0
  46. client/components/Pages/Main/Menu/AISettings/hooks/useHordeModels.ts +35 -0
  47. client/components/Pages/Main/Menu/AISettings/hooks/useHordeUserInfo.ts +43 -0
  48. client/components/Pages/Main/Menu/AISettings/hooks/useOpenAiModels.ts +56 -0
  49. client/components/Pages/Main/Menu/AISettings/types.ts +15 -0
  50. client/components/Pages/Main/Menu/ActionsForm.tsx +12 -0
.dockerignore ADDED
@@ -0,0 +1,25 @@
+ # Logs
+ logs
+ *.log
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
+ pnpm-debug.log*
+ lerna-debug.log*
+
+ node_modules
+ dist
+ dist-ssr
+ *.local
+
+ # Editor directories and files
+ .vscode/*
+ !.vscode/extensions.json
+ .idea
+ .DS_Store
+ *.suo
+ *.ntvs*
+ *.njsproj
+ *.sln
+ *.sw?
+
.editorconfig ADDED
@@ -0,0 +1,7 @@
+ [*]
+ charset = utf-8
+ insert_final_newline = true
+ end_of_line = lf
+ indent_style = space
+ indent_size = 2
+ max_line_length = 80
.env.example ADDED
@@ -0,0 +1,33 @@
+ # A comma-separated list of access keys. Example: `ACCESS_KEYS="ABC123,JUD71F,HUWE3"`. Leave blank for unrestricted access.
+ ACCESS_KEYS=""
+
+ # The timeout in hours for access key validation. Set to 0 to require validation on every page load.
+ ACCESS_KEY_TIMEOUT_HOURS="24"
+
+ # The default model ID for WebLLM with F16 shaders.
+ WEBLLM_DEFAULT_F16_MODEL_ID="Qwen3-1.7B-q4f16_1-MLC"
+
+ # The default model ID for WebLLM with F32 shaders.
+ WEBLLM_DEFAULT_F32_MODEL_ID="Qwen3-1.7B-q4f32_1-MLC"
+
+ # The default model ID for Wllama.
+ WLLAMA_DEFAULT_MODEL_ID="qwen-3-0.6b"
+
+ # The base URL for the internal OpenAI compatible API. Example: `INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"`. Leave blank to disable internal OpenAI compatible API.
+ INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL=""
+
+ # The access key for the internal OpenAI compatible API.
+ INTERNAL_OPENAI_COMPATIBLE_API_KEY=""
+
+ # The model for the internal OpenAI compatible API.
+ INTERNAL_OPENAI_COMPATIBLE_API_MODEL=""
+
+ # The name of the internal OpenAI compatible API, displayed in the UI.
+ INTERNAL_OPENAI_COMPATIBLE_API_NAME="Internal API"
+
+ # The type of inference to use by default. The possible values are:
+ # "browser" -> In the browser (Private)
+ # "openai" -> Remote Server (API)
+ # "horde" -> AI Horde (Pre-configured)
+ # "internal" -> $INTERNAL_OPENAI_COMPATIBLE_API_NAME
+ DEFAULT_INFERENCE_TYPE="browser"
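As a minimal sketch of how these variables combine, here is a hypothetical filled-in `.env` that password-protects the instance and defaults to an internal OpenAI-compatible endpoint. Every value below (key, URL, and model name) is a placeholder, not something shipped with the project:

```bash
# Hypothetical example values; substitute your own.
ACCESS_KEYS="PepperoniPizza"   # single access key, as in the README FAQ
INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://llm.example.com/v1"
INTERNAL_OPENAI_COMPATIBLE_API_KEY="sk-placeholder"
INTERNAL_OPENAI_COMPATIBLE_API_MODEL="my-team-model"
INTERNAL_OPENAI_COMPATIBLE_API_NAME="Team API"
DEFAULT_INFERENCE_TYPE="internal"   # expose the internal API as the default
```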
.github/hf-space-config.yml ADDED
@@ -0,0 +1,11 @@
+ title: MiniSearch
+ emoji: 👌🔍
+ colorFrom: yellow
+ colorTo: yellow
+ sdk: docker
+ short_description: Minimalist web-searching app with browser-based AI assistant
+ pinned: true
+ custom_headers:
+   cross-origin-embedder-policy: require-corp
+   cross-origin-opener-policy: same-origin
+   cross-origin-resource-policy: cross-origin
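The three custom headers above make the Space cross-origin isolated, which browsers require before exposing SharedArrayBuffer to the in-browser models. A quick way to confirm a deployment is serving them (a sketch using the public demo URL; substitute your own):

```bash
# Print only the cross-origin headers from a HEAD request
curl -sI https://felladrin-minisearch.hf.space | grep -i '^cross-origin'
```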
.github/workflows/ai-review.yml ADDED
@@ -0,0 +1,138 @@
+ name: Review Pull Request with AI
+
+ on:
+   pull_request:
+     types: [opened, synchronize, reopened]
+     branches: ["main"]
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+   cancel-in-progress: true
+
+ jobs:
+   ai-review:
+     if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-ai-review') }}
+     continue-on-error: true
+     runs-on: ubuntu-latest
+     name: AI Review
+     permissions:
+       pull-requests: write
+       contents: read
+     timeout-minutes: 30
+     steps:
+       - name: Checkout Repository
+         uses: actions/checkout@v4
+
+       - name: Create temporary directory
+         run: mkdir -p /tmp/pr_review
+
+       - name: Process PR description
+         id: process_pr
+         run: |
+           PR_BODY_ESCAPED=$(cat << 'EOF'
+           ${{ github.event.pull_request.body }}
+           EOF
+           )
+           PROCESSED_BODY=$(echo "$PR_BODY_ESCAPED" | sed -E 's/\[(.*?)\]\(.*?\)/\1/g')
+           echo "$PROCESSED_BODY" > /tmp/pr_review/processed_body.txt
+
+       - name: Fetch branches and output the diff
+         run: |
+           git fetch origin main:main
+           git fetch origin pull/${{ github.event.pull_request.number }}/head:pr-branch
+           git diff main..pr-branch > /tmp/pr_review/diff.txt
+
+       - name: Prepare review request
+         id: prepare_request
+         run: |
+           PR_TITLE=$(echo "${{ github.event.pull_request.title }}" | sed 's/[()]/\\&/g')
+           DIFF_CONTENT=$(cat /tmp/pr_review/diff.txt)
+           PROCESSED_BODY=$(cat /tmp/pr_review/processed_body.txt)
+
+           jq -n \
+             --arg model "${{ vars.OPENAI_COMPATIBLE_API_MODEL }}" \
+             --arg http_referer "${{ github.event.repository.html_url }}" \
+             --arg title "${{ github.event.repository.name }}" \
+             --arg system "You are an experienced developer reviewing a Pull Request. You focus only on what matters and provide concise, actionable feedback.
+
+           Review Context:
+           Repository Name: \"${{ github.event.repository.name }}\"
+           Repository Description: \"${{ github.event.repository.description }}\"
+           Branch: \"${{ github.event.pull_request.head.ref }}\"
+           PR Title: \"$PR_TITLE\"
+
+           Guidelines:
+           1. Only comment on issues that:
+              - Could cause bugs or security issues
+              - Significantly impact performance
+              - Make the code harder to maintain
+              - Violate critical best practices
+
+           2. For each issue:
+              - Point to the specific line/file
+              - Explain why it's a problem
+              - Suggest a concrete fix
+
+           3. Praise exceptional solutions briefly, only if truly innovative
+
+           4. Skip commenting on:
+              - Minor style issues
+              - Obvious changes
+              - Working code that could be marginally improved
+              - Things that are just personal preference
+
+           Remember:
+           Less is more. If the code is good and working, just say so, with a short message." \
+             --arg user "This is the description of the pull request:
+           \`\`\`markdown
+           $PROCESSED_BODY
+           \`\`\`
+
+           And here is the diff of the changes, for you to review:
+           \`\`\`diff
+           $DIFF_CONTENT
+           \`\`\`" \
+             '{
+               "model": $model,
+               "messages": [
+                 {"role": "system", "content": $system},
+                 {"role": "user", "content": $user}
+               ],
+               "temperature": 0.7,
+               "top_p": 0.9
+             }' > /tmp/pr_review/request.json
+       - name: Get AI Review
+         id: ai_review
+         run: |
+           RESPONSE=$(curl -s ${{ vars.OPENAI_COMPATIBLE_API_BASE_URL }}/chat/completions \
+             -H "Content-Type: application/json" \
+             -H "Authorization: Bearer ${{ secrets.OPENAI_COMPATIBLE_API_KEY }}" \
+             -d @/tmp/pr_review/request.json)
+
+           # Check for errors in the response
+           if echo "$RESPONSE" | jq -e '.object == "error"' > /dev/null; then
+             echo "Error from API:" >&2
+             ERROR_MSG=$(echo "$RESPONSE" | jq -r '.message.detail[0].msg // .message')
+             echo "$ERROR_MSG" >&2
+             exit 1
+           fi
+
+           echo "### Review" > /tmp/pr_review/response.txt
+           echo "" >> /tmp/pr_review/response.txt
+           echo "$RESPONSE" | jq -r '.choices[0].message.content' >> /tmp/pr_review/response.txt
+
+       - name: Find Comment
+         uses: peter-evans/find-comment@v3
+         id: find_comment
+         with:
+           issue-number: ${{ github.event.pull_request.number }}
+           comment-author: "github-actions[bot]"
+           body-includes: "### Review"
+
+       - name: Post or Update PR Review
+         uses: peter-evans/create-or-update-comment@v4
+         with:
+           comment-id: ${{ steps.find_comment.outputs.comment-id }}
+           issue-number: ${{ github.event.pull_request.number }}
+           body-path: /tmp/pr_review/response.txt
+           edit-mode: replace
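Outside of CI, the request this workflow builds can be exercised against any OpenAI-compatible endpoint. A rough local dry run (the endpoint URL and model are placeholders; the jq and curl calls mirror the steps above):

```bash
# Build a minimal request body like the "Prepare review request" step does
jq -n --arg model "placeholder-model" \
      --arg system "You are an experienced developer reviewing a Pull Request." \
      --arg user "$(git diff main..HEAD)" \
      '{model: $model, messages: [{role: "system", content: $system}, {role: "user", content: $user}], temperature: 0.7, top_p: 0.9}' \
      > /tmp/request.json

# Send it the same way the "Get AI Review" step does
curl -s https://api.example.com/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $OPENAI_COMPATIBLE_API_KEY" \
  -d @/tmp/request.json | jq -r '.choices[0].message.content'
```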
.github/workflows/deploy-to-hugging-face.yml ADDED
@@ -0,0 +1,20 @@
+ name: Deploy to Hugging Face
+
+ on:
+   workflow_dispatch:
+
+ jobs:
+   sync-to-hf:
+     name: Sync to Hugging Face Spaces
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+         with:
+           lfs: true
+       - uses: JacobLinCool/huggingface-sync@v1
+         with:
+           github: ${{ secrets.GITHUB_TOKEN }}
+           user: ${{ vars.HF_SPACE_OWNER }}
+           space: ${{ vars.HF_SPACE_NAME }}
+           token: ${{ secrets.HF_TOKEN }}
+           configuration: ".github/hf-space-config.yml"
.github/workflows/on-pull-request-to-main.yml ADDED
@@ -0,0 +1,9 @@
+ name: On Pull Request To Main
+ on:
+   pull_request:
+     types: [opened, synchronize, reopened]
+     branches: ["main"]
+ jobs:
+   test-lint-ping:
+     if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-test-lint-ping') }}
+     uses: ./.github/workflows/reusable-test-lint-ping.yml
.github/workflows/on-push-to-main.yml ADDED
@@ -0,0 +1,7 @@
+ name: On Push To Main
+ on:
+   push:
+     branches: ["main"]
+ jobs:
+   test-lint-ping:
+     uses: ./.github/workflows/reusable-test-lint-ping.yml
.github/workflows/publish-docker-image.yml ADDED
@@ -0,0 +1,39 @@
+ name: Publish Docker Image
+
+ on:
+   workflow_dispatch:
+
+ jobs:
+   build-and-push-image:
+     name: Publish Docker Image to GitHub Packages
+     runs-on: ubuntu-latest
+     env:
+       REGISTRY: ghcr.io
+       IMAGE_NAME: ${{ github.repository }}
+     permissions:
+       contents: read
+       packages: write
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v4
+       - name: Log in to the Container registry
+         uses: docker/login-action@v3
+         with:
+           registry: ${{ env.REGISTRY }}
+           username: ${{ github.actor }}
+           password: ${{ secrets.GITHUB_TOKEN }}
+       - name: Extract metadata (tags, labels) for Docker
+         id: meta
+         uses: docker/metadata-action@v5
+         with:
+           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v3
+       - name: Build and push Docker Image
+         uses: docker/build-push-action@v6
+         with:
+           context: .
+           push: true
+           tags: ${{ steps.meta.outputs.tags }}
+           labels: ${{ steps.meta.outputs.labels }}
+           platforms: linux/amd64,linux/arm64
.github/workflows/reusable-test-lint-ping.yml ADDED
@@ -0,0 +1,26 @@
+ on:
+   workflow_call:
+ jobs:
+   check-code-quality:
+     name: Check Code Quality
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-node@v4
+         with:
+           node-version: "lts/*"
+           cache: "npm"
+       - run: npm ci --ignore-scripts
+       - run: npm test
+       - run: npm run lint
+   check-docker-container:
+     needs: [check-code-quality]
+     name: Check Docker Container
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - run: docker compose -f docker-compose.production.yml up -d
+       - name: Check if main page is available
+         run: until curl -s -o /dev/null -w "%{http_code}" localhost:7860 | grep 200; do sleep 1; done
+         timeout-minutes: 1
+       - run: docker compose -f docker-compose.production.yml down
.gitignore ADDED
@@ -0,0 +1,7 @@
+ node_modules
+ .DS_Store
+ /client/dist
+ /server/models
+ .vscode
+ /vite-build-stats.html
+ .env
.husky/pre-commit ADDED
@@ -0,0 +1 @@
+ npx lint-staged
.npmrc ADDED
@@ -0,0 +1 @@
+ legacy-peer-deps = true
Dockerfile ADDED
@@ -0,0 +1,95 @@
+ FROM node:lts AS llama-builder
+
+ ARG LLAMA_CPP_RELEASE_TAG="b5595"
+
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     cmake \
+     ccache \
+     git \
+     curl
+
+ RUN cd /tmp && \
+     git clone https://github.com/ggerganov/llama.cpp.git && \
+     cd llama.cpp && \
+     git checkout $LLAMA_CPP_RELEASE_TAG && \
+     cmake -B build -DGGML_NATIVE=OFF -DLLAMA_CURL=OFF && \
+     cmake --build build --config Release -j --target llama-server && \
+     mkdir -p /usr/local/lib/llama && \
+     find build -type f \( -name "libllama.so" -o -name "libmtmd.so" -o -name "libggml.so" -o -name "libggml-base.so" -o -name "libggml-cpu.so" \) -exec cp {} /usr/local/lib/llama/ \;
+
+ FROM node:lts
+
+ ENV PORT=7860
+ EXPOSE $PORT
+
+ ARG USERNAME=node
+ ARG HOME_DIR=/home/${USERNAME}
+ ARG APP_DIR=${HOME_DIR}/app
+
+ RUN apt-get update && \
+     apt-get install -y --no-install-recommends \
+     python3 \
+     python3-venv && \
+     apt-get clean && \
+     rm -rf /var/lib/apt/lists/*
+
+ RUN mkdir -p /usr/local/searxng /etc/searxng && \
+     chown -R ${USERNAME}:${USERNAME} /usr/local/searxng /etc/searxng && \
+     chmod 755 /etc/searxng
+
+ WORKDIR /usr/local/searxng
+ RUN python3 -m venv searxng-venv && \
+     chown -R ${USERNAME}:${USERNAME} /usr/local/searxng/searxng-venv && \
+     /usr/local/searxng/searxng-venv/bin/pip install --upgrade pip && \
+     /usr/local/searxng/searxng-venv/bin/pip install wheel setuptools pyyaml lxml
+
+ RUN git clone https://github.com/searxng/searxng.git /usr/local/searxng/searxng-src && \
+     chown -R ${USERNAME}:${USERNAME} /usr/local/searxng/searxng-src
+
+ ARG SEARXNG_SETTINGS_PATH="/etc/searxng/settings.yml"
+
+ WORKDIR /usr/local/searxng/searxng-src
+ RUN cp searx/settings.yml $SEARXNG_SETTINGS_PATH && \
+     chown ${USERNAME}:${USERNAME} $SEARXNG_SETTINGS_PATH && \
+     chmod 644 $SEARXNG_SETTINGS_PATH && \
+     sed -i 's/ultrasecretkey/'$(openssl rand -hex 32)'/g' $SEARXNG_SETTINGS_PATH && \
+     sed -i 's/- html/- json/' $SEARXNG_SETTINGS_PATH && \
+     /usr/local/searxng/searxng-venv/bin/pip install -e .
+
+ COPY --from=llama-builder /tmp/llama.cpp/build/bin/llama-server /usr/local/bin/
+ COPY --from=llama-builder /usr/local/lib/llama/* /usr/local/lib/
+ RUN ldconfig /usr/local/lib
+
+ USER ${USERNAME}
+
+ WORKDIR ${APP_DIR}
+
+ ARG ACCESS_KEYS
+ ARG ACCESS_KEY_TIMEOUT_HOURS
+ ARG WEBLLM_DEFAULT_F16_MODEL_ID
+ ARG WEBLLM_DEFAULT_F32_MODEL_ID
+ ARG WLLAMA_DEFAULT_MODEL_ID
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_KEY
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_MODEL
+ ARG INTERNAL_OPENAI_COMPATIBLE_API_NAME
+ ARG DEFAULT_INFERENCE_TYPE
+ ARG HOST
+ ARG HMR_PORT
+ ARG ALLOWED_HOSTS
+
+ COPY --chown=${USERNAME}:${USERNAME} ./package.json ./package-lock.json ./.npmrc ./
+
+ RUN npm ci
+
+ COPY --chown=${USERNAME}:${USERNAME} . .
+
+ RUN git config --global --add safe.directory ${APP_DIR} && \
+     npm run build
+
+ HEALTHCHECK --interval=5m CMD curl -f http://localhost:7860/status || exit 1
+
+ ENTRYPOINT [ "/bin/sh", "-c" ]
+
+ CMD ["(cd /usr/local/searxng/searxng-src && /usr/local/searxng/searxng-venv/bin/python -m searx.webapp > /dev/null 2>&1) & (npx pm2 start ecosystem.config.cjs && npx pm2 logs)" ]
README.md ADDED
@@ -0,0 +1,127 @@
+ ---
+ title: MiniSearch
+ emoji: 👌🔍
+ colorFrom: yellow
+ colorTo: yellow
+ sdk: docker
+ short_description: Minimalist web-searching app with browser-based AI assistant
+ pinned: true
+ custom_headers:
+   cross-origin-embedder-policy: require-corp
+   cross-origin-opener-policy: same-origin
+   cross-origin-resource-policy: cross-origin
+ ---
+
+ # MiniSearch
+
+ A minimalist web-searching app with an AI assistant that runs directly from your browser.
+
+ Live demo: https://felladrin-minisearch.hf.space
+
+ ## Screenshot
+
+ ![MiniSearch Screenshot](https://github.com/user-attachments/assets/f8d72a8e-a725-42e9-9358-e6ebade2acb2)
+
+ ## Features
+
+ - **Privacy-focused**: [No tracking, no ads, no data collection](https://docs.searxng.org/own-instance.html#how-does-searxng-protect-privacy)
+ - **Easy to use**: Minimalist yet intuitive interface for all users
+ - **Cross-platform**: Models run inside the browser, both on desktop and mobile
+ - **Integrated**: Search from the browser address bar by setting it as the default search engine
+ - **Efficient**: Models are loaded and cached only when needed
+ - **Customizable**: Tweakable settings for search results and text generation
+ - **Open-source**: [The code is available for inspection and contribution at GitHub](https://github.com/felladrin/MiniSearch)
+
+ ## Prerequisites
+
+ - [Docker](https://docs.docker.com/get-docker/)
+
+ ## Getting started
+
+ Here are the easiest ways to get started with MiniSearch. Pick the one that suits you best.
+
+ **Option 1** - Use [MiniSearch's Docker Image](https://github.com/felladrin/MiniSearch/pkgs/container/minisearch) by running in your terminal:
+
+ ```bash
+ docker run -p 7860:7860 ghcr.io/felladrin/minisearch:main
+ ```
+
+ **Option 2** - Add MiniSearch's Docker Image to your existing Docker Compose file:
+
+ ```yaml
+ services:
+   minisearch:
+     image: ghcr.io/felladrin/minisearch:main
+     ports:
+       - "7860:7860"
+ ```
+
+ **Option 3** - Build from source by [downloading the repository files](https://github.com/felladrin/MiniSearch/archive/refs/heads/main.zip) and running:
+
+ ```bash
+ docker compose -f docker-compose.production.yml up --build
+ ```
+
+ Once the container is running, open http://localhost:7860 in your browser and start searching!
+
+ ## Frequently asked questions
+
+ <details>
+   <summary>How do I search via the browser's address bar?</summary>
+   <p>
+     You can set MiniSearch as your browser's address-bar search engine using the pattern <code>http://localhost:7860/?q=%s</code>, in which your search term replaces <code>%s</code>.
+   </p>
+ </details>
+
+ <details>
+   <summary>How do I search via Raycast?</summary>
+   <p>
+     You can add <a href="https://ray.so/quicklinks/shared?quicklinks=%7B%22link%22:%22https:%5C/%5C/felladrin-minisearch.hf.space%5C/?q%3D%7BQuery%7D%22,%22name%22:%22MiniSearch%22%7D" target="_blank">this Quicklink</a> to Raycast, so typing your query will open MiniSearch with the search results. You can also edit it to point to your own domain.
+   </p>
+   <img width="744" alt="image" src="https://github.com/user-attachments/assets/521dca22-c77b-42de-8cc8-9feb06f9a97e">
+ </details>
+
+ <details>
+   <summary>Can I use custom models via OpenAI-Compatible API?</summary>
+   <p>
+     Yes! For this, open the Menu and change the "AI Processing Location" to <code>Remote server (API)</code>. Then configure the Base URL, and optionally set an API Key and a Model to use.
+   </p>
+ </details>
+
+ <details>
+   <summary>How do I restrict access to my MiniSearch instance via password?</summary>
+   <p>
+     Create a <code>.env</code> file and set a value for <code>ACCESS_KEYS</code>. Then restart the MiniSearch docker container.
+   </p>
+   <p>
+     For example, if you want to set the password to <code>PepperoniPizza</code>, this is what you should add to your <code>.env</code>:<br/>
+     <code>ACCESS_KEYS="PepperoniPizza"</code>
+   </p>
+   <p>
+     You can find more examples in the <code>.env.example</code> file.
+   </p>
+ </details>
+
+ <details>
+   <summary>I want to serve MiniSearch to other users, allowing them to use my own OpenAI-Compatible API key, but without revealing it to them. Is it possible?</summary>
+   <p>Yes! In MiniSearch, we call this text-generation feature "Internal OpenAI-Compatible API". To use it:</p>
+   <ol>
+     <li>Set up your OpenAI-Compatible API endpoint by configuring the following environment variables in your <code>.env</code> file:
+       <ul>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL</code>: The base URL for your API</li>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_KEY</code>: Your API access key</li>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_MODEL</code>: The model to use</li>
+         <li><code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code>: The name to display in the UI</li>
+       </ul>
+     </li>
+     <li>Restart the MiniSearch server.</li>
+     <li>In the MiniSearch menu, select the new option (named as per your <code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code> setting) from the "AI Processing Location" dropdown.</li>
+   </ol>
+ </details>
+
+ <details>
+   <summary>How can I contribute to the development of this tool?</summary>
+   <p>Fork this repository and clone it. Then, start the development server by running the following command:</p>
+   <p><code>docker compose up</code></p>
+   <p>Make your changes, push them to your fork, and open a pull request! All contributions are welcome!</p>
+ </details>
biome.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "$schema": "https://biomejs.dev/schemas/latest/schema.json",
+   "vcs": {
+     "enabled": false,
+     "clientKind": "git",
+     "useIgnoreFile": false
+   },
+   "files": {
+     "ignoreUnknown": false
+   },
+   "formatter": {
+     "enabled": true,
+     "indentStyle": "space"
+   },
+   "linter": {
+     "enabled": true,
+     "rules": {
+       "recommended": true
+     }
+   },
+   "javascript": {
+     "formatter": {
+       "quoteStyle": "double"
+     }
+   },
+   "assist": {
+     "enabled": true,
+     "actions": {
+       "source": {
+         "organizeImports": "on"
+       }
+     }
+   }
+ }
client/components/AiResponse/AiModelDownloadAllowanceContent.tsx ADDED
@@ -0,0 +1,62 @@
+ import { Alert, Button, Group, Text } from "@mantine/core";
+ import { IconCheck, IconInfoCircle, IconX } from "@tabler/icons-react";
+ import { usePubSub } from "create-pubsub/react";
+ import { useState } from "react";
+ import { addLogEntry } from "../../modules/logEntries";
+ import { settingsPubSub } from "../../modules/pubSub";
+
+ export default function AiModelDownloadAllowanceContent() {
+   const [settings, setSettings] = usePubSub(settingsPubSub);
+   const [hasDeniedDownload, setDeniedDownload] = useState(false);
+
+   const handleAccept = () => {
+     setSettings({
+       ...settings,
+       allowAiModelDownload: true,
+     });
+     addLogEntry("User allowed the AI model download");
+   };
+
+   const handleDecline = () => {
+     setDeniedDownload(true);
+     addLogEntry("User denied the AI model download");
+   };
+
+   return hasDeniedDownload ? null : (
+     <Alert
+       variant="light"
+       color="blue"
+       title="Allow AI model download?"
+       icon={<IconInfoCircle />}
+     >
+       <Text size="sm" mb="md">
+         To obtain AI responses, a language model needs to be downloaded to your
+         browser. Enabling this option lets the app store it and load it
+         instantly on subsequent uses.
+       </Text>
+       <Text size="sm" mb="md">
+         Please note that the download size ranges from 100 MB to 4 GB, depending
+         on the model you select in the Menu, so it's best to avoid using mobile
+         data for this.
+       </Text>
+       <Group justify="flex-end" mt="md">
+         <Button
+           variant="subtle"
+           color="gray"
+           leftSection={<IconX size="1rem" />}
+           onClick={handleDecline}
+           size="xs"
+         >
+           Not now
+         </Button>
+         <Button
+           leftSection={<IconCheck size="1rem" />}
+           onClick={handleAccept}
+           size="xs"
+         >
+           Allow download
+         </Button>
+       </Group>
+     </Alert>
+   );
+ }
client/components/AiResponse/AiResponseContent.tsx ADDED
@@ -0,0 +1,214 @@
+ import {
+   ActionIcon,
+   Alert,
+   Badge,
+   Box,
+   Card,
+   Group,
+   ScrollArea,
+   Text,
+   Tooltip,
+ } from "@mantine/core";
+ import {
+   IconArrowsMaximize,
+   IconArrowsMinimize,
+   IconHandStop,
+   IconInfoCircle,
+   IconRefresh,
+   IconVolume2,
+ } from "@tabler/icons-react";
+ import type { PublishFunction } from "create-pubsub";
+ import { usePubSub } from "create-pubsub/react";
+ import { type ReactNode, useMemo, useState } from "react";
+ import { addLogEntry } from "../../modules/logEntries";
+ import { settingsPubSub } from "../../modules/pubSub";
+ import { searchAndRespond } from "../../modules/textGeneration";
+ import CopyIconButton from "./CopyIconButton";
+ import FormattedMarkdown from "./FormattedMarkdown";
+
+ export default function AiResponseContent({
+   textGenerationState,
+   response,
+   setTextGenerationState,
+ }: {
+   textGenerationState: string;
+   response: string;
+   setTextGenerationState: PublishFunction<
+     | "failed"
+     | "awaitingSearchResults"
+     | "preparingToGenerate"
+     | "idle"
+     | "loadingModel"
+     | "generating"
+     | "interrupted"
+     | "completed"
+   >;
+ }) {
+   const [settings, setSettings] = usePubSub(settingsPubSub);
+   const [isSpeaking, setIsSpeaking] = useState(false);
+
+   const ConditionalScrollArea = useMemo(
+     () =>
+       ({ children }: { children: ReactNode }) => {
+         return settings.enableAiResponseScrolling ? (
+           <ScrollArea.Autosize mah={300} type="auto" offsetScrollbars>
+             {children}
+           </ScrollArea.Autosize>
+         ) : (
+           <Box>{children}</Box>
+         );
+       },
+     [settings.enableAiResponseScrolling],
+   );
+
+   function speakResponse(text: string) {
+     if (isSpeaking) {
+       self.speechSynthesis.cancel();
+       setIsSpeaking(false);
+       return;
+     }
+
+     const prepareTextForSpeech = (textToClean: string) => {
+       const withoutReasoning = textToClean.replace(
+         new RegExp(
+           `${settings.reasoningStartMarker}[\\s\\S]*?${settings.reasoningEndMarker}`,
+           "g",
+         ),
+         "",
+       );
+       const withoutLinks = withoutReasoning.replace(
+         /\[([^\]]+)\]\([^)]+\)/g,
+         "($1)",
+       );
+       const withoutMarkdown = withoutLinks.replace(/[#*`_~[\]]/g, "");
+       return withoutMarkdown.trim();
+     };
+
+     const utterance = new SpeechSynthesisUtterance(prepareTextForSpeech(text));
+
+     const voices = self.speechSynthesis.getVoices();
+
+     if (voices.length > 0 && settings.selectedVoiceId) {
+       const voice = voices.find(
+         (voice) => voice.voiceURI === settings.selectedVoiceId,
+       );
+
+       if (voice) {
+         utterance.voice = voice;
+         utterance.lang = voice.lang;
+       }
+     }
+
+     utterance.onerror = () => {
+       addLogEntry("Failed to speak response");
+       setIsSpeaking(false);
+     };
+
+     utterance.onend = () => setIsSpeaking(false);
+
+     setIsSpeaking(true);
+     self.speechSynthesis.speak(utterance);
+   }
+
+   return (
+     <Card withBorder shadow="sm" radius="md">
+       <Card.Section withBorder inheritPadding py="xs">
+         <Group justify="space-between">
+           <Group gap="xs" align="center">
+             <Text fw={500}>
+               {textGenerationState === "generating"
+                 ? "Generating AI Response..."
+                 : "AI Response"}
+             </Text>
+             {textGenerationState === "interrupted" && (
+               <Badge variant="light" color="yellow" size="xs">
+                 Interrupted
+               </Badge>
+             )}
+           </Group>
+           <Group gap="xs" align="center">
+             {textGenerationState === "generating" ? (
+               <Tooltip label="Interrupt generation">
+                 <ActionIcon
+                   onClick={() => setTextGenerationState("interrupted")}
+                   variant="subtle"
+                   color="gray"
+                 >
+                   <IconHandStop size={16} />
+                 </ActionIcon>
+               </Tooltip>
+             ) : (
+               <Tooltip label="Regenerate response">
+                 <ActionIcon
+                   onClick={() => searchAndRespond()}
+                   variant="subtle"
+                   color="gray"
+                 >
+                   <IconRefresh size={16} />
+                 </ActionIcon>
+               </Tooltip>
+             )}
+             <Tooltip
+               label={isSpeaking ? "Stop speaking" : "Listen to response"}
+             >
+               <ActionIcon
+                 onClick={() => speakResponse(response)}
+                 variant="subtle"
+                 color={isSpeaking ? "blue" : "gray"}
+               >
+                 <IconVolume2 size={16} />
+               </ActionIcon>
+             </Tooltip>
+             {settings.enableAiResponseScrolling ? (
+               <Tooltip label="Show full response without scroll bar">
+                 <ActionIcon
+                   onClick={() => {
+                     setSettings({
+                       ...settings,
+                       enableAiResponseScrolling: false,
+                     });
+                   }}
+                   variant="subtle"
+                   color="gray"
+                 >
+                   <IconArrowsMaximize size={16} />
+                 </ActionIcon>
+               </Tooltip>
+             ) : (
+               <Tooltip label="Enable scroll bar">
+                 <ActionIcon
+                   onClick={() => {
+                     setSettings({
+                       ...settings,
+                       enableAiResponseScrolling: true,
+                     });
+                   }}
+                   variant="subtle"
+                   color="gray"
+                 >
+                   <IconArrowsMinimize size={16} />
+                 </ActionIcon>
+               </Tooltip>
+             )}
+             <CopyIconButton value={response} tooltipLabel="Copy response" />
+           </Group>
+         </Group>
+       </Card.Section>
+       <Card.Section withBorder>
+         <ConditionalScrollArea>
+           <FormattedMarkdown>{response}</FormattedMarkdown>
+         </ConditionalScrollArea>
+         {textGenerationState === "failed" && (
+           <Alert
+             variant="light"
+             color="yellow"
+             title="Failed to generate response"
+             icon={<IconInfoCircle />}
+           >
+             Could not generate response. Please try refreshing the page.
+           </Alert>
+         )}
+       </Card.Section>
+     </Card>
+   );
+ }
client/components/AiResponse/AiResponseSection.tsx ADDED
@@ -0,0 +1,88 @@
+ import { CodeHighlightAdapterProvider } from "@mantine/code-highlight";
+ import { usePubSub } from "create-pubsub/react";
+ import { useMemo } from "react";
+ import {
+   modelLoadingProgressPubSub,
+   modelSizeInMegabytesPubSub,
+   queryPubSub,
+   responsePubSub,
+   settingsPubSub,
+   textGenerationStatePubSub,
+ } from "../../modules/pubSub";
+ import { shikiAdapter } from "../../modules/shiki";
+ import "@mantine/code-highlight/styles.css";
+ import AiModelDownloadAllowanceContent from "./AiModelDownloadAllowanceContent";
+ import AiResponseContent from "./AiResponseContent";
+ import ChatInterface from "./ChatInterface";
+ import LoadingModelContent from "./LoadingModelContent";
+ import PreparingContent from "./PreparingContent";
+
+ export default function AiResponseSection() {
+   const [query] = usePubSub(queryPubSub);
+   const [response] = usePubSub(responsePubSub);
+   const [textGenerationState, setTextGenerationState] = usePubSub(
+     textGenerationStatePubSub,
+   );
+   const [modelLoadingProgress] = usePubSub(modelLoadingProgressPubSub);
+   const [settings] = usePubSub(settingsPubSub);
+   const [modelSizeInMegabytes] = usePubSub(modelSizeInMegabytesPubSub);
+
+   return useMemo(() => {
+     if (!settings.enableAiResponse || textGenerationState === "idle") {
+       return null;
+     }
+
+     const generatingStates = [
+       "generating",
+       "interrupted",
+       "completed",
+       "failed",
+     ];
+     if (generatingStates.includes(textGenerationState)) {
+       return (
+         <CodeHighlightAdapterProvider adapter={shikiAdapter}>
+           <AiResponseContent
+             textGenerationState={textGenerationState}
+             response={response}
+             setTextGenerationState={setTextGenerationState}
+           />
+
+           {textGenerationState === "completed" && (
+             <ChatInterface initialQuery={query} initialResponse={response} />
+           )}
+         </CodeHighlightAdapterProvider>
+       );
+     }
+
+     if (textGenerationState === "loadingModel") {
+       return (
+         <LoadingModelContent
+           modelLoadingProgress={modelLoadingProgress}
+           modelSizeInMegabytes={modelSizeInMegabytes}
+         />
+       );
+     }
+
+     if (textGenerationState === "preparingToGenerate") {
+       return <PreparingContent textGenerationState={textGenerationState} />;
+     }
+
+     if (textGenerationState === "awaitingSearchResults") {
+       return <PreparingContent textGenerationState={textGenerationState} />;
+     }
+
+     if (textGenerationState === "awaitingModelDownloadAllowance") {
+       return <AiModelDownloadAllowanceContent />;
+     }
+
+     return null;
+   }, [
+     settings.enableAiResponse,
+     textGenerationState,
+     response,
+     query,
+     modelLoadingProgress,
+     modelSizeInMegabytes,
+     setTextGenerationState,
+   ]);
+ }
client/components/AiResponse/ChatHeader.tsx ADDED
@@ -0,0 +1,33 @@
+ import { Group, Text } from "@mantine/core";
+ import type { ChatMessage } from "gpt-tokenizer/GptEncoding";
+ import CopyIconButton from "./CopyIconButton";
+
+ interface ChatHeaderProps {
+   messages: ChatMessage[];
+ }
+
+ function ChatHeader({ messages }: ChatHeaderProps) {
+   const getChatContent = () => {
+     return messages
+       .slice(2)
+       .map(
+         (msg, index) =>
+           `${index + 1}. ${msg.role?.toUpperCase()}\n\n${msg.content}`,
+       )
+       .join("\n\n");
+   };
+
+   return (
+     <Group justify="space-between">
+       <Text fw={500}>Follow-up questions</Text>
+       {messages.length > 2 && (
+         <CopyIconButton
+           value={getChatContent()}
+           tooltipLabel="Copy conversation"
+         />
+       )}
+     </Group>
+   );
+ }
+
+ export default ChatHeader;
client/components/AiResponse/ChatInputArea.tsx ADDED
@@ -0,0 +1,89 @@
+ import { Button, Group, Textarea } from "@mantine/core";
+ import { IconSend } from "@tabler/icons-react";
+ import { usePubSub } from "create-pubsub/react";
+ import {
+   chatGenerationStatePubSub,
+   chatInputPubSub,
+   followUpQuestionPubSub,
+ } from "../../modules/pubSub";
+
+ interface ChatInputAreaProps {
+   onKeyDown: (event: React.KeyboardEvent<HTMLTextAreaElement>) => void;
+   handleSend: (textToSend?: string) => void;
+ }
+
+ function ChatInputArea({ onKeyDown, handleSend }: ChatInputAreaProps) {
+   const [input, setInput] = usePubSub(chatInputPubSub);
+   const [generationState] = usePubSub(chatGenerationStatePubSub);
+   const [followUpQuestion] = usePubSub(followUpQuestionPubSub);
+
+   const isGenerating =
+     generationState.isGeneratingResponse &&
+     !generationState.isGeneratingFollowUpQuestion;
+
+   const placeholder =
+     followUpQuestion || "Anything else you would like to know?";
+
+   const onChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
+     setInput(event.target.value);
+   };
+   const handleKeyDownWithPlaceholder = (
+     event: React.KeyboardEvent<HTMLTextAreaElement>,
+   ) => {
+     if (input.trim() === "" && followUpQuestion) {
+       if (event.key === "Enter" && !event.shiftKey) {
+         event.preventDefault();
+         handleSend(followUpQuestion);
+         return;
+       }
+     }
+
+     onKeyDown(event);
+   };
+
+   const handleSendWithPlaceholder = () => {
+     if (input.trim() === "" && followUpQuestion) {
+       handleSend(followUpQuestion);
+     } else {
+       handleSend();
+     }
+   };
+
+   return (
+     <Group align="flex-end" style={{ position: "relative" }}>
+       <Textarea
+         size="sm"
+         aria-label="Chat input"
+         placeholder={placeholder}
+         value={input}
+         onChange={onChange}
+         onKeyDown={handleKeyDownWithPlaceholder}
+         autosize
+         minRows={1}
+         maxRows={4}
+         style={{ flexGrow: 1, paddingRight: "50px" }}
+         disabled={isGenerating}
+       />
+       <Button
+         aria-label="Send message"
+         size="sm"
+         variant="default"
+         onClick={handleSendWithPlaceholder}
+         loading={isGenerating}
+         style={{
+           height: "100%",
+           position: "absolute",
+           right: 0,
+           top: 0,
+           bottom: 0,
+           borderTopLeftRadius: 0,
+           borderBottomLeftRadius: 0,
+         }}
+       >
+         <IconSend size={16} />
+       </Button>
+     </Group>
+   );
+ }
+
+ export default ChatInputArea;
client/components/AiResponse/ChatInterface.tsx ADDED
@@ -0,0 +1,236 @@
+ import { Card, Stack } from "@mantine/core";
+ import { usePubSub } from "create-pubsub/react";
+ import type { ChatMessage } from "gpt-tokenizer/GptEncoding";
+ import { type KeyboardEvent, useCallback, useEffect, useState } from "react";
+ import throttle from "throttleit";
+ import { generateFollowUpQuestion } from "../../modules/followUpQuestions";
+ import { handleEnterKeyDown } from "../../modules/keyboard";
+ import { addLogEntry } from "../../modules/logEntries";
+ import {
+   chatGenerationStatePubSub,
+   chatInputPubSub,
+   followUpQuestionPubSub,
+   getImageSearchResults,
+   getTextSearchResults,
+   settingsPubSub,
+   updateImageSearchResults,
+   updateLlmTextSearchResults,
+   updateTextSearchResults,
+ } from "../../modules/pubSub";
+ import { generateRelatedSearchQuery } from "../../modules/relatedSearchQuery";
+ import { searchImages, searchText } from "../../modules/search";
+ import { generateChatResponse } from "../../modules/textGeneration";
+ import ChatHeader from "./ChatHeader";
+ import ChatInputArea from "./ChatInputArea";
+ import MessageList from "./MessageList";
+
+ interface ChatInterfaceProps {
+   initialQuery?: string;
+   initialResponse?: string;
+ }
+
+ export default function ChatInterface({
+   initialQuery,
+   initialResponse,
+ }: ChatInterfaceProps) {
+   const [messages, setMessages] = useState<ChatMessage[]>([]);
+   const [input, setInput] = usePubSub(chatInputPubSub);
+   const [generationState, setGenerationState] = usePubSub(
+     chatGenerationStatePubSub,
+   );
+   const [, setFollowUpQuestion] = usePubSub(followUpQuestionPubSub);
+   const [previousFollowUpQuestions, setPreviousFollowUpQuestions] = useState<
+     string[]
+   >([]);
+   const [settings] = usePubSub(settingsPubSub);
+   const [streamedResponse, setStreamedResponse] = useState("");
+   const updateStreamedResponse = useCallback(
+     throttle((response: string) => {
+       setStreamedResponse(response);
+     }, 1000 / 12),
+     [],
+   );
+
+   const regenerateFollowUpQuestion = useCallback(
+     async (currentQuery: string, currentResponse: string) => {
+       if (!currentResponse || !currentQuery.trim()) return;
+
+       try {
+         setGenerationState({
+           isGeneratingResponse: false,
+           isGeneratingFollowUpQuestion: true,
+         });
+
+         const newQuestion = await generateFollowUpQuestion({
+           topic: currentQuery,
+           currentContent: currentResponse,
+           previousQuestions: previousFollowUpQuestions,
+         });
+
+         setPreviousFollowUpQuestions((prev) =>
+           [...prev, newQuestion].slice(-5),
+         );
+         setFollowUpQuestion(newQuestion);
+         setGenerationState({
+           isGeneratingResponse: false,
+           isGeneratingFollowUpQuestion: false,
+         });
+       } catch (_) {
+         setFollowUpQuestion("");
+         setGenerationState({
+           isGeneratingResponse: false,
+           isGeneratingFollowUpQuestion: false,
+         });
+       }
+     },
+     [setFollowUpQuestion, setGenerationState, previousFollowUpQuestions],
+   );
+
+   useEffect(() => {
+     if (messages.length === 0 && initialQuery && initialResponse) {
+       setMessages([
+         { role: "user", content: initialQuery },
+         { role: "assistant", content: initialResponse },
+       ]);
+       regenerateFollowUpQuestion(initialQuery, initialResponse);
+     }
+   }, [
+     initialQuery,
+     initialResponse,
+     messages.length,
+     regenerateFollowUpQuestion,
+   ]);
+
+   const handleSend = async (textToSend?: string) => {
+     const currentInput = textToSend ?? input;
+     if (currentInput.trim() === "" || generationState.isGeneratingResponse)
+       return;
+
+     const userMessage: ChatMessage = { role: "user", content: currentInput };
+     const newMessages: ChatMessage[] = [...messages, userMessage];
+
+     if (messages.length === 0) {
+       setPreviousFollowUpQuestions([]);
+     }
+
+     setMessages(newMessages);
+     setInput(textToSend ? input : "");
+     setGenerationState({
+       ...generationState,
+       isGeneratingResponse: true,
+     });
+     setFollowUpQuestion("");
+     setStreamedResponse("");
+
+     try {
+       const relatedQuery = await generateRelatedSearchQuery([...newMessages]);
+       const searchQuery = relatedQuery || currentInput;
+
+       if (settings.enableTextSearch) {
+         const freshResults = await searchText(
+           searchQuery,
+           settings.searchResultsLimit,
+         );
+
+         if (freshResults.length > 0) {
+           const existingUrls = new Set(
+             getTextSearchResults().map(([, , url]) => url),
+           );
+
+           const uniqueFreshResults = freshResults.filter(
+             ([, , url]) => !existingUrls.has(url),
+           );
+
+           if (uniqueFreshResults.length > 0) {
+             updateTextSearchResults([
+               ...getTextSearchResults(),
+               ...uniqueFreshResults,
+             ]);
+             updateLlmTextSearchResults(
+               uniqueFreshResults.slice(0, settings.searchResultsToConsider),
+             );
+           }
+         }
+       }
+
+       if (settings.enableImageSearch) {
+         searchImages(searchQuery, settings.searchResultsLimit)
+           .then((imageResults) => {
+             if (imageResults.length > 0) {
+               const existingUrls = new Set(
+                 getImageSearchResults().map(([, url]) => url),
+               );
+
+               const uniqueFreshResults = imageResults.filter(
+                 ([, url]) => !existingUrls.has(url),
+               );
+
+               if (uniqueFreshResults.length > 0) {
+                 updateImageSearchResults([
+                   ...uniqueFreshResults,
+                   ...getImageSearchResults(),
+                 ]);
+               }
+             }
+           })
+           .catch((error) => {
+             addLogEntry(`Error in follow-up image search: ${error}`);
+           });
+       }
+     } catch (error) {
+       addLogEntry(`Error in follow-up search: ${error}`);
+     }
+
+     try {
+       const finalResponse = await generateChatResponse(
+         newMessages,
+         updateStreamedResponse,
+       );
+
+       setMessages((prevMessages) => [
+         ...prevMessages,
+         { role: "assistant", content: finalResponse },
+       ]);
+
+       addLogEntry("AI response completed");
+
+       await regenerateFollowUpQuestion(currentInput, finalResponse);
+     } catch (error) {
+       addLogEntry(`Error in chat response: ${error}`);
+       setMessages((prevMessages) => [
+         ...prevMessages,
+         {
+           role: "assistant",
+           content: "Sorry, I encountered an error while generating a response.",
+         },
+       ]);
+     } finally {
+       setGenerationState({
+         ...generationState,
+         isGeneratingResponse: false,
+       });
+     }
+   };
+
+   const handleKeyDown = (event: KeyboardEvent<HTMLTextAreaElement>) => {
+     handleEnterKeyDown(event, settings, handleSend);
+   };
+
+   return (
+     <Card withBorder shadow="sm" radius="md">
+       <Card.Section withBorder inheritPadding py="xs">
+         <ChatHeader messages={messages} />
+       </Card.Section>
+       <Stack gap="md" pt="md">
+         <MessageList
+           messages={
+             generationState.isGeneratingResponse
+               ? [...messages, { role: "assistant", content: streamedResponse }]
+               : messages
+           }
+         />
+         <ChatInputArea onKeyDown={handleKeyDown} handleSend={handleSend} />
+       </Stack>
+     </Card>
+   );
+ }
client/components/AiResponse/CopyIconButton.tsx ADDED
@@ -0,0 +1,32 @@
+ import { ActionIcon, CopyButton, Tooltip } from "@mantine/core";
+ import { IconCheck, IconCopy } from "@tabler/icons-react";
+
+ interface CopyIconButtonProps {
+   value: string;
+   tooltipLabel?: string;
+ }
+
+ export default function CopyIconButton({
+   value,
+   tooltipLabel = "Copy",
+ }: CopyIconButtonProps) {
+   return (
+     <CopyButton value={value} timeout={2000}>
+       {({ copied, copy }) => (
+         <Tooltip
+           label={copied ? "Copied" : tooltipLabel}
+           withArrow
+           position="right"
+         >
+           <ActionIcon
+             color={copied ? "teal" : "gray"}
+             variant="subtle"
+             onClick={copy}
+           >
+             {copied ? <IconCheck size={16} /> : <IconCopy size={16} />}
+           </ActionIcon>
+         </Tooltip>
+       )}
+     </CopyButton>
+   );
+ }
client/components/AiResponse/EnableAiResponsePrompt.tsx ADDED
@@ -0,0 +1,80 @@
+ import {
+   ActionIcon,
+   Alert,
+   Button,
+   Flex,
+   Group,
+   Popover,
+   Stack,
+   Text,
+ } from "@mantine/core";
+ import { IconCheck, IconInfoCircle, IconX } from "@tabler/icons-react";
+
+ interface EnableAiResponsePromptProps {
+   onAccept: () => void;
+   onDecline: () => void;
+ }
+
+ export default function EnableAiResponsePrompt({
+   onAccept,
+   onDecline,
+ }: EnableAiResponsePromptProps) {
+   const helpContent = (
+     <Stack gap="xs" p="xs">
+       <Text size="sm">
+         MiniSearch is a web-searching app with an integrated AI assistant.
+       </Text>
+       <Text size="sm">
+         With AI Responses enabled, it will generate summaries and answer
+         questions based on search results.
+       </Text>
+       <Text size="sm">
+         If disabled, it will function as a classic web search tool.
+       </Text>
+       <Text size="sm" c="dimmed" component="em">
+         You can toggle this feature at any time through the Menu.
+       </Text>
+     </Stack>
+   );
+
+   return (
+     <Alert variant="light" color="blue" p="xs">
+       <Flex align="center" gap="xs">
+         <Text fw={500}>Enable AI Responses?</Text>
+         <Popover
+           width={300}
+           styles={{ dropdown: { maxWidth: "92vw" } }}
+           position="bottom"
+           withArrow
+           shadow="md"
+         >
+           <Popover.Target>
+             <ActionIcon variant="subtle" color="blue" size="sm">
+               <IconInfoCircle size="1rem" />
+             </ActionIcon>
+           </Popover.Target>
+           <Popover.Dropdown>{helpContent}</Popover.Dropdown>
+         </Popover>
+         <div style={{ flex: 1 }} />
+         <Group>
+           <Button
+             variant="subtle"
+             color="gray"
+             leftSection={<IconX size="1rem" />}
+             onClick={onDecline}
+             size="xs"
+           >
+             No, thanks
+           </Button>
+           <Button
+             leftSection={<IconCheck size="1rem" />}
+             onClick={onAccept}
+             size="xs"
+           >
+             Yes, please
+           </Button>
+         </Group>
+       </Flex>
+     </Alert>
+   );
+ }
client/components/AiResponse/ExpandableLink.tsx ADDED
@@ -0,0 +1,123 @@
+ import type { MantineTheme } from "@mantine/core";
+ import { Button } from "@mantine/core";
+ import React from "react";
+
+ interface ExpandableLinkProps {
+   href: string;
+   children: React.ReactNode;
+ }
+
+ export default function ExpandableLink({
+   href,
+   children,
+ }: ExpandableLinkProps) {
+   const childContent = children?.toString() || "";
+   const firstChar = childContent.charAt(0);
+   const [isExpanded, setIsExpanded] = React.useState(true);
+   const timerRef = React.useRef<number | null>(null);
+
+   React.useEffect(() => {
+     timerRef.current = window.setTimeout(() => {
+       setIsExpanded(false);
+       timerRef.current = null;
+     }, 3000);
+
+     return () => {
+       if (timerRef.current) {
+         clearTimeout(timerRef.current);
+       }
+     };
+   }, []);
+
+   const handleMouseEnter = () => {
+     if (timerRef.current) {
+       clearTimeout(timerRef.current);
+       timerRef.current = null;
+     }
+     setIsExpanded(true);
+   };
+
+   const handleMouseLeave = () => {
+     timerRef.current = window.setTimeout(() => {
+       setIsExpanded(false);
+       timerRef.current = null;
+     }, 3000);
+   };
+
+   const fullTextRef = React.useRef<HTMLDivElement>(null);
+   const [fullTextWidth, setFullTextWidth] = React.useState(0);
+
+   React.useEffect(() => {
+     const measureText = () => {
+       if (fullTextRef.current) {
+         setFullTextWidth(fullTextRef.current.scrollWidth);
+       }
+     };
+
+     measureText();
+
+     window.addEventListener("resize", measureText);
+     return () => {
+       window.removeEventListener("resize", measureText);
+     };
+   }, []);
+
+   return (
+     <Button
+       component="a"
+       href={href}
+       target="_blank"
+       rel="nofollow noopener noreferrer"
+       variant="light"
+       color="gray"
+       size="compact-xs"
+       radius="xl"
+       style={(theme: MantineTheme) => ({
+         textDecoration: "none",
+         transform: "translateY(-2px)",
+         overflow: "hidden",
+         position: "relative",
+         width: isExpanded ? `${fullTextWidth + theme.spacing.md}px` : "2em",
+         transition: "width 0.3s ease-in-out",
+         textAlign: "center",
+       })}
+       onMouseEnter={handleMouseEnter}
+       onMouseLeave={handleMouseLeave}
+       onFocus={handleMouseEnter}
+       onBlur={handleMouseLeave}
+     >
+       <span
+         style={{
+           position: "absolute",
+           top: 0,
+           left: 0,
+           right: 0,
+           bottom: 0,
+           display: "flex",
+           alignItems: "center",
+           justifyContent: "center",
+           opacity: isExpanded ? 0 : 1,
+           transition: "opacity 0.2s ease-in-out",
+         }}
+       >
+         {firstChar}
+       </span>
+       <span
+         ref={fullTextRef}
+         style={{
+           opacity: isExpanded ? 1 : 0,
+           transition: "opacity 0.3s ease-in-out",
+           visibility: isExpanded ? "visible" : "hidden",
+           whiteSpace: "nowrap",
+           display: "flex",
+           alignItems: "center",
+           justifyContent: "center",
+           height: "100%",
+           position: "relative",
+         }}
+       >
+         {children}
+       </span>
+     </Button>
+   );
+ }
client/components/AiResponse/FormattedMarkdown.tsx ADDED
@@ -0,0 +1,41 @@
+ import { TypographyStylesProvider } from "@mantine/core";
+ import { useReasoningContent } from "./hooks/useReasoningContent";
+ import MarkdownRenderer from "./MarkdownRenderer";
+ import ReasoningSection from "./ReasoningSection";
+
+ interface FormattedMarkdownProps {
+   children: string;
+   className?: string;
+   enableCopy?: boolean;
+ }
+
+ export default function FormattedMarkdown({
+   children,
+   className = "",
+   enableCopy = true,
+ }: FormattedMarkdownProps) {
+   const { reasoningContent, mainContent, isGenerating } =
+     useReasoningContent(children);
+
+   if (!children) {
+     return null;
+   }
+
+   return (
+     <TypographyStylesProvider p="lg">
+       {reasoningContent && (
+         <ReasoningSection
+           content={reasoningContent}
+           isGenerating={isGenerating}
+         />
+       )}
+       {!isGenerating && mainContent && (
+         <MarkdownRenderer
+           content={mainContent}
+           enableCopy={enableCopy}
+           className={className}
+         />
+       )}
+     </TypographyStylesProvider>
+   );
+ }
client/components/AiResponse/LoadingModelContent.tsx ADDED
@@ -0,0 +1,40 @@
+ import { Card, Group, Progress, Stack, Text } from "@mantine/core";
+
+ export default function LoadingModelContent({
+   modelLoadingProgress,
+   modelSizeInMegabytes,
+ }: {
+   modelLoadingProgress: number;
+   modelSizeInMegabytes: number;
+ }) {
+   const isLoadingStarting = modelLoadingProgress === 0;
+   const isLoadingComplete = modelLoadingProgress === 100;
+   const percent =
+     isLoadingComplete || isLoadingStarting ? 100 : modelLoadingProgress;
+   const strokeColor = percent === 100 ? "#52c41a" : "#3385ff";
+   const downloadedSize = (modelSizeInMegabytes * modelLoadingProgress) / 100;
+   const sizeText = `${downloadedSize.toFixed(0)} MB / ${modelSizeInMegabytes.toFixed(0)} MB`;
+
+   return (
+     <Card withBorder shadow="sm" radius="md">
+       <Card.Section withBorder inheritPadding py="xs">
+         <Text fw={500}>Loading AI...</Text>
+       </Card.Section>
+       <Card.Section withBorder inheritPadding py="md">
+         <Stack gap="xs">
+           <Progress color={strokeColor} value={percent} animated />
+           {!isLoadingStarting && (
+             <Group justify="space-between">
+               <Text size="sm" c="dimmed">
+                 {sizeText}
+               </Text>
+               <Text size="sm" c="dimmed">
+                 {percent.toFixed(1)}%
+               </Text>
+             </Group>
+           )}
+         </Stack>
+       </Card.Section>
+     </Card>
+   );
+ }
client/components/AiResponse/MarkdownRenderer.tsx ADDED
@@ -0,0 +1,93 @@
+import { CodeHighlight } from "@mantine/code-highlight";
+import { Box, Code, Divider } from "@mantine/core";
+import React from "react";
+import { ErrorBoundary } from "react-error-boundary";
+import Markdown from "react-markdown";
+import rehypeExternalLinks from "rehype-external-links";
+import remarkGfm from "remark-gfm";
+import ExpandableLink from "./ExpandableLink";
+
+interface MarkdownRendererProps {
+  content: string;
+  enableCopy?: boolean;
+  className?: string;
+}
+
+export default function MarkdownRenderer({
+  content,
+  enableCopy = true,
+  className = "",
+}: MarkdownRendererProps) {
+  if (!content) {
+    return null;
+  }
+
+  return (
+    <Box className={className}>
+      <Markdown
+        remarkPlugins={[remarkGfm]}
+        rehypePlugins={[
+          [
+            rehypeExternalLinks,
+            { target: "_blank", rel: ["nofollow", "noopener", "noreferrer"] },
+          ],
+        ]}
+        components={{
+          a(props) {
+            const { href, children } = props;
+            return (
+              <ExpandableLink href={href || ""}>{children}</ExpandableLink>
+            );
+          },
+          li(props) {
+            const { children } = props;
+            const processedChildren = React.Children.map(children, (child) => {
+              if (React.isValidElement(child) && child.type === "p") {
+                return (child.props as { children: React.ReactNode }).children;
+              }
+              return child;
+            });
+            return <li>{processedChildren}</li>;
+          },
+          hr() {
+            return <Divider variant="dashed" my="md" />;
+          },
+          pre(props) {
+            return <>{props.children}</>;
+          },
+          code(props) {
+            const { children, className, node } = props;
+            const codeContent = children?.toString().replace(/\n$/, "") ?? "";
+            let language = "text";
+
+            if (className) {
+              const languageMatch = /language-(\w+)/.exec(className);
+              if (languageMatch) language = languageMatch[1];
+            }
+
+            if (
+              language === "text" &&
+              node?.position?.end.line === node?.position?.start.line
+            ) {
+              return <Code>{codeContent}</Code>;
+            }
+
+            return (
+              <ErrorBoundary fallback={<Code block>{codeContent}</Code>}>
+                <CodeHighlight
+                  code={codeContent}
+                  language={language}
+                  radius="md"
+                  withCopyButton={enableCopy}
+                  mb="xs"
+                />
+              </ErrorBoundary>
+            );
+          },
+        }}
+      >
+        {content}
+      </Markdown>
+    </Box>
+  );
+}
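Note on the `code` renderer above: react-markdown invokes it for both inline and fenced code, so the component falls back to inline rendering when there is no language class and the node spans a single source line. A minimal stand-alone sketch of that heuristic (the names here are illustrative, not part of the diff):

```ts
// Hypothetical isolated version of the inline/block decision used above.
interface NodePosition {
  start: { line: number };
  end: { line: number };
}

function isInlineCode(language: string, position?: NodePosition): boolean {
  // Inline code carries no language class and occupies a single source line.
  return (
    language === "text" &&
    position !== undefined &&
    position.end.line === position.start.line
  );
}

// isInlineCode("text", { start: { line: 3 }, end: { line: 3 } }) => true (render <Code>)
// isInlineCode("ts", { start: { line: 3 }, end: { line: 5 } })   => false (render <CodeHighlight>)
```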
client/components/AiResponse/MessageList.tsx ADDED
@@ -0,0 +1,59 @@
+import { Paper, Stack } from "@mantine/core";
+import type { ChatMessage } from "gpt-tokenizer/GptEncoding";
+import { memo } from "react";
+import FormattedMarkdown from "./FormattedMarkdown";
+
+interface MessageListProps {
+  messages: ChatMessage[];
+}
+
+interface MessageProps {
+  message: ChatMessage;
+  index: number;
+}
+
+const Message = memo(
+  function Message({ message, index }: MessageProps) {
+    return (
+      <Paper
+        key={`${message.role}-${index}`}
+        shadow="xs"
+        radius="xl"
+        p="sm"
+        maw="90%"
+        style={{
+          alignSelf: message.role === "user" ? "flex-end" : "flex-start",
+        }}
+      >
+        <FormattedMarkdown>{message.content}</FormattedMarkdown>
+      </Paper>
+    );
+  },
+  (prevProps, nextProps) => {
+    return (
+      prevProps.message.content === nextProps.message.content &&
+      prevProps.message.role === nextProps.message.role
+    );
+  },
+);
+
+const MessageList = memo(function MessageList({ messages }: MessageListProps) {
+  if (messages.length <= 2) return null;
+
+  return (
+    <Stack gap="md">
+      {messages
+        .slice(2)
+        .filter((message) => message.content.length > 0)
+        .map((message, index) => (
+          <Message
+            key={`${message.role}-${index}`}
+            message={message}
+            index={index}
+          />
+        ))}
+    </Stack>
+  );
+});
+
+export default MessageList;
client/components/AiResponse/PreparingContent.tsx ADDED
@@ -0,0 +1,33 @@
+import { Card, Skeleton, Stack, Text } from "@mantine/core";
+
+export default function PreparingContent({
+  textGenerationState,
+}: {
+  textGenerationState: string;
+}) {
+  const getStateText = () => {
+    if (textGenerationState === "awaitingSearchResults") {
+      return "Awaiting search results...";
+    }
+    if (textGenerationState === "preparingToGenerate") {
+      return "Preparing AI response...";
+    }
+    return null;
+  };
+
+  return (
+    <Card withBorder shadow="sm" radius="md">
+      <Card.Section withBorder inheritPadding py="xs">
+        <Text fw={500}>{getStateText()}</Text>
+      </Card.Section>
+      <Card.Section withBorder inheritPadding py="md">
+        <Stack>
+          <Skeleton height={8} radius="xl" />
+          <Skeleton height={8} width="70%" radius="xl" />
+          <Skeleton height={8} radius="xl" />
+          <Skeleton height={8} width="43%" radius="xl" />
+        </Stack>
+      </Card.Section>
+    </Card>
+  );
+}
client/components/AiResponse/ReasoningSection.tsx ADDED
@@ -0,0 +1,71 @@
+import {
+  Box,
+  Collapse,
+  Flex,
+  Group,
+  Loader,
+  Text,
+  UnstyledButton,
+} from "@mantine/core";
+import { IconChevronDown, IconChevronRight } from "@tabler/icons-react";
+import { useState } from "react";
+import MarkdownRenderer from "./MarkdownRenderer";
+
+interface ReasoningSectionProps {
+  content: string;
+  isGenerating?: boolean;
+}
+
+export default function ReasoningSection({
+  content,
+  isGenerating = false,
+}: ReasoningSectionProps) {
+  const [isOpen, setIsOpen] = useState(false);
+
+  return (
+    <Box mb="xs">
+      <UnstyledButton
+        onClick={() => setIsOpen(!isOpen)}
+        style={(theme) => ({
+          width: "100%",
+          padding: theme.spacing.xs,
+          borderStartStartRadius: theme.radius.md,
+          borderStartEndRadius: theme.radius.md,
+          borderEndEndRadius: !isOpen ? theme.radius.md : 0,
+          borderEndStartRadius: !isOpen ? theme.radius.md : 0,
+          backgroundColor: theme.colors.dark[8],
+          "&:hover": {
+            backgroundColor: theme.colors.dark[5],
+          },
+          cursor: isOpen ? "zoom-out" : "zoom-in",
+        })}
+      >
+        <Group gap={3}>
+          {isOpen ? (
+            <IconChevronDown size={16} />
+          ) : (
+            <IconChevronRight size={16} />
+          )}
+          <Flex align="center" gap={6}>
+            <Text size="sm" c="dimmed" fs="italic" span>
+              {isGenerating ? "Thinking" : "Thought Process"}
+            </Text>
+            {isGenerating && <Loader size="sm" color="dimmed" type="dots" />}
+          </Flex>
+        </Group>
+      </UnstyledButton>
+      <Collapse in={isOpen}>
+        <Box
+          style={(theme) => ({
+            backgroundColor: theme.colors.dark[8],
+            padding: theme.spacing.sm,
+            borderBottomLeftRadius: theme.radius.md,
+            borderBottomRightRadius: theme.radius.md,
+          })}
+        >
+          <MarkdownRenderer content={content} enableCopy={false} />
+        </Box>
+      </Collapse>
+    </Box>
+  );
+}
client/components/AiResponse/WebLlmModelSelect.tsx ADDED
@@ -0,0 +1,81 @@
+import { type ComboboxItem, Select } from "@mantine/core";
+import { prebuiltAppConfig } from "@mlc-ai/web-llm";
+import { useCallback, useEffect, useState } from "react";
+import { isF16Supported } from "../../modules/webGpu";
+
+export default function WebLlmModelSelect({
+  value,
+  onChange,
+}: {
+  value: string;
+  onChange: (value: string) => void;
+}) {
+  const [webGpuModels] = useState<ComboboxItem[]>(() => {
+    const models = prebuiltAppConfig.model_list
+      .filter((model) => {
+        const isSmall = isSmallModel(model);
+        const suffix = getModelSuffix(isF16Supported, isSmall);
+        return model.model_id.endsWith(suffix);
+      })
+      .sort((a, b) => (a.vram_required_MB ?? 0) - (b.vram_required_MB ?? 0))
+      .map((model) => {
+        const modelSizeInMegabytes =
+          Math.round(model.vram_required_MB ?? 0) || "N/A";
+        const isSmall = isSmallModel(model);
+        const suffix = getModelSuffix(isF16Supported, isSmall);
+        const modelName = model.model_id.replace(suffix, "");
+
+        return {
+          label: `${modelSizeInMegabytes} MB • ${modelName}`,
+          value: model.model_id,
+        };
+      });
+
+    return models;
+  });
+
+  useEffect(() => {
+    const isCurrentModelValid = webGpuModels.some(
+      (model) => model.value === value,
+    );
+
+    if (!isCurrentModelValid && webGpuModels.length > 0) {
+      onChange(webGpuModels[0].value);
+    }
+  }, [onChange, webGpuModels, value]);
+
+  const handleChange = useCallback(
+    (value: string | null) => {
+      if (value) onChange(value);
+    },
+    [onChange],
+  );
+
+  return (
+    <Select
+      value={value}
+      onChange={handleChange}
+      label="AI Model"
+      description="Select the model to use for AI responses."
+      data={webGpuModels}
+      allowDeselect={false}
+      searchable
+    />
+  );
+}
+
+type ModelConfig = (typeof prebuiltAppConfig.model_list)[number];
+
+const smallModels = ["SmolLM2-135M", "SmolLM2-360M"] as const;
+
+function isSmallModel(model: ModelConfig) {
+  return smallModels.some((smallModel) =>
+    model.model_id.startsWith(smallModel),
+  );
+}
+
+function getModelSuffix(isF16: boolean, isSmall: boolean) {
+  if (isSmall) return isF16 ? "-q0f16-MLC" : "-q0f32-MLC";
+
+  return isF16 ? "-q4f16_1-MLC" : "-q4f32_1-MLC";
+}
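Note: the helpers above pair each hardware profile with a quantization suffix, so the select only lists variants the current GPU can actually run. A quick sketch of the mapping, using the same `getModelSuffix` defined in this file (return values taken directly from the code above):

```ts
// Expected suffix per (f16 support, small model) combination:
getModelSuffix(true, true); // "-q0f16-MLC"   (small model, f16 shaders available)
getModelSuffix(false, true); // "-q0f32-MLC"  (small model, f32 fallback)
getModelSuffix(true, false); // "-q4f16_1-MLC" (regular model, f16)
getModelSuffix(false, false); // "-q4f32_1-MLC" (regular model, f32)
```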
client/components/AiResponse/WllamaModelSelect.tsx ADDED
@@ -0,0 +1,42 @@
+import { type ComboboxItem, Select } from "@mantine/core";
+import { useEffect, useState } from "react";
+import { wllamaModels } from "../../modules/wllama";
+
+export default function WllamaModelSelect({
+  value,
+  onChange,
+}: {
+  value: string;
+  onChange: (value: string) => void;
+}) {
+  const [wllamaModelOptions] = useState<ComboboxItem[]>(
+    Object.entries(wllamaModels)
+      .sort(([, a], [, b]) => a.fileSizeInMegabytes - b.fileSizeInMegabytes)
+      .map(([value, { label, fileSizeInMegabytes }]) => ({
+        label: `${fileSizeInMegabytes} MB • ${label}`,
+        value,
+      })),
+  );
+
+  useEffect(() => {
+    const isCurrentModelValid = wllamaModelOptions.some(
+      (model) => model.value === value,
+    );
+
+    if (!isCurrentModelValid && wllamaModelOptions.length > 0) {
+      onChange(wllamaModelOptions[0].value);
+    }
+  }, [onChange, wllamaModelOptions, value]);
+
+  return (
+    <Select
+      value={value}
+      onChange={(value) => value && onChange(value)}
+      label="AI Model"
+      description="Select the model to use for AI responses."
+      data={wllamaModelOptions}
+      allowDeselect={false}
+      searchable
+    />
+  );
+}
client/components/AiResponse/hooks/useReasoningContent.ts ADDED
@@ -0,0 +1,42 @@
+import { usePubSub } from "create-pubsub/react";
+import { useCallback } from "react";
+import { settingsPubSub } from "../../../modules/pubSub";
+
+export function useReasoningContent(text: string) {
+  const [settings] = usePubSub(settingsPubSub);
+
+  const extractReasoningAndMainContent = useCallback(
+    (text: string, startMarker: string, endMarker: string) => {
+      if (!text)
+        return { reasoningContent: "", mainContent: "", isGenerating: false };
+
+      if (!text.trim().startsWith(startMarker))
+        return { reasoningContent: "", mainContent: text, isGenerating: false };
+
+      const endIndex = text.indexOf(endMarker);
+
+      if (endIndex === -1) {
+        return {
+          reasoningContent: text.slice(startMarker.length),
+          mainContent: "",
+          isGenerating: true,
+        };
+      }
+
+      return {
+        reasoningContent: text.slice(startMarker.length, endIndex),
+        mainContent: text.slice(endIndex + endMarker.length),
+        isGenerating: false,
+      };
+    },
+    [],
+  );
+
+  const result = extractReasoningAndMainContent(
+    text,
+    settings.reasoningStartMarker,
+    settings.reasoningEndMarker,
+  );
+
+  return result;
+}
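Note: this hook splits a streamed response on configurable markers, and `isGenerating` stays true until the end marker arrives. A sketch of the three cases, assuming the settings hold `<think>` and `</think>` as markers (the actual values come from `settings.reasoningStartMarker` / `settings.reasoningEndMarker`):

```ts
// Assuming startMarker = "<think>" and endMarker = "</think>":
//
// 1. No start marker: everything is main content.
//    "Hello"
//    => { reasoningContent: "", mainContent: "Hello", isGenerating: false }
//
// 2. Start marker but no end marker yet: the model is still thinking.
//    "<think>Let me see"
//    => { reasoningContent: "Let me see", mainContent: "", isGenerating: true }
//
// 3. Both markers present: reasoning and answer are separated.
//    "<think>Step 1</think>Answer"
//    => { reasoningContent: "Step 1", mainContent: "Answer", isGenerating: false }
```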
client/components/App/App.tsx ADDED
@@ -0,0 +1,100 @@
+import { MantineProvider } from "@mantine/core";
+import { Route, Switch } from "wouter";
+import "@mantine/core/styles.css";
+import { Notifications } from "@mantine/notifications";
+import { usePubSub } from "create-pubsub/react";
+import { lazy, useEffect, useState } from "react";
+import { addLogEntry } from "../../modules/logEntries";
+import { settingsPubSub } from "../../modules/pubSub";
+import { defaultSettings } from "../../modules/settings";
+import "@mantine/notifications/styles.css";
+import { verifyStoredAccessKey } from "../../modules/accessKey";
+import MainPage from "../Pages/Main/MainPage";
+
+const AccessPage = lazy(() => import("../Pages/AccessPage"));
+
+export function App() {
+  useInitializeSettings();
+  const { hasValidatedAccessKey, isCheckingStoredKey, setValidatedAccessKey } =
+    useAccessKeyValidation();
+
+  if (isCheckingStoredKey) {
+    return null;
+  }
+
+  return (
+    <MantineProvider defaultColorScheme="dark">
+      <Notifications />
+      <Switch>
+        <Route path="/">
+          {VITE_ACCESS_KEYS_ENABLED && !hasValidatedAccessKey ? (
+            <AccessPage onAccessKeyValid={() => setValidatedAccessKey(true)} />
+          ) : (
+            <MainPage />
+          )}
+        </Route>
+      </Switch>
+    </MantineProvider>
+  );
+}
+
+/**
+ * A custom React hook that initializes the application settings.
+ *
+ * @returns The initialized settings object.
+ *
+ * @remarks
+ * This hook uses the `usePubSub` hook to access and update the settings state.
+ * It initializes the settings by merging the default settings with any existing settings.
+ * The initialization is performed once when the component mounts.
+ */
+function useInitializeSettings() {
+  const [settings, setSettings] = usePubSub(settingsPubSub);
+  const [state, setState] = useState({
+    settingsInitialized: false,
+  });
+
+  useEffect(() => {
+    if (state.settingsInitialized) return;
+
+    setSettings({ ...defaultSettings, ...settings });
+
+    setState({ settingsInitialized: true });
+
+    addLogEntry("Settings initialized");
+  }, [settings, setSettings, state.settingsInitialized]);
+
+  return settings;
+}
+
+/**
+ * A custom React hook that validates the stored access key on mount.
+ *
+ * @returns An object containing the validation state and loading state
+ */
+function useAccessKeyValidation() {
+  const [state, setState] = useState({
+    hasValidatedAccessKey: false,
+    isCheckingStoredKey: true,
+  });
+
+  useEffect(() => {
+    async function checkStoredAccessKey() {
+      if (VITE_ACCESS_KEYS_ENABLED) {
+        const isValid = await verifyStoredAccessKey();
+        if (isValid)
+          setState((prev) => ({ ...prev, hasValidatedAccessKey: true }));
+      }
+      setState((prev) => ({ ...prev, isCheckingStoredKey: false }));
+    }
+
+    checkStoredAccessKey();
+  }, []);
+
+  return {
+    hasValidatedAccessKey: state.hasValidatedAccessKey,
+    isCheckingStoredKey: state.isCheckingStoredKey,
+    setValidatedAccessKey: (value: boolean) =>
+      setState((prev) => ({ ...prev, hasValidatedAccessKey: value })),
+  };
+}
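Note on `useInitializeSettings`: spreading the persisted settings over the defaults means newly introduced default keys get filled in while previously stored values win. A small sketch of the merge semantics (the values are illustrative, not taken from the repo):

```ts
// Hypothetical example of the `{ ...defaultSettings, ...settings }` merge:
const defaults = { enableAiResponse: true, cpuThreads: 2 };
const stored = { enableAiResponse: false };

// Stored values override defaults; keys missing from storage fall back.
const merged = { ...defaults, ...stored };
// => { enableAiResponse: false, cpuThreads: 2 }
```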
client/components/Logs/LogsModal.tsx ADDED
@@ -0,0 +1,136 @@
+import {
+  Alert,
+  Button,
+  Center,
+  CloseButton,
+  Group,
+  Modal,
+  Pagination,
+  Table,
+  TextInput,
+  Tooltip,
+} from "@mantine/core";
+import { IconInfoCircle, IconSearch } from "@tabler/icons-react";
+import { usePubSub } from "create-pubsub/react";
+import { useCallback, useEffect, useMemo, useState } from "react";
+import { logEntriesPubSub } from "../../modules/logEntries";
+
+export default function LogsModal({
+  opened,
+  onClose,
+}: {
+  opened: boolean;
+  onClose: () => void;
+}) {
+  const [logEntries] = usePubSub(logEntriesPubSub);
+
+  const [page, setPage] = useState(1);
+  const [filterText, setFilterText] = useState("");
+
+  const logEntriesPerPage = 5;
+
+  const filteredLogEntries = useMemo(() => {
+    if (!filterText) return logEntries;
+    const lowerCaseFilter = filterText.toLowerCase();
+    return logEntries.filter((entry) =>
+      entry.message.toLowerCase().includes(lowerCaseFilter),
+    );
+  }, [logEntries, filterText]);
+
+  const logEntriesFromCurrentPage = useMemo(
+    () =>
+      filteredLogEntries.slice(
+        (page - 1) * logEntriesPerPage,
+        page * logEntriesPerPage,
+      ),
+    [filteredLogEntries, page],
+  );
+
+  useEffect(() => {
+    void filterText;
+    setPage(1);
+  }, [filterText]);
+
+  const downloadLogsAsJson = useCallback(() => {
+    const jsonString = JSON.stringify(logEntries, null, 2);
+    const blob = new Blob([jsonString], { type: "application/json" });
+    const url = URL.createObjectURL(blob);
+    const link = document.createElement("a");
+    link.href = url;
+    link.download = "logs.json";
+    document.body.appendChild(link);
+    link.click();
+    document.body.removeChild(link);
+    URL.revokeObjectURL(url);
+  }, [logEntries]);
+
+  return (
+    <Modal opened={opened} onClose={onClose} size="xl" title="Logs">
+      <Alert variant="light" color="blue" icon={<IconInfoCircle />} mb="md">
+        <Group justify="space-between" align="center">
+          <span>
+            This information is stored solely in your browser for personal use.
+            It isn't sent automatically and is retained for debugging purposes
+            should you need to{" "}
+            <a
+              href="https://github.com/felladrin/MiniSearch/issues/new?labels=bug&template=bug_report.yml"
+              target="_blank"
+              rel="noopener noreferrer"
+            >
+              report a bug
+            </a>
+            .
+          </span>
+          <Button onClick={downloadLogsAsJson} size="xs" data-autofocus>
+            Download Logs
+          </Button>
+        </Group>
+      </Alert>
+      <TextInput
+        placeholder="Filter logs..."
+        mb="md"
+        leftSection={<IconSearch size={16} />}
+        value={filterText}
+        onChange={(event) => setFilterText(event.currentTarget.value)}
+        rightSection={
+          filterText ? (
+            <Tooltip label="Clear filter" withArrow>
+              <CloseButton
+                size="sm"
+                onClick={() => setFilterText("")}
+                aria-label="Clear filter"
+              />
+            </Tooltip>
+          ) : null
+        }
+      />
+      <Table striped highlightOnHover withTableBorder>
+        <Table.Thead>
+          <Table.Tr>
+            <Table.Th style={{ width: 80 }}>Time</Table.Th>
+            <Table.Th>Message</Table.Th>
+          </Table.Tr>
+        </Table.Thead>
+        <Table.Tbody>
+          {logEntriesFromCurrentPage.map((entry, index) => (
+            <Table.Tr key={`${entry.timestamp}-${index}`}>
+              <Table.Td>
+                {new Date(entry.timestamp).toLocaleTimeString()}
+              </Table.Td>
+              <Table.Td>{entry.message}</Table.Td>
+            </Table.Tr>
+          ))}
+        </Table.Tbody>
+      </Table>
+      <Center>
+        <Pagination
+          total={Math.ceil(filteredLogEntries.length / logEntriesPerPage)}
+          value={page}
+          onChange={setPage}
+          size="sm"
+          mt="md"
+        />
+      </Center>
+    </Modal>
+  );
+}
client/components/Logs/ShowLogsButton.tsx ADDED
@@ -0,0 +1,42 @@
+import { Button, Center, Loader, Stack, Text } from "@mantine/core";
+import { lazy, Suspense, useState } from "react";
+import { addLogEntry } from "../../modules/logEntries";
+
+const LogsModal = lazy(() => import("./LogsModal"));
+
+export default function ShowLogsButton() {
+  const [isLogsModalOpen, setLogsModalOpen] = useState(false);
+
+  const handleShowLogsButtonClick = () => {
+    addLogEntry("User opened the logs modal");
+    setLogsModalOpen(true);
+  };
+
+  const handleCloseLogsButtonClick = () => {
+    addLogEntry("User closed the logs modal");
+    setLogsModalOpen(false);
+  };
+
+  return (
+    <Stack gap="xs">
+      <Suspense
+        fallback={
+          <Center>
+            <Loader color="gray" type="bars" />
+          </Center>
+        }
+      >
+        <Button size="sm" onClick={handleShowLogsButtonClick} variant="default">
+          Show logs
+        </Button>
+        <Text size="xs" c="dimmed">
+          View session logs for debugging.
+        </Text>
+        <LogsModal
+          opened={isLogsModalOpen}
+          onClose={handleCloseLogsButtonClick}
+        />
+      </Suspense>
+    </Stack>
+  );
+}
client/components/Pages/AccessPage.tsx ADDED
@@ -0,0 +1,70 @@
+import { Button, Container, Stack, TextInput, Title } from "@mantine/core";
+import { type FormEvent, useState } from "react";
+import { validateAccessKey } from "../../modules/accessKey";
+import { addLogEntry } from "../../modules/logEntries";
+
+interface AccessPageState {
+  accessKey: string;
+  error: string;
+}
+
+export default function AccessPage({
+  onAccessKeyValid,
+}: {
+  onAccessKeyValid: () => void;
+}) {
+  const [state, setState] = useState<AccessPageState>({
+    accessKey: "",
+    error: "",
+  });
+
+  const handleSubmit = async (formEvent: FormEvent<HTMLFormElement>) => {
+    formEvent.preventDefault();
+    setState((prev) => ({ ...prev, error: "" }));
+    try {
+      const isValid = await validateAccessKey(state.accessKey);
+      if (isValid) {
+        addLogEntry("Valid access key entered");
+        onAccessKeyValid();
+      } else {
+        setState((prev) => ({ ...prev, error: "Invalid access key" }));
+        addLogEntry("Invalid access key attempt");
+      }
+    } catch (error) {
+      setState((prev) => ({ ...prev, error: "Error validating access key" }));
+      addLogEntry(`Error validating access key: ${error}`);
+    }
+  };
+
+  return (
+    <Container size="xs">
+      <Stack p="lg" mih="100vh" justify="center">
+        <Title order={2} ta="center">
+          Access Restricted
+        </Title>
+        <form onSubmit={handleSubmit}>
+          <Stack gap="xs">
+            <TextInput
+              value={state.accessKey}
+              onChange={({ target }) =>
+                setState((prev) => ({ ...prev, accessKey: target.value }))
+              }
+              placeholder="Enter your access key to continue"
+              required
+              autoFocus
+              error={state.error}
+              styles={{
+                input: {
+                  textAlign: "center",
+                },
+              }}
+            />
+            <Button size="xs" type="submit">
+              Submit
+            </Button>
+          </Stack>
+        </form>
+      </Stack>
+    </Container>
+  );
+}
client/components/Pages/Main/MainPage.tsx ADDED
@@ -0,0 +1,81 @@
+import { Container, Stack } from "@mantine/core";
+import { usePubSub } from "create-pubsub/react";
+import { lazy, Suspense } from "react";
+import {
+  imageSearchStatePubSub,
+  queryPubSub,
+  settingsPubSub,
+  textGenerationStatePubSub,
+  textSearchStatePubSub,
+} from "../../../modules/pubSub";
+import { searchAndRespond } from "../../../modules/textGeneration";
+import SearchForm from "../../Search/Form/SearchForm";
+import MenuButton from "./Menu/MenuButton";
+
+const AiResponseSection = lazy(
+  () => import("../../AiResponse/AiResponseSection"),
+);
+const SearchResultsSection = lazy(
+  () => import("../../Search/Results/SearchResultsSection"),
+);
+const EnableAiResponsePrompt = lazy(
+  () => import("../../AiResponse/EnableAiResponsePrompt"),
+);
+
+export default function MainPage() {
+  const [query, updateQuery] = usePubSub(queryPubSub);
+  const [textSearchState] = usePubSub(textSearchStatePubSub);
+  const [imageSearchState] = usePubSub(imageSearchStatePubSub);
+  const [textGenerationState] = usePubSub(textGenerationStatePubSub);
+  const [settings, setSettings] = usePubSub(settingsPubSub);
+
+  const isQueryEmpty = query.length === 0;
+
+  return (
+    <Container>
+      <Stack py="md" mih="100vh" justify={isQueryEmpty ? "center" : undefined}>
+        <SearchForm
+          query={query}
+          updateQuery={updateQuery}
+          additionalButtons={<MenuButton />}
+        />
+        {!isQueryEmpty && (
+          <>
+            {settings.showEnableAiResponsePrompt && (
+              <Suspense>
+                <EnableAiResponsePrompt
+                  onAccept={() => {
+                    setSettings({
+                      ...settings,
+                      showEnableAiResponsePrompt: false,
+                      enableAiResponse: true,
+                    });
+                    searchAndRespond();
+                  }}
+                  onDecline={() => {
+                    setSettings({
+                      ...settings,
+                      showEnableAiResponsePrompt: false,
+                      enableAiResponse: false,
+                    });
+                  }}
+                />
+              </Suspense>
+            )}
+            {!settings.showEnableAiResponsePrompt &&
+              textGenerationState !== "idle" && (
+                <Suspense>
+                  <AiResponseSection />
+                </Suspense>
+              )}
+            {(textSearchState !== "idle" || imageSearchState !== "idle") && (
+              <Suspense>
+                <SearchResultsSection />
+              </Suspense>
+            )}
+          </>
+        )}
+      </Stack>
+    </Container>
+  );
+}
client/components/Pages/Main/Menu/AISettings/AISettingsForm.tsx ADDED
@@ -0,0 +1,208 @@
+import { Select, Slider, Stack, Switch, Text, TextInput } from "@mantine/core";
+import { useForm } from "@mantine/form";
+import { usePubSub } from "create-pubsub/react";
+import { useMemo } from "react";
+import { settingsPubSub } from "../../../../../modules/pubSub";
+import {
+  defaultSettings,
+  inferenceTypes,
+} from "../../../../../modules/settings";
+import { isWebGPUAvailable } from "../../../../../modules/webGpu";
+import { AIParameterSlider } from "./components/AIParameterSlider";
+import { BrowserSettings } from "./components/BrowserSettings";
+import { HordeSettings } from "./components/HordeSettings";
+import { OpenAISettings } from "./components/OpenAISettings";
+import { SystemPromptInput } from "./components/SystemPromptInput";
+import { useHordeModels } from "./hooks/useHordeModels";
+import { useHordeUserInfo } from "./hooks/useHordeUserInfo";
+import { useOpenAiModels } from "./hooks/useOpenAiModels";
+
+export default function AISettingsForm() {
+  const [settings, setSettings] = usePubSub(settingsPubSub);
+  const { openAiModels, useTextInput } = useOpenAiModels(settings);
+  const hordeModels = useHordeModels(settings);
+  const hordeUserInfo = useHordeUserInfo(settings);
+
+  const form = useForm({
+    initialValues: settings,
+    onValuesChange: setSettings,
+  });
+
+  const inferenceTypeSupportsMinP =
+    (form.values.inferenceType === "browser" &&
+      (!isWebGPUAvailable || !form.values.enableWebGpu)) ||
+    form.values.inferenceType === "horde";
+
+  const penaltySliderMarks = useMemo(
+    () => [
+      { value: -2.0, label: "-2.0" },
+      { value: 0.0, label: "0" },
+      { value: 2.0, label: "2.0" },
+    ],
+    [],
+  );
+
+  const probabilitySliderMarks = useMemo(
+    () =>
+      Array.from({ length: 3 }, (_, index) => ({
+        value: index / 2,
+        label: (index / 2).toString(),
+      })),
+    [],
+  );
+
+  const searchResultsToConsiderSliderMarks = useMemo(
+    () =>
+      Array.from({ length: 7 }, (_, index) => ({
+        value: index,
+        label: index.toString(),
+      })),
+    [],
+  );
+
+  const temperatureSliderMarks = useMemo(
+    () => [
+      { value: 0, label: "0" },
+      { value: 1, label: "1" },
+      { value: 2, label: "2" },
+    ],
+    [],
+  );
+
+  return (
+    <Stack gap="md">
+      <Switch
+        label="AI Response"
+        {...form.getInputProps("enableAiResponse", { type: "checkbox" })}
+        labelPosition="left"
+        description="Enable or disable AI-generated responses to your queries. When disabled, you'll only see web search results."
+      />
+
+      {form.values.enableAiResponse && (
+        <>
+          <Stack gap="xs" mb="md">
+            <Text size="sm">Search results to consider</Text>
+            <Text size="xs" c="dimmed">
+              Determines the number of search results to consider when
+              generating AI responses. A higher value may enhance accuracy, but
+              it will also increase response time.
+            </Text>
+            <Slider
+              {...form.getInputProps("searchResultsToConsider")}
+              min={0}
+              max={6}
+              marks={searchResultsToConsiderSliderMarks}
+            />
+          </Stack>
+
+          <Select
+            {...form.getInputProps("inferenceType")}
+            label="AI Processing Location"
+            data={inferenceTypes}
+            allowDeselect={false}
+          />
+
+          {form.values.inferenceType === "openai" && (
+            <OpenAISettings
+              form={form}
+              openAiModels={openAiModels}
+              useTextInput={useTextInput}
+            />
+          )}
+
+          {form.values.inferenceType === "horde" && (
+            <HordeSettings
+              form={form}
+              hordeUserInfo={hordeUserInfo}
+              hordeModels={hordeModels}
+            />
+          )}
+
+          {form.values.inferenceType === "browser" && (
+            <BrowserSettings
+              form={form}
+              isWebGPUAvailable={isWebGPUAvailable}
+            />
+          )}
+
+          <SystemPromptInput form={form} />
+
+          <AIParameterSlider
+            label="Temperature"
+            description="Controls randomness in responses. Lower values make responses more focused and deterministic, while higher values make them more creative and diverse."
+            defaultValue={defaultSettings.inferenceTemperature}
+            {...form.getInputProps("inferenceTemperature")}
+            min={0}
+            max={2}
+            step={0.01}
+            marks={temperatureSliderMarks}
+          />
+
+          <AIParameterSlider
+            label="Top P"
+            description="Controls diversity by limiting cumulative probability of tokens. Lower values make responses more focused, while higher values allow more variety."
+            defaultValue={defaultSettings.inferenceTopP}
+            {...form.getInputProps("inferenceTopP")}
+            min={0}
+            max={1}
+            step={0.01}
+            marks={probabilitySliderMarks}
+          />
+
+          {inferenceTypeSupportsMinP && (
+            <AIParameterSlider
+              label="Min P"
+              description="Sets a minimum probability for token selection. Helps to filter out very unlikely tokens, making responses more coherent."
+              defaultValue={defaultSettings.minP}
+              {...form.getInputProps("minP")}
+              min={0}
+              max={1}
+              step={0.01}
+              marks={probabilitySliderMarks}
+            />
+          )}
+
+          <AIParameterSlider
+            label="Frequency Penalty"
+            description="Reduces repetition by penalizing tokens based on their frequency. Higher values decrease the likelihood of repeating the same information."
+            defaultValue={defaultSettings.inferenceFrequencyPenalty}
+            {...form.getInputProps("inferenceFrequencyPenalty")}
+            min={-2.0}
+            max={2.0}
+            step={0.01}
+            marks={penaltySliderMarks}
+          />
+
+          <AIParameterSlider
+            label="Presence Penalty"
+            description="Encourages new topics by penalizing tokens that have appeared. Higher values increase the model's likelihood to talk about new topics."
+            defaultValue={defaultSettings.inferencePresencePenalty}
+            {...form.getInputProps("inferencePresencePenalty")}
+            min={-2.0}
+            max={2.0}
+            step={0.01}
+            marks={penaltySliderMarks}
+          />
+
+          <Stack gap="xs" mb="md">
+            <Text size="sm">Reasoning Section Parsing</Text>
+            <Text size="xs" c="dimmed">
+              Configure how the AI's reasoning section is parsed in the
+              response.
+            </Text>
+            <Stack gap="xs">
+              <TextInput
+                {...form.getInputProps("reasoningStartMarker")}
+                description="Start Marker, indicating the start of a reasoning section."
+              />
+              <TextInput
+                {...form.getInputProps("reasoningEndMarker")}
+                description="End Marker, indicating the end of a reasoning section."
+              />
+            </Stack>
+          </Stack>
+        </>
+      )}
+    </Stack>
+  );
+}
client/components/Pages/Main/Menu/AISettings/components/AIParameterSlider.tsx ADDED
@@ -0,0 +1,17 @@
+import { Slider, Stack, Text } from "@mantine/core";
+import type { AIParameterSliderProps } from "../types";
+
+export const AIParameterSlider = ({
+  label,
+  description,
+  defaultValue,
+  ...props
+}: AIParameterSliderProps) => (
+  <Stack gap="xs" mb="md">
+    <Text size="sm">{label}</Text>
+    <Text size="xs" c="dimmed">
+      {description} Defaults to {defaultValue}.
+    </Text>
+    <Slider {...props} />
+  </Stack>
+);
client/components/Pages/Main/Menu/AISettings/components/BrowserSettings.tsx ADDED
@@ -0,0 +1,60 @@
+import { NumberInput, Skeleton, Switch } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import { lazy, Suspense } from "react";
+import type { defaultSettings } from "../../../../../../modules/settings";
+
+const WebLlmModelSelect = lazy(
+  () => import("../../../../../../components/AiResponse/WebLlmModelSelect"),
+);
+const WllamaModelSelect = lazy(
+  () => import("../../../../../../components/AiResponse/WllamaModelSelect"),
+);
+
+interface BrowserSettingsProps {
+  form: UseFormReturnType<typeof defaultSettings>;
+  isWebGPUAvailable: boolean;
+}
+
+export const BrowserSettings = ({
+  form,
+  isWebGPUAvailable,
+}: BrowserSettingsProps) => (
+  <>
+    {isWebGPUAvailable && (
+      <Switch
+        label="WebGPU"
+        {...form.getInputProps("enableWebGpu", { type: "checkbox" })}
+        labelPosition="left"
+        description="Enable or disable WebGPU usage. When disabled, the app will use the CPU instead."
+      />
+    )}
+
+    {isWebGPUAvailable && form.values.enableWebGpu ? (
+      <Suspense fallback={<Skeleton height={50} />}>
+        <WebLlmModelSelect
+          value={form.values.webLlmModelId}
+          onChange={(value: string) =>
+            form.setFieldValue("webLlmModelId", value)
+          }
+        />
+      </Suspense>
+    ) : (
+      <>
+        <Suspense fallback={<Skeleton height={50} />}>
+          <WllamaModelSelect
+            value={form.values.wllamaModelId}
+            onChange={(value: string) =>
+              form.setFieldValue("wllamaModelId", value)
+            }
+          />
+        </Suspense>
+        <NumberInput
+          label="CPU threads to use"
+          description="Number of threads to use for the AI model. Lower values will use less CPU but may take longer to respond. A value that is too high may cause the app to hang."
+          min={1}
+          {...form.getInputProps("cpuThreads")}
+        />
+      </>
+    )}
+  </>
+);
client/components/Pages/Main/Menu/AISettings/components/HordeSettings.tsx ADDED
@@ -0,0 +1,44 @@
+import { Select, TextInput } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import type { defaultSettings } from "../../../../../../modules/settings";
+import { aiHordeDefaultApiKey } from "../../../../../../modules/textGenerationWithHorde";
+import type { HordeUserInfo, ModelOption } from "../types";
+
+interface HordeSettingsProps {
+  form: UseFormReturnType<typeof defaultSettings>;
+  hordeUserInfo: HordeUserInfo | null;
+  hordeModels: ModelOption[];
+}
+
+export const HordeSettings = ({
+  form,
+  hordeUserInfo,
+  hordeModels,
+}: HordeSettingsProps) => (
+  <>
+    <TextInput
+      label="API Key"
+      description={
+        hordeUserInfo
+          ? `Logged in as ${
+              hordeUserInfo.username
+            } (${hordeUserInfo.kudos.toLocaleString()} kudos)`
+          : "By default, it's set to '0000000000' for anonymous access. However, anonymous accounts have the lowest priority when there are too many concurrent requests."
+      }
+      type="password"
+      {...form.getInputProps("hordeApiKey")}
+    />
+    {form.values.hordeApiKey.length > 0 &&
+      form.values.hordeApiKey !== aiHordeDefaultApiKey && (
+        <Select
+          label="Model"
+          description="Optional. When not selected, AI Horde will automatically choose an available model."
+          placeholder="Auto-selected"
+          data={hordeModels}
+          {...form.getInputProps("hordeModel")}
+          searchable
+          clearable
+        />
+      )}
+  </>
+);
client/components/Pages/Main/Menu/AISettings/components/OpenAISettings.tsx ADDED
@@ -0,0 +1,57 @@
+import { Group, Select, Text, TextInput } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import { IconInfoCircle } from "@tabler/icons-react";
+import type { defaultSettings } from "../../../../../../modules/settings";
+import type { ModelOption } from "../types";
+
+interface OpenAISettingsProps {
+  form: UseFormReturnType<typeof defaultSettings>;
+  openAiModels: ModelOption[];
+  useTextInput: boolean;
+}
+
+export const OpenAISettings = ({
+  form,
+  openAiModels,
+  useTextInput,
+}: OpenAISettingsProps) => (
+  <>
+    <TextInput
+      {...form.getInputProps("openAiApiBaseUrl")}
+      label="API Base URL"
+      placeholder="http://localhost:11434/v1"
+      required
+    />
+    <Group gap="xs">
+      <IconInfoCircle size={16} />
+      <Text size="xs" c="dimmed" flex={1}>
+        You may need to add{" "}
+        <em>{`${self.location.protocol}//${self.location.hostname}`}</em> to the
+        list of allowed network origins in your API server settings.
+      </Text>
+    </Group>
+    <TextInput
+      {...form.getInputProps("openAiApiKey")}
+      label="API Key"
+      type="password"
+      description="Optional, as local API servers usually do not require it."
+    />
+    {useTextInput ? (
+      <TextInput
+        {...form.getInputProps("openAiApiModel")}
+        label="API Model"
+        description="Enter the model identifier"
+      />
+    ) : (
+      <Select
+        {...form.getInputProps("openAiApiModel")}
+        label="API Model"
+        data={openAiModels}
+        description="Optional, as some API servers don't provide a model list."
+        allowDeselect={false}
+        disabled={openAiModels.length === 0}
+        searchable
+      />
+    )}
+  </>
+);
client/components/Pages/Main/Menu/AISettings/components/SystemPromptInput.tsx ADDED
@@ -0,0 +1,102 @@
+import { Text, Textarea } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import { defaultSettings } from "../../../../../../modules/settings";
+
+interface SystemPromptInputProps {
+  form: UseFormReturnType<typeof defaultSettings>;
+}
+
+export const SystemPromptInput = ({ form }: SystemPromptInputProps) => {
+  const isUsingCustomInstructions =
+    form.values.systemPrompt !== defaultSettings.systemPrompt;
+
+  const handleRestoreDefaultInstructions = () => {
+    form.setFieldValue("systemPrompt", defaultSettings.systemPrompt);
+  };
+
+  return (
+    <Textarea
+      size="sm"
+      label="Instructions for AI"
+      descriptionProps={{
+        // @ts-expect-error Mantine v7: `InputDescriptionProps` does not support `component`.
+        component: "div",
+      }}
+      description={
+        <>
+          <Text size="xs" component="span">
+            Customize instructions for the AI to tailor its responses.
+          </Text>
+          <ul>
+            <li>
+              Specify preferences
+              <ul>
+                <li>
+                  <em>"use simple language"</em>
+                </li>
+                <li>
+                  <em>"provide step-by-step explanations"</em>
+                </li>
+              </ul>
+            </li>
+            <li>
+              Set a response style
+              <ul>
+                <li>
+                  <em>"answer in a friendly tone"</em>
+                </li>
+                <li>
+                  <em>"write your response in Spanish"</em>
+                </li>
+              </ul>
+            </li>
+            <li>
+              Provide context about the audience
+              <ul>
+                <li>
+                  <em>"you're talking to a high school student"</em>
+                </li>
+                <li>
+                  <em>
+                    "consider that your audience is composed of professionals in
+                    the field of graphic design"
+                  </em>
+                </li>
+              </ul>
+            </li>
+          </ul>
+
+          <Text size="xs" component="span">
+            The special tag <em>{"{{searchResults}}"}</em> will be replaced with
+            the search results, while <em>{"{{dateTime}}"}</em> will be replaced
+            with the current date and time.
+          </Text>
+
+          {isUsingCustomInstructions && (
+            <>
+              <br />
+              <br />
+              <Text size="xs" component="span">
+                Currently, you're using custom instructions. If you ever need to
+                restore the default instructions, you can do so by clicking{" "}
+                <Text
+                  component="span"
+                  size="xs"
+                  c="blue"
+                  style={{ cursor: "pointer" }}
+                  onClick={handleRestoreDefaultInstructions}
+                >
+                  here
+                </Text>
+                .
+              </Text>
+            </>
+          )}
+        </>
+      }
+      autosize
+      maxRows={10}
+      {...form.getInputProps("systemPrompt")}
+    />
+  );
+};
client/components/Pages/Main/Menu/AISettings/hooks/useHordeModels.ts ADDED
@@ -0,0 +1,35 @@
+import { useEffect, useState } from "react";
+import { addLogEntry } from "../../../../../../modules/logEntries";
+import type { defaultSettings } from "../../../../../../modules/settings";
+import { fetchHordeModels } from "../../../../../../modules/textGenerationWithHorde";
+import type { ModelOption } from "../types";
+
+type Settings = typeof defaultSettings;
+
+export const useHordeModels = (settings: Settings) => {
+  const [hordeModels, setHordeModels] = useState<ModelOption[]>([]);
+
+  useEffect(() => {
+    async function fetchAvailableHordeModels() {
+      try {
+        const models = await fetchHordeModels();
+        const formattedModels = models.map((model) => ({
+          label: model.name,
+          value: model.name,
+        }));
+        setHordeModels(formattedModels);
+      } catch (error) {
+        const errorMessage =
+          error instanceof Error ? error.message : String(error);
+        addLogEntry(`Error fetching AI Horde models: ${errorMessage}`);
+        setHordeModels([]);
+      }
+    }
+
+    if (settings.inferenceType === "horde") {
+      fetchAvailableHordeModels();
+    }
+  }, [settings.inferenceType]);
+
+  return hordeModels;
+};
client/components/Pages/Main/Menu/AISettings/hooks/useHordeUserInfo.ts ADDED
@@ -0,0 +1,43 @@
+import { useEffect, useState } from "react";
+import { addLogEntry } from "../../../../../../modules/logEntries";
+import type { defaultSettings } from "../../../../../../modules/settings";
+import {
+  aiHordeDefaultApiKey,
+  fetchHordeUserInfo,
+} from "../../../../../../modules/textGenerationWithHorde";
+import type { HordeUserInfo } from "../types";
+
+type Settings = typeof defaultSettings;
+
+export const useHordeUserInfo = (settings: Settings) => {
+  const [hordeUserInfo, setHordeUserInfo] = useState<HordeUserInfo | null>(
+    null,
+  );
+
+  useEffect(() => {
+    async function fetchUserInfo() {
+      try {
+        if (
+          settings.hordeApiKey &&
+          settings.hordeApiKey !== aiHordeDefaultApiKey
+        ) {
+          const userInfo = await fetchHordeUserInfo(settings.hordeApiKey);
+          setHordeUserInfo(userInfo);
+        } else {
+          setHordeUserInfo(null);
+        }
+      } catch (error) {
+        const errorMessage =
+          error instanceof Error ? error.message : String(error);
+        addLogEntry(`Error fetching AI Horde user info: ${errorMessage}`);
+        setHordeUserInfo(null);
+      }
+    }
+
+    if (settings.inferenceType === "horde") {
+      fetchUserInfo();
+    }
+  }, [settings.inferenceType, settings.hordeApiKey]);
+
+  return hordeUserInfo;
+};
client/components/Pages/Main/Menu/AISettings/hooks/useOpenAiModels.ts ADDED
@@ -0,0 +1,56 @@
+import { useEffect, useState } from "react";
+import { addLogEntry } from "../../../../../../modules/logEntries";
+import type { defaultSettings } from "../../../../../../modules/settings";
+import type { ModelOption } from "../types";
+
+type Settings = typeof defaultSettings;
+
+export const useOpenAiModels = (settings: Settings) => {
+  const [openAiModels, setOpenAiModels] = useState<ModelOption[]>([]);
+  const [useTextInput, setUseTextInput] = useState(false);
+
+  useEffect(() => {
+    async function fetchOpenAiModels() {
+      try {
+        const response = await fetch(`${settings.openAiApiBaseUrl}/models`, {
+          headers: {
+            Authorization: `Bearer ${settings.openAiApiKey}`,
+          },
+        });
+
+        if (!response.ok) {
+          throw new Error(`Failed to fetch models: ${response.statusText}`);
+        }
+
+        const data = await response.json();
+        const modelIds: string[] = data.data.map(
+          (model: { id: string }) => model.id,
+        );
+        const uniqueModelIds = [...new Set(modelIds)];
+        const models = uniqueModelIds.map((id) => ({
+          label: id,
+          value: id,
+        }));
+
+        setOpenAiModels(models);
+        setUseTextInput(!Array.isArray(models) || models.length === 0);
+      } catch (error) {
+        const errorMessage =
+          error instanceof Error ? error.message : String(error);
+        addLogEntry(`Error fetching OpenAI models: ${errorMessage}`);
+        setOpenAiModels([]);
+        setUseTextInput(true);
+      }
+    }
+
+    if (settings.inferenceType === "openai" && settings.openAiApiBaseUrl) {
+      fetchOpenAiModels();
+    }
+  }, [
+    settings.inferenceType,
+    settings.openAiApiBaseUrl,
+    settings.openAiApiKey,
+  ]);
+
+  return { openAiModels, useTextInput };
+};
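Note: the hook above assumes an OpenAI-compatible `GET /models` response, that is, a list object whose `data` entries carry model ids; when the list is empty or the request fails, it falls back to a free-text model field. A sketch of the shape being parsed (the ids are made up for illustration):

```ts
// Hypothetical response from `${openAiApiBaseUrl}/models`:
const response = {
  object: "list",
  data: [{ id: "llama-3.2-1b-instruct" }, { id: "qwen2.5-0.5b-instruct" }],
};

// The hook deduplicates the ids and maps them to select options:
const options = [...new Set(response.data.map((m) => m.id))].map((id) => ({
  label: id,
  value: id,
}));
// => [{ label: "llama-3.2-1b-instruct", value: "llama-3.2-1b-instruct" }, ...]
```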
client/components/Pages/Main/Menu/AISettings/types.ts ADDED
@@ -0,0 +1,15 @@
+export interface ModelOption {
+  label: string;
+  value: string;
+}
+
+export interface HordeUserInfo {
+  username: string;
+  kudos: number;
+}
+
+export interface AIParameterSliderProps extends Record<string, unknown> {
+  label: string;
+  description: string;
+  defaultValue: number;
+}
client/components/Pages/Main/Menu/ActionsForm.tsx ADDED
@@ -0,0 +1,12 @@
+import { Stack } from "@mantine/core";
+import ShowLogsButton from "../../../Logs/ShowLogsButton";
+import ClearDataButton from "./ClearDataButton";
+
+export default function ActionsForm() {
+  return (
+    <Stack gap="lg">
+      <ClearDataButton />
+      <ShowLogsButton />
+    </Stack>
+  );
+}