diff --git a/.env.example b/.env.example
index 46a21e892905a0bcb389c513daf84cc6ec6c331c..7306f36172fada99fa6399326f1ace8d902ca404 100644
--- a/.env.example
+++ b/.env.example
@@ -5,6 +5,12 @@
 # You only need this environment variable set if you want to use Groq models
 GROQ_API_KEY=
 
+# Get your HuggingFace API Key here -
+# https://huggingface.co/settings/tokens
+# You only need this environment variable set if you want to use HuggingFace models
+HuggingFace_API_KEY=
+
+
 # Get your Open AI API Key by following these instructions -
 # https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
 # You only need this environment variable set if you want to use GPT models
@@ -32,17 +38,27 @@ OLLAMA_API_BASE_URL=
 # You only need this environment variable set if you want to use OpenAI Like models
 OPENAI_LIKE_API_BASE_URL=
 
+# You only need this environment variable set if you want to use Together AI models
+TOGETHER_API_BASE_URL=
+
 # You only need this environment variable set if you want to use DeepSeek models through their API
 DEEPSEEK_API_KEY=
 
 # Get your OpenAI Like API Key
 OPENAI_LIKE_API_KEY=
 
+# Get your Together API Key
+TOGETHER_API_KEY=
+
 # Get your Mistral API Key by following these instructions -
 # https://console.mistral.ai/api-keys/
 # You only need this environment variable set if you want to use Mistral models
 MISTRAL_API_KEY=
 
+# Get your Cohere API key by following these instructions -
+# https://dashboard.cohere.com/api-keys
+# You only need this environment variable set if you want to use Cohere models
+COHERE_API_KEY=
 
 # Get LMStudio Base URL from LM Studio Developer Console
 # Make sure to enable CORS
@@ -56,3 +72,11 @@ XAI_API_KEY=
 
 # Include this environment variable if you want more logging for debugging locally
 VITE_LOG_LEVEL=debug
+
+# Example Context Values for qwen2.5-coder:32b
+#
+# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
+# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
+# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
+# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
+DEFAULT_NUM_CTX=
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index a594bc8724f9e6decde8da702d6e4cbb6a0850cf..37ebae5a853a5cc63510ac595da00ccd52786275 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -56,6 +56,16 @@ body:
         - OS: [e.g. macOS, Windows, Linux]
         - Browser: [e.g. Chrome, Safari, Firefox]
         - Version: [e.g. 91.1]
+  - type: input
+    id: provider
+    attributes:
+      label: Provider Used
+      description: Tell us the provider you are using.
+  - type: input
+    id: model
+    attributes:
+      label: Model Used
+      description: Tell us the model you are using.
  - type: textarea
    id: additional
    attributes:
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ceff508478171711302960255bdb54d1bdce4a6d
--- /dev/null
+++ b/.github/workflows/docs.yaml
@@ -0,0 +1,33 @@
+name: Docs CI/CD
+
+on:
+  push:
+    branches:
+      - main
+permissions:
+  contents: write
+jobs:
+  build_docs:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: ./docs
+    steps:
+      - uses: actions/checkout@v4
+      - name: Configure Git Credentials
+        run: |
+          git config user.name github-actions[bot]
+          git config user.email 41898282+github-actions[bot]@users.noreply.github.com
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.x
+      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
+      - uses: actions/cache@v4
+        with:
+          key: mkdocs-material-${{ env.cache_id }}
+          path: .cache
+          restore-keys: |
+            mkdocs-material-
+
+      - run: pip install mkdocs-material
+      - run: mkdocs gh-deploy --force
\ No newline at end of file
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c9eb890eb1af4fc7ed52e1297be4ff31edc5c560
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,25 @@
+name: Mark Stale Issues and Pull Requests
+
+on:
+  schedule:
+    - cron: '0 2 * * *' # Runs daily at 2:00 AM UTC
+  workflow_dispatch: # Allows manual triggering of the workflow
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Mark stale issues and pull requests
+        uses: actions/stale@v8
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          stale-issue-message: "This issue has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days."
+          stale-pr-message: "This pull request has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days."
+          days-before-stale: 10 # Number of days before marking an issue or PR as stale
+          days-before-close: 4 # Number of days after being marked stale before closing
+          stale-issue-label: "stale" # Label to apply to stale issues
+          stale-pr-label: "stale" # Label to apply to stale pull requests
+          exempt-issue-labels: "pinned,important" # Issues with these labels won't be marked stale
+          exempt-pr-labels: "pinned,important" # PRs with these labels won't be marked stale
+          operations-per-run: 75 # Limits the number of actions per run to avoid API rate limits
diff --git a/.gitignore b/.gitignore
index b43105b77fa88ea2f40962d904156abcdd6693e3..7bbcc2ea3f3c5ef915a7832f0ac1268701fa5567 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,13 +22,18 @@ dist-ssr
 *.sln
 *.sw?
 
+/.history
 /.cache
 /build
 .env.local
 .env
+.dev.vars
 *.vars
 .wrangler
 _worker.bundle
 
 Modelfile
 modelfiles
+
+# docs ignore
+site
diff --git a/.husky/commit-msg b/.husky/commit-msg
deleted file mode 100644
index d821bbc58de2bc4e96ebb214d7fb895d3b0c12a2..0000000000000000000000000000000000000000
--- a/.husky/commit-msg
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env sh
-
-. "$(dirname "$0")/_/husky.sh"
-
-npx commitlint --edit $1
-
-exit 0
diff --git a/.husky/pre-commit b/.husky/pre-commit
new file mode 100644
index 0000000000000000000000000000000000000000..05fe9ee6912a73904b6bf43f787190c8acbd91c8
--- /dev/null
+++ b/.husky/pre-commit
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+echo "πŸ” Running pre-commit hook to check the code looks good... πŸ”"
+
+export NVM_DIR="$HOME/.nvm"
+[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # Load nvm if you're using it
+
+if ! pnpm typecheck; then
+    echo "❌ Type checking failed! Please review TypeScript types."
+    echo "Once you're done, don't forget to add your changes to the commit! πŸš€"
+    exit 1
+fi
+
+if ! pnpm lint; then
+    echo "❌ Linting failed! 'pnpm lint:fix' will help you fix the easy ones."
+    echo "Once you're done, don't forget to add your beautification to the commit! 🀩"
+    exit 1
+fi
+
+echo "πŸ‘ All good! Committing changes..."
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1bf3bfb77c70b0869e89eaf0e42e243e31fc285a..68215a289bb750a8bd4aa4ed5193168043307a2e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
-# Contributing to Bolt.new Fork
+# Contributing to oTToDev
 
-First off, thank you for considering contributing to Bolt.new! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make Bolt.new a better tool for developers worldwide.
+First off, thank you for considering contributing to oTToDev! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make oTToDev a better tool for developers worldwide.
 
 ## πŸ“‹ Table of Contents
 - [Code of Conduct](#code-of-conduct)
@@ -53,6 +53,8 @@ We're looking for dedicated contributors to help maintain and grow this project.
 - Comment complex logic
 - Keep functions focused and small
 - Use meaningful variable names
+- Lint your code. This repo contains a pre-commit hook that will verify your code is linted properly,
+so set up your IDE to do that for you!
 
 ## Development Setup
 
@@ -72,6 +74,7 @@ pnpm install
 - Add your LLM API keys (only set the ones you plan to use):
 ```bash
 GROQ_API_KEY=XXX
+HuggingFace_API_KEY=XXX
 OPENAI_API_KEY=XXX
 ANTHROPIC_API_KEY=XXX
 ...
@@ -80,6 +83,19 @@ ANTHROPIC_API_KEY=XXX
 ```bash
 VITE_LOG_LEVEL=debug
 ```
+
+  - Optionally set context size:
+```bash
+DEFAULT_NUM_CTX=32768
+```
+
+Some example context values for the qwen2.5-coder:32b model (see the sketch below for how this value is likely applied):
+
+* DEFAULT_NUM_CTX=32768 - Consumes 36GB of VRAM
+* DEFAULT_NUM_CTX=24576 - Consumes 32GB of VRAM
+* DEFAULT_NUM_CTX=12288 - Consumes 26GB of VRAM
+* DEFAULT_NUM_CTX=6144 - Consumes 24GB of VRAM
+
 **Important**: Never commit your `.env.local` file to version control. It's already included in .gitignore.
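+
+As a rough illustration of how DEFAULT_NUM_CTX is likely consumed, here is a minimal sketch. The constant name comes from the list above, but the exact wiring is an assumption - check the Ollama provider code in the app's server-side LLM module for the actual implementation:
+
+```typescript
+// Hypothetical sketch: read DEFAULT_NUM_CTX from the environment, falling back to a 32K window.
+const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX
+  ? parseInt(process.env.DEFAULT_NUM_CTX, 10)
+  : 32768;
+
+// Presumably passed to the Ollama model as its context-window option, e.g.:
+// ollama(modelName, { numCtx: DEFAULT_NUM_CTX });
+```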
 ### πŸš€ Running the Development Server
diff --git a/Dockerfile b/Dockerfile
index 3b5a74cdee74ba91457ec18fdd4f043764c721db..06541d30317c979d615e8b4d93eb934581463f7b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -19,21 +19,29 @@ FROM base AS bolt-ai-production
 
 # Define environment variables with default values or let them be overridden
 ARG GROQ_API_KEY
+ARG HuggingFace_API_KEY
 ARG OPENAI_API_KEY
 ARG ANTHROPIC_API_KEY
 ARG OPEN_ROUTER_API_KEY
 ARG GOOGLE_GENERATIVE_AI_API_KEY
 ARG OLLAMA_API_BASE_URL
+ARG TOGETHER_API_KEY
+ARG TOGETHER_API_BASE_URL
 ARG VITE_LOG_LEVEL=debug
+ARG DEFAULT_NUM_CTX
 
 ENV WRANGLER_SEND_METRICS=false \
     GROQ_API_KEY=${GROQ_API_KEY} \
+    HuggingFace_API_KEY=${HuggingFace_API_KEY} \
     OPENAI_API_KEY=${OPENAI_API_KEY} \
     ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
     OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
     GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
     OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
-    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+    TOGETHER_API_KEY=${TOGETHER_API_KEY} \
+    TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
 
 # Pre-configure wrangler to disable metrics
 RUN mkdir -p /root/.config/.wrangler && \
@@ -48,20 +56,28 @@ FROM base AS bolt-ai-development
 
 # Define the same environment variables for development
 ARG GROQ_API_KEY
+ARG HuggingFace_API_KEY
 ARG OPENAI_API_KEY
 ARG ANTHROPIC_API_KEY
 ARG OPEN_ROUTER_API_KEY
 ARG GOOGLE_GENERATIVE_AI_API_KEY
 ARG OLLAMA_API_BASE_URL
+ARG TOGETHER_API_KEY
+ARG TOGETHER_API_BASE_URL
 ARG VITE_LOG_LEVEL=debug
+ARG DEFAULT_NUM_CTX
 
 ENV GROQ_API_KEY=${GROQ_API_KEY} \
+    HuggingFace_API_KEY=${HuggingFace_API_KEY} \
     OPENAI_API_KEY=${OPENAI_API_KEY} \
     ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
     OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
     GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
     OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
-    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+    TOGETHER_API_KEY=${TOGETHER_API_KEY} \
+    TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
 
 RUN mkdir -p ${WORKDIR}/run
 CMD pnpm run dev --host
diff --git a/FAQ.md b/FAQ.md
new file mode 100644
index 0000000000000000000000000000000000000000..3e267058c7c6a1534f8364da4bc01def8b27ef3c
--- /dev/null
+++ b/FAQ.md
@@ -0,0 +1,54 @@
+[![Bolt.new: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.new)
+
+# Bolt.new Fork by Cole Medin - oTToDev
+
+## FAQ
+
+### How do I get the best results with oTToDev?
+
+- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
+
+- **Use the enhance prompt icon**: Before sending your prompt, try clicking the 'enhance' icon to have the AI model help you refine your prompt, then edit the results before submitting.
+
+- **Scaffold the basics first, then add features**: Make sure the basic structure of your application is in place before diving into more advanced functionality. This helps oTToDev understand the foundation of your project and ensures everything is wired up right before you build out more advanced functionality.
+
+- **Batch simple instructions**: Save time by combining simple instructions into one message.
+For example, you can ask oTToDev to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go, saving you time and reducing API credit consumption significantly.
+
+### Do you plan on merging oTToDev back into the official Bolt.new repo?
+
+More news on this coming early next month - stay tuned!
+
+### Why are there so many open issues/pull requests?
+
+oTToDev was started simply to showcase how to edit an open source project and to do something cool with local LLMs on my (@ColeMedin) YouTube channel! However, it quickly
+grew into a massive community project, and I am working hard to keep up with the demand by forming a team of maintainers and getting as many people involved as I can.
+That effort is going well and all of our maintainers are ABSOLUTE rockstars, but it still takes time to organize everything so we can efficiently get through all
+the issues and PRs. But rest assured, we are working hard and even working on some partnerships behind the scenes to really help this project take off!
+
+### How do local LLMs fare compared to larger models like Claude 3.5 Sonnet for oTToDev/Bolt.new?
+
+Although the gap between open-source and massive closed-source models is quickly closing, you're still going to get the best results with the very large models like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. This is one of the big tasks we have at hand - figuring out how to prompt better, use agents, and improve the platform as a whole to make it work better for even the smaller local LLMs!
+
+### I'm getting the error: "There was an error processing this request"
+
+If you see this error within oTToDev, that is just the application telling you there is a problem at a high level, and this could mean a number of different things. To find the actual error, please check BOTH the terminal where you started the application (with Docker or pnpm) and the developer console in the browser. For most browsers, you can access the developer console by pressing F12 or by right clicking anywhere in the browser and selecting "Inspect". Then go to the "Console" tab in the top right.
+
+### I'm getting the error: "x-api-key header missing"
+
+We have seen this error a couple of times, and for some reason just restarting the Docker container has fixed it. This seems to be Ollama specific. Another thing to try is to run oTToDev with Docker or pnpm, whichever you didn't run first. We are still on the hunt for why this happens once in a while!
+
+### I'm getting a blank preview when oTToDev runs my app!
+
+We promise you that we are constantly testing new PRs coming into oTToDev, and the preview is core functionality, so the application is not broken! When you get a blank preview, or no preview at all, this is generally because the LLM hallucinated bad code or incorrect commands. We are working on making this more transparent so it is obvious. Sometimes the error will appear in the developer console too, so check that as well.
+
+### How to add an LLM:
+
+To make new LLMs available to use in this version of Bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
+
+By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
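+
+For illustration, a MODEL_LIST entry might look something like the sketch below. The field names follow the description above, so double-check them against the actual type definition in `app/utils/constants.ts`:
+
+```typescript
+// Hypothetical entry, shaped as described above - confirm the exact fields against the source.
+const exampleModel = {
+  name: 'qwen2.5-coder:7b', // model ID from the provider's API documentation
+  label: 'Qwen 2.5 Coder 7B', // label shown in the frontend model dropdown
+  provider: 'Ollama', // must match an implemented provider name
+};
+```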
+
+When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here!
+
+### Everything works, but the results are bad
+
+This goes back to the point above: local LLMs are getting very powerful, but you are still going to see better (sometimes much better) results with the largest LLMs like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. If you are using smaller LLMs like Qwen-2.5-Coder, consider them more experimental and educational at this point. They can build smaller applications really well, which is super impressive for a local LLM, but for larger-scale applications you still want to use the larger LLMs!
diff --git a/README.md b/README.md
index 54ae824edb67766d9780c9b9054b3d79376e2f37..33f861fa0e5f35a4f18f1d807f5c964161a9feed 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,17 @@
 [![Bolt.new: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.new)
 
-# Bolt.new Fork by Cole Medin
+# Bolt.new Fork by Cole Medin - oTToDev
 
-This fork of Bolt.new allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+This fork of Bolt.new (oTToDev) allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
 
-# Requested Additions to this Fork - Feel Free to Contribute!!
+Check the [oTToDev Docs](https://coleam00.github.io/bolt.new-any-llm/) for more information.
+
+## Join the community for oTToDev!
+
+https://thinktank.ottomator.ai
+
+
+## Requested Additions - Feel Free to Contribute!
 
 - βœ… OpenRouter Integration (@coleam00)
 - βœ… Gemini Integration (@jonathands)
@@ -20,28 +27,33 @@ This fork of Bolt.new allows you to choose the LLM that you use for each prompt!
 - βœ… Publish projects directly to GitHub (@goncaloalves)
 - βœ… Ability to enter API keys in the UI (@ali00209)
 - βœ… xAI Grok Beta Integration (@milutinke)
+- βœ… LM Studio Integration (@karrot0)
+- βœ… HuggingFace Integration (@ahsan3219)
+- βœ… Bolt terminal to see the output of LLM run commands (@thecodacus)
+- βœ… Streaming of code output (@thecodacus)
+- βœ… Ability to revert code to earlier version (@wonderwhy-er)
+- βœ… Cohere Integration (@hasanraiyan)
+- βœ… Dynamic model max token length (@hasanraiyan)
+- βœ… Better prompt enhancing (@SujalXplores)
+- βœ… Prompt caching (@SujalXplores)
+- βœ… Load local projects into the app (@wonderwhy-er)
+- βœ… Together Integration (@mouimet-infinisoft)
+- βœ… Mobile friendly (@qwikode)
+- βœ… Attach images to prompts (@atrokhym)
 - ⬜ **HIGH PRIORITY** - Prevent Bolt from rewriting files as often (file locking and diffs)
 - ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
-- ⬜ **HIGH PRIORITY** Load local projects into the app
-- ⬜ **HIGH PRIORITY** - Attach images to prompts
 - ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
-- ⬜ LM Studio Integration
-- ⬜ Together Integration
-- ⬜ Azure Open AI API Integration
-- ⬜ HuggingFace Integration
-- ⬜ Perplexity Integration
-- ⬜ Vertex AI Integration
-- ⬜ Cohere Integration
 - ⬜ Deploy directly to Vercel/Netlify/other similar platforms
-- ⬜ Ability to revert code to earlier version
-- ⬜ Prompt caching
-- ⬜ Better prompt enhancing
 - ⬜ Have LLM plan the project in a MD file for better results/transparency
 - ⬜ VSCode Integration with git-like confirmations
 - ⬜ Upload documents for knowledge - UI design templates, a code base to reference coding style, etc.
 - ⬜ Voice prompting
+- ⬜ Azure Open AI API Integration
+- ⬜ Perplexity Integration
+- ⬜ Vertex AI Integration
 
-# Bolt.new: AI-Powered Full-Stack Web Development in the Browser
+## Bolt.new: AI-Powered Full-Stack Web Development in the Browser
 
 Bolt.new is an AI-powered web development agent that allows you to prompt, run, edit, and deploy full-stack applications directly from your browserβ€”no local setup required. If you're here to build your own AI-powered web dev agent using the Bolt open source codebase, [click here to get started!](./CONTRIBUTING.md)
@@ -116,6 +128,13 @@ Optionally, you can set the debug level:
 VITE_LOG_LEVEL=debug
 ```
 
+If you are using Ollama, also set DEFAULT_NUM_CTX; the example below uses an 8K context with Ollama running on localhost at port 11434:
+
+```
+OLLAMA_API_BASE_URL=http://localhost:11434
+DEFAULT_NUM_CTX=8192
+```
+
 **Important**: Never commit your `.env.local` file to version control. It's already included in .gitignore.
 
 ## Run with Docker
@@ -183,40 +202,6 @@ sudo npm install -g pnpm
 ```bash
 pnpm run dev
 ```
-
-## Super Important Note on Running Ollama Models
-
-Ollama models by default only have 2048 tokens for their context window. Even for large models that can easily handle way more.
-This is not a large enough window to handle the Bolt.new/oTToDev prompt! You have to create a version of any model you want
-to use where you specify a larger context window. Luckily it's super easy to do that.
-
-All you have to do is:
-
-- Create a file called "Modelfile" (no file extension) anywhere on your computer
-- Put in the two lines:
-
-```
-FROM [Ollama model ID such as qwen2.5-coder:7b]
-PARAMETER num_ctx 32768
-```
-
-- Run the command:
-
-```
-ollama create -f Modelfile [your new model ID, can be whatever you want (example: qwen2.5-coder-extra-ctx:7b)]
-```
-
-Now you have a new Ollama model that isn't heavily limited in the context length like Ollama models are by default for some reason.
-You'll see this new model in the list of Ollama models along with all the others you pulled!
-
-## Adding New LLMs:
-
-To make new LLMs available to use in this version of Bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
-
-By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
-
-When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here!
-
 ## Available Scripts
 
 - `pnpm run dev`: Starts the development server.
@@ -227,6 +212,7 @@ When you add a new model to the MODEL_LIST array, it will immediately be availab
 - `pnpm run typecheck`: Runs TypeScript type checking.
 - `pnpm run typegen`: Generates TypeScript types using Wrangler.
 - `pnpm run deploy`: Builds the project and deploys it to Cloudflare Pages.
+- `pnpm run lint:fix`: Runs the linter and automatically fixes issues according to your ESLint configuration.
 
 ## Development
 
@@ -238,14 +224,16 @@ pnpm run dev
 
 This will start the Remix Vite development server. You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
 
-## Tips and Tricks
+## How do I contribute to oTToDev?
+
+[Please check out our dedicated page for contributing to oTToDev here!](CONTRIBUTING.md)
 
-Here are some tips to get the most out of Bolt.new:
+## What are the future plans for oTToDev?
 
-- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
+[Check out our Roadmap here!](https://roadmap.sh/r/ottodev-roadmap-2ovzo)
 
-- **Use the enhance prompt icon**: Before sending your prompt, try clicking the 'enhance' icon to have the AI model help you refine your prompt, then edit the results before submitting.
+Lots more updates to this roadmap coming soon!
 
-- **Scaffold the basics first, then add features**: Make sure the basic structure of your application is in place before diving into more advanced functionality. This helps Bolt understand the foundation of your project and ensure everything is wired up right before building out more advanced functionality.
+## FAQ
 
-- **Batch simple instructions**: Save time by combining simple instructions into one message. For example, you can ask Bolt to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go saving you time and reducing API credit consumption significantly.
+[Please check out our dedicated page for FAQ's related to oTToDev here!](FAQ.md) diff --git a/app/components/chat/APIKeyManager.tsx b/app/components/chat/APIKeyManager.tsx index a35724c8c7348d6210621f867f09f6ee67753f66..28847bc19a8c103e783de33d2fe094b19ee7b6db 100644 --- a/app/components/chat/APIKeyManager.tsx +++ b/app/components/chat/APIKeyManager.tsx @@ -1,12 +1,16 @@ import React, { useState } from 'react'; import { IconButton } from '~/components/ui/IconButton'; +import type { ProviderInfo } from '~/types/model'; interface APIKeyManagerProps { - provider: string; + provider: ProviderInfo; apiKey: string; setApiKey: (key: string) => void; + getApiKeyLink?: string; + labelForGetApiKey?: string; } +// eslint-disable-next-line @typescript-eslint/naming-convention export const APIKeyManager: React.FC = ({ provider, apiKey, setApiKey }) => { const [isEditing, setIsEditing] = useState(false); const [tempKey, setTempKey] = useState(apiKey); @@ -17,15 +21,29 @@ export const APIKeyManager: React.FC = ({ provider, apiKey, }; return ( -
- {provider} API Key: +
+
+ {provider?.name} API Key: + {!isEditing && ( +
+ + {apiKey ? 'β€’β€’β€’β€’β€’β€’β€’β€’' : 'Not set (will still work if set in .env file)'} + + setIsEditing(true)} title="Edit API Key"> +
+ +
+ )} +
+ {isEditing ? ( - <> +
setTempKey(e.target.value)} - className="flex-1 p-1 text-sm rounded border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus" + className="flex-1 px-2 py-1 text-xs lg:text-sm rounded border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus" />
@@ -33,15 +51,15 @@ export const APIKeyManager: React.FC = ({ provider, apiKey, setIsEditing(false)} title="Cancel">
- +
) : ( <> - - {apiKey ? 'β€’β€’β€’β€’β€’β€’β€’β€’' : 'Not set (will still work if set in .env file)'} - - setIsEditing(true)} title="Edit API Key"> -
- + {provider?.getApiKeyLink && ( + window.open(provider?.getApiKeyLink)} title="Edit API Key"> + {provider?.labelForGetApiKey || 'Get API Key'} +
+ + )} )}
diff --git a/app/components/chat/Artifact.tsx b/app/components/chat/Artifact.tsx index 62020fd846fc74473f3ac93a4c894f2213ac8f9a..989b92b22214baf9e2ac8f88b7bd3c6be73f66ce 100644 --- a/app/components/chat/Artifact.tsx +++ b/app/components/chat/Artifact.tsx @@ -7,6 +7,7 @@ import type { ActionState } from '~/lib/runtime/action-runner'; import { workbenchStore } from '~/lib/stores/workbench'; import { classNames } from '~/utils/classNames'; import { cubicEasingFn } from '~/utils/easings'; +import { WORK_DIR } from '~/utils/constants'; const highlighterOptions = { langs: ['shell'], @@ -27,6 +28,7 @@ interface ArtifactProps { export const Artifact = memo(({ messageId }: ArtifactProps) => { const userToggledActions = useRef(false); const [showActions, setShowActions] = useState(false); + const [allActionFinished, setAllActionFinished] = useState(false); const artifacts = useStore(workbenchStore.artifacts); const artifact = artifacts[messageId]; @@ -46,6 +48,14 @@ export const Artifact = memo(({ messageId }: ArtifactProps) => { if (actions.length && !showActions && !userToggledActions.current) { setShowActions(true); } + + if (actions.length !== 0 && artifact.type === 'bundled') { + const finished = !actions.find((action) => action.status !== 'complete'); + + if (finished != allActionFinished) { + setAllActionFinished(finished); + } + } }, [actions]); return ( @@ -58,6 +68,18 @@ export const Artifact = memo(({ messageId }: ArtifactProps) => { workbenchStore.showWorkbench.set(!showWorkbench); }} > + {artifact.type == 'bundled' && ( + <> +
+ {allActionFinished ? ( +
+ ) : ( +
+ )} +
+
+ + )}
{artifact?.title}
Click to open Workbench
@@ -65,7 +87,7 @@ export const Artifact = memo(({ messageId }: ArtifactProps) => {
- {actions.length && ( + {actions.length && artifact.type !== 'bundled' && ( {
- {showActions && actions.length > 0 && ( + {artifact.type !== 'bundled' && showActions && actions.length > 0 && ( { transition={{ duration: 0.15 }} >
+
@@ -129,6 +152,14 @@ const actionVariants = { visible: { opacity: 1, y: 0 }, }; +function openArtifactInWorkbench(filePath: any) { + if (workbenchStore.currentView.get() !== 'code') { + workbenchStore.currentView.set('code'); + } + + workbenchStore.setSelectedFile(`${WORK_DIR}/${filePath}`); +} + const ActionList = memo(({ actions }: ActionListProps) => { return ( @@ -169,7 +200,10 @@ const ActionList = memo(({ actions }: ActionListProps) => { {type === 'file' ? (
Create{' '} - + openArtifactInWorkbench(action.filePath)} + > {action.filePath}
diff --git a/app/components/chat/BaseChat.module.scss b/app/components/chat/BaseChat.module.scss index 3d6ed4c8617bfaa86a42bdf85fbb9c8dbe885147..cf530a112dadfe17a597e9494c0de2bd892373ff 100644 --- a/app/components/chat/BaseChat.module.scss +++ b/app/components/chat/BaseChat.module.scss @@ -17,3 +17,107 @@ .Chat { opacity: 1; } + +.RayContainer { + --gradient-opacity: 0.85; + --ray-gradient: radial-gradient(rgba(83, 196, 255, var(--gradient-opacity)) 0%, rgba(43, 166, 255, 0) 100%); + transition: opacity 0.25s linear; + position: fixed; + inset: 0; + pointer-events: none; + user-select: none; +} + +.LightRayOne { + width: 480px; + height: 680px; + transform: rotate(80deg); + top: -540px; + left: 250px; + filter: blur(110px); + position: absolute; + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayTwo { + width: 110px; + height: 400px; + transform: rotate(-20deg); + top: -280px; + left: 350px; + mix-blend-mode: overlay; + opacity: 0.6; + filter: blur(60px); + position: absolute; + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayThree { + width: 400px; + height: 370px; + top: -350px; + left: 200px; + mix-blend-mode: overlay; + opacity: 0.6; + filter: blur(21px); + position: absolute; + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayFour { + position: absolute; + width: 330px; + height: 370px; + top: -330px; + left: 50px; + mix-blend-mode: overlay; + opacity: 0.5; + filter: blur(21px); + border-radius: 100%; + background: var(--ray-gradient); +} + +.LightRayFive { + position: absolute; + width: 110px; + height: 400px; + transform: rotate(-40deg); + top: -280px; + left: -10px; + mix-blend-mode: overlay; + opacity: 0.8; + filter: blur(60px); + border-radius: 100%; + background: var(--ray-gradient); +} + +.PromptEffectContainer { + --prompt-container-offset: 50px; + --prompt-line-stroke-width: 1px; + position: absolute; + pointer-events: none; + inset: calc(var(--prompt-container-offset) / -2); + width: calc(100% + var(--prompt-container-offset)); + height: calc(100% + var(--prompt-container-offset)); +} + +.PromptEffectLine { + width: calc(100% - var(--prompt-container-offset) + var(--prompt-line-stroke-width)); + height: calc(100% - var(--prompt-container-offset) + var(--prompt-line-stroke-width)); + x: calc(var(--prompt-container-offset) / 2 - var(--prompt-line-stroke-width) / 2); + y: calc(var(--prompt-container-offset) / 2 - var(--prompt-line-stroke-width) / 2); + rx: calc(8px - var(--prompt-line-stroke-width)); + fill: transparent; + stroke-width: var(--prompt-line-stroke-width); + stroke: url(#line-gradient); + stroke-dasharray: 35px 65px; + stroke-dashoffset: 10; +} + +.PromptShine { + fill: url(#shine-gradient); + mix-blend-mode: overlay; +} diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx index c5fe70507d0d98f4d74decbd3d5fa35ab21d9a1f..abd5b137a344d6fe09db9a8d139e77d4d2ab5724 100644 --- a/app/components/chat/BaseChat.tsx +++ b/app/components/chat/BaseChat.tsx @@ -1,74 +1,30 @@ -// @ts-nocheck -// Preventing TS checks with files presented in the video for a better presentation. +/* + * @ts-nocheck + * Preventing TS checks with files presented in the video for a better presentation. 
+ */ import type { Message } from 'ai'; -import React, { type RefCallback, useEffect } from 'react'; +import React, { type RefCallback, useEffect, useState } from 'react'; import { ClientOnly } from 'remix-utils/client-only'; import { Menu } from '~/components/sidebar/Menu.client'; import { IconButton } from '~/components/ui/IconButton'; import { Workbench } from '~/components/workbench/Workbench.client'; import { classNames } from '~/utils/classNames'; -import { MODEL_LIST, DEFAULT_PROVIDER } from '~/utils/constants'; +import { MODEL_LIST, PROVIDER_LIST, initializeModelList } from '~/utils/constants'; import { Messages } from './Messages.client'; import { SendButton } from './SendButton.client'; -import { useState } from 'react'; import { APIKeyManager } from './APIKeyManager'; import Cookies from 'js-cookie'; +import * as Tooltip from '@radix-ui/react-tooltip'; import styles from './BaseChat.module.scss'; +import type { ProviderInfo } from '~/utils/types'; +import { ExportChatButton } from '~/components/chat/chatExportAndImport/ExportChatButton'; +import { ImportButtons } from '~/components/chat/chatExportAndImport/ImportButtons'; +import { ExamplePrompts } from '~/components/chat/ExamplePrompts'; -const EXAMPLE_PROMPTS = [ - { text: 'Build a todo app in React using Tailwind' }, - { text: 'Build a simple blog using Astro' }, - { text: 'Create a cookie consent form using Material UI' }, - { text: 'Make a space invaders game' }, - { text: 'How do I center a div?' }, -]; - -const providerList = [...new Set(MODEL_LIST.map((model) => model.provider))]; - -const ModelSelector = ({ model, setModel, provider, setProvider, modelList, providerList }) => { - return ( -
- - -
- ); -}; +import FilePreview from './FilePreview'; +import { ModelSelector } from '~/components/chat/ModelSelector'; +import { SpeechRecognitionButton } from '~/components/chat/SpeechRecognition'; const TEXTAREA_MIN_HEIGHT = 76; @@ -80,17 +36,24 @@ interface BaseChatProps { chatStarted?: boolean; isStreaming?: boolean; messages?: Message[]; + description?: string; enhancingPrompt?: boolean; promptEnhanced?: boolean; input?: string; model?: string; setModel?: (model: string) => void; - provider?: string; - setProvider?: (provider: string) => void; + provider?: ProviderInfo; + setProvider?: (provider: ProviderInfo) => void; handleStop?: () => void; sendMessage?: (event: React.UIEvent, messageInput?: string) => void; handleInputChange?: (event: React.ChangeEvent) => void; enhancePrompt?: () => void; + importChat?: (description: string, messages: Message[]) => Promise; + exportChat?: () => void; + uploadedFiles?: File[]; + setUploadedFiles?: (files: File[]) => void; + imageDataList?: string[]; + setImageDataList?: (dataList: string[]) => void; } export const BaseChat = React.forwardRef( @@ -102,45 +65,129 @@ export const BaseChat = React.forwardRef( showChat = true, chatStarted = false, isStreaming = false, - enhancingPrompt = false, - promptEnhanced = false, - messages, - input = '', model, setModel, provider, setProvider, - sendMessage, + input = '', + enhancingPrompt, handleInputChange, + promptEnhanced, enhancePrompt, + sendMessage, handleStop, + importChat, + exportChat, + uploadedFiles = [], + setUploadedFiles, + imageDataList = [], + setImageDataList, + messages, }, ref, ) => { const TEXTAREA_MAX_HEIGHT = chatStarted ? 400 : 200; const [apiKeys, setApiKeys] = useState>({}); + const [modelList, setModelList] = useState(MODEL_LIST); + const [isModelSettingsCollapsed, setIsModelSettingsCollapsed] = useState(false); + const [isListening, setIsListening] = useState(false); + const [recognition, setRecognition] = useState(null); + const [transcript, setTranscript] = useState(''); + console.log(transcript); useEffect(() => { // Load API keys from cookies on component mount try { const storedApiKeys = Cookies.get('apiKeys'); + if (storedApiKeys) { const parsedKeys = JSON.parse(storedApiKeys); + if (typeof parsedKeys === 'object' && parsedKeys !== null) { setApiKeys(parsedKeys); } } } catch (error) { console.error('Error loading API keys from cookies:', error); + // Clear invalid cookie data Cookies.remove('apiKeys'); } + + initializeModelList().then((modelList) => { + setModelList(modelList); + }); + + if (typeof window !== 'undefined' && ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window)) { + const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; + const recognition = new SpeechRecognition(); + recognition.continuous = true; + recognition.interimResults = true; + + recognition.onresult = (event) => { + const transcript = Array.from(event.results) + .map((result) => result[0]) + .map((result) => result.transcript) + .join(''); + + setTranscript(transcript); + + if (handleInputChange) { + const syntheticEvent = { + target: { value: transcript }, + } as React.ChangeEvent; + handleInputChange(syntheticEvent); + } + }; + + recognition.onerror = (event) => { + console.error('Speech recognition error:', event.error); + setIsListening(false); + }; + + setRecognition(recognition); + } }, []); + const startListening = () => { + if (recognition) { + recognition.start(); + setIsListening(true); + } + }; + + const stopListening = () => { + if (recognition) 
{ + recognition.stop(); + setIsListening(false); + } + }; + + const handleSendMessage = (event: React.UIEvent, messageInput?: string) => { + if (sendMessage) { + sendMessage(event, messageInput); + + if (recognition) { + recognition.abort(); // Stop current recognition + setTranscript(''); // Clear transcript + setIsListening(false); + + // Clear the input by triggering handleInputChange with empty value + if (handleInputChange) { + const syntheticEvent = { + target: { value: '' }, + } as React.ChangeEvent; + handleInputChange(syntheticEvent); + } + } + } + }; + const updateApiKey = (provider: string, key: string) => { try { const updatedApiKeys = { ...apiKeys, [provider]: key }; setApiKeys(updatedApiKeys); + // Save updated API keys to cookies with 30 day expiry and secure settings Cookies.set('apiKeys', JSON.stringify(updatedApiKeys), { expires: 30, // 30 days @@ -153,27 +200,86 @@ export const BaseChat = React.forwardRef( } }; - return ( + const handleFileUpload = () => { + const input = document.createElement('input'); + input.type = 'file'; + input.accept = 'image/*'; + + input.onchange = async (e) => { + const file = (e.target as HTMLInputElement).files?.[0]; + + if (file) { + const reader = new FileReader(); + + reader.onload = (e) => { + const base64Image = e.target?.result as string; + setUploadedFiles?.([...uploadedFiles, file]); + setImageDataList?.([...imageDataList, base64Image]); + }; + reader.readAsDataURL(file); + } + }; + + input.click(); + }; + + const handlePaste = async (e: React.ClipboardEvent) => { + const items = e.clipboardData?.items; + + if (!items) { + return; + } + + for (const item of items) { + if (item.type.startsWith('image/')) { + e.preventDefault(); + + const file = item.getAsFile(); + + if (file) { + const reader = new FileReader(); + + reader.onload = (e) => { + const base64Image = e.target?.result as string; + setUploadedFiles?.([...uploadedFiles, file]); + setImageDataList?.([...imageDataList, base64Image]); + }; + reader.readAsDataURL(file); + } + + break; + } + } + }; + + const baseChat = (
+
+
+
+
+
+
+
{() => } -
-
+
+
{!chatStarted && ( -
-

+
+

Where ideas begin

-

+

Bring ideas to life in seconds or get help on existing projects.

)}
@@ -182,7 +288,7 @@ export const BaseChat = React.forwardRef( return chatStarted ? ( @@ -190,31 +296,125 @@ export const BaseChat = React.forwardRef( }}
- - updateApiKey(provider, key)} + + + + + + + + + + + + + + + + + + +
+
+ +
+ +
+ + {provider && ( + updateApiKey(provider.name, key)} + /> + )} +
+
+ { + setUploadedFiles?.(uploadedFiles.filter((_, i) => i !== index)); + setImageDataList?.(imageDataList.filter((_, i) => i !== index)); + }} />