codacus committed · Commit 49c7129 · unverified · 1 Parent(s): 3ecac25

fix: ollama and lm studio url issue fix for docker and build (#1008)


* fix: Ollama and LM Studio base URL resolution for Docker and for the production build

* vite config fix
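The heart of the fix: when the app runs inside a Docker container, a base URL such as http://localhost:11434 resolves to the container itself, not to the host machine where Ollama or LM Studio is listening. The commit therefore rewrites localhost and 127.0.0.1 to host.docker.internal on the server whenever a new RUNNING_IN_DOCKER=true flag is set. A minimal sketch of that remapping in TypeScript (the standalone helper and its name are illustrative; the commit inlines the same steps in each provider below):

// Illustrative sketch only: the commit inlines this logic in each provider
// rather than extracting a shared helper like this one.
function remapForDocker(baseUrl: string): string {
  // window is undefined on the server, which is where the model fetch runs
  if (typeof window !== 'undefined') {
    return baseUrl;
  }

  // RUNNING_IN_DOCKER=true is baked into the image by the Dockerfile change below
  if (process.env.RUNNING_IN_DOCKER !== 'true') {
    return baseUrl;
  }

  // Inside a container, localhost/127.0.0.1 point at the container itself;
  // host.docker.internal resolves to the Docker host where Ollama and
  // LM Studio actually listen.
  return baseUrl
    .replace('localhost', 'host.docker.internal')
    .replace('127.0.0.1', 'host.docker.internal');
}

host.docker.internal is Docker's built-in alias for the host machine; it works out of the box on Docker Desktop and on Linux requires an extra-hosts entry such as --add-host=host.docker.internal:host-gateway.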

Dockerfile CHANGED
@@ -45,13 +45,14 @@ ENV WRANGLER_SEND_METRICS=false \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true
 
 # Pre-configure wrangler to disable metrics
 RUN mkdir -p /root/.config/.wrangler && \
     echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json
 
-RUN npm run build
+RUN pnpm run build
 
 CMD [ "pnpm", "run", "dockerstart"]
 
@@ -84,7 +85,8 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true
 
 RUN mkdir -p ${WORKDIR}/run
 CMD pnpm run dev --host
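Two things to note in the Dockerfile: the backslash added after DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX} continues the ENV instruction, so RUNNING_IN_DOCKER=true joins the same instruction and is baked into both the production and development stages. The build step also switches from npm run build to pnpm run build, consistent with the pnpm commands the image already uses for dockerstart and dev.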
app/lib/modules/llm/providers/lmstudio.ts CHANGED
@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import { createOpenAI } from '@ai-sdk/openai';
 import type { LanguageModelV1 } from 'ai';
+import { logger } from '~/utils/logger';
 
 export default class LMStudioProvider extends BaseProvider {
   name = 'LMStudio';
@@ -22,7 +23,7 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,
@@ -31,7 +32,18 @@ export default class LMStudioProvider extends BaseProvider {
     });
 
     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for LMStudio provider');
+    }
+
+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
     }
 
     const response = await fetch(`${baseUrl}/v1/models`);
@@ -51,13 +63,26 @@ export default class LMStudioProvider extends BaseProvider {
     providerSettings?: Record<string, IProviderSetting>;
   }) => LanguageModelV1 = (options) => {
     const { apiKeys, providerSettings, serverEnv, model } = options;
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
-      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
       defaultApiTokenKey: '',
     });
+
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for LMStudio provider');
+    }
+
+    if (typeof window === 'undefined') {
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
+    }
+
+    logger.debug('LMStudio Base Url used: ', baseUrl);
+
     const lmstudio = createOpenAI({
       baseUrl: `${baseUrl}/v1`,
       apiKey: '',
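Beyond the Docker remapping, this file picks up two bug fixes in getModelInstance: the base URL was previously looked up under OLLAMA_API_BASE_URL (an apparent copy-paste slip, now LMSTUDIO_API_BASE_URL), and the whole providerSettings map was passed where the settings for a single provider were expected (now providerSettings?.[this.name]). Both code paths now throw on a missing base URL instead of silently returning nothing, and the resolved URL is logged at debug level to make misconfiguration easier to spot.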
app/lib/modules/llm/providers/ollama.ts CHANGED
@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import type { LanguageModelV1 } from 'ai';
 import { ollama } from 'ollama-ai-provider';
+import { logger } from '~/utils/logger';
 
 interface OllamaModelDetails {
   parent_model: string;
@@ -45,7 +46,7 @@ export default class OllamaProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,
@@ -54,7 +55,18 @@ export default class OllamaProvider extends BaseProvider {
     });
 
     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for OLLAMA provider');
+    }
+
+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
     }
 
     const response = await fetch(`${baseUrl}/api/tags`);
@@ -78,18 +90,23 @@ export default class OllamaProvider extends BaseProvider {
     const { apiKeys, providerSettings, serverEnv, model } = options;
     let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
       defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
       defaultApiTokenKey: '',
     });
 
     // Backend: Check if we're running in Docker
-    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for OLLAMA provider');
+    }
 
+    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
     baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
     baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
 
+    logger.debug('Ollama Base Url used: ', baseUrl);
+
     const ollamaInstance = ollama(model, {
       numCtx: DEFAULT_NUM_CTX,
     }) as LanguageModelV1 & { config: any };
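OllamaProvider gets the same treatment: getModels applies the Docker remapping behind a typeof window === 'undefined' guard so it only runs on the server, and a missing base URL now throws instead of quietly returning an empty model list. In getModelInstance, the provider-specific settings lookup (providerSettings?.[this.name]), the null check, and the debug log of the resolved URL all mirror the LM Studio changes.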
vite.config.ts CHANGED
@@ -4,9 +4,11 @@ import { defineConfig, type ViteDevServer } from 'vite';
 import { nodePolyfills } from 'vite-plugin-node-polyfills';
 import { optimizeCssModules } from 'vite-plugin-optimize-css-modules';
 import tsconfigPaths from 'vite-tsconfig-paths';
-
+import * as dotenv from 'dotenv';
 import { execSync } from 'child_process';
 
+dotenv.config();
+
 // Get git hash with fallback
 const getGitHash = () => {
   try {
@@ -17,18 +19,21 @@ const getGitHash = () => {
 };
 
 
+
+
 export default defineConfig((config) => {
   return {
     define: {
       __COMMIT_HASH: JSON.stringify(getGitHash()),
       __APP_VERSION: JSON.stringify(process.env.npm_package_version),
+      // 'process.env': JSON.stringify(process.env)
     },
     build: {
       target: 'esnext',
     },
     plugins: [
      nodePolyfills({
-        include: ['path', 'buffer'],
+        include: ['path', 'buffer', 'process'],
      }),
      config.mode !== 'test' && remixCloudflareDevProxy(),
      remixVitePlugin({
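The vite.config.ts changes make the flag usable at build time: dotenv.config() loads the project's .env into process.env before the config is evaluated, and adding 'process' to the nodePolyfills include list keeps client-side code that touches process.env from breaking in the browser (the define entry that would inline all of process.env stays commented out). A minimal sketch of what the dotenv call does, assuming a .env file at the project root; the variable shown is just an example:

import * as dotenv from 'dotenv';

// Merges key=value pairs from .env into process.env. By default, variables
// already present in the real environment are NOT overwritten.
dotenv.config();

// Example only: with OLLAMA_API_BASE_URL=http://127.0.0.1:11434 in .env,
// the value is now visible to build-time code such as vite.config.ts.
console.log(process.env.OLLAMA_API_BASE_URL);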