Commit df94e66 by atrokhym · Parent: 937ba7e

picking right model

app/lib/.server/llm/model.ts CHANGED
@@ -15,14 +15,23 @@ export function getAnthropicModel(apiKey: string, model: string) {
 
   return anthropic(model);
 }
-export function getOpenAILikeModel(baseURL:string,apiKey: string, model: string) {
+
+export function getOpenAILikeModel(baseURL: string, apiKey: string, model: string) {
+  // console.log('OpenAILike config:', { baseURL, hasApiKey: !!apiKey, model });
   const openai = createOpenAI({
     baseURL,
     apiKey,
   });
-
-  return openai(model);
+  // console.log('OpenAI client created:', !!openai);
+  const client = openai(model);
+  // console.log('OpenAI model client:', !!client);
+  return client;
+  // return {
+  //   model: client,
+  //   provider: 'OpenAILike' // Correctly identifying the actual provider
+  // };
 }
+
 export function getOpenAIModel(apiKey: string, model: string) {
   const openai = createOpenAI({
     apiKey,
@@ -74,7 +83,7 @@ export function getOllamaModel(baseURL: string, model: string) {
   return Ollama;
 }
 
-export function getDeepseekModel(apiKey: string, model: string){
+export function getDeepseekModel(apiKey: string, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.deepseek.com/beta',
     apiKey,
@@ -108,9 +117,15 @@ export function getXAIModel(apiKey: string, model: string) {
 
   return openai(model);
 }
+
 export function getModel(provider: string, model: string, env: Env, apiKeys?: Record<string, string>) {
-  const apiKey = getAPIKey(env, provider, apiKeys);
-  const baseURL = getBaseURL(env, provider);
+  let apiKey; // Declare first
+  let baseURL;
+
+  apiKey = getAPIKey(env, provider, apiKeys); // Then assign
+  baseURL = getBaseURL(env, provider);
+
+  // console.log('getModel inputs:', { provider, model, baseURL, hasApiKey: !!apiKey });
 
   switch (provider) {
     case 'Anthropic':
@@ -126,11 +141,11 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Record<string, string>) {
     case 'Google':
       return getGoogleModel(apiKey, model);
     case 'OpenAILike':
-      return getOpenAILikeModel(baseURL,apiKey, model);
+      return getOpenAILikeModel(baseURL, apiKey, model);
     case 'Deepseek':
       return getDeepseekModel(apiKey, model);
     case 'Mistral':
-      return getMistralModel(apiKey, model);
+      return getMistralModel(apiKey, model);
     case 'LMStudio':
       return getLMStudioModel(baseURL, model);
     case 'xAI':
@@ -138,4 +153,4 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Record<string, string>) {
     default:
       return getOllamaModel(baseURL, model);
   }
-}
+}
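
A usage sketch of the dispatcher this file exports, assuming the Vercel AI SDK's streamText from the 'ai' package (consistent with the _streamText usage in stream-text.ts below). The provider/model pair, import path, and prompt are placeholders, not values from this commit:

import { streamText } from 'ai';

import { getModel } from '~/lib/.server/llm/model'; // import path assumed

async function demo(env: Env): Promise<string> {
  // Any provider/model pair handled by the switch in getModel works here;
  // getModel resolves the API key and base URL before building the client.
  const model = getModel('OpenAILike', 'o1-mini', env);

  const result = await streamText({
    model,
    prompt: 'Reply with a single word.',
  });

  // Drain the token stream into a plain string.
  let text = '';
  for await (const chunk of result.textStream) {
    text += chunk;
  }

  return text;
}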
app/lib/.server/llm/stream-text.ts CHANGED
@@ -52,6 +52,10 @@ function extractPropertiesFromMessage(message: Message): { model: string; provider: string; content: string } {
       })
     : textContent.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '');
 
+  // console.log('Model from message:', model);
+  // console.log('Found in MODEL_LIST:', MODEL_LIST.find((m) => m.name === model));
+  // console.log('Current MODEL_LIST:', MODEL_LIST);
+
   return { model, provider, content: cleanedContent };
 }
 
@@ -64,7 +68,7 @@ export function streamText(
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER;
 
-  console.log('StreamText:', JSON.stringify(messages));
+  // console.log('StreamText:', JSON.stringify(messages));
 
   const processedMessages = messages.map((message) => {
     if (message.role === 'user') {
@@ -82,11 +86,22 @@ export function streamText(
     return message; // No changes for non-user messages
   });
 
-  return _streamText({
-    model: getModel(currentProvider, currentModel, env, apiKeys),
+  // console.log('Message content:', messages[0].content);
+  // console.log('Extracted properties:', extractPropertiesFromMessage(messages[0]));
+
+  const llmClient = getModel(currentProvider, currentModel, env, apiKeys);
+  // console.log('LLM Client:', llmClient);
+
+  const llmConfig = {
+    ...options,
+    model: llmClient, //getModel(currentProvider, currentModel, env, apiKeys),
+    provider: currentProvider,
    system: getSystemPrompt(),
     maxTokens: MAX_TOKENS,
     messages: convertToCoreMessages(processedMessages),
-    ...options,
-  });
+  };
+
+  // console.log('LLM Config:', llmConfig);
+
+  return _streamText(llmConfig);
 }
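
Note the ordering change in the last hunk: ...options was previously spread last (so a caller could clobber model, system, and messages), and is now spread first, making the explicit keys authoritative. A standalone sketch of that spread-order rule, with illustrative values:

// In object spreads, later keys win. Spreading options first means the
// explicit fields below it take precedence on any conflict.
const options = { maxTokens: 512, system: 'caller override' };

const llmConfig = {
  ...options,           // caller-supplied keys land first...
  system: 'app prompt', // ...so these explicit values win
  maxTokens: 8192,
};

console.log(llmConfig); // { maxTokens: 8192, system: 'app prompt' }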
app/routes/api.chat.ts CHANGED
@@ -37,7 +37,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
     model: string
   }>();
 
-  console.log('ChatAction:', JSON.stringify(messages));
+  // console.log('ChatAction:', JSON.stringify(messages));
 
   const cookieHeader = request.headers.get("Cookie");
 
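The cookieHeader read in the surrounding context is presumably where per-user API keys come from before they reach getModel as the apiKeys record. A hypothetical sketch of that parsing step; the 'apiKeys' cookie name and its JSON payload are assumptions, not shown in this diff:

// Hypothetical: recover a Record<provider, key> from the Cookie header.
// The cookie name and JSON encoding are assumed, not taken from the commit.
function parseApiKeysCookie(cookieHeader: string | null): Record<string, string> {
  if (!cookieHeader) {
    return {};
  }

  // "a=1; apiKeys=%7B...%7D" -> find the apiKeys entry.
  const entry = cookieHeader
    .split(';')
    .map((part) => part.trim())
    .find((part) => part.startsWith('apiKeys='));

  if (!entry) {
    return {};
  }

  try {
    return JSON.parse(decodeURIComponent(entry.slice('apiKeys='.length)));
  } catch {
    return {}; // malformed cookie: fall back to server-side env keys
  }
}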
app/utils/constants.ts CHANGED
@@ -32,6 +32,7 @@ const PROVIDER_LIST: ProviderInfo[] = [
     name: 'OpenAILike',
     staticModels: [
       { name: 'o1-mini', label: 'o1-mini', provider: 'OpenAILike' },
+      { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI' },
     ],
     getDynamicModels: getOpenAILikeModels
   },
@@ -58,7 +59,9 @@ const PROVIDER_LIST: ProviderInfo[] = [
 
   }, {
     name: 'Google',
-    staticModels: [
+    staticModels: [
+      { name: 'gemini-exp-1121', label: 'Gemini Experimental 1121', provider: 'Google' },
+      { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro 002', provider: 'Google' },
       { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google' },
       { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google' }
     ],
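
For reference, a sketch of the shape these entries appear to satisfy, inferred from their usage in this diff; the repo's actual ProviderInfo/ModelInfo definitions live elsewhere and may carry more fields:

// Inferred from usage above; the field list is an assumption, not the repo's type.
interface ModelInfo {
  name: string;     // identifier sent to the provider's API
  label: string;    // display name for the model picker UI
  provider: string; // which provider entry the model belongs to
}

interface ProviderInfo {
  name: string;
  staticModels: ModelInfo[]; // always listed
  // fetched at runtime, e.g. getOpenAILikeModels; return type assumed
  getDynamicModels?: () => Promise<ModelInfo[]>;
}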