File size: 12,428 Bytes
1978456
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
#!/usr/bin/env node

// universal-developer/src/cli.ts

import { program } from 'commander';
import { UniversalLLM } from './index';
import * as fs from 'fs';
import * as path from 'path';
import chalk from 'chalk';
import ora from 'ora';
import * as dotenv from 'dotenv';
import * as os from 'os';
import * as readline from 'readline';
import { createSpinner } from 'nanospinner';

// Load environment variables
dotenv.config();

// Load package.json for version info (surfaced via --version)
const packageJson = JSON.parse(
  fs.readFileSync(path.resolve(__dirname, '../package.json'), 'utf-8')
);

// Per-user configuration lives in ~/.universal-developer/config.json
const configDir = path.join(os.homedir(), '.universal-developer');
const configPath = path.join(configDir, 'config.json');

// Defaults applied when no config file exists or when it is missing keys.
const defaultConfig = {
  defaultProvider: 'anthropic',
  enableTelemetry: true,
  apiKeys: {}
};
let config: any = { ...defaultConfig };

// Create config directory if it doesn't exist
if (!fs.existsSync(configDir)) {
  fs.mkdirSync(configDir, { recursive: true });
}

// Load config if it exists. Merge over the defaults so a partial or
// hand-edited file (e.g. one without "apiKeys") cannot leave required
// fields undefined — later code iterates config.apiKeys directly.
if (fs.existsSync(configPath)) {
  try {
    config = { ...defaultConfig, ...JSON.parse(fs.readFileSync(configPath, 'utf-8')) };
  } catch (error) {
    console.error('Error loading config file:', error);
  }
}

/**
 * Persist the in-memory `config` object to the user's config file as
 * pretty-printed JSON. Failures are reported to stderr, never thrown,
 * so a read-only home directory does not crash the CLI.
 */
function saveConfig() {
  try {
    const serialized = JSON.stringify(config, null, 2);
    fs.writeFileSync(configPath, serialized);
  } catch (error) {
    console.error('Error saving config:', error);
  }
}

// Configure CLI
program
  .name('ud')
  .description('Universal Developer CLI - Control LLMs with symbolic runtime commands')
  .version(packageJson.version);

// Configure command: inspects or mutates the persisted configuration.
program
  .command('config')
  .description('Configure Universal Developer CLI')
  .option('-p, --provider <provider>', 'Set default provider (anthropic, openai, qwen, gemini, ollama)')
  .option('-k, --key <key>', 'Set API key for the default provider')
  .option('--anthropic-key <key>', 'Set API key for Anthropic/Claude')
  .option('--openai-key <key>', 'Set API key for OpenAI')
  .option('--qwen-key <key>', 'Set API key for Qwen')
  .option('--gemini-key <key>', 'Set API key for Google Gemini')
  .option('--telemetry <boolean>', 'Enable or disable anonymous telemetry')
  .option('-l, --list', 'List current configuration')
  .action((options) => {
    // --list: print current settings and exit without modifying anything.
    if (options.list) {
      console.log(chalk.bold('\nCurrent Configuration:'));
      console.log(`Default Provider: ${chalk.green(config.defaultProvider)}`);
      console.log(`Telemetry: ${config.enableTelemetry ? chalk.green('Enabled') : chalk.yellow('Disabled')}`);
      console.log('\nAPI Keys:');
      // Guard against a loaded config file that omits "apiKeys" —
      // Object.entries(undefined) would throw.
      for (const [provider, key] of Object.entries(config.apiKeys || {})) {
        console.log(`${provider}: ${key ? chalk.green('Configured') : chalk.red('Not configured')}`);
      }
      return;
    }

    // Track whether any option actually changed state so we only write
    // the config file (and report success) when something happened.
    let changed = false;

    if (options.provider) {
      const validProviders = ['anthropic', 'openai', 'qwen', 'gemini', 'ollama'];
      if (validProviders.includes(options.provider)) {
        config.defaultProvider = options.provider;
        changed = true;
        console.log(`Default provider set to ${chalk.green(options.provider)}`);
      } else {
        console.error(`Invalid provider: ${options.provider}. Valid options are: ${validProviders.join(', ')}`);
      }
    }

    // -k/--key applies to whatever the default provider currently is.
    if (options.key) {
      if (!config.apiKeys) config.apiKeys = {};
      config.apiKeys[config.defaultProvider] = options.key;
      changed = true;
      console.log(`API key for ${chalk.green(config.defaultProvider)} has been set`);
    }

    // Provider-specific keys (commander camelCases --anthropic-key etc.)
    const providerKeys = {
      'anthropic': options.anthropicKey,
      'openai': options.openaiKey,
      'qwen': options.qwenKey,
      'gemini': options.geminiKey
    };

    for (const [provider, key] of Object.entries(providerKeys)) {
      if (key) {
        if (!config.apiKeys) config.apiKeys = {};
        config.apiKeys[provider] = key;
        changed = true;
        console.log(`API key for ${chalk.green(provider)} has been set`);
      }
    }

    // Commander delivers option values as strings; only exactly "true"
    // enables telemetry, anything else disables it.
    if (options.telemetry !== undefined) {
      const enableTelemetry = options.telemetry === 'true';
      config.enableTelemetry = enableTelemetry;
      changed = true;
      console.log(`Telemetry ${enableTelemetry ? chalk.green('enabled') : chalk.yellow('disabled')}`);
    }

    if (changed) {
      saveConfig();
      console.log(chalk.bold('\nConfiguration saved!'));
    } else {
      console.log('No changes made. Use --help to see available options.');
    }
  });

/**
 * Collect everything piped into stdin, if anything was piped.
 * Resolves to null when stdin is an interactive terminal (no pipe).
 */
async function getPipedInput(): Promise<string | null> {
  // A TTY means the user is typing, not piping — nothing to read.
  if (process.stdin.isTTY) {
    return null;
  }

  return new Promise((resolve) => {
    let buffered = '';

    process.stdin.on('readable', () => {
      // Drain every chunk currently available in the stream buffer.
      let chunk;
      while ((chunk = process.stdin.read()) !== null) {
        buffered += chunk;
      }
    });

    process.stdin.on('end', () => resolve(buffered));
  });
}

/**
 * Resolve the API key for a provider: saved config first, then the
 * <PROVIDER>_API_KEY environment variable. Exits the process with
 * guidance when no key can be found for a provider that requires one.
 */
function getApiKey(provider: string): string {
  // First check config
  if (config.apiKeys && config.apiKeys[provider]) {
    return config.apiKeys[provider];
  }

  // Then check environment variables
  const envVarName = `${provider.toUpperCase()}_API_KEY`;
  const apiKey = process.env[envVarName];

  if (!apiKey) {
    // Ollama runs locally and does not require an API key; don't block
    // local usage just because none is configured.
    if (provider === 'ollama') {
      return '';
    }
    console.error(chalk.red(`Error: No API key found for ${provider}.`));
    console.log(`Please set your API key using: ud config --${provider}-key <your-api-key>`);
    console.log(`Or set the ${envVarName} environment variable.`);
    process.exit(1);
  }

  return apiKey;
}

// Interactive mode: a readline-driven REPL that keeps the full
// conversation history and replays it to the provider on every turn.
program
  .command('interactive')
  .alias('i')
  .description('Start an interactive session')
  .option('-p, --provider <provider>', 'LLM provider to use')
  .option('-m, --model <model>', 'Model to use')
  .action(async (options) => {
    // Provider falls back to the saved default; getApiKey exits the
    // process if no credential is available.
    const provider = options.provider || config.defaultProvider;
    const apiKey = getApiKey(provider);
    
    const llm = new UniversalLLM({
      provider,
      apiKey,
      model: options.model,
      telemetryEnabled: config.enableTelemetry
    });

    console.log(chalk.bold('\nUniversal Developer Interactive Mode'));
    console.log(chalk.dim(`Using provider: ${provider}`));
    console.log(chalk.dim('Type /exit or Ctrl+C to quit'));
    console.log(chalk.dim('Available commands: /think, /fast, /loop, /reflect, /fork, /collapse\n'));
    
    const rl = readline.createInterface({
      input: process.stdin,
      output: process.stdout
    });

    // Grows without bound for the life of the session; the entire
    // history is sent on each request.
    let conversationHistory: { role: string, content: string }[] = [];
    
    // Prompt loop: each answered question re-arms the prompt by calling
    // promptUser() again (recursion via the readline callback, so the
    // stack does not grow between turns).
    const promptUser = () => {
      rl.question('> ', async (input) => {
        // '/exit' ends the session; closing readline lets node exit.
        if (input.toLowerCase() === '/exit') {
          rl.close();
          return;
        }

        // Store user message
        conversationHistory.push({
          role: 'user',
          content: input
        });

        const spinner = createSpinner('Generating response...').start();
        
        try {
          const response = await llm.generate({
            messages: conversationHistory
          });
          
          spinner.success();
          console.log(`\n${chalk.blue('Assistant:')} ${response}\n`);
          
          // Store assistant response so the next turn has full context.
          conversationHistory.push({
            role: 'assistant',
            content: response
          });
        } catch (error) {
          spinner.error();
          // NOTE(review): assumes the rejection is an Error instance;
          // under strict TS `error` is unknown — confirm and narrow.
          console.error(`Error: ${error.message}`);
        }
        
        // Re-arm the prompt whether the request succeeded or failed.
        promptUser();
      });
    };
    
    // Seed the transcript with a greeting so history starts non-empty.
    console.log(chalk.blue('Assistant:') + ' Hello! How can I help you today?\n');
    conversationHistory.push({
      role: 'assistant',
      content: 'Hello! How can I help you today?'
    });
    
    promptUser();
  });

// Command for each symbolic operation. Each entry becomes a subcommand
// (e.g. `ud think "..."`) that prefixes the prompt with its /command.
const symbolicCommands = [
  { name: 'think', description: 'Generate response using deep reasoning' },
  { name: 'fast', description: 'Generate quick, concise response' },
  { name: 'loop', description: 'Generate iteratively refined response' },
  { name: 'reflect', description: 'Generate response with self-reflection' },
  { name: 'fork', description: 'Generate multiple alternative responses' },
  { name: 'collapse', description: 'Generate response using default behavior' }
];

symbolicCommands.forEach(cmd => {
  program
    .command(cmd.name)
    .description(cmd.description)
    .argument('[prompt]', 'The prompt to send to the LLM')
    .option('-p, --provider <provider>', 'LLM provider to use')
    .option('-m, --model <model>', 'Model to use')
    .option('-s, --system <prompt>', 'System prompt to use')
    .option('-i, --iterations <number>', 'Number of iterations (for loop command)')
    .option('-c, --count <number>', 'Number of alternatives (for fork command)')
    .action(async (promptArg, options) => {
      // Get provider from options or config
      const provider = options.provider || config.defaultProvider;
      const apiKey = getApiKey(provider);

      // Initialize LLM
      const llm = new UniversalLLM({
        provider,
        apiKey,
        model: options.model,
        telemetryEnabled: config.enableTelemetry
      });

      // Check for piped input (null when stdin is a TTY)
      const pipedInput = await getPipedInput();

      // Combine prompt argument and piped input: argument first, then
      // the piped content, separated by a blank line.
      let prompt = promptArg || '';
      if (pipedInput) {
        prompt = prompt ? `${prompt}\n\n${pipedInput}` : pipedInput;
      }

      // If no prompt provided, show usage and exit non-zero.
      if (!prompt) {
        console.error('Error: Prompt is required.');
        console.log(`Usage: ud ${cmd.name} "Your prompt here"`);
        // Fixed: was a single-quoted string, so ${cmd.name} printed
        // literally instead of being interpolated.
        console.log(`Or pipe content: cat file.txt | ud ${cmd.name}`);
        process.exit(1);
      }

      // Build the symbolic command prefix, e.g. "/loop --iterations=3".
      let commandString = `/${cmd.name}`;

      // Add command-specific parameters
      if (cmd.name === 'loop' && options.iterations) {
        commandString += ` --iterations=${options.iterations}`;
      } else if (cmd.name === 'fork' && options.count) {
        commandString += ` --count=${options.count}`;
      }

      // Prepend the command to the user's prompt.
      const fullPrompt = `${commandString} ${prompt}`;

      // Show what's happening
      console.log(chalk.dim(`Using provider: ${provider}`));
      const spinner = createSpinner('Generating response...').start();

      try {
        const response = await llm.generate({
          systemPrompt: options.system,
          prompt: fullPrompt
        });

        spinner.success();
        console.log('\n' + response + '\n');
      } catch (error) {
        spinner.error();
        // catch variables are unknown under strict TS; narrow before use.
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exit(1);
      }
    });
});

// Default command (no subcommand specified): treat the bare argument as
// a prompt, defaulting to the /think symbolic command.
program
  .arguments('[prompt]')
  .option('-p, --provider <provider>', 'LLM provider to use')
  .option('-m, --model <model>', 'Model to use')
  .option('-s, --system <prompt>', 'System prompt to use')
  .option('-c, --command <command>', 'Symbolic command to use')
  .action(async (promptArg, options) => {
    if (!promptArg && !process.stdin.isTTY) {
      // No prompt argument but has piped input
      const pipedInput = await getPipedInput();
      if (pipedInput) {
        promptArg = pipedInput;
      }
    }

    if (!promptArg) {
      // No prompt at all: fall back to interactive mode. Re-dispatch
      // through the parser — the previous code called .action(options)
      // on the command object, which only *registers* a handler (here,
      // the options object!) and never executes anything.
      await program.parseAsync(['interactive'], { from: 'user' });
      return;
    }

    // Get provider from options or config
    const provider = options.provider || config.defaultProvider;
    const apiKey = getApiKey(provider);

    // Initialize LLM
    const llm = new UniversalLLM({
      provider,
      apiKey,
      model: options.model,
      telemetryEnabled: config.enableTelemetry
    });

    // Default to think command if none specified
    const command = options.command || 'think';

    // Format prompt with command prefix, e.g. "/think <prompt>".
    const fullPrompt = `/${command} ${promptArg}`;

    // Show what's happening
    console.log(chalk.dim(`Using provider: ${provider}`));
    const spinner = createSpinner('Generating response...').start();

    try {
      const response = await llm.generate({
        systemPrompt: options.system,
        prompt: fullPrompt
      });

      spinner.success();
      console.log('\n' + response + '\n');
    } catch (error) {
      spinner.error();
      // catch variables are unknown under strict TS; narrow before use.
      console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
      process.exit(1);
    }
  });

// Parse arguments
program.parse();