File size: 7,001 Bytes
1978456 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 |
// universal-developer/src/index.ts
import { ModelAdapter, TransformedPrompt, SymbolicCommand } from './adapters/base';
import { ClaudeAdapter } from './adapters/claude';
import { OpenAIAdapter } from './adapters/openai';
import { QwenAdapter } from './adapters/qwen';
// Import additional adapters as they become available
// import { GeminiAdapter } from './adapters/gemini';
// import { VLLMAdapter } from './adapters/vllm';
// import { OllamaAdapter } from './adapters/ollama';
/** Identifiers for the LLM backends this library can route requests to. */
type Provider =
  | 'anthropic'
  | 'openai'
  | 'qwen'
  | 'gemini'
  | 'vllm'
  | 'ollama'
  | 'lmstudio';
/**
 * Configuration for constructing a UniversalLLM.
 *
 * The telemetry-related fields were previously reachable only through the
 * untyped index signature; they are declared explicitly here so callers get
 * completion and type checking. Backward compatible — the index signature
 * still admits provider-specific extras.
 */
interface UniversalLLMOptions {
  provider: Provider;
  apiKey: string;
  model?: string;
  maxTokens?: number;
  temperature?: number;
  baseURL?: string;
  /** Telemetry stays enabled unless this is explicitly set to false (opt-out). */
  telemetryEnabled?: boolean;
  /** Override for the telemetry collection endpoint URL. */
  telemetryEndpoint?: string;
  /** Stable anonymous identifier; randomly generated when omitted. */
  anonymousId?: string;
  /** Session identifier; generated from timestamp + random suffix when omitted. */
  sessionId?: string;
  [key: string]: any; // Additional provider-specific options
}
/** Inputs for a single generation call. */
interface GenerateOptions {
  /** User prompt; may begin with a symbolic command token such as "/think". */
  prompt: string;
  /** Optional system prompt forwarded to the underlying provider adapter. */
  systemPrompt?: string;
}
/** Runtime telemetry state tracked by UniversalLLM. */
interface SymbolicTelemetry {
  /** Master switch; when false, no events are sent. */
  enabled: boolean;
  /** Collection endpoint; events are POSTed here as JSON. */
  endpoint?: string;
  /** Random per-installation identifier (no personal data). */
  anonymousId?: string;
  /** Random per-session identifier. */
  sessionId?: string;
}
/**
* UniversalLLM provides a unified interface for interacting with different LLM providers
* using symbolic runtime commands.
*/
export class UniversalLLM {
private adapter: ModelAdapter;
private telemetry: SymbolicTelemetry;
private sessionCommands: Map<string, number> = new Map();
/**
* Create a new UniversalLLM instance
* @param options Configuration options including provider and API key
*/
constructor(options: UniversalLLMOptions) {
this.adapter = this.createAdapter(options);
// Initialize telemetry (opt-in by default)
this.telemetry = {
enabled: options.telemetryEnabled !== false,
endpoint: options.telemetryEndpoint || 'https://telemetry.universal-developer.org/v1/events',
anonymousId: options.anonymousId || this.generateAnonymousId(),
sessionId: options.sessionId || this.generateSessionId()
};
}
/**
* Register a custom symbolic command
* @param name Command name (without the / prefix)
* @param command Command configuration
*/
public registerCommand(name: string, command: Omit<SymbolicCommand, 'name'>) {
this.adapter.registerCommand({
name,
...command
});
return this; // For method chaining
}
/**
* Generate a response using the configured LLM provider
* @param options Generation options including prompt and optional system prompt
* @returns Promise resolving to the generated text
*/
public async generate(options: GenerateOptions): Promise<string> {
// Extract symbolic command if present (for telemetry)
const commandMatch = options.prompt.match(/^\/([a-zA-Z0-9_]+)/);
const command = commandMatch ? commandMatch[1] : null;
// Track command usage
if (command) {
this.trackCommandUsage(command);
}
// Generate response using the adapter
const response = await this.adapter.generate(options);
// Send telemetry data if enabled
if (this.telemetry.enabled && command) {
this.sendTelemetry(command, options.prompt);
}
return response;
}
/**
* Get usage statistics for symbolic commands in the current session
* @returns Map of command names to usage counts
*/
public getCommandUsageStats(): Map<string, number> {
return new Map(this.sessionCommands);
}
/**
* Enable or disable telemetry collection
* @param enabled Whether telemetry should be enabled
*/
public setTelemetryEnabled(enabled: boolean): void {
this.telemetry.enabled = enabled;
}
/**
* Create the appropriate adapter based on the provider
* @param options Configuration options
* @returns Configured ModelAdapter instance
*/
private createAdapter(options: UniversalLLMOptions): ModelAdapter {
const { provider, apiKey, ...adapterOptions } = options;
switch (provider) {
case 'anthropic':
return new ClaudeAdapter(apiKey, adapterOptions);
case 'openai':
return new OpenAIAdapter(apiKey, adapterOptions);
case 'qwen':
return new QwenAdapter(apiKey, adapterOptions);
// Add cases for other providers as they become available
// case 'gemini':
// return new GeminiAdapter(apiKey, adapterOptions);
// case 'vllm':
// return new VLLMAdapter(apiKey, adapterOptions);
// case 'ollama':
// return new OllamaAdapter(apiKey, adapterOptions);
// case 'lmstudio':
// return new LMStudioAdapter(apiKey, adapterOptions);
default:
throw new Error(`Unsupported provider: ${provider}`);
}
}
/**
* Track usage of a symbolic command
* @param command Name of the command (without the / prefix)
*/
private trackCommandUsage(command: string): void {
const currentCount = this.sessionCommands.get(command) || 0;
this.sessionCommands.set(command, currentCount + 1);
}
/**
* Send telemetry data to the collection endpoint
* @param command Name of the command used
* @param prompt Full prompt text
*/
private async sendTelemetry(command: string, prompt: string): Promise<void> {
if (!this.telemetry.enabled || !this.telemetry.endpoint) return;
try {
const data = {
event: 'symbolic_command_used',
properties: {
command,
provider: (this.adapter as any).constructor.name.replace('Adapter', '').toLowerCase(),
timestamp: new Date().toISOString(),
prompt_length: prompt.length,
// No personal data or prompt content is sent
},
anonymousId: this.telemetry.anonymousId,
sessionId: this.telemetry.sessionId
};
// Use fetch in browser environments, axios/node-fetch in Node.js
if (typeof fetch === 'function') {
await fetch(this.telemetry.endpoint, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(data)
});
} else {
// In Node.js environments, use a dynamic import to avoid bundling issues
const { default: axios } = await import('axios');
await axios.post(this.telemetry.endpoint, data);
}
} catch (error) {
// Silently fail on telemetry errors to avoid disrupting the main application
console.warn('Telemetry error:', error);
}
}
/**
* Generate a random anonymous ID for telemetry
* @returns Random ID string
*/
private generateAnonymousId(): string {
return Math.random().toString(36).substring(2, 15) +
Math.random().toString(36).substring(2, 15);
}
/**
* Generate a session ID for telemetry
* @returns Session ID string
*/
private generateSessionId(): string {
return Date.now().toString(36) + Math.random().toString(36).substring(2, 9);
}
}
// Export other components for advanced usage
export * from './adapters/base';
export * from './adapters/claude';
export * from './adapters/openai';
export * from './adapters/qwen';
|