Files
zn-ai/electron/providers/OpenAIProvider.ts
2026-04-14 17:02:20 +08:00

66 lines
2.2 KiB
TypeScript

import { BaseProvider, ChatOptions, GatewayChatMessage } from "./BaseProvider";
import OpenAI from "openai";
import logManager from "@electron/service/logger"
/**
 * Map a raw OpenAI streaming chunk onto the gateway's UniversalChunk shape.
 *
 * Only the first choice is inspected (the provider streams a single choice).
 * A chunk with no choices, or a delta without content, yields an empty
 * `result`; `isEnd` is true once the choice carries a non-null finish_reason.
 */
function _transformChunk(chunk: OpenAI.Chat.Completions.ChatCompletionChunk): UniversalChunk {
  const [firstChoice] = chunk.choices;
  const finished = firstChoice?.finish_reason != null;
  const text = firstChoice?.delta?.content ?? '';
  return { isEnd: finished, result: text };
}
/**
 * Chat provider backed by the official OpenAI Node SDK.
 *
 * Wraps `client.chat.completions.create` in streaming mode and exposes the
 * stream as an AsyncIterable of UniversalChunk, logging request/response
 * timing through logManager.
 */
export class OpenAIProvider extends BaseProvider {
  private client: OpenAI;

  /**
   * @param apiKey  API key forwarded to the OpenAI client.
   * @param baseURL Endpoint base URL (allows OpenAI-compatible gateways).
   * @param headers Optional extra headers sent with every request.
   */
  constructor(apiKey: string, baseURL: string, headers?: Record<string, string>) {
    super();
    this.client = new OpenAI({ apiKey, baseURL, defaultHeaders: headers });
  }

  /**
   * Start a streaming chat completion.
   *
   * @param messages Conversation history; the last entry is previewed in logs.
   * @param model    Model identifier passed through to the API.
   * @param options  Optional settings; `options.signal` aborts both the HTTP
   *                 request and the consumption loop.
   * @returns An AsyncIterable yielding one UniversalChunk per stream chunk.
   * @throws Re-throws any error from the SDK (after logging it), both at
   *         request creation time and while iterating the stream.
   */
  async chat(messages: GatewayChatMessage[], model: string, options?: ChatOptions): Promise<AsyncIterable<UniversalChunk>> {
    const startTime = Date.now();
    const lastMessage = messages[messages.length - 1];
    // Build the log preview defensively: the previous code did
    // `lastMessage?.content?.substring(0, 100) + (...)`, which logs the
    // literal string "undefined" when the message list is empty or the
    // content is not a string (e.g. multimodal content parts).
    const lastContent = typeof lastMessage?.content === 'string' ? lastMessage.content : '';
    const lastMessagePreview = lastContent.length > 100
      ? lastContent.substring(0, 100) + '...'
      : lastContent;
    logManager.logApiRequest('chat.completions.create', {
      model,
      lastMessage: lastMessagePreview,
      messageCount: messages.length,
    }, 'POST');
    try {
      const chunks = await this.client.chat.completions.create({
        model,
        messages: messages as any,
        stream: true,
      }, {
        signal: options?.signal,
      });
      return {
        async *[Symbol.asyncIterator]() {
          try {
            for await (const chunk of chunks) {
              // Stop early if the caller aborted; the SDK request was created
              // with the same signal, so the underlying connection also ends.
              if (options?.signal?.aborted) break;
              yield _transformChunk(chunk);
            }
            // NOTE: reached on normal completion AND on abort-break; an
            // aborted stream is therefore logged as success here.
            const responseTime = Date.now() - startTime;
            logManager.logApiResponse('chat.completions.create', { success: true }, 200, responseTime);
          } catch (error) {
            const responseTime = Date.now() - startTime;
            logManager.logApiResponse('chat.completions.create', { error: error instanceof Error ? error.message : String(error) }, 500, responseTime);
            throw error;
          }
        }
      };
    } catch (error) {
      const responseTime = Date.now() - startTime;
      logManager.logApiResponse('chat.completions.create', { error: error instanceof Error ? error.message : String(error) }, 500, responseTime);
      throw error;
    }
  }
}