Files
zn-ai/electron/providers/OpenAIProvider.ts
DEV_DSW 78d3235ab6 feat: enhance token usage tracking and history management
- Updated HTML assets for improved loading.
- Integrated token usage tracking in chat processing, appending usage details to transcripts.
- Enhanced OpenAIProvider to include usage data in chat completion responses.
- Implemented asynchronous retrieval of recent token usage history.
- Added utility functions for managing transcript files and parsing usage data.
- Updated UI components to reflect changes in usage status handling.
- Ensured consistent usage status definitions across the application.
2026-04-15 11:45:33 +08:00

69 lines
2.3 KiB
TypeScript

import { BaseProvider, ChatOptions, GatewayChatMessage } from "./BaseProvider";
import OpenAI from "openai";
import logManager from "@electron/service/logger"
/**
 * Converts a raw OpenAI streaming chunk into the provider-agnostic
 * UniversalChunk shape consumed by the rest of the app.
 *
 * A chunk is terminal when its first choice carries a finish_reason, or
 * when it is a usage-only chunk (empty choices list plus a usage payload —
 * emitted by the API when stream_options.include_usage is enabled).
 */
function _transformChunk(chunk: OpenAI.Chat.Completions.ChatCompletionChunk): UniversalChunk {
    const [firstChoice] = chunk.choices;
    // Older SDK typings may not declare `usage` on chunks; read it loosely.
    const usageInfo = (chunk as any).usage;
    const finished = firstChoice?.finish_reason != null;
    const usageOnly = chunk.choices.length === 0 && usageInfo != null;
    return {
        isEnd: finished || usageOnly,
        result: firstChoice?.delta?.content ?? '',
        usage: usageInfo ?? undefined,
    };
}
/**
 * Chat provider backed by the OpenAI SDK (or any OpenAI-compatible
 * endpoint via baseURL). Streams completions as UniversalChunks and
 * reports request/response timing through logManager.
 */
export class OpenAIProvider extends BaseProvider {
    private client: OpenAI;

    /**
     * @param apiKey  API key passed straight to the OpenAI SDK.
     * @param baseURL Endpoint base URL (supports OpenAI-compatible gateways).
     * @param headers Optional extra headers attached to every request.
     */
    constructor(apiKey: string, baseURL: string, headers?: Record<string, string>) {
        super();
        this.client = new OpenAI({ apiKey, baseURL, defaultHeaders: headers });
    }

    /**
     * Starts a streaming chat completion and returns an async iterable of
     * UniversalChunks. Usage accounting is requested via
     * stream_options.include_usage, so the API's final usage-only chunk is
     * surfaced to callers through _transformChunk.
     *
     * @param messages Conversation history; the last entry is previewed in logs.
     * @param model    Model identifier forwarded to the API.
     * @param options  Optional ChatOptions; options.signal aborts the stream.
     * @returns Async iterable yielding one UniversalChunk per stream chunk.
     * @throws Re-throws any SDK/network error after logging it.
     */
    async chat(messages: GatewayChatMessage[], model: string, options?: ChatOptions): Promise<AsyncIterable<UniversalChunk>> {
        const startTime = Date.now();
        const lastMessage = messages[messages.length - 1];
        // Build a safe <=100-char preview. The previous expression logged the
        // literal string "undefined" when content was absent, and assumed
        // content is always a string (it may be a multimodal part array —
        // TODO confirm against GatewayChatMessage). Guard both cases.
        const rawContent = lastMessage?.content;
        const contentText = typeof rawContent === 'string' ? rawContent : '';
        const preview = contentText.length > 100 ? contentText.substring(0, 100) + '...' : contentText;
        logManager.logApiRequest('chat.completions.create', {
            model,
            lastMessage: preview,
            messageCount: messages.length,
        }, 'POST');
        try {
            const chunks = await this.client.chat.completions.create({
                model,
                messages: messages as any,
                stream: true,
                // Ask the API to append a final usage-only chunk to the stream.
                stream_options: { include_usage: true },
            }, {
                signal: options?.signal,
            });
            return {
                async *[Symbol.asyncIterator]() {
                    try {
                        for await (const chunk of chunks) {
                            // Stop promptly if the caller aborted mid-stream.
                            if (options?.signal?.aborted) break;
                            yield _transformChunk(chunk);
                        }
                        const responseTime = Date.now() - startTime;
                        logManager.logApiResponse('chat.completions.create', { success: true }, 200, responseTime);
                    } catch (error) {
                        const responseTime = Date.now() - startTime;
                        logManager.logApiResponse('chat.completions.create', { error: error instanceof Error ? error.message : String(error) }, 500, responseTime);
                        throw error;
                    }
                },
            };
        } catch (error) {
            // Request failed before the stream opened (auth, network, 4xx).
            const responseTime = Date.now() - startTime;
            logManager.logApiResponse('chat.completions.create', { error: error instanceof Error ? error.message : String(error) }, 500, responseTime);
            throw error;
        }
    }
}