AI_botter/node_modules/openai/lib/ChatCompletionStream.d.ts

import * as Core from 'openai/core';
import { Completions, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionCreateParams, type ChatCompletionCreateParamsBase } from 'openai/resources/chat/completions';
import { AbstractChatCompletionRunner, type AbstractChatCompletionRunnerEvents } from "./AbstractChatCompletionRunner.js";
import { type ReadableStream } from 'openai/_shims/index';
export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
    content: (contentDelta: string, contentSnapshot: string) => void;
    chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;
}
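/*
 * Example (a sketch, not part of the published typings): attaching handlers for
 * the two streaming events declared above. The handler signatures follow
 * `ChatCompletionStreamEvents`; `stream` is assumed to be a `ChatCompletionStream`
 * obtained as shown further below.
 *
 *   stream.on('content', (contentDelta, contentSnapshot) => {
 *     process.stdout.write(contentDelta);
 *   });
 *   stream.on('chunk', (chunk, snapshot) => {
 *     console.log(snapshot.choices[0]?.message.content);
 *   });
 */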
export type ChatCompletionStreamParams = Omit<ChatCompletionCreateParamsBase, 'stream'> & {
    stream?: true;
};
export declare class ChatCompletionStream extends AbstractChatCompletionRunner<ChatCompletionStreamEvents> implements AsyncIterable<ChatCompletionChunk> {
    #private;
    get currentChatCompletionSnapshot(): ChatCompletionSnapshot | undefined;
    /**
     * Intended for use on the frontend, consuming a stream produced with
     * `.toReadableStream()` on the backend.
     *
     * Note that messages sent to the model do not appear in `.on('message')`
     * in this context.
     */
    static fromReadableStream(stream: ReadableStream): ChatCompletionStream;
    static createChatCompletion(completions: Completions, params: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream;
    protected _createChatCompletion(completions: Completions, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>;
    protected _fromReadableStream(readableStream: ReadableStream, options?: Core.RequestOptions): Promise<ChatCompletion>;
    [Symbol.asyncIterator](): AsyncIterator<ChatCompletionChunk>;
    toReadableStream(): ReadableStream;
}
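/*
 * Usage sketch (illustrative, not part of the published typings). On the backend,
 * a stream is typically created with `openai.beta.chat.completions.stream(...)`,
 * which returns a `ChatCompletionStream`; `toReadableStream()` can forward the raw
 * chunks to a browser client, where `ChatCompletionStream.fromReadableStream()`
 * rebuilds the stream. The model name and prompt below are placeholders.
 *
 *   import OpenAI from 'openai';
 *
 *   const openai = new OpenAI();
 *   const stream = openai.beta.chat.completions.stream({
 *     model: 'gpt-4',
 *     messages: [{ role: 'user', content: 'Say hello' }],
 *   });
 *   for await (const chunk of stream) {
 *     process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
 *   }
 *   const completion = await stream.finalChatCompletion();
 */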
/**
 * Represents a streamed chunk of a chat completion response returned by the
 * model, based on the provided input.
*/
export interface ChatCompletionSnapshot {
    /**
     * A unique identifier for the chat completion.
     */
    id: string;
    /**
     * A list of chat completion choices. Can be more than one if `n` is greater
     * than 1.
     */
    choices: Array<ChatCompletionSnapshot.Choice>;
    /**
     * The Unix timestamp (in seconds) of when the chat completion was created.
     */
    created: number;
    /**
     * The model used to generate the completion.
     */
    model: string;
    /**
     * This fingerprint represents the backend configuration that the model runs with.
     *
     * Can be used in conjunction with the `seed` request parameter to understand when
     * backend changes have been made that might impact determinism.
     */
    system_fingerprint?: string;
}
export declare namespace ChatCompletionSnapshot {
    interface Choice {
        /**
         * A chat completion delta generated by streamed model responses.
         */
        message: Choice.Message;
        /**
         * The reason the model stopped generating tokens. This will be `stop` if the model
         * hit a natural stop point or a provided stop sequence, `length` if the maximum
         * number of tokens specified in the request was reached, `content_filter` if
         * content was omitted due to a flag from our content filters, or `function_call`
         * if the model called a function.
         */
        finish_reason: ChatCompletion.Choice['finish_reason'] | null;
        /**
         * Log probability information for the choice.
         */
        logprobs: ChatCompletion.Choice.Logprobs | null;
        /**
         * The index of the choice in the list of choices.
         */
        index: number;
    }
    namespace Choice {
        /**
         * A chat completion delta generated by streamed model responses.
         */
        interface Message {
            /**
             * The contents of the chunk message.
             */
            content?: string | null;
            /**
             * The name and arguments of a function that should be called, as generated by the
             * model.
             */
            function_call?: Message.FunctionCall;
            tool_calls?: Array<Message.ToolCall>;
            /**
             * The role of the author of this message.
             */
            role?: 'system' | 'user' | 'assistant' | 'function' | 'tool';
        }
        namespace Message {
            interface ToolCall {
                /**
                 * The ID of the tool call.
                 */
                id?: string;
                function?: ToolCall.Function;
                /**
                 * The type of the tool.
                 */
                type?: 'function';
            }
            namespace ToolCall {
                interface Function {
                    /**
                     * The arguments to call the function with, as generated by the model in JSON
                     * format. Note that the model does not always generate valid JSON, and may
                     * hallucinate parameters not defined by your function schema. Validate the
                     * arguments in your code before calling your function.
                     */
                    arguments?: string;
                    /**
                     * The name of the function to call.
                     */
                    name?: string;
                }
            }
            /**
             * The name and arguments of a function that should be called, as generated by the
             * model.
             */
            interface FunctionCall {
                /**
                 * The arguments to call the function with, as generated by the model in JSON
                 * format. Note that the model does not always generate valid JSON, and may
                 * hallucinate parameters not defined by your function schema. Validate the
                 * arguments in your code before calling your function.
                 */
                arguments?: string;
                /**
                 * The name of the function to call.
                 */
                name?: string;
            }
        }
    }
}
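/*
 * Sketch of reading an in-progress snapshot (illustrative; assumes a live
 * `ChatCompletionStream` named `stream`). `currentChatCompletionSnapshot` exposes
 * the completion accumulated so far, so partially streamed tool-call arguments
 * can be inspected before the stream finishes.
 *
 *   const snapshot = stream.currentChatCompletionSnapshot;
 *   const toolCall = snapshot?.choices[0]?.message.tool_calls?.[0];
 *   if (toolCall?.function) {
 *     console.log(toolCall.function.name, toolCall.function.arguments);
 *   }
 */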
//# sourceMappingURL=ChatCompletionStream.d.ts.map