Path: blob/main/extensions/copilot/src/platform/nesFetch/common/completionsAPI.ts
13401 views
/*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

import { CopilotAnnotation } from '../../completions-core/common/openai/copilotAnnotations';

/**
 * Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).
 *
 * Based (updated 31.10.2024) on https://platform.openai.com/docs/api-reference/completions/object
 * (! except `Choice#finish_reason` can be a null during streaming)
 * (! omits `id`, `model`, and `created` for performance)
 */
export interface Completion {
	/** The list of completion choices the model generated for the input prompt. */
	choices: Completion.Choice[];
	/** This fingerprint represents the backend configuration that the model runs with. */
	system_fingerprint: string;
	/** The object type, which is always "text_completion". */
	object: string;
	/** Usage statistics for the completion request. */
	usage: Completion.Usage | undefined;
}

/**
 * Companion namespace (merged with the {@link Completion} interface) holding the
 * nested shapes of the completion response payload.
 */
export namespace Completion {
	/** A single generated completion candidate within a {@link Completion} response. */
	export interface Choice {
		/** The index of the choice. */
		index: number;
		/** The reason the model stopped generating tokens. */
		finish_reason: FinishReason | null;
		/** The log probabilities of the tokens. */
		logprobs?: LogProbs | null;
		/** The generated text. */
		text: string | undefined;
		/** Copilot-specific annotations */
		copilot_annotations?: { [key: string]: CopilotAnnotation[] };
	}

	/**
	 * The reason the model stopped generating tokens.
	 */
	export enum FinishReason {
		/** If the model hit a natural stop point or a provided stop sequence. */
		Stop = 'stop',
		/** If the maximum number of tokens specified in the request was reached. */
		Length = 'length',
		/** If content was omitted due to a flag from our content filters. */
		ContentFilter = 'content_filter',
	}

	/** Token-level log-probability information for a {@link Choice}. */
	export type LogProbs = {
		/** The list of tokens generated by the model. */
		tokens: string[];
		/** The log probabilities of the tokens. */
		token_logprobs: number[];
		/** The text offsets of the tokens. */
		text_offset: number[];
		/** The top log probabilities of the tokens. */
		top_logprobs: Record<string, number>[];
	};

	/** Usage statistics for the completion request. */
	export interface Usage {
		/** Number of tokens in the generated completion. */
		completion_tokens: number;
		/** Number of tokens in the prompt. */
		prompt_tokens: number;
		/** Total number of tokens used in the request (prompt + completion). */
		total_tokens: number;
		/** Breakdown of tokens used in a completion. */
		completion_tokens_details: TokensDetails;
		/** Breakdown of tokens used in the prompt. */
		prompt_tokens_details: TokensDetails;
	}

	/** Per-category token breakdown used by {@link Usage}. */
	export interface TokensDetails {
		/** Audio input tokens present in the prompt. */
		audio_tokens: number;
		/** Tokens generated by the model for reasoning. */
		reasoning_tokens: number;
	}
}