Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
microsoft
GitHub Repository: microsoft/vscode
Path: blob/main/extensions/copilot/src/platform/nesFetch/common/completionsAPI.ts
13401 views
1
/*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
6
import { CopilotAnnotation } from '../../completions-core/common/openai/copilotAnnotations';
7
8
/**
 * Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).
 *
 * Based (updated 31.10.2024) on https://platform.openai.com/docs/api-reference/completions/object
 * (! except `Choice#finish_reason` can be a null during streaming)
 * (! omits `id`, `model`, and `created` for performance)
 */
export interface Completion {
	/** The list of completion choices the model generated for the input prompt. */
	choices: Completion.Choice[];
	/** This fingerprint represents the backend configuration that the model runs with. */
	system_fingerprint: string;
	/** The object type, which is always "text_completion". */
	object: string;
	/** Usage statistics for the completion request. `undefined` when the response carries no usage block. */
	usage: Completion.Usage | undefined;
}
25
26
/**
 * Auxiliary types for {@link Completion}. Declaration-merged with the
 * `Completion` interface above, so callers reference them as
 * `Completion.Choice`, `Completion.FinishReason`, etc.
 */
export namespace Completion {
	export interface Choice {
		/** The index of the choice. */
		index: number;
		/** The reason the model stopped generating tokens. `null` while a streamed choice is still in progress. */
		finish_reason: FinishReason | null;
		/** The log probabilities of the tokens. */
		logprobs?: LogProbs | null;
		/** The generated text. */
		text: string | undefined;
		/** Copilot-specific annotations, keyed by annotation namespace. */
		copilot_annotations?: { [key: string]: CopilotAnnotation[] };
	}

	/**
	 * The reason the model stopped generating tokens.
	 */
	export enum FinishReason {
		/** If the model hit a natural stop point or a provided stop sequence. */
		Stop = 'stop',
		/** If the maximum number of tokens specified in the request was reached. */
		Length = 'length',
		/** If content was omitted due to a flag from our content filters. */
		ContentFilter = 'content_filter',
	}

	/**
	 * Per-token log-probability data for a choice. The arrays are parallel:
	 * index `i` in each refers to the same generated token.
	 */
	export type LogProbs = {
		/** The list of tokens generated by the model. */
		tokens: string[];
		/** The log probabilities of the tokens. */
		token_logprobs: number[];
		/** The text offsets of the tokens. */
		text_offset: number[];
		/** The top log probabilities of the tokens. */
		top_logprobs: Record<string, number>[];
	};

	export interface Usage {
		/** Number of tokens in the generated completion. */
		completion_tokens: number;
		/** Number of tokens in the prompt. */
		prompt_tokens: number;
		/** Total number of tokens used in the request (prompt + completion). */
		total_tokens: number;
		/** Breakdown of tokens used in a completion. */
		completion_tokens_details: TokensDetails;
		/** Breakdown of tokens used in the prompt. */
		prompt_tokens_details: TokensDetails;
	}

	/** Token sub-counts shared by the prompt and completion breakdowns in {@link Usage}. */
	export interface TokensDetails {
		/** Audio input tokens present in the prompt. */
		audio_tokens: number;
		/** Tokens generated by the model for reasoning. */
		reasoning_tokens: number;
	}
}
83
84