GitHub Repository: microsoft/vscode
Path: blob/main/extensions/copilot/src/extension/prompts/node/base/promptRenderer.ts
/*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

import { BasePromptElementProps, PromptRenderer as BasePromptRenderer, HTMLTracer, ITokenizer, JSONTree, MetadataMap, OutputMode, QueueItem, Raw, RenderPromptResult } from '@vscode/prompt-tsx';
import type { ChatResponsePart, ChatResponseProgressPart, LanguageModelToolTokenizationOptions, Progress } from 'vscode';
import { ChatLocation } from '../../../../platform/chat/common/commonTypes';
import { toTextPart } from '../../../../platform/chat/common/globalStringUtils';
import { ConfigKey, IConfigurationService } from '../../../../platform/configuration/common/configurationService';
import { IEndpointProvider } from '../../../../platform/endpoint/common/endpointProvider';
import { ILogService } from '../../../../platform/log/common/logService';
import { IChatEndpoint } from '../../../../platform/networking/common/networking';
import { IRequestLogger } from '../../../../platform/requestLogger/common/requestLogger';
import { ITokenizerProvider } from '../../../../platform/tokenizer/node/tokenizer';
import { createServiceIdentifier } from '../../../../util/common/services';
import { isLocation } from '../../../../util/common/types';
import { CancellationToken } from '../../../../util/vs/base/common/cancellation';
import { URI } from '../../../../util/vs/base/common/uri';
import { IInstantiationService } from '../../../../util/vs/platform/instantiation/common/instantiation';
import { ServiceCollection } from '../../../../util/vs/platform/instantiation/common/serviceCollection';
import { ChatResponseReferencePart, Location, Uri } from '../../../../vscodeTypes';
import { RendererVisualizations } from '../../../inlineChat/node/rendererVisualization';
import { getUniqueReferences, PromptReference } from '../../../prompt/common/conversation';
import { IBuildPromptContext } from '../../../prompt/common/intents';
import { IIntent } from '../../../prompt/node/intents';
import { PromptElementCtor } from './promptElement';

/**
 * Allows us to use dependency injection to pass the fully fledged IChatEndpoint to the prompt element being rendered.
 */
export type IPromptEndpoint = IChatEndpoint & {
	_serviceBrand: undefined;
};
export const IPromptEndpoint = createServiceIdentifier<IPromptEndpoint>('IPromptEndpoint');
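
// A usage sketch (illustrative only; `MyElement` is hypothetical, `PromptElement`
// comes from @vscode/prompt-tsx): because IPromptEndpoint is registered as a
// service, prompt elements created through the PromptRenderer below can have the
// endpoint injected like any other service:
//
//	class MyElement extends PromptElement<BasePromptElementProps> {
//		constructor(
//			props: BasePromptElementProps,
//			@IPromptEndpoint private readonly promptEndpoint: IPromptEndpoint,
//		) {
//			super(props);
//		}
//	}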

/**
 * Convenience intent invocation that uses a renderer for prompt crafting.
 */
export abstract class RendererIntentInvocation {

	constructor(
		readonly intent: IIntent,
		readonly location: ChatLocation,
		readonly endpoint: IChatEndpoint,
	) { }

	async buildPrompt(promptParams: IBuildPromptContext, progress: Progress<ChatResponseReferencePart | ChatResponseProgressPart>, token: CancellationToken): Promise<RenderPromptResult<OutputMode.Raw> & { references: PromptReference[] }> {
		const renderer = await this.createRenderer(promptParams, this.endpoint, progress, token);
		return await renderer.render(progress, token);
	}

	abstract createRenderer(promptParams: IBuildPromptContext, endpoint: IChatEndpoint, progress: Progress<ChatResponseReferencePart | ChatResponseProgressPart>, token: CancellationToken): BasePromptRenderer<any, OutputMode.Raw> | Promise<BasePromptRenderer<any, OutputMode.Raw>>;
}
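
// A subclass sketch (hypothetical names, illustrative only): implementations
// supply `createRenderer`, and the inherited `buildPrompt` drives the render.
//
//	class ExplainIntentInvocation extends RendererIntentInvocation {
//		override createRenderer(promptParams: IBuildPromptContext, endpoint: IChatEndpoint) {
//			// `instantiationService` is assumed to be injected by the subclass.
//			return PromptRenderer.create(this.instantiationService, endpoint, MyPromptElement, { promptParams });
//		}
//	}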
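
/**
 * Prompt renderer which creates prompt elements through the instantiation
 * service (so elements can use dependency injection) and, when internal
 * tracing is enabled, records an HTML trace of each render.
 */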
export class PromptRenderer<P extends BasePromptElementProps> extends BasePromptRenderer<P, OutputMode.Raw> {
	private ctorName?: string; // set if and only if tracing is enabled

	public static create<P extends BasePromptElementProps>(
		instantiationService: IInstantiationService,
		endpoint: IChatEndpoint,
		ctor: PromptElementCtor<P, any>,
		props: P,
	) {
		// TODO@Alex, TODO@Joh: instantiationService.createInstance doesn't work here
		const hydratedInstaService = instantiationService.createChild(new ServiceCollection([IPromptEndpoint, endpoint]));
		return hydratedInstaService.invokeFunction((accessor) => {
			const tokenizerProvider = accessor.get(ITokenizerProvider);
			let renderer = new PromptRenderer(hydratedInstaService, endpoint, ctor, props, tokenizerProvider, accessor.get(IRequestLogger), accessor.get(ILogService), accessor.get(IConfigurationService));

			const visualizations = RendererVisualizations.getIfVisualizationTestIsRunning();
			if (visualizations) {
				renderer = visualizations.decorateAndRegister(renderer, ctor.name);
			}

			return renderer;
		});
	}

	constructor(
		private readonly _instantiationService: IInstantiationService,
		protected readonly endpoint: IChatEndpoint,
		ctor: PromptElementCtor<P, any>,
		props: P,
		@ITokenizerProvider tokenizerProvider: ITokenizerProvider,
		@IRequestLogger private readonly _requestLogger: IRequestLogger,
		@ILogService private readonly _logService: ILogService,
		@IConfigurationService configurationService: IConfigurationService,
	) {
		const tokenizer = tokenizerProvider.acquireTokenizer(endpoint);
		super(endpoint, ctor, props, tokenizer);

		if (configurationService.getConfig(ConfigKey.TeamInternal.EnablePromptRendererTracing)) {
			this.ctorName = ctor.name || '<anonymous>';
			this.tracer = new HTMLTracer();
		}
	}

	override createElement(element: QueueItem<PromptElementCtor<P, any>, P>, ...args: any[]) {
		return this._instantiationService.createInstance(element.ctor, element.props, ...args);
	}

	override async render(progress?: Progress<ChatResponsePart> | undefined, token?: CancellationToken | undefined, opts?: Partial<{ trace: boolean }>): Promise<RenderPromptResult> {
		const result = await super.render(progress, token);
		const defaultOptions = { trace: true };
		opts = { ...defaultOptions, ...opts };
		if (this.tracer && !!opts.trace) {
			this._requestLogger.addPromptTrace(this.ctorName!, this.endpoint, result, this.tracer as HTMLTracer);
		}

		// Collapse consecutive system messages, because CAPI currently expects a single
		// system message per prompt. Note: the merged text may tokenize slightly
		// differently, so actual token usage can come in slightly under the reported
		// `RenderPromptResult.tokenCount`.
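		// e.g. [system("A"), system("B"), user("C")] becomes [system("A\nB"), user("C")]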
		for (let i = 1; i < result.messages.length; i++) {
			const current = result.messages[i];
			const prev = result.messages[i - 1];
			if (current.role === Raw.ChatRole.System && prev.role === Raw.ChatRole.System) {
				const lastContent = prev.content.at(-1);
				const nextContent = current.content.at(0);
				if (lastContent && nextContent && lastContent.type === Raw.ChatCompletionContentPartKind.Text && nextContent.type === Raw.ChatCompletionContentPartKind.Text) {
					lastContent.text = lastContent.text.trimEnd() + '\n' + nextContent.text;
					prev.content = prev.content.concat(current.content.slice(1));
				} else {
					prev.content.push(toTextPart('\n'));
					prev.content = prev.content.concat(current.content);
				}
				result.messages.splice(i, 1);
				i--;
			}
		}

		const references = result.references.filter(ref => this.validateReference(ref));
		this._instantiationService.dispose(); // Dispose the hydrated instantiation service
		return { ...result, references: getUniqueReferences(references) };
	}

	private validateReference(reference: PromptReference) {
		const validateLocation = (value: Uri | Location) => {
			const uri = isLocation(value) ? value.uri : value;
			if (!URI.isUri(uri)) {
				this._logService.warn(`Invalid PromptReference, uri not an instance of URI: ${uri}. Try to find the code that is creating this reference and fix it.`);
				return false;
			}
			return true;
		};
		const refAnchor = reference.anchor;
		if ('variableName' in refAnchor) {
			return refAnchor.value === undefined || validateLocation(refAnchor.value);
		}
		return validateLocation(refAnchor);
	}

	async countTokens(token?: CancellationToken): Promise<number> {
		const result = await super.render(undefined, token);
		return result.tokenCount;
	}
}

export async function renderPromptElement<P extends BasePromptElementProps>(
	instantiationService: IInstantiationService,
	endpoint: IChatEndpoint,
	ctor: PromptElementCtor<P, any>,
	props: P,
	progress?: Progress<ChatResponseProgressPart>,
	token?: CancellationToken,
): Promise<{ messages: Raw.ChatMessage[]; tokenCount: number; metadatas: MetadataMap; references: PromptReference[] }> {
	const renderer = PromptRenderer.create(instantiationService, endpoint, ctor, props);
	const { messages, tokenCount, references, metadata } = await renderer.render(progress, token);
	return { messages, tokenCount, metadatas: metadata, references: getUniqueReferences(references) };
}
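
// A usage sketch (hypothetical element and props, illustrative only):
//
//	const { messages, tokenCount } = await renderPromptElement(
//		instantiationService, endpoint, MyPromptElement, myProps, progress, token);
//	// `messages` is ready to send to the endpoint; `tokenCount` is the prompt
//	// size as measured by the endpoint's tokenizer.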

// The below all exists to wrap `renderElementJSON` to call our instantiation service

class PromptRendererForJSON<P extends BasePromptElementProps> extends BasePromptRenderer<P, OutputMode.Raw> {
	constructor(
		ctor: PromptElementCtor<P, any>,
		props: P,
		tokenOptions: LanguageModelToolTokenizationOptions | undefined,
		chatEndpoint: IChatEndpoint,
		private readonly instantiationService: IInstantiationService,
	) {
		// Copied from prompt-tsx to map the vscode tool tokenOptions to ITokenizer
		const tokenizer: ITokenizer<OutputMode.Raw> = {
			mode: OutputMode.Raw,
			countMessageTokens(message) {
				throw new Error('Tools may only return text, not messages.');
			},
			tokenLength(text, token) {
				if (text.type === Raw.ChatCompletionContentPartKind.Text) {
					return Promise.resolve(tokenOptions?.countTokens(text.text, token) ?? Promise.resolve(1));
				} else {
					return Promise.resolve(1);
				}
			},
		};

		super({ modelMaxPromptTokens: tokenOptions?.tokenBudget ?? chatEndpoint.modelMaxPromptTokens }, ctor, props, tokenizer);
	}

	override createElement(element: QueueItem<PromptElementCtor<P, any>, P>, ...args: any[]) {
		return this.instantiationService.createInstance(element.ctor, element.props, ...args);
	}
}

export async function renderPromptElementJSON<P extends BasePromptElementProps>(
	instantiationService: IInstantiationService,
	ctor: PromptElementCtor<P, any>,
	props: P,
	tokenOptions?: LanguageModelToolTokenizationOptions,
	token?: CancellationToken
): Promise<JSONTree.PromptElementJSON> {
	// todo@connor4312: we don't know what model the tool call will use, just assume copilot base
	// todo@lramos15: We should pass in the endpoint provider rather than using invokeFunction, but this was easier
	const endpoint = await instantiationService.invokeFunction(async (accessor) => {
		const endpointProvider = accessor.get(IEndpointProvider);
		return await endpointProvider.getChatEndpoint('copilot-base');
	});
	const hydratedInstaService = instantiationService.createChild(new ServiceCollection([IPromptEndpoint, endpoint]));
	const renderer = new PromptRendererForJSON(ctor as any, props, tokenOptions, endpoint, hydratedInstaService);
	return await renderer.renderElementJSON(token);
}
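
// A usage sketch (hypothetical element, illustrative only), e.g. from a
// LanguageModelTool's invoke() when returning a prompt-tsx tool result:
//
//	const json = await renderPromptElementJSON(
//		instantiationService, MyToolResultElement, { input }, options.tokenizationOptions, token);
//	return new LanguageModelToolResult([new LanguageModelPromptTsxPart(json)]);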