// Source: microsoft/vscode — extensions/copilot/src/extension/byok/vscode-node/test/ollamaProvider.spec.ts
/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

import { describe, expect, it, vi } from 'vitest';
7
import * as vscode from 'vscode';
8
import { OllamaLMProvider } from '../ollamaProvider';
9
10
describe('OllamaLMProvider', () => {
11
it('returns successful models when one /api/show lookup fails', async () => {
12
const ollamaBaseUrl = 'http://localhost:11434';
13
const tagsModels = [{ model: 'good-model-a' }, { model: 'bad-model' }, { model: 'good-model-b' }];
14
const showCalls: string[] = [];
15
16
const fetch = vi.fn(async (url: string, options: { body?: string }) => {
17
if (url === `${ollamaBaseUrl}/api/version`) {
18
return { json: async () => ({ version: '0.6.4' }) };
19
}
20
if (url === `${ollamaBaseUrl}/api/tags`) {
21
return { json: async () => ({ models: tagsModels }) };
22
}
23
if (url === `${ollamaBaseUrl}/api/show`) {
24
const modelId = JSON.parse(options.body ?? '{}').model as string;
25
showCalls.push(modelId);
26
if (modelId === 'bad-model') {
27
throw new Error('simulated /api/show failure');
28
}
29
return {
30
json: async () => ({
31
template: '',
32
capabilities: [],
33
details: { family: 'llama' },
34
remote_model: modelId,
35
model_info: {
36
'general.basename': modelId,
37
'general.architecture': 'llama',
38
'llama.context_length': 8192,
39
},
40
})
41
};
42
}
43
throw new Error(`Unexpected URL in test: ${url}`);
44
});
45
46
const logService = {
47
_serviceBrand: undefined,
48
trace: vi.fn(),
49
debug: vi.fn(),
50
info: vi.fn(),
51
warn: vi.fn(),
52
error: vi.fn(),
53
show: vi.fn(),
54
createSubLogger: vi.fn(),
55
withExtraTarget: vi.fn(),
56
};
57
logService.createSubLogger.mockReturnValue(logService);
58
logService.withExtraTarget.mockReturnValue(logService);
59
60
const provider = new OllamaLMProvider(
61
{
62
getAPIKey: vi.fn().mockResolvedValue(undefined),
63
storeAPIKey: vi.fn().mockResolvedValue(undefined),
64
deleteAPIKey: vi.fn().mockResolvedValue(undefined),
65
getStoredModelConfigs: vi.fn().mockResolvedValue({}),
66
saveModelConfig: vi.fn().mockResolvedValue(undefined),
67
removeModelConfig: vi.fn().mockResolvedValue(undefined),
68
} as any,
69
{ fetch } as any,
70
{
71
isConfigured: vi.fn().mockReturnValue(false),
72
getConfig: vi.fn(),
73
setConfig: vi.fn(),
74
} as any,
75
logService as any,
76
{
77
createInstance: vi.fn().mockReturnValue({}),
78
} as any,
79
{} as any
80
);
81
82
const tokenSource = new vscode.CancellationTokenSource();
83
const models = await provider.provideLanguageModelChatInformation(
84
{
85
silent: false,
86
configuration: { url: ollamaBaseUrl },
87
},
88
tokenSource.token
89
);
90
91
expect(showCalls).toEqual(['good-model-a', 'bad-model', 'good-model-b']);
92
expect(models.map(model => model.id)).toEqual(['good-model-a', 'good-model-b']);
93
});
94
});
95
96