Path: blob/main/extensions/copilot/src/extension/byok/vscode-node/test/ollamaProvider.spec.ts
13405 views
/*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

import { describe, expect, it, vi } from 'vitest';
import * as vscode from 'vscode';
import { OllamaLMProvider } from '../ollamaProvider';

describe('OllamaLMProvider', () => {
	// Regression test: a single failing /api/show lookup must not abort model
	// discovery — the provider should still return the models that resolved.
	it('returns successful models when one /api/show lookup fails', async () => {
		const ollamaBaseUrl = 'http://localhost:11434';
		const tagsModels = [{ model: 'good-model-a' }, { model: 'bad-model' }, { model: 'good-model-b' }];
		// Records which model ids /api/show was queried for, in call order.
		const showCalls: string[] = [];

		// Fetcher stub: answers the three Ollama endpoints the provider hits
		// (/api/version, /api/tags, /api/show) and simulates a hard failure
		// for 'bad-model' on /api/show. Any other URL fails the test loudly.
		const fetch = vi.fn(async (url: string, options: { body?: string }) => {
			if (url === `${ollamaBaseUrl}/api/version`) {
				return { json: async () => ({ version: '0.6.4' }) };
			}
			if (url === `${ollamaBaseUrl}/api/tags`) {
				return { json: async () => ({ models: tagsModels }) };
			}
			if (url === `${ollamaBaseUrl}/api/show`) {
				// The model id is sent in the JSON request body.
				const modelId = JSON.parse(options.body ?? '{}').model as string;
				showCalls.push(modelId);
				if (modelId === 'bad-model') {
					throw new Error('simulated /api/show failure');
				}
				// Minimal /api/show payload the provider reads when building
				// model information (family, basename, context length).
				return {
					json: async () => ({
						template: '',
						capabilities: [],
						details: { family: 'llama' },
						remote_model: modelId,
						model_info: {
							'general.basename': modelId,
							'general.architecture': 'llama',
							'llama.context_length': 8192,
						},
					})
				};
			}
			throw new Error(`Unexpected URL in test: ${url}`);
		});

		// No-op logger stub; the sub-logger factories return the same stub so
		// chained logger creation inside the provider keeps working.
		const logService = {
			_serviceBrand: undefined,
			trace: vi.fn(),
			debug: vi.fn(),
			info: vi.fn(),
			warn: vi.fn(),
			error: vi.fn(),
			show: vi.fn(),
			createSubLogger: vi.fn(),
			withExtraTarget: vi.fn(),
		};
		logService.createSubLogger.mockReturnValue(logService);
		logService.withExtraTarget.mockReturnValue(logService);

		const provider = new OllamaLMProvider(
			// BYOK storage stub — no stored API keys or model configs.
			{
				getAPIKey: vi.fn().mockResolvedValue(undefined),
				storeAPIKey: vi.fn().mockResolvedValue(undefined),
				deleteAPIKey: vi.fn().mockResolvedValue(undefined),
				getStoredModelConfigs: vi.fn().mockResolvedValue({}),
				saveModelConfig: vi.fn().mockResolvedValue(undefined),
				removeModelConfig: vi.fn().mockResolvedValue(undefined),
			} as any,
			// Fetcher service wrapping the stubbed fetch above.
			{ fetch } as any,
			// Configuration service stub — reports "not configured".
			{
				isConfigured: vi.fn().mockReturnValue(false),
				getConfig: vi.fn(),
				setConfig: vi.fn(),
			} as any,
			logService as any,
			// Instantiation service stub.
			{
				createInstance: vi.fn().mockReturnValue({}),
			} as any,
			{} as any
		);

		const tokenSource = new vscode.CancellationTokenSource();
		const models = await provider.provideLanguageModelChatInformation(
			{
				silent: false,
				configuration: { url: ollamaBaseUrl },
			},
			tokenSource.token
		);

		// All three models from /api/tags were looked up, in order…
		expect(showCalls).toEqual(['good-model-a', 'bad-model', 'good-model-b']);
		// …but only the two successful /api/show lookups are surfaced.
		expect(models.map(model => model.id)).toEqual(['good-model-a', 'good-model-b']);
	});
});