Book a Demo!
CoCalc Logo Icon
Store · Features · Docs · Share · Support · News · About · Policies · Sign Up · Sign In
sagemathinc
GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/util/db-schema/llm-utils.test.ts
5808 views
1
import {
2
DEFAULT_LLM_PRIORITY,
3
DEFAULT_MODEL,
4
getValidLanguageModelName,
5
isCoreLanguageModel,
6
isFreeModel,
7
LANGUAGE_MODEL_SERVICES,
8
LANGUAGE_MODELS,
9
LanguageService,
10
LLM_DESCR,
11
LLM_COST,
12
LLMServicesAvailable,
13
model2vendor,
14
model2service,
15
OLLAMA_PREFIX,
16
SERVICES,
17
USER_SELECTABLE_LANGUAGE_MODELS,
18
USER_SELECTABLE_LLMS_BY_VENDOR,
19
} from "./llm-utils";
20
21
describe("llm", () => {
22
const is_cocalc_com = true; // otherwise, the test makes no sense
23
24
test("isFreeModel", () => {
25
expect(isFreeModel("gpt-3", is_cocalc_com)).toBe(true);
26
expect(isFreeModel("gpt-4", is_cocalc_com)).toBe(false);
27
// WARNING: if the following breaks, and ollama becomes non-free, then a couple of assumptions are broken as well.
28
// search for model2service(...) as LanguageService in the codebase!
29
expect(isFreeModel(`${OLLAMA_PREFIX}-1`, is_cocalc_com)).toBe(true);
30
});
31
32
test.each(Object.keys(LLM_COST))(
33
"is valid model names in LLM_COST: '%s'",
34
(model) => {
35
expect(LANGUAGE_MODELS.includes(model as any)).toBe(true);
36
},
37
);
38
39
test("all user selectable ones are valid", () => {
40
for (const model of USER_SELECTABLE_LANGUAGE_MODELS) {
41
expect(LANGUAGE_MODELS.includes(model)).toBe(true);
42
}
43
});
44
45
// none of the user selectable models start with any of the vendor prefixes
46
test.each(USER_SELECTABLE_LANGUAGE_MODELS)(
47
"model '%s' does not start with any vendor prefix",
48
(model) => {
49
for (const prefix of LANGUAGE_MODEL_SERVICES) {
50
expect(model.startsWith(prefix)).toBe(false);
51
}
52
},
53
);
54
55
test.each(LANGUAGE_MODELS)(
56
`check that model2vendor('%s') knows the model`,
57
(model) => {
58
const vendor = model2vendor(model);
59
expect(LANGUAGE_MODEL_SERVICES.includes(vendor.name)).toBe(true);
60
},
61
);
62
63
test("model2service handles xAI models", () => {
64
expect(model2service("grok-4-1-fast-non-reasoning-16k")).toBe(
65
"xai-grok-4-1-fast-non-reasoning-16k",
66
);
67
});
68
69
test("model2service handles gpt-5.2 models", () => {
70
expect(model2service("gpt-5.2-8k")).toBe("openai-gpt-5.2-8k");
71
});
72
73
test("Gemini 3 Flash description omits preview", () => {
74
expect(LLM_DESCR["gemini-3-flash-preview-16k"].toLowerCase()).not.toContain(
75
"preview",
76
);
77
});
78
79
test(`check model by vendor`, () => {
80
for (const vendor in USER_SELECTABLE_LLMS_BY_VENDOR) {
81
const models = USER_SELECTABLE_LLMS_BY_VENDOR[vendor];
82
for (const model of models) {
83
const v = model2vendor(model);
84
expect(v.name).toBe(vendor);
85
expect(v.url).toContain("https://");
86
}
87
}
88
});
89
90
test("just checking the price", () => {
91
expect(1_000_000 * LLM_COST["gpt-4"].prompt_tokens).toBeCloseTo(30);
92
expect(1_000_000 * LLM_COST["gpt-4"].completion_tokens).toBeCloseTo(60);
93
expect(1_000_000 * LLM_COST["claude-3-opus"].prompt_tokens).toBeCloseTo(15);
94
expect(1_000_000 * LLM_COST["claude-3-opus"].completion_tokens).toBeCloseTo(
95
75,
96
);
97
});
98
99
test("priority list is a shuffle of all llm vendors", () => {
100
// except for "user"
101
const prio = DEFAULT_LLM_PRIORITY;
102
const vend = SERVICES;
103
// test, that those lists have the same elements
104
expect(prio.length).toBe(vend.length);
105
for (const v of vend) {
106
expect(prio.includes(v)).toBe(true);
107
}
108
});
109
110
test("getting valid language model", () => {
111
const selectable_llms = [...USER_SELECTABLE_LANGUAGE_MODELS];
112
const notAvailable = selectable_llms.pop();
113
114
function getModel(model: LanguageService, disabled?: LanguageService) {
115
const allEnabled = LANGUAGE_MODEL_SERVICES.reduce((acc, svc) => {
116
acc[svc] = disabled !== svc;
117
return acc;
118
}, {}) as LLMServicesAvailable;
119
return getValidLanguageModelName({
120
model,
121
filter: allEnabled,
122
ollama: ["phi3"],
123
custom_openai: ["bar"],
124
selectable_llms,
125
});
126
}
127
128
// meaningless name
129
expect(getModel("foobar")).toEqual(DEFAULT_MODEL);
130
expect(getModel("baz-delta99")).toEqual(DEFAULT_MODEL);
131
// gpt 3.5 is disabled
132
expect(getModel("gpt-3.5-turbo")).toEqual(DEFAULT_MODEL);
133
// not available
134
expect(
135
typeof notAvailable === "string" && isCoreLanguageModel(notAvailable),
136
).toBe(true);
137
if (typeof notAvailable === "string") {
138
expect(getModel(notAvailable)).toEqual(DEFAULT_MODEL);
139
}
140
// not disabled
141
expect(getModel("mistral-large-latest")).toEqual("mistral-large-latest");
142
expect(getModel("gpt-4")).toEqual("gpt-4");
143
expect(getModel(DEFAULT_MODEL)).toEqual(DEFAULT_MODEL);
144
expect(getModel("magistral-medium-latest")).toEqual(DEFAULT_MODEL);
145
expect(getModel("mistral-large-latest")).toEqual("mistral-large-latest");
146
expect(getModel("claude-4-5-haiku-8k")).toEqual("claude-4-5-haiku-8k");
147
// anthropic service disabled
148
expect(getModel("claude-4-5-haiku-8k", "anthropic")).toEqual(DEFAULT_MODEL);
149
// ollama
150
expect(getModel("ollama-foo")).toEqual(DEFAULT_MODEL);
151
expect(getModel("ollama-phi3")).toEqual("ollama-phi3");
152
// openai api
153
expect(getModel("custom_openai-foo")).toEqual(DEFAULT_MODEL);
154
expect(getModel("custom_openai-bar")).toEqual("custom_openai-bar");
155
// user models: there are no further checks
156
expect(getModel("user-custom_openai-foo")).toEqual(
157
"user-custom_openai-foo",
158
);
159
expect(getModel("user-openai-gpt-3.5-turbo")).toEqual(
160
"user-openai-gpt-3.5-turbo",
161
);
162
// it's ok to use a model if disabled by the admin, since it's their key
163
expect(getModel("user-anthropic-claude-3-5-haiku-8k", "anthropic")).toEqual(
164
"user-anthropic-claude-3-5-haiku-8k",
165
);
166
// meaningless user service
167
expect(getModel("user-baz-delta99")).toEqual(DEFAULT_MODEL);
168
});
169
});
170
171