Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
sagemathinc
GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/frontend/editors/markdown-input/mentionable-users.tsx
5854 views
1
/*
2
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
3
* License: MS-RSL – see LICENSE.md for details
4
*/
5
6
import { Tooltip } from "antd";
7
import { List } from "immutable";
8
import { isEmpty } from "lodash";
9
import { Avatar } from "@cocalc/frontend/account/avatar/avatar";
10
import { useLanguageModelSetting } from "@cocalc/frontend/account/useLanguageModelSetting";
11
import { redux, useMemo, useTypedRedux } from "@cocalc/frontend/app-framework";
12
import AnthropicAvatar from "@cocalc/frontend/components/anthropic-avatar";
13
import GoogleGeminiLogo from "@cocalc/frontend/components/google-gemini-avatar";
14
import { LanguageModelVendorAvatar } from "@cocalc/frontend/components/language-model-icon";
15
import MistralAvatar from "@cocalc/frontend/components/mistral-avatar";
16
import OpenAIAvatar from "@cocalc/frontend/components/openai-avatar";
17
import XAIAvatar from "@cocalc/frontend/components/xai-avatar";
18
import { LLMModelPrice } from "@cocalc/frontend/frame-editors/llm/llm-selector";
19
import { useUserDefinedLLM } from "@cocalc/frontend/frame-editors/llm/use-userdefined-llm";
20
import { useProjectContext } from "@cocalc/frontend/project/context";
21
import {
22
ANTHROPIC_MODELS,
23
GOOGLE_MODELS,
24
LLMServicesAvailable,
25
LLM_DESCR,
26
LLM_USERNAMES,
27
LanguageModel,
28
MISTRAL_MODELS,
29
MODELS_OPENAI,
30
UserDefinedLLM,
31
fromCustomOpenAIModel,
32
fromOllamaModel,
33
isCustomOpenAI,
34
isOllamaLLM,
35
isUserDefinedModel,
36
model2service,
37
model2vendor,
38
toCustomOpenAIModel,
39
toOllamaModel,
40
toUserLLMModelName,
41
XAI_MODELS,
42
} from "@cocalc/util/db-schema/llm-utils";
43
import { cmp, timestamp_cmp, trunc_middle } from "@cocalc/util/misc";
44
import { CustomLLMPublic } from "@cocalc/util/types/llm";
45
import { Item as CompleteItem } from "./complete";
46
47
// we make the show_llm_main_menu field required, to avoid forgetting to set it ;-)
// (Item entries below always set it: true for users and for the currently
// selected default LLM, false for all other LLM entries.)
type Item = CompleteItem & Required<Pick<CompleteItem, "show_llm_main_menu">>;
49
50
interface Opts {
  // avatar size (px) used for project collaborators and for the currently
  // selected default language model (the one shown in the main menu)
  avatarUserSize?: number;
  // avatar size (px) used for all other (non-default) language model entries
  avatarLLMSize?: number;
}
54
55
export function useMentionableUsers(): (
56
search: string | undefined,
57
opts?: Opts,
58
) => Item[] {
59
const { project_id, enabledLLMs } = useProjectContext();
60
61
const selectableLLMs = useTypedRedux("customize", "selectable_llms");
62
const ollama = useTypedRedux("customize", "ollama");
63
const custom_openai = useTypedRedux("customize", "custom_openai");
64
const user_llm = useUserDefinedLLM();
65
66
// the current default model. This is always a valid LLM, even if none has ever been selected.
67
const [model] = useLanguageModelSetting();
68
69
return useMemo(() => {
70
return (search: string | undefined, opts?: Opts) => {
71
return mentionableUsers({
72
search,
73
project_id,
74
enabledLLMs,
75
model,
76
ollama: ollama?.toJS() ?? {},
77
custom_openai: custom_openai?.toJS() ?? {},
78
user_llm,
79
selectableLLMs,
80
opts,
81
});
82
};
83
}, [project_id, JSON.stringify(enabledLLMs), ollama, custom_openai, model]);
84
}
85
86
interface Props {
  // current search string (lowercased substring match); undefined means "no filter"
  search: string | undefined;
  project_id: string;
  // the currently selected default language model
  model: LanguageModel;
  // server-configured Ollama models, keyed by model name
  ollama: { [key: string]: CustomLLMPublic };
  // server-configured custom OpenAI-compatible models, keyed by model name
  custom_openai: { [key: string]: CustomLLMPublic };
  // which LLM service families are enabled for this project
  enabledLLMs: LLMServicesAvailable;
  // admin-selected list of models users may pick from
  selectableLLMs: List<string>;
  // models the user configured themselves
  user_llm: UserDefinedLLM[];
  opts?: Opts;
}
97
98
function mentionableUsers({
99
search,
100
project_id,
101
enabledLLMs,
102
model,
103
ollama,
104
custom_openai,
105
selectableLLMs,
106
user_llm,
107
opts,
108
}: Props): Item[] {
109
const { avatarUserSize = 24, avatarLLMSize = 24 } = opts ?? {};
110
111
const users = redux
112
.getStore("projects")
113
.getIn(["project_map", project_id, "users"]);
114
115
const last_active = redux
116
.getStore("projects")
117
.getIn(["project_map", project_id, "last_active"]);
118
119
if (users == null || last_active == null) return []; // e.g., for an admin
120
121
const my_account_id = redux.getStore("account").get("account_id");
122
123
function getProjectUsers() {
124
const project_users: {
125
account_id: string;
126
last_active: Date | undefined;
127
}[] = [];
128
for (const [account_id] of users) {
129
project_users.push({
130
account_id,
131
last_active: last_active.get(account_id),
132
});
133
}
134
project_users.sort((a, b) => {
135
// always push self to bottom...
136
if (a.account_id == my_account_id) {
137
return 1;
138
}
139
if (b.account_id == my_account_id) {
140
return -1;
141
}
142
if (a == null || b == null) return cmp(a.account_id, b.account_id);
143
if (a == null && b != null) return 1;
144
if (a != null && b == null) return -1;
145
return timestamp_cmp(a, b, "last_active");
146
});
147
return project_users;
148
}
149
150
const project_users = getProjectUsers();
151
152
const users_store = redux.getStore("users");
153
154
const mentions: Item[] = [];
155
156
if (enabledLLMs.openai) {
157
// NOTE: all modes are included, including the 16k version, because:
158
// (1) if you use GPT-3.5 too much you hit your limit,
159
// (2) this is a non-free BUT CHEAP model you can actually use after hitting your limit, which is much cheaper than GPT-4.
160
for (const moai of MODELS_OPENAI) {
161
if (!selectableLLMs.includes(moai)) continue;
162
const show_llm_main_menu = moai === model;
163
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
164
const v = "openai";
165
const m = moai.replace(/-/g, "");
166
const n = LLM_USERNAMES[moai].replace(/ /g, "");
167
const search_term = `${v}chat${m}${n}`.toLowerCase();
168
if (!search || search_term.includes(search)) {
169
mentions.push({
170
value: model2service(moai),
171
label: (
172
<LLMTooltip model={moai}>
173
<OpenAIAvatar size={size} /> {LLM_USERNAMES[moai]}{" "}
174
<LLMModelPrice model={moai} floatRight />
175
</LLMTooltip>
176
),
177
search: search_term,
178
is_llm: true,
179
show_llm_main_menu,
180
});
181
}
182
}
183
}
184
185
if (enabledLLMs.google) {
186
for (const m of GOOGLE_MODELS) {
187
if (!selectableLLMs.includes(m)) continue;
188
const show_llm_main_menu = m === model;
189
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
190
const v = model2vendor(m);
191
const search_term = `${v}${m.replace(/-/g, "").toLowerCase()}`;
192
if (!search || search_term.includes(search)) {
193
mentions.push({
194
value: model2service(m),
195
label: (
196
<LLMTooltip model={m}>
197
<GoogleGeminiLogo size={size} /> {LLM_USERNAMES[m]}{" "}
198
<LLMModelPrice model={m} floatRight />
199
</LLMTooltip>
200
),
201
search: search_term,
202
is_llm: true,
203
show_llm_main_menu,
204
});
205
}
206
}
207
}
208
209
if (enabledLLMs.xai) {
210
for (const m of XAI_MODELS) {
211
if (!selectableLLMs.includes(m)) continue;
212
const show_llm_main_menu = m === model;
213
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
214
const name = LLM_USERNAMES[m] ?? m;
215
const vendor = model2vendor(m);
216
const search_term =
217
`${vendor.name}${m.replace(/-/g, "")}${name.replace(/ /g, "")}`.toLowerCase();
218
if (!search || search_term.includes(search)) {
219
mentions.push({
220
value: model2service(m),
221
label: (
222
<LLMTooltip model={m}>
223
<XAIAvatar size={size} /> {name}{" "}
224
<LLMModelPrice model={m} floatRight />
225
</LLMTooltip>
226
),
227
search: search_term,
228
is_llm: true,
229
show_llm_main_menu,
230
});
231
}
232
}
233
}
234
235
if (enabledLLMs.mistralai) {
236
for (const m of MISTRAL_MODELS) {
237
if (!selectableLLMs.includes(m)) continue;
238
const show_llm_main_menu = m === model;
239
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
240
const name = LLM_USERNAMES[m] ?? m;
241
const s = model2vendor(m);
242
const search_term = `${s}${m}${name}`.toLowerCase();
243
if (!search || search_term.includes(search)) {
244
mentions.push({
245
value: model2service(m),
246
label: (
247
<LLMTooltip model={m}>
248
<MistralAvatar size={size} /> {name}{" "}
249
<LLMModelPrice model={m} floatRight />
250
</LLMTooltip>
251
),
252
search: search_term,
253
is_llm: true,
254
show_llm_main_menu,
255
});
256
}
257
}
258
}
259
260
if (enabledLLMs.anthropic) {
261
for (const m of ANTHROPIC_MODELS) {
262
if (!selectableLLMs.includes(m)) continue;
263
const show_llm_main_menu = m === model;
264
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
265
const name = LLM_USERNAMES[m] ?? m;
266
const s = model2vendor(m);
267
const search_term = `${s}${m}${name}`.toLowerCase();
268
if (!search || search_term.includes(search)) {
269
mentions.push({
270
value: model2service(m),
271
label: (
272
<LLMTooltip model={m}>
273
<AnthropicAvatar size={size} /> {name}{" "}
274
<LLMModelPrice model={m} floatRight />
275
</LLMTooltip>
276
),
277
search: search_term,
278
is_llm: true,
279
show_llm_main_menu,
280
});
281
}
282
}
283
}
284
285
if (enabledLLMs.ollama && !isEmpty(ollama)) {
286
for (const [m, conf] of Object.entries(ollama)) {
287
const show_llm_main_menu =
288
isOllamaLLM(model) && m === fromOllamaModel(model);
289
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
290
const value = toOllamaModel(m);
291
const search_term = `${m}${value}${conf.display}`.toLowerCase();
292
if (!search || search_term.includes(search)) {
293
mentions.push({
294
value,
295
label: (
296
<span>
297
<LanguageModelVendorAvatar model={value} size={size} />{" "}
298
{conf.display} <LLMModelPrice model={m} floatRight />
299
</span>
300
),
301
search: search_term,
302
is_llm: true,
303
show_llm_main_menu,
304
});
305
}
306
}
307
}
308
309
if (enabledLLMs.custom_openai && !isEmpty(custom_openai)) {
310
for (const [m, conf] of Object.entries(custom_openai)) {
311
const show_llm_main_menu =
312
isCustomOpenAI(model) && m === fromCustomOpenAIModel(model);
313
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
314
const value = toCustomOpenAIModel(m);
315
const search_term = `${m}${value}${conf.display}`.toLowerCase();
316
if (!search || search_term.includes(search)) {
317
mentions.push({
318
value,
319
label: (
320
<span>
321
<LanguageModelVendorAvatar model={value} size={size} />{" "}
322
{conf.display} <LLMModelPrice model={m} floatRight />
323
</span>
324
),
325
search: search_term,
326
is_llm: true,
327
show_llm_main_menu,
328
});
329
}
330
}
331
}
332
333
if (!isEmpty(user_llm)) {
334
for (const llm of user_llm) {
335
const m = toUserLLMModelName(llm);
336
const show_llm_main_menu = isUserDefinedModel(model) && m === model;
337
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
338
const value = m;
339
const search_term = `${value}${llm.display}`.toLowerCase();
340
if (!search || search_term.includes(search)) {
341
mentions.push({
342
value,
343
label: (
344
<span>
345
<LanguageModelVendorAvatar model={value} size={size} />{" "}
346
{llm.display}
347
</span>
348
),
349
search: search_term,
350
is_llm: true,
351
show_llm_main_menu,
352
});
353
}
354
}
355
}
356
357
for (const { account_id } of project_users) {
358
const fullname = users_store.get_name(account_id) ?? "";
359
const s = fullname.toLowerCase();
360
if (search != null && s.indexOf(search) == -1) continue;
361
const name = trunc_middle(fullname, 64);
362
const label = (
363
<span>
364
<Avatar account_id={account_id} size={avatarUserSize} /> {name}
365
</span>
366
);
367
mentions.push({
368
value: account_id,
369
label,
370
search: s,
371
is_llm: false,
372
show_llm_main_menu: true, // irrelevant, but that's what it will do for standard user accounts
373
});
374
}
375
376
return mentions;
377
}
378
379
function LLMTooltip({
380
model,
381
children,
382
}: {
383
model: string;
384
children: React.ReactNode;
385
}) {
386
const descr = LLM_DESCR[model];
387
const title = <>{descr}</>;
388
return (
389
<Tooltip title={title} placement="right">
390
<div style={{ width: "100%" }}>{children}</div>
391
</Tooltip>
392
);
393
}
394
395