Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
sagemathinc
GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/frontend/editors/markdown-input/mentionable-users.tsx
1691 views
1
/*
2
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
3
* License: MS-RSL – see LICENSE.md for details
4
*/
5
6
import { Tooltip } from "antd";
7
import { List } from "immutable";
8
import { isEmpty } from "lodash";
9
import { Avatar } from "@cocalc/frontend/account/avatar/avatar";
10
import { useLanguageModelSetting } from "@cocalc/frontend/account/useLanguageModelSetting";
11
import { redux, useMemo, useTypedRedux } from "@cocalc/frontend/app-framework";
12
import AnthropicAvatar from "@cocalc/frontend/components/anthropic-avatar";
13
import GoogleGeminiLogo from "@cocalc/frontend/components/google-gemini-avatar";
14
import { LanguageModelVendorAvatar } from "@cocalc/frontend/components/language-model-icon";
15
import MistralAvatar from "@cocalc/frontend/components/mistral-avatar";
16
import OpenAIAvatar from "@cocalc/frontend/components/openai-avatar";
17
import { LLMModelPrice } from "@cocalc/frontend/frame-editors/llm/llm-selector";
18
import { useUserDefinedLLM } from "@cocalc/frontend/frame-editors/llm/use-userdefined-llm";
19
import { useProjectContext } from "@cocalc/frontend/project/context";
20
import {
21
ANTHROPIC_MODELS,
22
GOOGLE_MODELS,
23
LLMServicesAvailable,
24
LLM_DESCR,
25
LLM_USERNAMES,
26
LanguageModel,
27
MISTRAL_MODELS,
28
MODELS_OPENAI,
29
UserDefinedLLM,
30
fromCustomOpenAIModel,
31
fromOllamaModel,
32
isCustomOpenAI,
33
isOllamaLLM,
34
isUserDefinedModel,
35
model2service,
36
model2vendor,
37
toCustomOpenAIModel,
38
toOllamaModel,
39
toUserLLMModelName,
40
} from "@cocalc/util/db-schema/llm-utils";
41
import { cmp, timestamp_cmp, trunc_middle } from "@cocalc/util/misc";
42
import { CustomLLMPublic } from "@cocalc/util/types/llm";
43
import { Item as CompleteItem } from "./complete";
44
45
// we make the show_llm_main_menu field required, to avoid forgetting to set it ;-)
type Item = CompleteItem & Required<Pick<CompleteItem, "show_llm_main_menu">>;

// Optional avatar sizing for the generated mention entries.
interface Opts {
  // pixel size of avatars for project collaborators (also used for the
  // currently selected default LLM, which is shown in the main menu)
  avatarUserSize?: number;
  // pixel size of avatars for all other (non-default) LLM entries
  avatarLLMSize?: number;
}
52
53
export function useMentionableUsers(): (
54
search: string | undefined,
55
opts?: Opts,
56
) => Item[] {
57
const { project_id, enabledLLMs } = useProjectContext();
58
59
const selectableLLMs = useTypedRedux("customize", "selectable_llms");
60
const ollama = useTypedRedux("customize", "ollama");
61
const custom_openai = useTypedRedux("customize", "custom_openai");
62
const user_llm = useUserDefinedLLM();
63
64
// the current default model. This is always a valid LLM, even if none has ever been selected.
65
const [model] = useLanguageModelSetting();
66
67
return useMemo(() => {
68
return (search: string | undefined, opts?: Opts) => {
69
return mentionableUsers({
70
search,
71
project_id,
72
enabledLLMs,
73
model,
74
ollama: ollama?.toJS() ?? {},
75
custom_openai: custom_openai?.toJS() ?? {},
76
user_llm,
77
selectableLLMs,
78
opts,
79
});
80
};
81
}, [project_id, JSON.stringify(enabledLLMs), ollama, custom_openai, model]);
82
}
83
84
interface Props {
  // filter string typed after "@"; matched via substring against each
  // entry's search term (undefined/empty keeps every entry)
  search: string | undefined;
  // project whose collaborator list is read from the projects store
  project_id: string;
  // the currently selected default model; marks the matching entry as show_llm_main_menu
  model: LanguageModel;
  // publicly configured Ollama models, keyed by model name
  ollama: { [key: string]: CustomLLMPublic };
  // publicly configured custom OpenAI-API models, keyed by model name
  custom_openai: { [key: string]: CustomLLMPublic };
  // which LLM service families are enabled for this project
  enabledLLMs: LLMServicesAvailable;
  // admin-curated list of models users may select from
  selectableLLMs: List<string>;
  // models defined by the user themselves
  user_llm: UserDefinedLLM[];
  opts?: Opts;
}
95
96
function mentionableUsers({
97
search,
98
project_id,
99
enabledLLMs,
100
model,
101
ollama,
102
custom_openai,
103
selectableLLMs,
104
user_llm,
105
opts,
106
}: Props): Item[] {
107
const { avatarUserSize = 24, avatarLLMSize = 24 } = opts ?? {};
108
109
const users = redux
110
.getStore("projects")
111
.getIn(["project_map", project_id, "users"]);
112
113
const last_active = redux
114
.getStore("projects")
115
.getIn(["project_map", project_id, "last_active"]);
116
117
if (users == null || last_active == null) return []; // e.g., for an admin
118
119
const my_account_id = redux.getStore("account").get("account_id");
120
121
function getProjectUsers() {
122
const project_users: {
123
account_id: string;
124
last_active: Date | undefined;
125
}[] = [];
126
for (const [account_id] of users) {
127
project_users.push({
128
account_id,
129
last_active: last_active.get(account_id),
130
});
131
}
132
project_users.sort((a, b) => {
133
// always push self to bottom...
134
if (a.account_id == my_account_id) {
135
return 1;
136
}
137
if (b.account_id == my_account_id) {
138
return -1;
139
}
140
if (a == null || b == null) return cmp(a.account_id, b.account_id);
141
if (a == null && b != null) return 1;
142
if (a != null && b == null) return -1;
143
return timestamp_cmp(a, b, "last_active");
144
});
145
return project_users;
146
}
147
148
const project_users = getProjectUsers();
149
150
const users_store = redux.getStore("users");
151
152
const mentions: Item[] = [];
153
154
if (enabledLLMs.openai) {
155
// NOTE: all modes are included, including the 16k version, because:
156
// (1) if you use GPT-3.5 too much you hit your limit,
157
// (2) this is a non-free BUT CHEAP model you can actually use after hitting your limit, which is much cheaper than GPT-4.
158
for (const moai of MODELS_OPENAI) {
159
if (!selectableLLMs.includes(moai)) continue;
160
const show_llm_main_menu = moai === model;
161
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
162
const v = "openai";
163
const m = moai.replace(/-/g, "");
164
const n = LLM_USERNAMES[moai].replace(/ /g, "");
165
const search_term = `${v}chat${m}${n}`.toLowerCase();
166
if (!search || search_term.includes(search)) {
167
mentions.push({
168
value: model2service(moai),
169
label: (
170
<LLMTooltip model={moai}>
171
<OpenAIAvatar size={size} /> {LLM_USERNAMES[moai]}{" "}
172
<LLMModelPrice model={moai} floatRight />
173
</LLMTooltip>
174
),
175
search: search_term,
176
is_llm: true,
177
show_llm_main_menu,
178
});
179
}
180
}
181
}
182
183
if (enabledLLMs.google) {
184
for (const m of GOOGLE_MODELS) {
185
if (!selectableLLMs.includes(m)) continue;
186
const show_llm_main_menu = m === model;
187
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
188
const v = model2vendor(m);
189
const search_term = `${v}${m.replace(/-/g, "").toLowerCase()}`;
190
if (!search || search_term.includes(search)) {
191
mentions.push({
192
value: model2service(m),
193
label: (
194
<LLMTooltip model={m}>
195
<GoogleGeminiLogo size={size} /> {LLM_USERNAMES[m]}{" "}
196
<LLMModelPrice model={m} floatRight />
197
</LLMTooltip>
198
),
199
search: search_term,
200
is_llm: true,
201
show_llm_main_menu,
202
});
203
}
204
}
205
}
206
207
if (enabledLLMs.mistralai) {
208
for (const m of MISTRAL_MODELS) {
209
if (!selectableLLMs.includes(m)) continue;
210
const show_llm_main_menu = m === model;
211
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
212
const name = LLM_USERNAMES[m] ?? m;
213
const s = model2vendor(m);
214
const search_term = `${s}${m}${name}`.toLowerCase();
215
if (!search || search_term.includes(search)) {
216
mentions.push({
217
value: model2service(m),
218
label: (
219
<LLMTooltip model={m}>
220
<MistralAvatar size={size} /> {name}{" "}
221
<LLMModelPrice model={m} floatRight />
222
</LLMTooltip>
223
),
224
search: search_term,
225
is_llm: true,
226
show_llm_main_menu,
227
});
228
}
229
}
230
}
231
232
if (enabledLLMs.anthropic) {
233
for (const m of ANTHROPIC_MODELS) {
234
if (!selectableLLMs.includes(m)) continue;
235
const show_llm_main_menu = m === model;
236
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
237
const name = LLM_USERNAMES[m] ?? m;
238
const s = model2vendor(m);
239
const search_term = `${s}${m}${name}`.toLowerCase();
240
if (!search || search_term.includes(search)) {
241
mentions.push({
242
value: model2service(m),
243
label: (
244
<LLMTooltip model={m}>
245
<AnthropicAvatar size={size} /> {name}{" "}
246
<LLMModelPrice model={m} floatRight />
247
</LLMTooltip>
248
),
249
search: search_term,
250
is_llm: true,
251
show_llm_main_menu,
252
});
253
}
254
}
255
}
256
257
if (enabledLLMs.ollama && !isEmpty(ollama)) {
258
for (const [m, conf] of Object.entries(ollama)) {
259
const show_llm_main_menu =
260
isOllamaLLM(model) && m === fromOllamaModel(model);
261
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
262
const value = toOllamaModel(m);
263
const search_term = `${m}${value}${conf.display}`.toLowerCase();
264
if (!search || search_term.includes(search)) {
265
mentions.push({
266
value,
267
label: (
268
<span>
269
<LanguageModelVendorAvatar model={value} size={size} />{" "}
270
{conf.display} <LLMModelPrice model={m} floatRight />
271
</span>
272
),
273
search: search_term,
274
is_llm: true,
275
show_llm_main_menu,
276
});
277
}
278
}
279
}
280
281
if (enabledLLMs.custom_openai && !isEmpty(custom_openai)) {
282
for (const [m, conf] of Object.entries(custom_openai)) {
283
const show_llm_main_menu =
284
isCustomOpenAI(model) && m === fromCustomOpenAIModel(model);
285
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
286
const value = toCustomOpenAIModel(m);
287
const search_term = `${m}${value}${conf.display}`.toLowerCase();
288
if (!search || search_term.includes(search)) {
289
mentions.push({
290
value,
291
label: (
292
<span>
293
<LanguageModelVendorAvatar model={value} size={size} />{" "}
294
{conf.display} <LLMModelPrice model={m} floatRight />
295
</span>
296
),
297
search: search_term,
298
is_llm: true,
299
show_llm_main_menu,
300
});
301
}
302
}
303
}
304
305
if (!isEmpty(user_llm)) {
306
for (const llm of user_llm) {
307
const m = toUserLLMModelName(llm);
308
const show_llm_main_menu = isUserDefinedModel(model) && m === model;
309
const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;
310
const value = m;
311
const search_term = `${value}${llm.display}`.toLowerCase();
312
if (!search || search_term.includes(search)) {
313
mentions.push({
314
value,
315
label: (
316
<span>
317
<LanguageModelVendorAvatar model={value} size={size} />{" "}
318
{llm.display}
319
</span>
320
),
321
search: search_term,
322
is_llm: true,
323
show_llm_main_menu,
324
});
325
}
326
}
327
}
328
329
for (const { account_id } of project_users) {
330
const fullname = users_store.get_name(account_id) ?? "";
331
const s = fullname.toLowerCase();
332
if (search != null && s.indexOf(search) == -1) continue;
333
const name = trunc_middle(fullname, 64);
334
const label = (
335
<span>
336
<Avatar account_id={account_id} size={avatarUserSize} /> {name}
337
</span>
338
);
339
mentions.push({
340
value: account_id,
341
label,
342
search: s,
343
is_llm: false,
344
show_llm_main_menu: true, // irrelevant, but that's what it will do for standard user accounts
345
});
346
}
347
348
return mentions;
349
}
350
351
function LLMTooltip({
352
model,
353
children,
354
}: {
355
model: string;
356
children: React.ReactNode;
357
}) {
358
const descr = LLM_DESCR[model];
359
const title = <>{descr}</>;
360
return (
361
<Tooltip title={title} placement="right">
362
<div style={{ width: "100%" }}>{children}</div>
363
</Tooltip>
364
);
365
}
366
367