// Source: src/packages/frontend/editors/markdown-input/mentionable-users.tsx
/*1* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.2* License: MS-RSL – see LICENSE.md for details3*/45import { Tooltip } from "antd";6import { List } from "immutable";7import { isEmpty } from "lodash";8import { Avatar } from "@cocalc/frontend/account/avatar/avatar";9import { useLanguageModelSetting } from "@cocalc/frontend/account/useLanguageModelSetting";10import { redux, useMemo, useTypedRedux } from "@cocalc/frontend/app-framework";11import AnthropicAvatar from "@cocalc/frontend/components/anthropic-avatar";12import GoogleGeminiLogo from "@cocalc/frontend/components/google-gemini-avatar";13import { LanguageModelVendorAvatar } from "@cocalc/frontend/components/language-model-icon";14import MistralAvatar from "@cocalc/frontend/components/mistral-avatar";15import OpenAIAvatar from "@cocalc/frontend/components/openai-avatar";16import { LLMModelPrice } from "@cocalc/frontend/frame-editors/llm/llm-selector";17import { useUserDefinedLLM } from "@cocalc/frontend/frame-editors/llm/use-userdefined-llm";18import { useProjectContext } from "@cocalc/frontend/project/context";19import {20ANTHROPIC_MODELS,21GOOGLE_MODELS,22LLMServicesAvailable,23LLM_DESCR,24LLM_USERNAMES,25LanguageModel,26MISTRAL_MODELS,27MODELS_OPENAI,28UserDefinedLLM,29fromCustomOpenAIModel,30fromOllamaModel,31isCustomOpenAI,32isOllamaLLM,33isUserDefinedModel,34model2service,35model2vendor,36toCustomOpenAIModel,37toOllamaModel,38toUserLLMModelName,39} from "@cocalc/util/db-schema/llm-utils";40import { cmp, timestamp_cmp, trunc_middle } from "@cocalc/util/misc";41import { CustomLLMPublic } from "@cocalc/util/types/llm";42import { Item as CompleteItem } from "./complete";4344// we make the show_llm_main_menu field required, to avoid forgetting to set it ;-)45type Item = CompleteItem & Required<Pick<CompleteItem, "show_llm_main_menu">>;4647interface Opts {48avatarUserSize?: number;49avatarLLMSize?: number;50}5152export function useMentionableUsers(): (53search: string | undefined,54opts?: 
Opts,55) => Item[] {56const { project_id, enabledLLMs } = useProjectContext();5758const selectableLLMs = useTypedRedux("customize", "selectable_llms");59const ollama = useTypedRedux("customize", "ollama");60const custom_openai = useTypedRedux("customize", "custom_openai");61const user_llm = useUserDefinedLLM();6263// the current default model. This is always a valid LLM, even if none has ever been selected.64const [model] = useLanguageModelSetting();6566return useMemo(() => {67return (search: string | undefined, opts?: Opts) => {68return mentionableUsers({69search,70project_id,71enabledLLMs,72model,73ollama: ollama?.toJS() ?? {},74custom_openai: custom_openai?.toJS() ?? {},75user_llm,76selectableLLMs,77opts,78});79};80}, [project_id, JSON.stringify(enabledLLMs), ollama, custom_openai, model]);81}8283interface Props {84search: string | undefined;85project_id: string;86model: LanguageModel;87ollama: { [key: string]: CustomLLMPublic };88custom_openai: { [key: string]: CustomLLMPublic };89enabledLLMs: LLMServicesAvailable;90selectableLLMs: List<string>;91user_llm: UserDefinedLLM[];92opts?: Opts;93}9495function mentionableUsers({96search,97project_id,98enabledLLMs,99model,100ollama,101custom_openai,102selectableLLMs,103user_llm,104opts,105}: Props): Item[] {106const { avatarUserSize = 24, avatarLLMSize = 24 } = opts ?? 
{};107108const users = redux109.getStore("projects")110.getIn(["project_map", project_id, "users"]);111112const last_active = redux113.getStore("projects")114.getIn(["project_map", project_id, "last_active"]);115116if (users == null || last_active == null) return []; // e.g., for an admin117118const my_account_id = redux.getStore("account").get("account_id");119120function getProjectUsers() {121const project_users: {122account_id: string;123last_active: Date | undefined;124}[] = [];125for (const [account_id] of users) {126project_users.push({127account_id,128last_active: last_active.get(account_id),129});130}131project_users.sort((a, b) => {132// always push self to bottom...133if (a.account_id == my_account_id) {134return 1;135}136if (b.account_id == my_account_id) {137return -1;138}139if (a == null || b == null) return cmp(a.account_id, b.account_id);140if (a == null && b != null) return 1;141if (a != null && b == null) return -1;142return timestamp_cmp(a, b, "last_active");143});144return project_users;145}146147const project_users = getProjectUsers();148149const users_store = redux.getStore("users");150151const mentions: Item[] = [];152153if (enabledLLMs.openai) {154// NOTE: all modes are included, including the 16k version, because:155// (1) if you use GPT-3.5 too much you hit your limit,156// (2) this is a non-free BUT CHEAP model you can actually use after hitting your limit, which is much cheaper than GPT-4.157for (const moai of MODELS_OPENAI) {158if (!selectableLLMs.includes(moai)) continue;159const show_llm_main_menu = moai === model;160const size = show_llm_main_menu ? 
avatarUserSize : avatarLLMSize;161const v = "openai";162const m = moai.replace(/-/g, "");163const n = LLM_USERNAMES[moai].replace(/ /g, "");164const search_term = `${v}chat${m}${n}`.toLowerCase();165if (!search || search_term.includes(search)) {166mentions.push({167value: model2service(moai),168label: (169<LLMTooltip model={moai}>170<OpenAIAvatar size={size} /> {LLM_USERNAMES[moai]}{" "}171<LLMModelPrice model={moai} floatRight />172</LLMTooltip>173),174search: search_term,175is_llm: true,176show_llm_main_menu,177});178}179}180}181182if (enabledLLMs.google) {183for (const m of GOOGLE_MODELS) {184if (!selectableLLMs.includes(m)) continue;185const show_llm_main_menu = m === model;186const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;187const v = model2vendor(m);188const search_term = `${v}${m.replace(/-/g, "").toLowerCase()}`;189if (!search || search_term.includes(search)) {190mentions.push({191value: model2service(m),192label: (193<LLMTooltip model={m}>194<GoogleGeminiLogo size={size} /> {LLM_USERNAMES[m]}{" "}195<LLMModelPrice model={m} floatRight />196</LLMTooltip>197),198search: search_term,199is_llm: true,200show_llm_main_menu,201});202}203}204}205206if (enabledLLMs.mistralai) {207for (const m of MISTRAL_MODELS) {208if (!selectableLLMs.includes(m)) continue;209const show_llm_main_menu = m === model;210const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;211const name = LLM_USERNAMES[m] ?? 
m;212const s = model2vendor(m);213const search_term = `${s}${m}${name}`.toLowerCase();214if (!search || search_term.includes(search)) {215mentions.push({216value: model2service(m),217label: (218<LLMTooltip model={m}>219<MistralAvatar size={size} /> {name}{" "}220<LLMModelPrice model={m} floatRight />221</LLMTooltip>222),223search: search_term,224is_llm: true,225show_llm_main_menu,226});227}228}229}230231if (enabledLLMs.anthropic) {232for (const m of ANTHROPIC_MODELS) {233if (!selectableLLMs.includes(m)) continue;234const show_llm_main_menu = m === model;235const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;236const name = LLM_USERNAMES[m] ?? m;237const s = model2vendor(m);238const search_term = `${s}${m}${name}`.toLowerCase();239if (!search || search_term.includes(search)) {240mentions.push({241value: model2service(m),242label: (243<LLMTooltip model={m}>244<AnthropicAvatar size={size} /> {name}{" "}245<LLMModelPrice model={m} floatRight />246</LLMTooltip>247),248search: search_term,249is_llm: true,250show_llm_main_menu,251});252}253}254}255256if (enabledLLMs.ollama && !isEmpty(ollama)) {257for (const [m, conf] of Object.entries(ollama)) {258const show_llm_main_menu =259isOllamaLLM(model) && m === fromOllamaModel(model);260const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;261const value = toOllamaModel(m);262const search_term = `${m}${value}${conf.display}`.toLowerCase();263if (!search || search_term.includes(search)) {264mentions.push({265value,266label: (267<span>268<LanguageModelVendorAvatar model={value} size={size} />{" "}269{conf.display} <LLMModelPrice model={m} floatRight />270</span>271),272search: search_term,273is_llm: true,274show_llm_main_menu,275});276}277}278}279280if (enabledLLMs.custom_openai && !isEmpty(custom_openai)) {281for (const [m, conf] of Object.entries(custom_openai)) {282const show_llm_main_menu =283isCustomOpenAI(model) && m === fromCustomOpenAIModel(model);284const size = show_llm_main_menu ? 
avatarUserSize : avatarLLMSize;285const value = toCustomOpenAIModel(m);286const search_term = `${m}${value}${conf.display}`.toLowerCase();287if (!search || search_term.includes(search)) {288mentions.push({289value,290label: (291<span>292<LanguageModelVendorAvatar model={value} size={size} />{" "}293{conf.display} <LLMModelPrice model={m} floatRight />294</span>295),296search: search_term,297is_llm: true,298show_llm_main_menu,299});300}301}302}303304if (!isEmpty(user_llm)) {305for (const llm of user_llm) {306const m = toUserLLMModelName(llm);307const show_llm_main_menu = isUserDefinedModel(model) && m === model;308const size = show_llm_main_menu ? avatarUserSize : avatarLLMSize;309const value = m;310const search_term = `${value}${llm.display}`.toLowerCase();311if (!search || search_term.includes(search)) {312mentions.push({313value,314label: (315<span>316<LanguageModelVendorAvatar model={value} size={size} />{" "}317{llm.display}318</span>319),320search: search_term,321is_llm: true,322show_llm_main_menu,323});324}325}326}327328for (const { account_id } of project_users) {329const fullname = users_store.get_name(account_id) ?? "";330const s = fullname.toLowerCase();331if (search != null && s.indexOf(search) == -1) continue;332const name = trunc_middle(fullname, 64);333const label = (334<span>335<Avatar account_id={account_id} size={avatarUserSize} /> {name}336</span>337);338mentions.push({339value: account_id,340label,341search: s,342is_llm: false,343show_llm_main_menu: true, // irrelevant, but that's what it will do for standard user accounts344});345}346347return mentions;348}349350function LLMTooltip({351model,352children,353}: {354model: string;355children: React.ReactNode;356}) {357const descr = LLM_DESCR[model];358const title = <>{descr}</>;359return (360<Tooltip title={title} placement="right">361<div style={{ width: "100%" }}>{children}</div>362</Tooltip>363);364}365366367