Path: src/packages/frontend/chat/llm-msg-regenerate.tsx
/*
 * This file is part of CoCalc: Copyright © 2024 Sagemath, Inc.
 * License: MS-RSL – see LICENSE.md for details
 */

import type { MenuProps } from "antd";
import { Button, Dropdown, Space, Tooltip } from "antd";
import { isEmpty } from "lodash";

import { CSS, redux, useTypedRedux } from "@cocalc/frontend/app-framework";
import { Icon, Text } from "@cocalc/frontend/components";
import { LanguageModelVendorAvatar } from "@cocalc/frontend/components/language-model-icon";
import {
  LLMModelPrice,
  modelToName,
} from "@cocalc/frontend/frame-editors/llm/llm-selector";
import { useUserDefinedLLM } from "@cocalc/frontend/frame-editors/llm/use-userdefined-llm";
import { useProjectContext } from "@cocalc/frontend/project/context";
import {
  LanguageModel,
  LanguageModelCore,
  USER_SELECTABLE_LLMS_BY_VENDOR,
  isCustomOpenAI,
  isLanguageModel,
  isOllamaLLM,
  toCustomOpenAIModel,
  toOllamaModel,
  toUserLLMModelName,
} from "@cocalc/util/db-schema/llm-utils";
import { COLORS } from "@cocalc/util/theme";
import { CustomLLMPublic } from "@cocalc/util/types/llm";
import { ChatActions } from "./actions";

interface RegenerateLLMProps {
  actions?: ChatActions;
  date: number; // ms since epoch
  style?: CSS;
  model: LanguageModel | false;
}

export function RegenerateLLM({
  actions,
  date,
  style,
  model,
}: RegenerateLLMProps) {
  const { enabledLLMs, project_id } = useProjectContext();
  const selectableLLMs = useTypedRedux("customize", "selectable_llms");
  const ollama = useTypedRedux("customize", "ollama");
  const custom_openai = useTypedRedux("customize", "custom_openai");
  const user_llm = useUserDefinedLLM();

  const haveChatRegenerate = redux
    .getStore("projects")
    .hasLanguageModelEnabled(project_id, "chat-regenerate");

  if (!actions || !haveChatRegenerate) return null;

  const entries: MenuProps["items"] = [];

  // iterate over all key/value pairs in USER_SELECTABLE_LLMS_BY_VENDOR
  for (const vendor in USER_SELECTABLE_LLMS_BY_VENDOR) {
    if (!enabledLLMs[vendor]) continue;
    const llms: LanguageModelCore[] = USER_SELECTABLE_LLMS_BY_VENDOR[vendor];
    for (const llm of llms) {
      if (!selectableLLMs.includes(llm)) continue;
      entries.push({
        key: llm,
        label: (
          <>
            <LanguageModelVendorAvatar model={llm} /> {modelToName(llm)}{" "}
            <LLMModelPrice model={llm} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), llm);
        },
      });
    }
  }

  // Ollama models configured on this server
  if (ollama && enabledLLMs.ollama) {
    for (const [key, config] of Object.entries<CustomLLMPublic>(
      ollama.toJS(),
    )) {
      const { display = key } = config;
      const ollamaModel = toOllamaModel(key);
      entries.push({
        key: ollamaModel,
        label: (
          <>
            <LanguageModelVendorAvatar model={ollamaModel} /> {display}{" "}
            <LLMModelPrice model={ollamaModel} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), ollamaModel);
        },
      });
    }
  }

  // custom OpenAI-API-compatible endpoints configured on this server
  if (custom_openai && enabledLLMs.custom_openai) {
    for (const [key, config] of Object.entries<CustomLLMPublic>(
      custom_openai.toJS(),
    )) {
      const { display = key } = config;
      const customOpenAIModel = toCustomOpenAIModel(key);
      entries.push({
        key: customOpenAIModel,
        label: (
          <>
            <LanguageModelVendorAvatar model={customOpenAIModel} /> {display}{" "}
            <LLMModelPrice model={customOpenAIModel} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), customOpenAIModel);
        },
      });
    }
  }

  // models the user configured themselves
  if (!isEmpty(user_llm)) {
    for (const llm of user_llm) {
      const m = toUserLLMModelName(llm);
      const name = modelToName(m);
      entries.push({
        key: m,
        label: (
          <>
            <LanguageModelVendorAvatar model={m} /> {name}{" "}
            <LLMModelPrice model={m} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), m);
        },
      });
    }
  }

  // fallback entry if no models are available at all
  if (entries.length === 0) {
    entries.push({
      key: "none",
      label: "No language models available",
    });
  }

  // list the model that made the response first, to make it easier to regenerate the same response
  // https://github.com/sagemathinc/cocalc/issues/7534
  if (entries.length > 0 && isLanguageModel(model)) {
    entries.unshift({ key: "divider", type: "divider" });
    const display =
      isOllamaLLM(model) && ollama?.get(model) != null
        ? ollama?.getIn([model, "display"]) ?? model
        : isCustomOpenAI(model) && custom_openai?.get(model) != null
          ? custom_openai?.getIn([model, "display"]) ?? model
          : modelToName(model);
    entries.unshift({
      key: "same",
      label: (
        <>
          <LanguageModelVendorAvatar model={model} />{" "}
          <Text strong>{display}</Text> (the same){" "}
          <LLMModelPrice model={model} floatRight />
        </>
      ),
      onClick: () => {
        actions.regenerateLLMResponse(new Date(date), model);
      },
    });
  }

  return (
    <Tooltip title="Regenerating the response will send the thread to the language model again and replace this answer. Select a different language model to see if it has a better response. Previous answers are kept in the history of that message.">
      <Dropdown
        menu={{
          items: entries,
          style: { overflow: "auto", maxHeight: "50vh" },
        }}
        trigger={["click"]}
      >
        <Button
          size="small"
          type="text"
          style={{
            display: "inline",
            whiteSpace: "nowrap",
            color: COLORS.GRAY_M,
            ...style,
          }}
        >
          <Space>
            <Icon name="refresh" />
            Regenerate
            <Icon name="chevron-down" />
          </Space>
        </Button>
      </Dropdown>
    </Tooltip>
  );
}
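
For context, here is a minimal sketch of how this component might be mounted from a chat message toolbar. The `MessageToolbar` wrapper and the `MessageLike` shape are hypothetical, invented for illustration; only `RegenerateLLM`, `ChatActions`, and `isLanguageModel` come from the code above.

// Hypothetical usage sketch (not part of this file): rendering the
// regenerate dropdown next to an assistant reply in a chat thread.
import { isLanguageModel } from "@cocalc/util/db-schema/llm-utils";
import type { ChatActions } from "./actions";
import { RegenerateLLM } from "./llm-msg-regenerate";

// Assumed message shape, for illustration only.
interface MessageLike {
  date: Date; // the timestamp identifying the message in the thread
  model?: string; // name of the LLM that produced this reply, if any
}

export function MessageToolbar({
  actions,
  message,
}: {
  actions: ChatActions;
  message: MessageLike;
}) {
  return (
    <RegenerateLLM
      actions={actions}
      date={message.date.valueOf()} // RegenerateLLMProps expects ms since epoch
      // pass the originating model so it is listed first, or false if unknown
      model={isLanguageModel(message.model) ? message.model : false}
      style={{ marginLeft: "10px" }}
    />
  );
}

Note that `date` is a plain number rather than a `Date`: the component converts it back with `new Date(date)` when calling `actions.regenerateLLMResponse`, which then replaces the answer while keeping prior answers in the message history.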