
GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/frontend/chat/llm-msg-regenerate.tsx
/*
 * This file is part of CoCalc: Copyright © 2024 Sagemath, Inc.
 * License: MS-RSL – see LICENSE.md for details
 */

import type { MenuProps } from "antd";
import { Button, Dropdown, Space, Tooltip } from "antd";
import { isEmpty } from "lodash";

import { CSS, redux, useTypedRedux } from "@cocalc/frontend/app-framework";
import { Icon, Text } from "@cocalc/frontend/components";
import { LanguageModelVendorAvatar } from "@cocalc/frontend/components/language-model-icon";
import {
  LLMModelPrice,
  modelToName,
} from "@cocalc/frontend/frame-editors/llm/llm-selector";
import { useUserDefinedLLM } from "@cocalc/frontend/frame-editors/llm/use-userdefined-llm";
import { useProjectContext } from "@cocalc/frontend/project/context";
import {
  LanguageModel,
  LanguageModelCore,
  USER_SELECTABLE_LLMS_BY_VENDOR,
  isCustomOpenAI,
  isLanguageModel,
  isOllamaLLM,
  toCustomOpenAIModel,
  toOllamaModel,
  toUserLLMModelName,
} from "@cocalc/util/db-schema/llm-utils";
import { COLORS } from "@cocalc/util/theme";
import { CustomLLMPublic } from "@cocalc/util/types/llm";
import { ChatActions } from "./actions";

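// Dropdown button shown next to an LLM chat message: lets the user regenerate
// the response, either with the same model or with any other available one.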
interface RegenerateLLMProps {
  actions?: ChatActions;
  date: number; // ms since epoch
  style?: CSS;
  model: LanguageModel | false;
}

export function RegenerateLLM({
  actions,
  date,
  style,
  model,
}: RegenerateLLMProps) {
  const { enabledLLMs, project_id } = useProjectContext();
  const selectableLLMs = useTypedRedux("customize", "selectable_llms");
  const ollama = useTypedRedux("customize", "ollama");
  const custom_openai = useTypedRedux("customize", "custom_openai");
  const user_llm = useUserDefinedLLM();

  const haveChatRegenerate = redux
    .getStore("projects")
    .hasLanguageModelEnabled(project_id, "chat-regenerate");

  if (!actions || !haveChatRegenerate) return null;

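  // Entries for the regenerate dropdown: one item per available model.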
  const entries: MenuProps["items"] = [];

  // iterate over all vendors and their models in USER_SELECTABLE_LLMS_BY_VENDOR
  for (const vendor in USER_SELECTABLE_LLMS_BY_VENDOR) {
    if (!enabledLLMs[vendor]) continue;
    const llms: LanguageModelCore[] = USER_SELECTABLE_LLMS_BY_VENDOR[vendor];
    for (const llm of llms) {
      if (!selectableLLMs.includes(llm)) continue;
      entries.push({
        key: llm,
        label: (
          <>
            <LanguageModelVendorAvatar model={llm} /> {modelToName(llm)}{" "}
            <LLMModelPrice model={llm} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), llm);
        },
      });
    }
  }

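  // Models available through the server's Ollama configuration.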
  if (ollama && enabledLLMs.ollama) {
    for (const [key, config] of Object.entries<CustomLLMPublic>(
      ollama.toJS(),
    )) {
      const { display = key } = config;
      const ollamaModel = toOllamaModel(key);
      entries.push({
        key: ollamaModel,
        label: (
          <>
            <LanguageModelVendorAvatar model={ollamaModel} /> {display}{" "}
            <LLMModelPrice model={ollamaModel} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), ollamaModel);
        },
      });
    }
  }

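  // Models available through custom OpenAI-compatible endpoints.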
  if (custom_openai && enabledLLMs.custom_openai) {
    for (const [key, config] of Object.entries<CustomLLMPublic>(
      custom_openai.toJS(),
    )) {
      const { display = key } = config;
      const customOpenAIModel = toCustomOpenAIModel(key);
      entries.push({
        key: customOpenAIModel,
        label: (
          <>
            <LanguageModelVendorAvatar model={customOpenAIModel} /> {display}{" "}
            <LLMModelPrice model={customOpenAIModel} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), customOpenAIModel);
        },
      });
    }
  }

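  // Models the user has defined themselves (see useUserDefinedLLM).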
  if (!isEmpty(user_llm)) {
    for (const llm of user_llm) {
      const m = toUserLLMModelName(llm);
      const name = modelToName(m);
      entries.push({
        key: m,
        label: (
          <>
            <LanguageModelVendorAvatar model={m} /> {name}{" "}
            <LLMModelPrice model={m} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), m);
        },
      });
    }
  }

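  // Fall back to a single informational entry if no models are enabled.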
  if (entries.length === 0) {
    entries.push({
      key: "none",
      label: "No language models available",
    });
  }

  // List the model that generated this response first, to make it easy to regenerate the same response.
  // https://github.com/sagemathinc/cocalc/issues/7534
  if (entries.length > 0 && isLanguageModel(model)) {
    entries.unshift({ key: "divider", type: "divider" });
    const display =
      isOllamaLLM(model) && ollama?.get(model) != null
        ? ollama?.getIn([model, "display"]) ?? model
        : isCustomOpenAI(model) && custom_openai?.get(model) != null
          ? custom_openai?.getIn([model, "display"]) ?? model
          : modelToName(model);
    entries.unshift({
      key: "same",
      label: (
        <>
          <LanguageModelVendorAvatar model={model} />{" "}
          <Text strong>{display}</Text> (the same){" "}
          <LLMModelPrice model={model} floatRight />
        </>
      ),
      onClick: () => {
        actions.regenerateLLMResponse(new Date(date), model);
      },
    });
  }

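  // Small text button that opens the model dropdown on click.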
  return (
    <Tooltip title="Regenerating the response will send the thread to the language model again and replace this answer. Select a different language model to see if it gives a better response. Previous answers are kept in the history of that message.">
      <Dropdown
        menu={{
          items: entries,
          style: { overflow: "auto", maxHeight: "50vh" },
        }}
        trigger={["click"]}
      >
        <Button
          size="small"
          type="text"
          style={{
            display: "inline",
            whiteSpace: "nowrap",
            color: COLORS.GRAY_M,
            ...style,
          }}
        >
          <Space>
            <Icon name="refresh" />
            Regenerate
            <Icon name="chevron-down" />
          </Space>
        </Button>
      </Dropdown>
    </Tooltip>
  );
}