GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/frontend/chat/llm-msg-regenerate.tsx
/*
 * This file is part of CoCalc: Copyright © 2024 Sagemath, Inc.
 * License: MS-RSL – see LICENSE.md for details
 */

import type { MenuProps } from "antd";
import { Button, Dropdown, Space, Tooltip } from "antd";
import { isEmpty } from "lodash";

import { CSS, redux, useTypedRedux } from "@cocalc/frontend/app-framework";
import { Icon, Text } from "@cocalc/frontend/components";
import { LanguageModelVendorAvatar } from "@cocalc/frontend/components/language-model-icon";
import {
  LLMModelPrice,
  modelToName,
} from "@cocalc/frontend/frame-editors/llm/llm-selector";
import { useUserDefinedLLM } from "@cocalc/frontend/frame-editors/llm/use-userdefined-llm";
import { useProjectContext } from "@cocalc/frontend/project/context";
import {
  LanguageModel,
  LanguageModelCore,
  USER_SELECTABLE_LLMS_BY_VENDOR,
  isCustomOpenAI,
  isLanguageModel,
  isOllamaLLM,
  toCustomOpenAIModel,
  toOllamaModel,
  toUserLLMModelName,
} from "@cocalc/util/db-schema/llm-utils";
import { COLORS } from "@cocalc/util/theme";
import { CustomLLMPublic } from "@cocalc/util/types/llm";
import { ChatActions } from "./actions";

interface RegenerateLLMProps {
  actions?: ChatActions;
  date: number; // ms since epoch
  style?: CSS;
  model: LanguageModel | false;
}

export function RegenerateLLM({
  actions,
  date,
  style,
  model,
}: RegenerateLLMProps) {
  const { enabledLLMs, project_id } = useProjectContext();
  const selectableLLMs = useTypedRedux("customize", "selectable_llms");
  const ollama = useTypedRedux("customize", "ollama");
  const custom_openai = useTypedRedux("customize", "custom_openai");
  const user_llm = useUserDefinedLLM();

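  // Render nothing unless the "chat-regenerate" language model feature
  // is enabled for this project.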
  const haveChatRegenerate = redux
    .getStore("projects")
    .hasLanguageModelEnabled(project_id, "chat-regenerate");

  if (!actions || !haveChatRegenerate) return null;

  const entries: MenuProps["items"] = [];

  // Iterate over all vendors and their core models in USER_SELECTABLE_LLMS_BY_VENDOR.
  for (const vendor in USER_SELECTABLE_LLMS_BY_VENDOR) {
    if (!enabledLLMs[vendor]) continue;
    const llms: LanguageModelCore[] = USER_SELECTABLE_LLMS_BY_VENDOR[vendor];
    for (const llm of llms) {
      if (!selectableLLMs.includes(llm)) continue;
      entries.push({
        key: llm,
        label: (
          <>
            <LanguageModelVendorAvatar model={llm} /> {modelToName(llm)}{" "}
            <LLMModelPrice model={llm} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), llm);
        },
      });
    }
  }

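  // Ollama models configured by the site admin, if Ollama is enabled.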
  if (ollama && enabledLLMs.ollama) {
    for (const [key, config] of Object.entries<CustomLLMPublic>(
      ollama.toJS(),
    )) {
      const { display = key } = config;
      const ollamaModel = toOllamaModel(key);
      entries.push({
        key: ollamaModel,
        label: (
          <>
            <LanguageModelVendorAvatar model={ollamaModel} /> {display}{" "}
            <LLMModelPrice model={ollamaModel} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), ollamaModel);
        },
      });
    }
  }

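  // Custom OpenAI-compatible endpoints configured by the site admin.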
  if (custom_openai && enabledLLMs.custom_openai) {
    for (const [key, config] of Object.entries<CustomLLMPublic>(
      custom_openai.toJS(),
    )) {
      const { display = key } = config;
      const customOpenAIModel = toCustomOpenAIModel(key);
      entries.push({
        key: customOpenAIModel,
        label: (
          <>
            <LanguageModelVendorAvatar model={customOpenAIModel} /> {display}{" "}
            <LLMModelPrice model={customOpenAIModel} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), customOpenAIModel);
        },
      });
    }
  }

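  // Language models the user defined in their own account settings.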
  if (!isEmpty(user_llm)) {
    for (const llm of user_llm) {
      const m = toUserLLMModelName(llm);
      const name = modelToName(m);
      entries.push({
        key: m,
        label: (
          <>
            <LanguageModelVendorAvatar model={m} /> {name}{" "}
            <LLMModelPrice model={m} floatRight />
          </>
        ),
        onClick: () => {
          actions.regenerateLLMResponse(new Date(date), m);
        },
      });
    }
  }

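  // Fallback entry, so the dropdown is never empty.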
  if (entries.length === 0) {
    entries.push({
      key: "none",
      label: "No language models available",
    });
  }

  // List the model that made the response first, to make it easier to
  // regenerate the same response.
  // https://github.com/sagemathinc/cocalc/issues/7534
  if (entries.length > 0 && isLanguageModel(model)) {
    entries.unshift({ key: "divider", type: "divider" });
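    // Ollama and custom OpenAI models carry their display name in the site
    // configuration; everything else goes through modelToName().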
    const display =
      isOllamaLLM(model) && ollama?.get(model) != null
        ? ollama?.getIn([model, "display"]) ?? model
        : isCustomOpenAI(model) && custom_openai?.get(model) != null
          ? custom_openai?.getIn([model, "display"]) ?? model
          : modelToName(model);
    entries.unshift({
      key: "same",
      label: (
        <>
          <LanguageModelVendorAvatar model={model} />{" "}
          <Text>
            <Text strong>{display}</Text> (the same)
          </Text>{" "}
          <LLMModelPrice model={model} floatRight />
        </>
      ),
      onClick: () => {
        actions.regenerateLLMResponse(new Date(date), model);
      },
    });
  }

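  // A compact text button that opens the dropdown of models.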
  return (
    <Tooltip title="Regenerating the response will send the thread to the language model again and replace this answer. Select a different language model to see if it gives a better response. Previous answers are kept in the history of that message.">
      <Dropdown
        menu={{
          items: entries,
          style: { overflow: "auto", maxHeight: "50vh" },
        }}
        trigger={["click"]}
      >
        <Button
          size="small"
          type="text"
          style={{
            display: "inline",
            whiteSpace: "nowrap",
            color: COLORS.GRAY_M,
            ...style,
          }}
        >
          <Space>
            <Icon name="refresh" />
            Regenerate
            <Icon name="chevron-down" />
          </Space>
        </Button>
      </Dropdown>
    </Tooltip>
  );
}
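
/*
 * Illustrative usage sketch (hypothetical names and values, not taken from
 * this file): the component is meant to sit next to an LLM reply in a chat,
 * e.g.
 *
 *   <RegenerateLLM
 *     actions={chatActions}   // ChatActions instance of the chat frame (hypothetical name)
 *     date={messageDateMs}    // ms since epoch of the message to regenerate (hypothetical name)
 *     model={"gpt-4o-mini"}   // model that wrote the reply, or false if unknown (example id)
 *   />
 *
 * It renders null when actions is undefined or the "chat-regenerate"
 * feature is disabled for the project.
 */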