GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/hub/health-checks.ts

/*
 * This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
 * License: MS-RSL – see LICENSE.md for details
 */

// endpoints for various health checks

import getLogger from "@cocalc/backend/logger";
const { new_counter } = require("@cocalc/hub/metrics-recorder");
import { howLongDisconnectedMins } from "@cocalc/database/postgres/record-connect-error";
import type { PostgreSQL } from "@cocalc/database/postgres/types";
import { seconds2hms } from "@cocalc/util/misc";
import express, { Response } from "express";
import { createServer, Server } from "net";
import { isFloat } from "validator";
import { database_is_working } from "./hub_register";

const logger = getLogger("hub:healthcheck");
const { debug: L } = logger;

const HEALTHCHECKS = new_counter(
  "healthchecks_total",
  "test healthcheck counter",
  ["status"]
);

interface HealthcheckData {
  code: 200 | 404;
  txt: string;
}

// Self-termination is only activated if the COCALC_HUB_SELF_TERMINATE environment variable is set.
// Its value is "<min>,<max>,<drain>": a minimum and maximum lifetime in hours, plus a drain
// period in minutes at the end.
// e.g. "24,48,15" for an uptime between 1 and 2 days and 15 minutes of draining
function init_self_terminate(): {
  startup: number;
  shutdown?: number; // when to shutdown (causes a failed health check)
  drain?: number; // when to start draining, causes a proxy server to no longer send traffic
} {
  const D = logger.extend("init_self_terminate").debug;
  const startup = Date.now();
  const conf = process.env.COCALC_HUB_SELF_TERMINATE;
  if (conf == null) {
    D("COCALC_HUB_SELF_TERMINATE env var not set, hence no self-termination");
    return { startup };
  }
  const [from_str, to_str, drain_str] = conf.trim().split(",");
  if (!isFloat(from_str, { gt: 0 }))
    throw new Error("COCALC_HUB_SELF_TERMINATE/from not a positive float");
  if (!isFloat(to_str, { gt: 0 }))
    throw new Error("COCALC_HUB_SELF_TERMINATE/to not a positive float");
  if (!isFloat(drain_str, { gt: 0 }))
    throw new Error("COCALC_HUB_SELF_TERMINATE/drain not a positive float");
  const from = parseFloat(from_str);
  const to = parseFloat(to_str);
  const drain_h = parseFloat(drain_str) / 60; // minutes to hours
  D("parsed data:", { from, to, drain_h });
  if (from > to)
    throw Error(
      "COCALC_HUB_SELF_TERMINATE 'from' must be smaller than 'to', e.g. '24,48,15'"
    );
  const uptime = Math.random() * (to - from); // hours
  const hours2ms = 1000 * 60 * 60;
  const shutdown = startup + (from + uptime) * hours2ms;
  const drain = shutdown - drain_h * hours2ms;
  if (startup > drain) {
    throw new Error(
      `COCALC_HUB_SELF_TERMINATE: startup must be smaller than drain – ${startup}>${drain}`
    );
  }
  D({
    startup: new Date(startup).toISOString(),
    drain: new Date(drain).toISOString(),
    shutdown: new Date(shutdown).toISOString(),
    uptime: seconds2hms((hours2ms * uptime) / 1000),
    draintime: seconds2hms((drain_h * hours2ms) / 1000),
  });
  return { startup, shutdown, drain };
}
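
// Illustrative example (values made up, not part of the configuration): with
// COCALC_HUB_SELF_TERMINATE="24,48,15", `uptime` above is drawn uniformly from [0, 24) hours, so
//   shutdown = startup + (24 + uptime) hours   // somewhere between 1 and 2 days after startup
//   drain    = shutdown - 15 minutes           // the agent check switches to "10%" 15 minutes early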

const { startup, shutdown, drain } = init_self_terminate();

let agent_port = 0;
let agent_host = "0.0.0.0";
export function set_agent_endpoint(port: number, host: string) {
  L(`set_agent_endpoint ${host}:${port}`);
  agent_port = port;
  agent_host = host;
}

let agent_check_server: Server | undefined;

// HAProxy agent-check TCP endpoint
// https://cbonte.github.io/haproxy-dconv/2.0/configuration.html#5.2-agent-check
// for development, set the env var in your startup script or terminal init file
//   export COCALC_HUB_SELF_TERMINATE=.1,.2,1
// and then query it like this:
//   $ telnet 0.0.0.0 $(cat $COCALC_ROOT/dev/project/ports/agent-port)
function setup_agent_check() {
  if (agent_port == 0 || drain == null) {
    L("setup_agent_check: agent_port or drain not set, hence no agent checks");
    return;
  }

  // TODO this could also return a "weight" for this server, based on load values.
  // The protocol also knows "drain", but we report "10%" instead, to avoid a nasty situation
  // when all endpoints are draining at once.
  // ATTN: weight must be set as well, which is poorly documented here:
  // https://cbonte.github.io/haproxy-dconv/2.0/configuration.html#5.2-weight
  agent_check_server = createServer((c) => {
    let msg = Date.now() < drain ? "ready up 100%" : "10%";
    c.write(msg + "\r\n");
    c.destroy();
  });

  agent_check_server.listen(agent_port, agent_host);
  L(`setup_agent_check: listening on ${agent_host}:${agent_port}`);
}
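
// A minimal haproxy.cfg sketch of how a backend could consume this agent check.
// Host names and ports below are made up for illustration; only the agent-check
// responses themselves come from the code above:
//
//   backend cocalc_hubs
//     option httpchk GET /alive
//     server hub1 hub1.internal:9000 check agent-check agent-port 9091 agent-inter 5s weight 100
//
// While Date.now() < drain the agent answers "ready up 100%" (full weight); afterwards it
// answers "10%", so haproxy reduces this server's weight rather than dropping it entirely.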

export interface Check {
  status: string;
  abort?: boolean;
}

interface Opts {
  db: PostgreSQL;
  router: express.Router;
  extra?: (() => Promise<Check>)[]; // additional health checks
}

// this could be directly in setup_health_checks, but we also need it in proxy.coffee.
// proxy.coffee must be rewritten and restructured first – just wrapping it with a router
// didn't work at all for me.
export function process_alive(): HealthcheckData {
  let txt = "alive: YES";
  let is_dead = true;
  if (!database_is_working()) {
    // this will stop haproxy from routing traffic to us
    // until the db connection starts working again.
    txt = "alive: NO – database not working";
  } else if (shutdown != null && Date.now() > shutdown) {
    txt = "alive: NO – shutdown initiated";
  } else {
    is_dead = false;
  }
  const code = is_dead ? 404 : 200;
  return { txt, code };
}

function checkConcurrent(db: PostgreSQL): Check {
  const c = db.concurrent();
  if (c >= db._concurrent_warn) {
    return {
      status: `hub not healthy, since concurrent ${c} >= ${db._concurrent_warn}`,
      abort: true,
    };
  } else {
    return { status: `concurrent ${c} < ${db._concurrent_warn}` };
  }
}

function checkUptime(): Check {
  const now = Date.now();
  const uptime = seconds2hms((now - startup) / 1000);
  if (shutdown != null && drain != null) {
    if (now >= shutdown) {
      const msg = `uptime ${uptime} – expired, terminating now`;
      L(msg);
      return { status: msg, abort: true };
    } else {
      const until = seconds2hms((shutdown - now) / 1000);
      const drain_str =
        drain > now
          ? `draining in ${seconds2hms((drain - now) / 1000)}`
          : "draining now";
      const msg = `uptime ${uptime} – ${drain_str} – terminating in ${until}`;
      L(msg);
      return { status: msg };
    }
  } else {
    const msg = `uptime ${uptime} – no self-termination`;
    L(msg);
    return { status: msg };
  }
}

// if there is no connection to the database for that many minutes,
// declare the hub unhealthy
const DB_ERRORS_THRESHOLD_MIN = parseInt(
  process.env.COCALC_DB_ERRORS_THRESHOLD_MIN ?? "5"
);
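// For example: COCALC_DB_ERRORS_THRESHOLD_MIN=0 disables the check in checkDBConnectivity()
// below, while leaving the variable unset keeps the 5 minute default from above.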

function checkDBConnectivity(): Check {
  if (DB_ERRORS_THRESHOLD_MIN <= 0) {
    return { status: "db connectivity check disabled" };
  }
  const num = howLongDisconnectedMins();
  if (num == null) {
    return { status: "no DB connection problems", abort: false };
  }
  // round num to 2 decimal places
  const numStr = num.toFixed(2);
  const above = num >= DB_ERRORS_THRESHOLD_MIN;
  const status = above
    ? `DB problems for ${numStr} >= ${DB_ERRORS_THRESHOLD_MIN} mins`
    : `DB problems for ${numStr} < ${DB_ERRORS_THRESHOLD_MIN} mins`;
  return { status, abort: above };
}

// same note as above for process_alive()
async function process_health_check(
  db: PostgreSQL,
  extra: (() => Promise<Check>)[] = []
): Promise<HealthcheckData> {
  let any_abort = false;
  let txt = "healthchecks:\n";
  for (const test of [
    () => checkConcurrent(db),
    checkUptime,
    checkDBConnectivity,
    ...extra,
  ]) {
    try {
      const { status, abort = false } = await test();
      const statusTxt = abort ? "FAIL" : "OK";
      txt += `${status} – ${statusTxt}\n`;
      any_abort = any_abort || abort;
      L(`process_health_check: ${status} – ${statusTxt}`);
    } catch (err) {
      L(`process_health_check ERROR: ${err}`);
      HEALTHCHECKS.labels("ERROR").inc();
    }
  }
  const code = any_abort ? 404 : 200;
  HEALTHCHECKS.labels(any_abort ? "FAIL" : "OK").inc();
  return { code, txt };
}
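
// For illustration (values made up), a healthy response body assembled above looks like:
//
//   healthchecks:
//   concurrent 3 < 300 – OK
//   uptime <h:m:s> – no self-termination – OK
//   no DB connection problems – OK
//
// Any check with abort=true switches its line to "… – FAIL" and the overall HTTP status to 404.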

export async function setup_health_checks(opts: Opts): Promise<void> {
  const { db, extra, router } = opts;
  setup_agent_check();

  // used by HAProxy for testing that this hub is OK to receive traffic
  router.get("/alive", (_, res: Response) => {
    const { code, txt } = process_alive();
    res.type("txt");
    res.status(code);
    res.send(txt);
  });

  // this is a more general check than concurrent-warn:
  // in addition to checking the database condition, it also self-terminates
  // this hub if it has been running for quite some time. beyond that, in the future
  // there could be even more checks on top of that.
  router.get("/healthcheck", async (_, res: Response) => {
    const { txt, code } = await process_health_check(db, extra);
    res.status(code);
    res.type("txt");
    res.send(txt);
  });

  // /concurrent-warn -- could be used by kubernetes to decide whether or not to kill the container:
  // if below the warn threshold, returns the number of concurrent connections; if it hits the warn
  // threshold, returns a 404 error, meaning the hub may be unhealthy. Kubernetes will try a few times
  // before killing the container (see the illustrative probe sketch after this handler). Will also
  // return 404 if there is no working database connection.
  router.get("/concurrent-warn", (_, res) => {
    res.type("txt");
    if (!database_is_working()) {
      L("/concurrent-warn: not healthy, since database connection not working");
      res.status(404).end();
      return;
    }

    const c = db.concurrent();
    if (c >= db._concurrent_warn) {
      L(
        `/concurrent-warn: not healthy, since concurrent ${c} >= ${db._concurrent_warn}`
      );
      res.status(404).end();
      return;
    }
    res.send(`${c}`);
  });
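
  // A hypothetical Kubernetes liveness probe wired to the endpoint above (purely illustrative;
  // the port and thresholds are assumptions, not something defined in this codebase):
  //
  //   livenessProbe:
  //     httpGet:
  //       path: /concurrent-warn
  //       port: 9000
  //     periodSeconds: 30
  //     failureThreshold: 5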

  // Return number of concurrent connections (could be useful)
  router.get("/concurrent", (_, res) => {
    res.type("txt");
    res.send(`${db.concurrent()}`);
  });
}
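
// Quick manual checks against a locally running hub (a sketch; the port is whatever this
// hub's express server listens on – 9000 below is just an example):
//
//   curl -i http://localhost:9000/alive            # 200 "alive: YES", or 404 with a reason
//   curl -i http://localhost:9000/healthcheck      # aggregated checks, 404 if any check aborts
//   curl -s http://localhost:9000/concurrent-warn  # concurrent count, or 404 if unhealthy
//   curl -s http://localhost:9000/concurrent       # concurrent count, always 200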