GitHub Repository: sagemathinc/cocalc
Path: blob/master/src/packages/util/api/throttle.ts

/*
Generic throttling protocol for rate limiting API requests.

It limits the number of requests per second, minute, and hour using a TTL
data structure that keeps track of all access times during each interval.
*/
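
/*
Illustrative example (assuming the DEFAULTS defined below, i.e. at most 3
requests per second for an endpoint with no entry in THROTTLE; the endpoint
name here is made up): the first three calls within one second for a given
account/endpoint succeed, and the fourth throws an Error explaining how long
to wait.

    throttle({ endpoint: "some/endpoint", account_id });  // ok
    throttle({ endpoint: "some/endpoint", account_id });  // ok
    throttle({ endpoint: "some/endpoint", account_id });  // ok
    throttle({ endpoint: "some/endpoint", account_id });  // throws "too many requests to some/endpoint; ..."
*/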

import TTLCache from "@isaacs/ttlcache";
import { plural } from "@cocalc/util/misc";

/*
We specify non-default throttling parameters for an endpoint *here*, rather than in
@cocalc/server, so that we can enforce them in various places. E.g., by specifying
them here, we can enforce them on both the frontend and the backend, with different
semantics: the backend enforcement is only needed if the frontend client is somehow
abusive (i.e., not our client, but one written by somebody else).

CAREFUL: if you make a change here, it won't be reflected in all clients, since they
use this hardcoded value rather than an api endpoint to get it.
*/

const THROTTLE = {
  "/accounts/get-names": {
    second: 3,
    minute: 50,
    hour: 500,
  },
  "purchases/is-purchase-allowed": {
    second: 7,
    minute: 30,
    hour: 300,
  },
  "purchases/stripe/get-payments": {
    second: 3,
    minute: 20,
    hour: 150,
  },
  "purchases/stripe/get-customer-session": {
    second: 1,
    minute: 3,
    hour: 40,
  },
  "purchases/get-purchases-admin": {
    // extra generous for admin
    second: 5,
    minute: 100,
    hour: 1000,
  },
  // i'm worried about abuse/bugs with message sending for now, so
  // pretty aggressive throttling:
  "user_query-messages": {
    minute: 6,
    hour: 100,
  },

  // pretty limiting for now -- this only applies to sending messages via the api
  "messages/send": {
    second: 1,
    minute: 5,
    hour: 60,
  },
} as const;

const DEFAULTS = {
  second: 3,
  minute: 15,
  hour: 200,
} as const;
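
/*
Illustrative note: an endpoint may specify only some intervals; maxPerInterval()
below fills in any missing ones from DEFAULTS.  For example, "user_query-messages"
above only sets minute and hour, so it resolves to

    { second: 3, minute: 6, hour: 100 }

and an endpoint with no entry in THROTTLE at all resolves to DEFAULTS.
*/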

type Interval = keyof typeof DEFAULTS;

const INTERVALS: Interval[] = ["second", "minute", "hour"];

const cache = {
  second: new TTLCache<string, number[]>({
    max: 100000,
    ttl: 1000,
    updateAgeOnGet: true,
  }),
  minute: new TTLCache<string, number[]>({
    max: 100000,
    ttl: 1000 * 60,
    updateAgeOnGet: true,
  }),
  hour: new TTLCache<string, number[]>({
    max: 100000,
    ttl: 1000 * 60 * 60,
    updateAgeOnGet: true,
  }),
};
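
/*
Note on the caches above: each TTLCache maps "account_id:endpoint" (or ":endpoint"
for global use) to the list of request timestamps seen during the corresponding
interval.  Per the @isaacs/ttlcache options used here, max bounds the number of
tracked keys, ttl matches the interval length, and updateAgeOnGet refreshes an
entry's TTL whenever it is read, so keys that stay active are not evicted
mid-window.
*/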

export default function throttle({
  endpoint,
  account_id,
}: {
  endpoint: string;
  // if not given, viewed as global
  account_id?: string;
}) {
  if (process["env"]?.["JEST_WORKER_ID"]) {
    // do not throttle when testing.
    return;
  }
  const key = `${account_id ? account_id : ""}:${endpoint}`;
  const m = maxPerInterval(endpoint);
  const now = Date.now();
  for (const interval of INTERVALS) {
    const c = cache[interval];
    if (c == null) continue; // can't happen
    const v = c.get(key);
    if (v == null) {
      c.set(key, [now]);
      continue;
    }
    // processInterval mutates v in place, so efficient
    processInterval(v, now, interval, m[interval], endpoint);
  }
}
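
/*
Usage sketch (illustrative only; the handler and import path below are hypothetical,
not part of this module): a caller invokes throttle() before doing the real work and
translates the thrown Error into a "try again later" response, e.g. HTTP 429.

    import throttle from "@cocalc/util/api/throttle";  // assumed import path

    function handleGetNames(account_id: string) {
      // throws if this account has exceeded the "/accounts/get-names" limits
      throttle({ endpoint: "/accounts/get-names", account_id });
      // ... proceed with the actual request; catch the Error at the API layer
      // and respond with status 429 and err.message ...
    }
*/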

const TO_MS = {
  second: 1000,
  minute: 1000 * 60,
  hour: 1000 * 60 * 60,
} as const;

function processInterval(
  v: number[],
  now: number,
  interval: Interval,
  maxPerInterval: number,
  endpoint: string,
) {
  const cutoff = now - TO_MS[interval];
  // mutate v so all numbers in it are >= cutoff:
  for (let i = 0; i < v.length; i++) {
    if (v[i] < cutoff) {
      v.splice(i, 1);
      i--; // adjust index due to array mutation
    }
  }
  if (v.length >= maxPerInterval) {
    const wait = Math.ceil((v[0] - cutoff) / 1000);
    const mesg = `too many requests to ${endpoint}; try again in ${wait} ${plural(wait, "second")} (rule: at most ${maxPerInterval} ${plural(maxPerInterval, "request")} per ${interval})`;
    // console.trace(mesg);
    throw Error(mesg);
  }
  v.push(now);
}
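
/*
Worked example (illustrative): suppose the rule is at most 3 requests per second and
the retained timestamps for a key are v = [t0, t1, t2], all within the last second.
For a new request at time now, cutoff = now - 1000, so nothing is pruned, v.length
(3) >= 3, and the request is rejected; the suggested wait is
ceil((t0 - cutoff) / 1000) seconds, i.e. roughly how long until the oldest timestamp
ages out of the one-second window.
*/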

function maxPerInterval(endpoint): {
  second: number;
  minute: number;
  hour: number;
} {
  const a = THROTTLE[endpoint];
  if (a == null) {
    return DEFAULTS;
  }
  return {
    second: a["second"] ?? DEFAULTS.second,
    minute: a["minute"] ?? DEFAULTS.minute,
    hour: a["hour"] ?? DEFAULTS.hour,
  };
}