Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/mptcp/sched.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0
2
/* Multipath TCP
3
*
4
* Copyright (c) 2022, SUSE.
5
*/
6
7
#define pr_fmt(fmt) "MPTCP: " fmt
8
9
#include <linux/kernel.h>
10
#include <linux/module.h>
11
#include <linux/list.h>
12
#include <linux/rculist.h>
13
#include <linux/spinlock.h>
14
#include "protocol.h"
15
16
/* Protects additions/removals on mptcp_sched_list; readers walk the
 * list under RCU (see mptcp_sched_find()).
 */
static DEFINE_SPINLOCK(mptcp_sched_list_lock);
/* All registered packet schedulers, including mptcp_sched_default. */
static LIST_HEAD(mptcp_sched_list);
static int mptcp_sched_default_get_send(struct mptcp_sock *msk)
20
{
21
struct sock *ssk;
22
23
ssk = mptcp_subflow_get_send(msk);
24
if (!ssk)
25
return -EINVAL;
26
27
mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
28
return 0;
29
}
30
31
static int mptcp_sched_default_get_retrans(struct mptcp_sock *msk)
32
{
33
struct sock *ssk;
34
35
ssk = mptcp_subflow_get_retrans(msk);
36
if (!ssk)
37
return -EINVAL;
38
39
mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
40
return 0;
41
}
42
43
static struct mptcp_sched_ops mptcp_sched_default = {
44
.get_send = mptcp_sched_default_get_send,
45
.get_retrans = mptcp_sched_default_get_retrans,
46
.name = "default",
47
.owner = THIS_MODULE,
48
};
49
50
/* Must be called with rcu read lock held */
51
struct mptcp_sched_ops *mptcp_sched_find(const char *name)
52
{
53
struct mptcp_sched_ops *sched, *ret = NULL;
54
55
list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
56
if (!strcmp(sched->name, name)) {
57
ret = sched;
58
break;
59
}
60
}
61
62
return ret;
63
}
64
65
/* Build string with list of available scheduler values.
66
* Similar to tcp_get_available_congestion_control()
67
*/
68
void mptcp_get_available_schedulers(char *buf, size_t maxlen)
69
{
70
struct mptcp_sched_ops *sched;
71
size_t offs = 0;
72
73
rcu_read_lock();
74
list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
75
offs += snprintf(buf + offs, maxlen - offs,
76
"%s%s",
77
offs == 0 ? "" : " ", sched->name);
78
79
if (WARN_ON_ONCE(offs >= maxlen))
80
break;
81
}
82
rcu_read_unlock();
83
}
84
85
int mptcp_validate_scheduler(struct mptcp_sched_ops *sched)
86
{
87
if (!sched->get_send) {
88
pr_err("%s does not implement required ops\n", sched->name);
89
return -EINVAL;
90
}
91
92
return 0;
93
}
94
95
int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
96
{
97
int ret;
98
99
ret = mptcp_validate_scheduler(sched);
100
if (ret)
101
return ret;
102
103
spin_lock(&mptcp_sched_list_lock);
104
if (mptcp_sched_find(sched->name)) {
105
spin_unlock(&mptcp_sched_list_lock);
106
return -EEXIST;
107
}
108
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
109
spin_unlock(&mptcp_sched_list_lock);
110
111
pr_debug("%s registered\n", sched->name);
112
return 0;
113
}
114
115
/* Remove @sched from the global scheduler list.
 *
 * The built-in default scheduler can never be unregistered; it must
 * stay available as the fallback for every msk.
 */
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
	if (sched == &mptcp_sched_default)
		return;

	spin_lock(&mptcp_sched_list_lock);
	list_del_rcu(&sched->list);
	spin_unlock(&mptcp_sched_list_lock);
}
/* One-time init: make the built-in default scheduler available. */
void mptcp_sched_init(void)
{
	mptcp_register_scheduler(&mptcp_sched_default);
}
int mptcp_init_sched(struct mptcp_sock *msk,
131
struct mptcp_sched_ops *sched)
132
{
133
if (!sched)
134
sched = &mptcp_sched_default;
135
136
if (!bpf_try_module_get(sched, sched->owner))
137
return -EBUSY;
138
139
msk->sched = sched;
140
if (msk->sched->init)
141
msk->sched->init(msk);
142
143
pr_debug("sched=%s\n", msk->sched->name);
144
145
return 0;
146
}
147
148
/* Detach and release the scheduler attached to @msk, if any.
 *
 * Drops the reference taken by mptcp_init_sched() after invoking the
 * scheduler's optional release callback.
 */
void mptcp_release_sched(struct mptcp_sock *msk)
{
	struct mptcp_sched_ops *sched = msk->sched;

	if (!sched)
		return;

	/* Clear the pointer before calling back into the scheduler so it
	 * is never observed through msk after release starts.
	 */
	msk->sched = NULL;
	if (sched->release)
		sched->release(msk);

	bpf_module_put(sched, sched->owner);
}
/* Set the subflow's scheduled flag; WRITE_ONCE pairs with the
 * READ_ONCE in mptcp_sched_get_send()/mptcp_sched_get_retrans().
 */
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}
int mptcp_sched_get_send(struct mptcp_sock *msk)
169
{
170
struct mptcp_subflow_context *subflow;
171
172
msk_owned_by_me(msk);
173
174
/* the following check is moved out of mptcp_subflow_get_send */
175
if (__mptcp_check_fallback(msk)) {
176
if (msk->first &&
177
__tcp_can_send(msk->first) &&
178
sk_stream_memory_free(msk->first)) {
179
mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
180
return 0;
181
}
182
return -EINVAL;
183
}
184
185
mptcp_for_each_subflow(msk, subflow) {
186
if (READ_ONCE(subflow->scheduled))
187
return 0;
188
}
189
190
if (msk->sched == &mptcp_sched_default || !msk->sched)
191
return mptcp_sched_default_get_send(msk);
192
return msk->sched->get_send(msk);
193
}
194
195
int mptcp_sched_get_retrans(struct mptcp_sock *msk)
196
{
197
struct mptcp_subflow_context *subflow;
198
199
msk_owned_by_me(msk);
200
201
/* the following check is moved out of mptcp_subflow_get_retrans */
202
if (__mptcp_check_fallback(msk))
203
return -EINVAL;
204
205
mptcp_for_each_subflow(msk, subflow) {
206
if (READ_ONCE(subflow->scheduled))
207
return 0;
208
}
209
210
if (msk->sched == &mptcp_sched_default || !msk->sched)
211
return mptcp_sched_default_get_retrans(msk);
212
if (msk->sched->get_retrans)
213
return msk->sched->get_retrans(msk);
214
return msk->sched->get_send(msk);
215
}
216
217