GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/netpfil/pf/pf_ioctl.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 * $OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nl.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);

static void	pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void	pf_empty_kpool(struct pf_kpalist *);
static int	pfioctl(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static int	pf_begin_eth(uint32_t *, const char *);
static int	pf_rollback_eth(uint32_t, const char *);
static int	pf_commit_eth(uint32_t, const char *);
static void	pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int	pf_begin_altq(u_int32_t *);
static int	pf_rollback_altq(u_int32_t);
static int	pf_commit_altq(u_int32_t);
static int	pf_enable_altq(struct pf_altq *);
static int	pf_disable_altq(struct pf_altq *);
static void	pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int	pf_begin_rules(u_int32_t *, int, const char *);
static int	pf_rollback_rules(u_int32_t, int, char *);
static int	pf_setup_pfsync_matching(struct pf_kruleset *);
static void	pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void	pf_hash_rule(struct pf_krule *);
static void	pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int	pf_commit_rules(u_int32_t, int, char *);
static int	pf_addr_setup(struct pf_kruleset *,
		    struct pf_addr_wrap *, sa_family_t);
static void	pf_src_node_copy(const struct pf_ksrc_node *,
		    struct pf_src_node *);
#ifdef ALTQ
static int	pf_export_kaltq(struct pf_altq *,
		    struct pfioc_altq_v1 *, size_t);
static int	pf_import_kaltq(struct pfioc_altq_v1 *,
		    struct pf_altq *, size_t);
#endif /* ALTQ */

static void	pf_statelim_commit(void);
static void	pf_statelim_rollback(void);
static int	pf_sourcelim_check(void);
static void	pf_sourcelim_commit(void);
static void	pf_sourcelim_rollback(void);

VNET_DEFINE(struct pf_krule, pf_default_rule);

static __inline int	pf_krule_compare(struct pf_krule *,
			    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t, pf_tag_z);
#define	V_pf_tag_z	VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
MALLOC_DEFINE(M_PF, "pf", "pf(4)");
MALLOC_DEFINE(M_PF_STATE_LIM, "pf_state_lim", "pf(4) state limiter");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define	V_pf_filter_local	VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

#ifdef PF_DEFAULT_TO_DROP
VNET_DEFINE_STATIC(bool, default_to_drop) = true;
#else
VNET_DEFINE_STATIC(bool, default_to_drop);
#endif
#define	V_default_to_drop	VNET(default_to_drop)
SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
    &VNET_NAME(default_to_drop), false,
    "Make the default rule drop all packets.");

static void	pf_init_tagset(struct pf_tagset *, unsigned int *,
		    unsigned int);
static void	pf_cleanup_tagset(struct pf_tagset *);
static uint16_t	tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t	tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t tagname2tag(struct pf_tagset *, const char *, bool);
static void	tag_unref(struct pf_tagset *, u_int16_t);

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void	pf_clear_all_states(void);
static int	pf_killstates_row(struct pf_kstate_kill *,
		    struct pf_idhash *);
static int	pf_killstates_nv(struct pfioc_nv *);
static int	pf_clearstates_nv(struct pfioc_nv *);
static int	pf_getstate(struct pfioc_nv *);
static int	pf_getstatus(struct pfioc_nv *);
static int	pf_clear_tables(void);
static void	pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int	pf_keepcounters(struct pfioc_nv *);
static void	pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void	hook_pf_eth(void);
static void	hook_pf(void);
static void	dehook_pf_eth(void);
static void	dehook_pf(void);
static int	shutdown_pf(void);
static int	pf_load(void);
static void	pf_unload(void *);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define	V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define	V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid". We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define	V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE(struct rmlock, pf_tags_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock	VNET(pf_ioctl_lock)
struct sx pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t *pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}
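
/*
 * Usage sketch (illustrative, not from the FreeBSD tree; "ioc" and its
 * "name" field are hypothetical): an ioctl handler copies a user-supplied
 * string into a fixed-size kernel buffer and fails with EINVAL instead of
 * silently truncating when the input is not nul-terminated within the
 * destination size:
 *
 *	char name[PF_TAG_NAME_SIZE];
 *	int error;
 *
 *	error = pf_user_strcpy(name, ioc->name, sizeof(name));
 *	if (error != 0)
 *		return (error);
 */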

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
	V_pf_limits[PF_LIMIT_ANCHORS].limit = PF_ANCHOR_HIWAT;
	V_pf_limits[PF_LIMIT_ETH_ANCHORS].limit = PF_ANCHOR_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
	V_pf_default_rule.nr = (uint32_t)-1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different from OpenBSD, where reassembly is enabled by
	 * default. In FreeBSD we expect people to still use scrub rules and
	 * switch to the new syntax later, and only when they switch must they
	 * explicitly enable reassembly. We could change the default once the
	 * scrub rule functionality is hopefully removed some day in the future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < NCNT_MAX; i++)
		V_pf_status.ncounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket, int which)
{
	struct pf_kruleset *ruleset;
	struct pf_krule *rule;
	int rs_num;

	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	switch (which) {
	case PF_RDR:
		return (&rule->rdr);
	case PF_NAT:
		return (&rule->nat);
	case PF_RT:
		return (&rule->route);
	default:
panic("Unknow pool type %d", which);
491
	}
}
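
/*
 * Usage sketch (illustrative, not from the FreeBSD tree): fetch the
 * redirection pool of rule number 3 in the active filter ruleset of the
 * main anchor, with r_last = 0, active = 1 and check_ticket = 1. A NULL
 * return means the anchor, ticket, rule number or ruleset did not match:
 *
 *	struct pf_kpool *rdr;
 *
 *	rdr = pf_get_kpool("", ticket, PF_PASS, 3, 0, 1, 1, PF_RDR);
 *	if (rdr == NULL)
 *		return (EBUSY);
 */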

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	if (rule->rcv_kif)
		pfi_kkif_unref(rule->rcv_kif);
	pf_remove_kanchor(rule);
	pf_empty_kpool(&rule->rdr.list);
	pf_empty_kpool(&rule->nat.list);
	pf_empty_kpool(&rule->route.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
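
/*
 * Note on the sizing above (illustrative): because the table size is
 * forced to a power of two, reducing a hash value to a bucket index is a
 * single AND with "mask" instead of a modulo. E.g. with the default size
 * of 128, mask = 127 (0x7f):
 *
 *	idx = murmur3_32_hash(name, len, seed) & 127;
 *
 * which equals "hash % 128" but avoids the division.
 */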

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname, bool add_new)
{
	struct pf_tagname *tag;
	u_int32_t index;
	u_int16_t new_tagid;

	PF_TAGS_RLOCK_TRACKER;

	PF_TAGS_RLOCK();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			new_tagid = tag->tag;
			PF_TAGS_RUNLOCK();
			return (new_tagid);
		}

	/*
	 * When used for pfsync with queues we must not create new entries.
	 * Pf tags can be created just fine by this function, but queues
	 * require additional configuration. If they are missing on the
	 * target system we just ignore them.
	 */
	if (add_new == false) {
		printf("%s: Not creating a new tag\n", __func__);
		PF_TAGS_RUNLOCK();
		return (0);
	}

	/*
	 * If a new entry must be created do it under a write lock.
	 * But first search again, somebody could have created the tag
	 * between unlocking the read lock and locking the write lock.
	 */
	PF_TAGS_RUNLOCK();
	PF_TAGS_WLOCK();
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			new_tagid = tag->tag;
			PF_TAGS_WUNLOCK();
			return (new_tagid);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set. It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) {
		PF_TAGS_WUNLOCK();
		return (0);
	}

	/* Mark the tag as in use. Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL) {
		PF_TAGS_WUNLOCK();
		return (0);
	}
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	PF_TAGS_WUNLOCK();
	return (new_tagid);
}

static char *
tag2tagname(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname *t;
	uint16_t index;

	PF_TAGS_RLOCK_TRACKER;

	PF_TAGS_RLOCK();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			PF_TAGS_RUNLOCK();
			return (t->name);
		}

	PF_TAGS_RUNLOCK();
	return (NULL);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname *t;
	uint16_t index;

	PF_TAGS_WLOCK();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}

	PF_TAGS_WUNLOCK();
}

uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname, true));
}

static const char *
pf_tag2tagname(uint16_t tag)
{
	return (tag2tagname(&V_pf_tags, tag));
}
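
/*
 * Lifecycle sketch (illustrative, not from the FreeBSD tree): a tag name
 * is interned once and reference-counted afterwards, so repeated lookups
 * of the same name return the same 16-bit id, and the id is recycled into
 * the "avail" bitmap only when the last reference is dropped:
 *
 *	uint16_t tag;
 *
 *	tag = pf_tagname2tag("spammers");	ref 1, id allocated
 *	(void)pf_tagname2tag("spammers");	same id, ref 2
 *	tag_unref(&V_pf_tags, tag);		ref 1
 *	tag_unref(&V_pf_tags, tag);		ref 0, id returned to "avail"
 */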

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
		if (cur->ipsrc.neg != prev->ipsrc.neg ||
		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
		if (cur->ipdst.neg != prev->ipdst.neg ||
		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
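
/*
 * Worked illustration (not part of the original source): suppose rules
 * 0-3 all match on interface em0 and rule 4 is the first rule on a
 * different interface. The loop above leaves skip[PFE_SKIP_IFP] of rules
 * 0-3 pointing at rule 4, so when a packet arrives on another interface
 * the evaluator fails the interface test once at rule 0 and jumps
 * straight to rule 4, never touching rules 1-3.
 */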

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	return (pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : ""));
}
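
/*
 * Transaction sketch (illustrative, not from the FreeBSD tree): the
 * ticket returned by pf_begin_eth() must be presented again on commit,
 * which is how stale or concurrent userland updates are rejected:
 *
 *	uint32_t ticket;
 *	int error;
 *
 *	PF_RULES_WLOCK();
 *	error = pf_begin_eth(&ticket, "");
 *	if (error == 0) {
 *		(... stage rules on the inactive list ...)
 *		error = pf_commit_eth(ticket, "");
 *	}
 *	PF_RULES_WUNLOCK();
 */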

#ifdef ALTQ
uint16_t
pf_qname2qid(const char *qname, bool add_new)
{
	return (tagname2tag(&V_pf_qids, qname, add_new));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq *altq, *tmp;
	int error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq *altq, *tmp;
	int error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue *old_altqs, *old_altq_ifs;
	struct pf_altq *altq, *tmp;
	int err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been overridden
	 * by a new one; in that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet *ifp1;
	int error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq *a1, *a2, *a3;
	u_int32_t ticket;
	int error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname, true)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_PF, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_PF);
}

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global *tree;
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		pf_rule_tree_free(tree);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset *rs;
	struct pf_krule *rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;

	if (anchor[0])
		return (0);

	pf_statelim_rollback();
	pf_sourcelim_rollback();
	return (0);
}

#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));	\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));	\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
		    strlen(PF_OPTIMIZER_TABLE_PFX)))
			PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}
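
/*
 * Sketch of how the digest is used (illustrative, not from the FreeBSD
 * tree): two rules are considered identical when the MD5 digests over
 * their hashed fields match, which is what pf_krule_compare() below keys
 * the global red-black tree on. pf_commit_rules() relies on this to find
 * a new rule's predecessor and carry its counters over:
 *
 *	pf_hash_rule(new_rule);
 *	old_rule = RB_FIND(pf_krule_global, old_tree, new_rule);
 *	if (old_rule != NULL)
 *		(... roll the old rule's counters into new_rule ...)
 */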

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset *rs;
	struct pf_krule *rule, *old_rule;
	struct pf_krulequeue *old_rules;
	struct pf_krule_global *old_tree;
	int error;
	u_int32_t old_rcount;
	bool is_main_ruleset = anchor[0] == '\0';

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_sourcelim_check();
		if (error != 0)
			return (error);
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_rollup_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_rollup_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_rollup_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_rollup_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_rollup_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	pf_rule_tree_free(old_tree);

	/* statelim/sourcelim/queue defs only in the main ruleset */
	if (!is_main_ruleset || rs_num != PF_RULESET_FILTER)
		return (0);

	pf_statelim_commit();
	pf_sourcelim_commit();

	return (0);
}
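
/*
 * Transaction sketch (illustrative, not from the FreeBSD tree): like the
 * Ethernet variant above, filter rulesets are replaced in two phases
 * under the rules write lock, keyed by a ticket:
 *
 *	u_int32_t ticket;
 *	int error;
 *
 *	PF_RULES_WLOCK();
 *	error = pf_begin_rules(&ticket, PF_RULESET_FILTER, "");
 *	if (error == 0) {
 *		(... stage rules on the inactive list ...)
 *		error = pf_commit_rules(ticket, PF_RULESET_FILTER, "");
 *	}
 *	PF_RULES_WUNLOCK();
 */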

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX ctx;
	struct pf_krule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.rcount) {
			TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
			    entries) {
				pf_hash_rule_rolling(&ctx, rule);
			}
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

int
pf_statelim_add(const struct pfioc_statelim *ioc)
{
	struct pf_statelim *pfstlim;
	int error;
	size_t namelen;

	if (ioc->id < PF_STATELIM_ID_MIN ||
	    ioc->id > PF_STATELIM_ID_MAX)
		return (EINVAL);

	if (ioc->limit < PF_STATELIM_LIMIT_MIN ||
	    ioc->limit > PF_STATELIM_LIMIT_MAX)
		return (EINVAL);

	if ((ioc->rate.limit == 0) != (ioc->rate.seconds == 0))
		return (EINVAL);

	namelen = strnlen(ioc->name, sizeof(ioc->name));
	/* is the name from userland nul terminated? */
	if (namelen == sizeof(ioc->name))
		return (EINVAL);

	pfstlim = malloc(sizeof(*pfstlim), M_PF_STATE_LIM, M_WAITOK | M_ZERO);
	if (pfstlim == NULL)
		return (ENOMEM);

	pfstlim->pfstlim_id = ioc->id;
	if (strlcpy(pfstlim->pfstlim_nm, ioc->name,
	    sizeof(pfstlim->pfstlim_nm)) >= sizeof(pfstlim->pfstlim_nm)) {
		error = EINVAL;
		goto free;
	}
	pfstlim->pfstlim_limit = ioc->limit;
	pfstlim->pfstlim_rate.limit = ioc->rate.limit;
	pfstlim->pfstlim_rate.seconds = ioc->rate.seconds;

	if (pfstlim->pfstlim_rate.limit) {
		uint64_t bucket = SEC_TO_NSEC(pfstlim->pfstlim_rate.seconds);
		struct timespec ts;

		getnanouptime(&ts);

		pfstlim->pfstlim_rate_ts = SEC_TO_NSEC(ts.tv_sec) + ts.tv_nsec -
		    bucket;
		pfstlim->pfstlim_rate_token = bucket /
		    pfstlim->pfstlim_rate.limit;
		pfstlim->pfstlim_rate_bucket = bucket;
	}

	TAILQ_INIT(&pfstlim->pfstlim_states);
	mtx_init(&pfstlim->pfstlim_lock, "pf state limit", NULL, MTX_DEF);

	PF_RULES_WLOCK();
	if (ioc->ticket != pf_main_ruleset.rules[PF_RULESET_FILTER].inactive.ticket) {
		error = EBUSY;
		goto unlock;
	}

	if (RB_INSERT(pf_statelim_id_tree, &V_pf_statelim_id_tree_inactive,
	    pfstlim) != NULL) {
		error = EBUSY;
		goto unlock;
	}

	if (RB_INSERT(pf_statelim_nm_tree, &V_pf_statelim_nm_tree_inactive,
	    pfstlim) != NULL) {
		RB_REMOVE(pf_statelim_id_tree, &V_pf_statelim_id_tree_inactive,
		    pfstlim);
		error = EBUSY;
		goto unlock;
	}

	TAILQ_INSERT_HEAD(&V_pf_statelim_list_inactive, pfstlim, pfstlim_list);

	PF_RULES_WUNLOCK();

	return (0);

unlock:
	PF_RULES_WUNLOCK();

free:
	free(pfstlim, M_PF_STATE_LIM);

	return (error);
}
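
/*
 * Worked example for the rate fields above (illustrative): with
 * rate.limit = 100 states per rate.seconds = 10, the bucket is
 * SEC_TO_NSEC(10) = 10,000,000,000 ns and each token is worth
 * bucket / limit = 100,000,000 ns, i.e. one new state is earned every
 * 0.1 s on average, with bursts of up to 100 allowed by a full bucket.
 */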

static void
pf_statelim_unlink(struct pf_statelim *pfstlim,
    struct pf_state_link_list *garbage)
{
	struct pf_state_link *pfl;

	/* unwire the links */
	TAILQ_FOREACH(pfl, &pfstlim->pfstlim_states, pfl_link) {
		struct pf_kstate *s = pfl->pfl_state;

		/* if !rmst */
		PF_STATE_LOCK(s);
		s->statelim = 0;
		SLIST_REMOVE(&s->linkage, pfl, pf_state_link, pfl_linkage);
		PF_STATE_UNLOCK(s);
	}

	/* take the list away */
	TAILQ_CONCAT(garbage, &pfstlim->pfstlim_states, pfl_link);
	pfstlim->pfstlim_inuse = 0;
}

void
pf_statelim_commit(void)
{
	struct pf_statelim *pfstlim, *npfstlim, *opfstlim;
	struct pf_statelim_list l = TAILQ_HEAD_INITIALIZER(l);
	struct pf_state_link_list garbage = TAILQ_HEAD_INITIALIZER(garbage);
	struct pf_state_link *pfl, *npfl;

	PF_RULES_WASSERT();

	/* merge the new statelims into the current set */

	/* start with an empty active list */
	TAILQ_CONCAT(&l, &V_pf_statelim_list_active, pfstlim_list);

	/* beware, the inactive bits get messed up here */

	/* try putting pending statelims into the active tree */
	TAILQ_FOREACH_SAFE(pfstlim, &V_pf_statelim_list_inactive, pfstlim_list,
	    npfstlim) {
		opfstlim = RB_INSERT(pf_statelim_id_tree,
		    &V_pf_statelim_id_tree_active, pfstlim);
		if (opfstlim != NULL) {
			/* this statelim already exists, merge */
			opfstlim->pfstlim_limit = pfstlim->pfstlim_limit;
			opfstlim->pfstlim_rate.limit =
			    pfstlim->pfstlim_rate.limit;
			opfstlim->pfstlim_rate.seconds =
			    pfstlim->pfstlim_rate.seconds;

			opfstlim->pfstlim_rate_ts = pfstlim->pfstlim_rate_ts;
			opfstlim->pfstlim_rate_token =
			    pfstlim->pfstlim_rate_token;
			opfstlim->pfstlim_rate_bucket =
			    pfstlim->pfstlim_rate_bucket;

			memcpy(opfstlim->pfstlim_nm, pfstlim->pfstlim_nm,
			    sizeof(opfstlim->pfstlim_nm));

			/* use the existing statelim instead */
			free(pfstlim, M_PF_STATE_LIM);
			TAILQ_REMOVE(&l, opfstlim, pfstlim_list);
			pfstlim = opfstlim;
		}

		TAILQ_INSERT_TAIL(&V_pf_statelim_list_active, pfstlim,
		    pfstlim_list);
	}

	/* clean up the now unused statelims from the old set */
	TAILQ_FOREACH_SAFE(pfstlim, &l, pfstlim_list, npfstlim) {
		pf_statelim_unlink(pfstlim, &garbage);

		RB_REMOVE(pf_statelim_id_tree, &V_pf_statelim_id_tree_active,
		    pfstlim);

		free(pfstlim, M_PF_STATE_LIM);
	}

	/* fix up the inactive tree */
	RB_INIT(&V_pf_statelim_id_tree_inactive);
	RB_INIT(&V_pf_statelim_nm_tree_inactive);
	TAILQ_INIT(&V_pf_statelim_list_inactive);

	TAILQ_FOREACH_SAFE(pfl, &garbage, pfl_link, npfl)
		free(pfl, M_PF_STATE_LINK);
}

static void
pf_sourcelim_unlink(struct pf_sourcelim *pfsrlim,
    struct pf_state_link_list *garbage)
{
	extern struct pf_source_list pf_source_gc;
	struct pf_source *pfsr;
	struct pf_state_link *pfl;

	PF_RULES_WASSERT();

	while ((pfsr = RB_ROOT(&pfsrlim->pfsrlim_sources)) != NULL) {
		RB_REMOVE(pf_source_tree, &pfsrlim->pfsrlim_sources, pfsr);
		RB_REMOVE(pf_source_ioc_tree, &pfsrlim->pfsrlim_ioc_sources,
		    pfsr);
		if (pfsr->pfsr_inuse == 0)
			TAILQ_REMOVE(&pf_source_gc, pfsr, pfsr_empty_gc);

		/* unwire the links */
		TAILQ_FOREACH(pfl, &pfsr->pfsr_states, pfl_link) {
			struct pf_kstate *s = pfl->pfl_state;

			PF_STATE_LOCK(s);
			/* if !rmst */
			s->sourcelim = 0;
			SLIST_REMOVE(&s->linkage, pfl, pf_state_link,
			    pfl_linkage);
			PF_STATE_UNLOCK(s);
		}

		/* take the list away */
		TAILQ_CONCAT(garbage, &pfsr->pfsr_states, pfl_link);

		free(pfsr, M_PF_SOURCE_LIM);
	}
}

int
pf_sourcelim_check(void)
{
	struct pf_sourcelim *pfsrlim, *npfsrlim;

	PF_RULES_WASSERT();

	/* check if we can merge */

	TAILQ_FOREACH(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list) {
		npfsrlim = RB_FIND(pf_sourcelim_id_tree,
		    &V_pf_sourcelim_id_tree_active, pfsrlim);

		/* new config, no conflict */
		if (npfsrlim == NULL)
			continue;

		/* nothing is tracked at the moment, no conflict */
		if (RB_EMPTY(&npfsrlim->pfsrlim_sources))
			continue;

		if (strcmp(npfsrlim->pfsrlim_overload.name,
		    pfsrlim->pfsrlim_overload.name) != 0)
			return (EBUSY);

		/*
		 * we should allow the prefixlens to get shorter
		 * and merge pf_source entries.
		 */

		if ((npfsrlim->pfsrlim_ipv4_prefix !=
		    pfsrlim->pfsrlim_ipv4_prefix) ||
		    (npfsrlim->pfsrlim_ipv6_prefix !=
		    pfsrlim->pfsrlim_ipv6_prefix))
			return (EBUSY);
	}

	return (0);
}
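
/*
 * Illustration (not part of the original source): the prefix lengths
 * decide how sources are aggregated, so a reload may not change them
 * while sources are being tracked. E.g. with pfsrlim_ipv4_prefix = 24,
 * the addresses 192.0.2.10 and 192.0.2.99 are masked to the same
 * pf_source entry (192.0.2.0/24); switching to a /32 would split that
 * entry, which is why pf_sourcelim_check() returns EBUSY in that case.
 */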

void
pf_sourcelim_commit(void)
{
	struct pf_sourcelim *pfsrlim, *npfsrlim, *opfsrlim;
	struct pf_sourcelim_list l = TAILQ_HEAD_INITIALIZER(l);
	struct pf_state_link_list garbage = TAILQ_HEAD_INITIALIZER(garbage);
	struct pf_state_link *pfl, *npfl;

	PF_RULES_WASSERT();

	/* merge the new sourcelims into the current set */

	/* start with an empty active list */
	TAILQ_CONCAT(&l, &V_pf_sourcelim_list_active, pfsrlim_list);

	/* beware, the inactive bits get messed up here */

	/* try putting pending sourcelims into the active tree */
	TAILQ_FOREACH_SAFE(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list,
	    npfsrlim) {
		opfsrlim = RB_INSERT(pf_sourcelim_id_tree,
		    &V_pf_sourcelim_id_tree_active, pfsrlim);
		if (opfsrlim != NULL) {
			/* this sourcelim already exists, merge */
			opfsrlim->pfsrlim_entries = pfsrlim->pfsrlim_entries;
			opfsrlim->pfsrlim_limit = pfsrlim->pfsrlim_limit;
			opfsrlim->pfsrlim_ipv4_prefix =
			    pfsrlim->pfsrlim_ipv4_prefix;
			opfsrlim->pfsrlim_ipv6_prefix =
			    pfsrlim->pfsrlim_ipv6_prefix;
			opfsrlim->pfsrlim_rate.limit =
			    pfsrlim->pfsrlim_rate.limit;
			opfsrlim->pfsrlim_rate.seconds =
			    pfsrlim->pfsrlim_rate.seconds;

			opfsrlim->pfsrlim_ipv4_mask =
			    pfsrlim->pfsrlim_ipv4_mask;
			opfsrlim->pfsrlim_ipv6_mask =
			    pfsrlim->pfsrlim_ipv6_mask;

/* keep the existing pfstlim_rate_ts */
1906
1907
opfsrlim->pfsrlim_rate_token =
1908
pfsrlim->pfsrlim_rate_token;
1909
opfsrlim->pfsrlim_rate_bucket =
1910
pfsrlim->pfsrlim_rate_bucket;
1911
1912
if (opfsrlim->pfsrlim_overload.table != NULL) {
1913
pfr_detach_table(
1914
opfsrlim->pfsrlim_overload.table);
1915
}
1916
1917
strlcpy(opfsrlim->pfsrlim_overload.name,
1918
pfsrlim->pfsrlim_overload.name,
1919
sizeof(opfsrlim->pfsrlim_overload.name));
1920
opfsrlim->pfsrlim_overload.hwm =
1921
pfsrlim->pfsrlim_overload.hwm;
1922
opfsrlim->pfsrlim_overload.lwm =
1923
pfsrlim->pfsrlim_overload.lwm;
1924
opfsrlim->pfsrlim_overload.table =
1925
pfsrlim->pfsrlim_overload.table,
1926
1927
memcpy(opfsrlim->pfsrlim_nm, pfsrlim->pfsrlim_nm,
1928
sizeof(opfsrlim->pfsrlim_nm));
1929
1930
/* use the existing sourcelim instead */
1931
free(pfsrlim, M_PF_SOURCE_LIM);
1932
TAILQ_REMOVE(&l, opfsrlim, pfsrlim_list);
1933
pfsrlim = opfsrlim;
1934
}
1935
1936
TAILQ_INSERT_TAIL(&V_pf_sourcelim_list_active, pfsrlim,
1937
pfsrlim_list);
1938
}
1939
1940
/* clean up the now unused sourcelims from the old set */
1941
TAILQ_FOREACH_SAFE(pfsrlim, &l, pfsrlim_list, npfsrlim) {
1942
pf_sourcelim_unlink(pfsrlim, &garbage);
1943
1944
RB_REMOVE(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_active,
1945
pfsrlim);
1946
1947
if (pfsrlim->pfsrlim_overload.table != NULL)
1948
pfr_detach_table(pfsrlim->pfsrlim_overload.table);
1949
1950
free(pfsrlim, M_PF_SOURCE_LIM);
1951
}
1952
1953
/* fix up the inactive tree */
1954
RB_INIT(&V_pf_sourcelim_id_tree_inactive);
1955
RB_INIT(&V_pf_sourcelim_nm_tree_inactive);
1956
TAILQ_INIT(&V_pf_sourcelim_list_inactive);
1957
1958
TAILQ_FOREACH_SAFE(pfl, &garbage, pfl_link, npfl)
1959
free(pfl, M_PF_STATE_LINK);
1960
}
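
/*
 * Note on the merge above: when a pending sourcelim shares an id with
 * an active one, the active struct (and with it the pfsrlim_sources
 * tree of tracked sources) is kept and only its parameters are
 * overwritten, so e.g. reloading a ruleset does not forget which
 * addresses are currently over their limits.
 */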

void
pf_statelim_rollback(void)
{
	struct pf_statelim *pfstlim, *npfstlim;

	PF_RULES_WASSERT();

	TAILQ_FOREACH_SAFE(pfstlim, &V_pf_statelim_list_inactive, pfstlim_list,
	    npfstlim)
		free(pfstlim, M_PF_STATE_LIM);

	TAILQ_INIT(&V_pf_statelim_list_inactive);
	RB_INIT(&V_pf_statelim_id_tree_inactive);
	RB_INIT(&V_pf_statelim_nm_tree_inactive);
}

struct pf_statelim *
pf_statelim_rb_find(struct pf_statelim_id_tree *tree, struct pf_statelim *key)
{
	PF_RULES_ASSERT();

	return (RB_FIND(pf_statelim_id_tree, tree, key));
}

struct pf_statelim *
pf_statelim_rb_nfind(struct pf_statelim_id_tree *tree, struct pf_statelim *key)
{
	PF_RULES_ASSERT();

	return (RB_NFIND(pf_statelim_id_tree, tree, key));
}

int
pf_statelim_get(struct pfioc_statelim *ioc,
    struct pf_statelim *(*rbt_op)(struct pf_statelim_id_tree *,
    struct pf_statelim *))
{
	struct pf_statelim key = { .pfstlim_id = ioc->id };
	struct pf_statelim *pfstlim;
	int error = 0;
	PF_RULES_RLOCK_TRACKER;

	PF_RULES_RLOCK();

	pfstlim = (*rbt_op)(&V_pf_statelim_id_tree_active, &key);
	if (pfstlim == NULL) {
		error = ENOENT;
		goto unlock;
	}

	ioc->id = pfstlim->pfstlim_id;
	ioc->limit = pfstlim->pfstlim_limit;
	ioc->rate.limit = pfstlim->pfstlim_rate.limit;
	ioc->rate.seconds = pfstlim->pfstlim_rate.seconds;
	CTASSERT(sizeof(ioc->name) == sizeof(pfstlim->pfstlim_nm));
	memcpy(ioc->name, pfstlim->pfstlim_nm, sizeof(ioc->name));

	ioc->inuse = pfstlim->pfstlim_inuse;
	ioc->admitted = pfstlim->pfstlim_counters.admitted;
	ioc->hardlimited = pfstlim->pfstlim_counters.hardlimited;
	ioc->ratelimited = pfstlim->pfstlim_counters.ratelimited;

unlock:
	PF_RULES_RUNLOCK();

	return (error);
}
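
/*
 * The rbt_op argument selects between an exact lookup and an
 * iterator: pf_statelim_rb_find() answers "get this id", while
 * pf_statelim_rb_nfind() returns the first statelim with an id >= the
 * key, so a caller can walk all entries by re-issuing the call with
 * the last returned id + 1.  (A sketch of the assumed iteration, not
 * a verbatim caller from this file.)
 */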

int
pf_sourcelim_add(const struct pfioc_sourcelim *ioc)
{
	struct pf_sourcelim *pfsrlim;
	int error;
	size_t namelen, tablelen;
	unsigned int prefix;
	size_t i;

	if (ioc->id < PF_SOURCELIM_ID_MIN ||
	    ioc->id > PF_SOURCELIM_ID_MAX)
		return (EINVAL);

	if (ioc->entries < 1)
		return (EINVAL);

	if (ioc->limit < 1)
		return (EINVAL);

	/* a rate limit and a rate interval only make sense together */
	if ((ioc->rate.limit == 0) != (ioc->rate.seconds == 0))
		return (EINVAL);

	if (ioc->inet_prefix > 32)
		return (EINVAL);
	if (ioc->inet6_prefix > 128)
		return (EINVAL);

	namelen = strnlen(ioc->name, sizeof(ioc->name));
	/* is the name from userland nul terminated? */
	if (namelen == sizeof(ioc->name))
		return (EINVAL);

	tablelen = strnlen(ioc->overload_tblname,
	    sizeof(ioc->overload_tblname));
	/* is the name from userland nul terminated? */
	if (tablelen == sizeof(ioc->overload_tblname))
		return (EINVAL);
	if (tablelen != 0) {
		if (ioc->overload_hwm == 0)
			return (EINVAL);

		if (ioc->overload_hwm < ioc->overload_lwm)
			return (EINVAL);
	}

	/* M_WAITOK allocations do not fail, so no NULL check is needed */
	pfsrlim = malloc(sizeof(*pfsrlim), M_PF_SOURCE_LIM, M_WAITOK | M_ZERO);

	pfsrlim->pfsrlim_id = ioc->id;
	pfsrlim->pfsrlim_entries = ioc->entries;
	pfsrlim->pfsrlim_limit = ioc->limit;
	pfsrlim->pfsrlim_ipv4_prefix = ioc->inet_prefix;
	pfsrlim->pfsrlim_ipv6_prefix = ioc->inet6_prefix;
	pfsrlim->pfsrlim_rate.limit = ioc->rate.limit;
	pfsrlim->pfsrlim_rate.seconds = ioc->rate.seconds;
	if (strlcpy(pfsrlim->pfsrlim_overload.name, ioc->overload_tblname,
	    sizeof(pfsrlim->pfsrlim_overload.name)) >=
	    sizeof(pfsrlim->pfsrlim_overload.name)) {
		error = EINVAL;
		goto free;
	}
	pfsrlim->pfsrlim_overload.hwm = ioc->overload_hwm;
	pfsrlim->pfsrlim_overload.lwm = ioc->overload_lwm;
	if (strlcpy(pfsrlim->pfsrlim_nm, ioc->name,
	    sizeof(pfsrlim->pfsrlim_nm)) >= sizeof(pfsrlim->pfsrlim_nm)) {
		error = EINVAL;
		goto free;
	}

	if (pfsrlim->pfsrlim_rate.limit) {
		/* token bucket in nanoseconds, one token per admission */
		uint64_t bucket = pfsrlim->pfsrlim_rate.seconds * 1000000000ULL;

		pfsrlim->pfsrlim_rate_token = bucket /
		    pfsrlim->pfsrlim_rate.limit;
		pfsrlim->pfsrlim_rate_bucket = bucket;
	}

	/*
	 * A prefix of 0 keeps the all-zeroes mask from M_ZERO above and
	 * avoids an undefined 32-bit shift by 32.
	 */
	if (pfsrlim->pfsrlim_ipv4_prefix > 0)
		pfsrlim->pfsrlim_ipv4_mask.v4.s_addr = htonl(
		    0xffffffffU << (32 - pfsrlim->pfsrlim_ipv4_prefix));

	prefix = pfsrlim->pfsrlim_ipv6_prefix;
	for (i = 0; i < nitems(pfsrlim->pfsrlim_ipv6_mask.addr32); i++) {
		if (prefix == 0) {
			/* the memory is already zeroed */
			break;
		}
		if (prefix < 32) {
			pfsrlim->pfsrlim_ipv6_mask.addr32[i] = htonl(
			    0xffffffff << (32 - prefix));
			break;
		}

		pfsrlim->pfsrlim_ipv6_mask.addr32[i] = htonl(0xffffffff);
		prefix -= 32;
	}

	RB_INIT(&pfsrlim->pfsrlim_sources);
	mtx_init(&pfsrlim->pfsrlim_lock, "pf source limit", NULL, MTX_DEF);

	PF_RULES_WLOCK();
	if (ioc->ticket !=
	    pf_main_ruleset.rules[PF_RULESET_FILTER].inactive.ticket) {
		error = EBUSY;
		goto unlock;
	}

	if (pfsrlim->pfsrlim_overload.name[0] != '\0') {
		pfsrlim->pfsrlim_overload.table = pfr_attach_table(
		    &pf_main_ruleset, pfsrlim->pfsrlim_overload.name);
		if (pfsrlim->pfsrlim_overload.table == NULL) {
			error = EINVAL;
			goto unlock;
		}
	}

	if (RB_INSERT(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_inactive,
	    pfsrlim) != NULL) {
		error = EBUSY;
		goto unlock;
	}

	if (RB_INSERT(pf_sourcelim_nm_tree, &V_pf_sourcelim_nm_tree_inactive,
	    pfsrlim) != NULL) {
		/* undo the id tree insert before the entry is freed */
		RB_REMOVE(pf_sourcelim_id_tree,
		    &V_pf_sourcelim_id_tree_inactive, pfsrlim);
		error = EBUSY;
		goto unlock;
	}

	TAILQ_INSERT_HEAD(&V_pf_sourcelim_list_inactive, pfsrlim, pfsrlim_list);

	PF_RULES_WUNLOCK();

	return (0);

unlock:
	/* do not leak the overload table reference on the error path */
	if (pfsrlim->pfsrlim_overload.table != NULL)
		pfr_detach_table(pfsrlim->pfsrlim_overload.table);
	PF_RULES_WUNLOCK();

free:
	free(pfsrlim, M_PF_SOURCE_LIM);

	return (error);
}
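
/*
 * Worked example for the rate bucket above: with rate.limit = 100 and
 * rate.seconds = 10 the bucket holds 10 * 10^9 ns and each admitted
 * state costs a token of 10^10 / 100 = 10^8 ns, i.e. at most 100
 * admissions per 10 second window once the bucket has drained.
 * Likewise for the masks: inet6_prefix = 48 yields addr32[] =
 * { 0xffffffff, 0xffff0000, 0, 0 } in network byte order.
 */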

void
pf_sourcelim_rollback(void)
{
	struct pf_sourcelim *pfsrlim, *npfsrlim;

	PF_RULES_WASSERT();

	TAILQ_FOREACH_SAFE(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list,
	    npfsrlim) {
		if (pfsrlim->pfsrlim_overload.table != NULL)
			pfr_detach_table(pfsrlim->pfsrlim_overload.table);

		free(pfsrlim, M_PF_SOURCE_LIM);
	}

	TAILQ_INIT(&V_pf_sourcelim_list_inactive);
	RB_INIT(&V_pf_sourcelim_id_tree_inactive);
	RB_INIT(&V_pf_sourcelim_nm_tree_inactive);
}

struct pf_sourcelim *
pf_sourcelim_rb_find(struct pf_sourcelim_id_tree *tree,
    struct pf_sourcelim *key)
{
	PF_RULES_ASSERT();
	return (RB_FIND(pf_sourcelim_id_tree, tree, key));
}

struct pf_sourcelim *
pf_sourcelim_rb_nfind(struct pf_sourcelim_id_tree *tree,
    struct pf_sourcelim *key)
{
	PF_RULES_ASSERT();
	return (RB_NFIND(pf_sourcelim_id_tree, tree, key));
}

int
pf_sourcelim_get(struct pfioc_sourcelim *ioc,
    struct pf_sourcelim *(*rbt_op)(struct pf_sourcelim_id_tree *,
    struct pf_sourcelim *))
{
	struct pf_sourcelim key = { .pfsrlim_id = ioc->id };
	struct pf_sourcelim *pfsrlim;
	int error = 0;
	PF_RULES_RLOCK_TRACKER;

	PF_RULES_RLOCK();

	pfsrlim = (*rbt_op)(&V_pf_sourcelim_id_tree_active, &key);
	if (pfsrlim == NULL) {
		error = ESRCH;
		goto unlock;
	}

	ioc->id = pfsrlim->pfsrlim_id;
	ioc->entries = pfsrlim->pfsrlim_entries;
	ioc->limit = pfsrlim->pfsrlim_limit;
	ioc->inet_prefix = pfsrlim->pfsrlim_ipv4_prefix;
	ioc->inet6_prefix = pfsrlim->pfsrlim_ipv6_prefix;
	ioc->rate.limit = pfsrlim->pfsrlim_rate.limit;
	ioc->rate.seconds = pfsrlim->pfsrlim_rate.seconds;

	CTASSERT(sizeof(ioc->overload_tblname) ==
	    sizeof(pfsrlim->pfsrlim_overload.name));
	memcpy(ioc->overload_tblname, pfsrlim->pfsrlim_overload.name,
	    sizeof(pfsrlim->pfsrlim_overload.name));
	ioc->overload_hwm = pfsrlim->pfsrlim_overload.hwm;
	ioc->overload_lwm = pfsrlim->pfsrlim_overload.lwm;

	CTASSERT(sizeof(ioc->name) == sizeof(pfsrlim->pfsrlim_nm));
	memcpy(ioc->name, pfsrlim->pfsrlim_nm, sizeof(ioc->name));
	/* XXX overload table thing */

	ioc->nentries = pfsrlim->pfsrlim_nsources;

	ioc->inuse = pfsrlim->pfsrlim_counters.inuse;
	ioc->addrallocs = pfsrlim->pfsrlim_counters.addrallocs;
	ioc->addrnomem = pfsrlim->pfsrlim_counters.addrnomem;
	ioc->admitted = pfsrlim->pfsrlim_counters.admitted;
	ioc->addrlimited = pfsrlim->pfsrlim_counters.addrlimited;
	ioc->hardlimited = pfsrlim->pfsrlim_counters.hardlimited;
	ioc->ratelimited = pfsrlim->pfsrlim_counters.ratelimited;

unlock:
	PF_RULES_RUNLOCK();

	return (error);
}

struct pf_source *
pf_source_rb_find(struct pf_source_ioc_tree *tree,
    struct pf_source *key)
{
	PF_RULES_ASSERT();

	return (RB_FIND(pf_source_ioc_tree, tree, key));
}

struct pf_source *
pf_source_rb_nfind(struct pf_source_ioc_tree *tree,
    struct pf_source *key)
{
	PF_RULES_ASSERT();

	return (RB_NFIND(pf_source_ioc_tree, tree, key));
}

int
pf_source_clr(struct pfioc_source_kill *ioc)
{
	extern struct pf_source_list pf_source_gc;
	struct pf_sourcelim plkey = {
		.pfsrlim_id = ioc->id,
	};
	struct pf_source skey = {
		.pfsr_af = ioc->af,
		.pfsr_rdomain = ioc->rdomain,
		.pfsr_addr = ioc->addr,
	};
	struct pf_sourcelim *pfsrlim;
	struct pf_source *pfsr;
	struct pf_state_link *pfl, *npfl;
	int error = 0;
	unsigned int gen;

	if (ioc->rmstates) {
		/* XXX userland wants the states removed too */
		return (EOPNOTSUPP);
	}

	PF_RULES_WLOCK();

	pfsrlim = pf_sourcelim_rb_find(&V_pf_sourcelim_id_tree_active, &plkey);
	if (pfsrlim == NULL) {
		error = ESRCH;
		goto unlock;
	}

	pfsr = pf_source_rb_find(&pfsrlim->pfsrlim_ioc_sources, &skey);
	if (pfsr == NULL) {
		error = ENOENT;
		goto unlock;
	}

	RB_REMOVE(pf_source_tree, &pfsrlim->pfsrlim_sources, pfsr);
	RB_REMOVE(pf_source_ioc_tree, &pfsrlim->pfsrlim_ioc_sources, pfsr);
	if (pfsr->pfsr_inuse == 0)
		TAILQ_REMOVE(&pf_source_gc, pfsr, pfsr_empty_gc);

	gen = pf_sourcelim_enter(pfsrlim);
	pfsrlim->pfsrlim_nsources--;
	pfsrlim->pfsrlim_counters.inuse -= pfsr->pfsr_inuse;
	pf_sourcelim_leave(pfsrlim, gen);

	/* unwire the links */
	TAILQ_FOREACH(pfl, &pfsr->pfsr_states, pfl_link) {
		struct pf_kstate *st = pfl->pfl_state;

		/* if !rmst */
		st->sourcelim = 0;
		SLIST_REMOVE(&st->linkage, pfl, pf_state_link, pfl_linkage);
	}

	PF_RULES_WUNLOCK();

	TAILQ_FOREACH_SAFE(pfl, &pfsr->pfsr_states, pfl_link, npfl)
		free(pfl, M_PF_STATE_LINK);

	free(pfsr, M_PF_SOURCE_LIM);

	return (0);

unlock:
	PF_RULES_WUNLOCK();

	return (error);
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int secs = time_uptime;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule != NULL)
		out->rule.nr = in->rule->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	/* Export creation and expiry as times relative to now. */
	out->creation = secs - in->creation;
	out->expire = in->expire;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	out->conn_rate.limit = in->conn_rate.limit;
	out->conn_rate.seconds = in->conn_rate.seconds;
	/* If there's no limit there's no counter_rate. */
	if (in->conn_rate.cr != NULL)
		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
}
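
/*
 * Example of the relative-time export above: with time_uptime = 1000,
 * a node created at uptime 400 that expires at uptime 1300 is copied
 * out as creation = 600 (its age) and expire = 300 (seconds left).
 */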

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		/* saturate the wider in-kernel fields for the v0 layout */
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
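
/*
 * Example of the saturation above: a v1 ifbandwidth of 40Gb/s
 * (40000000000) does not fit the 32-bit v0 field, so a v0 consumer
 * sees SATU32(40000000000) = UINT_MAX rather than a wrapped value.
 */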

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}

static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq *altq;
	u_int32_t nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
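
/*
 * Note on the numbering above: n indexes a virtual concatenation of
 * V_pf_altq_ifs_active followed by V_pf_altqs_active; e.g. with two
 * interface entries and three queue entries, n = 0..1 returns the
 * interface entries and n = 2..4 the queues.
 */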
#endif /* ALTQ */

struct pf_krule *
pf_krule_alloc(void)
{
	struct pf_krule *rule;

	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);
	return (rule);
}

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
		counter_u64_free(rule->src_nodes[sn_type]);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);

	mtx_destroy(&rule->nat.mtx);
	mtx_destroy(&rule->rdr.mtx);
	mtx_destroy(&rule->route.mtx);
	free(rule, M_PFRULE);
}

void
pf_krule_clear_counters(struct pf_krule *rule)
{
	pf_counter_u64_zero(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_zero(&rule->packets[i]);
		pf_counter_u64_zero(&rule->bytes[i]);
	}
	counter_u64_zero(rule->states_tot);
}
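
/*
 * states_cur is left alone above: it tracks the states currently
 * attached to the rule, so zeroing it would corrupt the live count,
 * while states_tot is a pure statistic and can be reset.
 */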

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rdr);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	/* pf_rule->timeout is smaller than pf_krule->timeout */
	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}

int
pf_ioctl_getrules(struct pfioc_rule *pr)
{
	PF_RULES_RLOCK_TRACKER;
	struct pf_kruleset *ruleset;
	struct pf_krule *tail;
	int rs_num;

	PF_RULES_RLOCK();
	ruleset = pf_find_kruleset(pr->anchor);
	if (ruleset == NULL) {
		PF_RULES_RUNLOCK();
		return (EINVAL);
	}
	rs_num = pf_get_ruleset_number(pr->rule.action);
	if (rs_num >= PF_RULESET_MAX) {
		PF_RULES_RUNLOCK();
		return (EINVAL);
	}
	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
	    pf_krulequeue);
	if (tail)
		pr->nr = tail->nr + 1;
	else
		pr->nr = 0;
	pr->ticket = ruleset->rules[rs_num].active.ticket;
	PF_RULES_RUNLOCK();

	return (0);
}
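
/*
 * pr->nr comes back as the rule count (last rule number + 1) and
 * pr->ticket pins the ruleset generation, so the assumed userland
 * pattern is to call this once and then fetch rules 0..nr-1
 * individually, passing the same ticket so a concurrent ruleset swap
 * is detected.
 */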

static int
pf_rule_checkaf(struct pf_krule *r)
{
	switch (r->af) {
	case 0:
		if (r->rule_flag & PFRULE_AFTO)
			return (EPFNOSUPPORT);
		break;
#ifdef INET
	case AF_INET:
		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
			return (EPFNOSUPPORT);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
			return (EPFNOSUPPORT);
		break;
#endif /* INET6 */
	default:
		return (EPFNOSUPPORT);
	}

	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
		return (EPFNOSUPPORT);

	return (0);
}

static int
pf_validate_range(uint8_t op, uint16_t port[2])
{
	uint16_t a = ntohs(port[0]);
	uint16_t b = ntohs(port[1]);

	if ((op == PF_OP_RRG && a > b) ||	/* 34:12,  i.e. none */
	    (op == PF_OP_IRG && a >= b) ||	/* 34><12, i.e. none */
	    (op == PF_OP_XRG && a > b))		/* 34<>22, i.e. all */
		return (1);
	return (0);
}
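
/*
 * Example: "port 2000:1000" arrives as PF_OP_RRG with a = 2000 and
 * b = 1000, an empty range, so the rule is rejected with EINVAL by
 * the callers below instead of silently never matching.
 */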

static int
pf_chk_limiter_action(int limiter_action)
{
	int rv;

	switch (limiter_action) {
	case PF_LIMITER_NOMATCH:
	case PF_LIMITER_BLOCK:
		rv = 0;
		break;
	default:
		rv = 1;
	}

	return (rv);
}

int
pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
    uint32_t pool_ticket, const char *anchor, const char *anchor_call,
    uid_t uid, pid_t pid)
{
	struct pf_kruleset *ruleset;
	struct pf_krule *tail;
	struct pf_kpooladdr *pa;
	struct pfi_kkif *kif = NULL, *rcv_kif = NULL;
	int rs_num;
	int error = 0;

#define ERROUT(x) ERROUT_FUNCTION(errout, x)
#define ERROUT_UNLOCKED(x) ERROUT_FUNCTION(errout_unlocked, x)

	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE)
		ERROUT_UNLOCKED(EINVAL);

	if ((error = pf_rule_checkaf(rule)))
		ERROUT_UNLOCKED(error);
	if (pf_validate_range(rule->src.port_op, rule->src.port))
		ERROUT_UNLOCKED(EINVAL);
	if (pf_validate_range(rule->dst.port_op, rule->dst.port))
		ERROUT_UNLOCKED(EINVAL);
	if (pf_chk_limiter_action(rule->statelim.limiter_action) ||
	    pf_chk_limiter_action(rule->sourcelim.limiter_action))
		ERROUT_UNLOCKED(EINVAL);

	if (rule->ifname[0])
		kif = pf_kkif_create(M_WAITOK);
	if (rule->rcv_ifname[0])
		rcv_kif = pf_kkif_create(M_WAITOK);
	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
	}
	rule->states_cur = counter_u64_alloc(M_WAITOK);
	rule->states_tot = counter_u64_alloc(M_WAITOK);
	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
	rule->cuid = uid;
	rule->cpid = pid;
	TAILQ_INIT(&rule->rdr.list);
	TAILQ_INIT(&rule->nat.list);
	TAILQ_INIT(&rule->route.list);

	PF_CONFIG_LOCK();
	PF_RULES_WLOCK();
#ifdef PF_WANT_32_TO_64_COUNTER
	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
	MPASS(!rule->allrulelinked);
	rule->allrulelinked = true;
	V_pf_allrulecount++;
#endif
	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		ERROUT(EINVAL);
	rs_num = pf_get_ruleset_number(rule->action);
	if (rs_num >= PF_RULESET_MAX)
		ERROUT(EINVAL);
	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
		DPFPRINTF(PF_DEBUG_MISC,
		    "ticket: %d != [%d]%d", ticket, rs_num,
		    ruleset->rules[rs_num].inactive.ticket);
		ERROUT(EBUSY);
	}
	if (pool_ticket != V_ticket_pabuf) {
		DPFPRINTF(PF_DEBUG_MISC,
		    "pool_ticket: %d != %d", pool_ticket,
		    V_ticket_pabuf);
		ERROUT(EBUSY);
	}
	/*
	 * XXXMJG hack: there is no mechanism to ensure they started the
	 * transaction. Ticket checked above may happen to match by accident,
	 * even if nobody called DIOCXBEGIN, let alone this process.
	 * Partially work around it by checking if the RB tree got allocated,
	 * see pf_begin_rules.
	 */
	if (ruleset->rules[rs_num].inactive.tree == NULL) {
		ERROUT(EINVAL);
	}

	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
	    pf_krulequeue);
	if (tail)
		rule->nr = tail->nr + 1;
	else
		rule->nr = 0;
	if (rule->ifname[0]) {
		rule->kif = pfi_kkif_attach(kif, rule->ifname);
		kif = NULL;
		pfi_kkif_ref(rule->kif);
	} else
		rule->kif = NULL;

	if (rule->rcv_ifname[0]) {
		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
		rcv_kif = NULL;
		pfi_kkif_ref(rule->rcv_kif);
	} else
		rule->rcv_kif = NULL;

	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
		ERROUT(EBUSY);
#ifdef ALTQ
	/* set queue IDs */
	if (rule->qname[0] != 0) {
		if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
			ERROUT(EBUSY);
		else if (rule->pqname[0] != 0) {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname, true)) == 0)
				ERROUT(EBUSY);
		} else
			rule->pqid = rule->qid;
	}
#endif
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			ERROUT(EBUSY);
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			ERROUT(EBUSY);
	if (rule->rt && !rule->direction)
		ERROUT(EINVAL);
	if (!rule->log)
		rule->logif = 0;
	if (!pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
	    rule->pktrate.seconds))
		ERROUT(ENOMEM);
	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
		ERROUT(ENOMEM);
	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
		ERROUT(ENOMEM);
	if (pf_kanchor_setup(rule, ruleset, anchor_call))
		ERROUT(EINVAL);
	if (rule->scrub_flags & PFSTATE_SETPRIO &&
	    (rule->set_prio[0] > PF_PRIO_MAX ||
	    rule->set_prio[1] > PF_PRIO_MAX))
		ERROUT(EINVAL);
	for (int i = 0; i < 3; i++) {
		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
			if (pa->addr.type == PF_ADDR_TABLE) {
				pa->addr.p.tbl = pfr_attach_table(ruleset,
				    pa->addr.v.tblname);
				if (pa->addr.p.tbl == NULL)
					ERROUT(ENOMEM);
			}
	}

	rule->overload_tbl = NULL;
	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			ERROUT(EINVAL);
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);

	/*
	 * Old versions of pfctl provide route redirection pools in the
	 * single common redirection pool rdr. New versions use rdr only
	 * for rdr-to rules.
	 */
	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
	} else {
		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
	}

	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL &&
	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
		ERROUT(EINVAL);
	}

	if (rule->rt > PF_NOPFROUTE &&
	    (TAILQ_FIRST(&rule->route.list) == NULL)) {
		ERROUT(EINVAL);
	}

	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
		ERROUT(EINVAL);
	}

	MPASS(error == 0);

	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
	rule->route.cur = TAILQ_FIRST(&rule->route.list);
	rule->route.ipv6_nexthop_af = AF_INET6;
	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
	    rule, entries);
	ruleset->rules[rs_num].inactive.rcount++;

	PF_RULES_WUNLOCK();
	pf_hash_rule(rule);
	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree,
	    rule) != NULL) {
		PF_RULES_WLOCK();
		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule,
		    entries);
		ruleset->rules[rs_num].inactive.rcount--;
		pf_free_rule(rule);
		rule = NULL;
		ERROUT(EEXIST);
	}
	PF_CONFIG_UNLOCK();

	return (0);

#undef ERROUT
#undef ERROUT_UNLOCKED
errout:
	PF_RULES_WUNLOCK();
	PF_CONFIG_UNLOCK();
errout_unlocked:
	pf_kkif_free(rcv_kif);
	pf_kkif_free(kif);
	pf_krule_free(rule);
	return (error);
}

static bool
pf_label_match(const struct pf_krule *rule, const char *label)
{
	int i = 0;

	while (*rule->label[i]) {
		if (strcmp(rule->label[i], label) == 0)
			return (true);
		i++;
	}

	return (false);
}

static unsigned int
pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
{
	struct pf_kstate *s;
	int more = 0;

	s = pf_find_state_all(key, dir, &more);
	if (s == NULL)
		return (0);

	if (more) {
		PF_STATE_UNLOCK(s);
		return (0);
	}

	pf_remove_state(s);
	return (1);
}

static int
pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
{
	struct pf_kstate *s;
	struct pf_state_key *sk;
	struct pf_addr *srcaddr, *dstaddr;
	struct pf_state_key_cmp match_key;
	int idx, killed = 0;
	unsigned int dir;
	u_int16_t srcport, dstport;
	struct pfi_kkif *kif;

relock_DIOCKILLSTATES:
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry) {
		/* For floating states look at the original kif. */
		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
		if (s->direction == PF_OUT) {
			srcaddr = &sk->addr[1];
			dstaddr = &sk->addr[0];
			srcport = sk->port[1];
			dstport = sk->port[0];
		} else {
			srcaddr = &sk->addr[0];
			dstaddr = &sk->addr[1];
			srcport = sk->port[0];
			dstport = sk->port[1];
		}

		if (psk->psk_af && sk->af != psk->psk_af)
			continue;

		if (psk->psk_proto && psk->psk_proto != sk->proto)
			continue;

		if (!pf_match_addr(psk->psk_src.neg,
		    &psk->psk_src.addr.v.a.addr,
		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
			continue;

		if (!pf_match_addr(psk->psk_dst.neg,
		    &psk->psk_dst.addr.v.a.addr,
		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
			continue;

		if (!pf_match_addr(psk->psk_rt_addr.neg,
		    &psk->psk_rt_addr.addr.v.a.addr,
		    &psk->psk_rt_addr.addr.v.a.mask,
		    &s->act.rt_addr, sk->af))
			continue;

		if (psk->psk_src.port_op != 0 &&
		    !pf_match_port(psk->psk_src.port_op,
		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
			continue;

		if (psk->psk_dst.port_op != 0 &&
		    !pf_match_port(psk->psk_dst.port_op,
		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
			continue;

		if (psk->psk_label[0] &&
		    !pf_label_match(s->rule, psk->psk_label))
			continue;

		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
		    kif->pfik_name))
			continue;

		if (psk->psk_kill_match) {
			/* Create the key to find matching states, with lock
			 * held. */

			bzero(&match_key, sizeof(match_key));

			if (s->direction == PF_OUT) {
				dir = PF_IN;
				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
			} else {
				dir = PF_OUT;
				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
			}

			match_key.af = s->key[idx]->af;
			match_key.proto = s->key[idx]->proto;
			pf_addrcpy(&match_key.addr[0],
			    &s->key[idx]->addr[1], match_key.af);
			match_key.port[0] = s->key[idx]->port[1];
			pf_addrcpy(&match_key.addr[1],
			    &s->key[idx]->addr[0], match_key.af);
			match_key.port[1] = s->key[idx]->port[0];
		}

		pf_remove_state(s);
		killed++;

		if (psk->psk_kill_match)
			killed += pf_kill_matching_state(&match_key, dir);

		goto relock_DIOCKILLSTATES;
	}
	PF_HASHROW_UNLOCK(ih);

	return (killed);
}
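
/*
 * On the relock label above: removing a state apparently invalidates
 * the row iteration (pf_remove_state() drops the row lock), so the
 * LIST_FOREACH() cannot be resumed after a kill; restarting the scan
 * from the row head accepts quadratic worst-case work in exchange for
 * a simple locking protocol.
 */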

int
pf_start(void)
{
	int error = 0;

	sx_xlock(&V_pf_ioctl_lock);
	if (V_pf_status.running)
		error = EEXIST;
	else {
		hook_pf();
		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
			hook_pf_eth();
		V_pf_status.running = 1;
		V_pf_status.since = time_uptime;
		new_unrhdr64(&V_pf_stateid, time_second);

		DPFPRINTF(PF_DEBUG_MISC, "pf: started");
	}
	sx_xunlock(&V_pf_ioctl_lock);

	return (error);
}

int
pf_stop(void)
{
	int error = 0;

	sx_xlock(&V_pf_ioctl_lock);
	if (!V_pf_status.running)
		error = ENOENT;
	else {
		V_pf_status.running = 0;
		dehook_pf();
		dehook_pf_eth();
		V_pf_status.since = time_uptime;
		DPFPRINTF(PF_DEBUG_MISC, "pf: stopped");
	}
	sx_xunlock(&V_pf_ioctl_lock);

	return (error);
}

void
pf_ioctl_clear_status(void)
{
	PF_RULES_WLOCK();
	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_zero(V_pf_status.counters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_zero(V_pf_status.scounters[i]);
	for (int i = 0; i < NCNT_MAX; i++)
		counter_u64_zero(V_pf_status.ncounters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_zero(V_pf_status.lcounters[i]);
	V_pf_status.since = time_uptime;
	if (*V_pf_status.ifname)
		pfi_update_status(V_pf_status.ifname, NULL);
	PF_RULES_WUNLOCK();
}

int
pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
{
	uint32_t old;

	if (timeout < 0 || timeout >= PFTM_MAX ||
	    seconds < 0)
		return (EINVAL);

	PF_RULES_WLOCK();
	old = V_pf_default_rule.timeout[timeout];
	if (timeout == PFTM_INTERVAL && seconds == 0)
		seconds = 1;
	V_pf_default_rule.timeout[timeout] = seconds;
	if (timeout == PFTM_INTERVAL && seconds < old)
		wakeup(pf_purge_thread);

	if (prev_seconds != NULL)
		*prev_seconds = old;

	PF_RULES_WUNLOCK();

	return (0);
}
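
/*
 * The wakeup above makes a shortened purge interval take effect
 * immediately: e.g. lowering PFTM_INTERVAL from 10 to 2 seconds wakes
 * pf_purge_thread rather than letting it sleep out the old, longer
 * interval first.
 */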

int
pf_ioctl_get_timeout(int timeout, int *seconds)
{
	PF_RULES_RLOCK_TRACKER;

	if (timeout < 0 || timeout >= PFTM_MAX)
		return (EINVAL);

	PF_RULES_RLOCK();
	*seconds = V_pf_default_rule.timeout[timeout];
	PF_RULES_RUNLOCK();

	return (0);
}

int
pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
{

	PF_RULES_WLOCK();
	if (index < 0 || index >= PF_LIMIT_MAX ||
	    V_pf_limits[index].zone == NULL) {
		PF_RULES_WUNLOCK();
		return (EINVAL);
	}
	uma_zone_set_max(V_pf_limits[index].zone,
	    limit == 0 ? INT_MAX : limit);
	if (old_limit != NULL)
		*old_limit = V_pf_limits[index].limit;
	V_pf_limits[index].limit = limit;
	PF_RULES_WUNLOCK();

	return (0);
}

int
pf_ioctl_get_limit(int index, unsigned int *limit)
{
	PF_RULES_RLOCK_TRACKER;

	if (index < 0 || index >= PF_LIMIT_MAX)
		return (EINVAL);

	PF_RULES_RLOCK();
	*limit = V_pf_limits[index].limit;
	PF_RULES_RUNLOCK();

	return (0);
}

int
pf_ioctl_begin_addrs(uint32_t *ticket)
{
	PF_RULES_WLOCK();
	pf_empty_kpool(&V_pf_pabuf[0]);
	pf_empty_kpool(&V_pf_pabuf[1]);
	pf_empty_kpool(&V_pf_pabuf[2]);
	*ticket = ++V_ticket_pabuf;
	PF_RULES_WUNLOCK();

	return (0);
}
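
/*
 * Assumed usage of the ticket above, matching the EBUSY check in
 * pf_ioctl_add_addr() below: the caller begins an address transaction
 * and then tags every queued pool address with the returned ticket,
 * so addresses staged before a concurrent restart are rejected:
 *
 *	uint32_t ticket;
 *
 *	pf_ioctl_begin_addrs(&ticket);
 *	pp->ticket = ticket;
 *	error = pf_ioctl_add_addr(pp);
 */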

int
pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
{
	struct pf_kpooladdr *pa = NULL;
	struct pfi_kkif *kif = NULL;
	int error;

	if (pp->which != PF_RDR && pp->which != PF_NAT &&
	    pp->which != PF_RT)
		return (EINVAL);

	switch (pp->af) {
#ifdef INET
	case AF_INET:
		/* FALLTHROUGH */
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		/* FALLTHROUGH */
#endif /* INET6 */
	case AF_UNSPEC:
		break;
	default:
		return (EAFNOSUPPORT);
	}

	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
	    pp->addr.addr.type != PF_ADDR_TABLE)
		return (EINVAL);

	if (pp->addr.addr.p.dyn != NULL)
		return (EINVAL);

	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
	if (error != 0)
		goto out;
	if (pa->ifname[0])
		kif = pf_kkif_create(M_WAITOK);
	PF_RULES_WLOCK();
	if (pp->ticket != V_ticket_pabuf) {
		PF_RULES_WUNLOCK();
		if (pa->ifname[0])
			pf_kkif_free(kif);
		error = EBUSY;
		goto out;
	}
	if (pa->ifname[0]) {
		pa->kif = pfi_kkif_attach(kif, pa->ifname);
		kif = NULL;
		pfi_kkif_ref(pa->kif);
	} else
		pa->kif = NULL;
	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
		if (pa->ifname[0])
			pfi_kkif_unref(pa->kif);
		PF_RULES_WUNLOCK();
		goto out;
	}
	pa->af = pp->af;
	switch (pp->which) {
	case PF_NAT:
		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
		break;
	case PF_RDR:
		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
		break;
	case PF_RT:
		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
		break;
	}
	PF_RULES_WUNLOCK();

	return (0);

out:
	free(pa, M_PFRULE);
	return (error);
}

int
pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
{
	struct pf_kpool *pool;
	struct pf_kpooladdr *pa;

	PF_RULES_RLOCK_TRACKER;

	if (pp->which != PF_RDR && pp->which != PF_NAT &&
	    pp->which != PF_RT)
		return (EINVAL);

	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
	pp->nr = 0;

	PF_RULES_RLOCK();
	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
	    pp->r_num, 0, 1, 0, pp->which);
	if (pool == NULL) {
		PF_RULES_RUNLOCK();
		return (EBUSY);
	}
	TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
	PF_RULES_RUNLOCK();

	return (0);
}

int
pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
{
	struct pf_kpool *pool;
	struct pf_kpooladdr *pa;
	u_int32_t nr = 0;

	if (pp->which != PF_RDR && pp->which != PF_NAT &&
	    pp->which != PF_RT)
		return (EINVAL);

	PF_RULES_RLOCK_TRACKER;

	pp->anchor[sizeof(pp->anchor) - 1] = '\0';

	PF_RULES_RLOCK();
	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
	    pp->r_num, 0, 1, 1, pp->which);
	if (pool == NULL) {
		PF_RULES_RUNLOCK();
		return (EBUSY);
	}
	pa = TAILQ_FIRST(&pool->list);
	while ((pa != NULL) && (nr < pp->nr)) {
		pa = TAILQ_NEXT(pa, entries);
		nr++;
	}
	if (pa == NULL) {
		PF_RULES_RUNLOCK();
		return (EBUSY);
	}
	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
	pp->af = pa->af;
	pf_addr_copyout(&pp->addr.addr);
	PF_RULES_RUNLOCK();

	return (0);
}

int
pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
{
	struct pf_kruleset *ruleset;
	struct pf_kanchor *anchor;

	PF_RULES_RLOCK_TRACKER;

	pr->path[sizeof(pr->path) - 1] = '\0';

	PF_RULES_RLOCK();
	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
		PF_RULES_RUNLOCK();
		return (ENOENT);
	}
	pr->nr = 0;
	if (ruleset == &pf_main_ruleset) {
		/* XXX kludge for pf_main_ruleset */
		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
			if (anchor->parent == NULL)
				pr->nr++;
	} else {
		RB_FOREACH(anchor, pf_kanchor_node,
		    &ruleset->anchor->children)
			pr->nr++;
	}
	PF_RULES_RUNLOCK();

	return (0);
}

int
pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
{
	struct pf_kruleset *ruleset;
	struct pf_kanchor *anchor;
	u_int32_t nr = 0;
	int error = 0;

	PF_RULES_RLOCK_TRACKER;

	PF_RULES_RLOCK();
	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
		PF_RULES_RUNLOCK();
		return (ENOENT);
	}

	pr->name[0] = '\0';
	if (ruleset == &pf_main_ruleset) {
		/* XXX kludge for pf_main_ruleset */
		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
	} else {
		RB_FOREACH(anchor, pf_kanchor_node,
		    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
	}
	if (!pr->name[0])
		error = EBUSY;
	PF_RULES_RUNLOCK();

	return (error);
}

int
pf_ioctl_natlook(struct pfioc_natlook *pnl)
{
	struct pf_state_key *sk;
	struct pf_kstate *state;
	struct pf_state_key_cmp key;
	int m = 0, direction = pnl->direction;
	int sidx, didx;

	/* NATLOOK src and dst are reversed, so reverse sidx/didx */
	sidx = (direction == PF_IN) ? 1 : 0;
	didx = (direction == PF_IN) ? 0 : 1;

	if (!pnl->proto ||
	    PF_AZERO(&pnl->saddr, pnl->af) ||
	    PF_AZERO(&pnl->daddr, pnl->af) ||
	    ((pnl->proto == IPPROTO_TCP ||
	    pnl->proto == IPPROTO_UDP) &&
	    (!pnl->dport || !pnl->sport)))
		return (EINVAL);

	switch (pnl->direction) {
	case PF_IN:
	case PF_OUT:
	case PF_INOUT:
		break;
	default:
		return (EINVAL);
	}

	switch (pnl->af) {
#ifdef INET
	case AF_INET:
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		break;
#endif /* INET6 */
	default:
		return (EAFNOSUPPORT);
	}

	bzero(&key, sizeof(key));
	key.af = pnl->af;
	key.proto = pnl->proto;
	pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
	key.port[sidx] = pnl->sport;
	pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
	key.port[didx] = pnl->dport;

	state = pf_find_state_all(&key, direction, &m);
	if (state == NULL)
		return (ENOENT);

	if (m > 1) {
		PF_STATE_UNLOCK(state);
		return (E2BIG);	/* more than one state */
	}

	sk = state->key[sidx];
	pf_addrcpy(&pnl->rsaddr,
	    &sk->addr[sidx], sk->af);
	pnl->rsport = sk->port[sidx];
	pf_addrcpy(&pnl->rdaddr,
	    &sk->addr[didx], sk->af);
	pnl->rdport = sk->port[didx];
	PF_STATE_UNLOCK(state);

	return (0);
}
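
/*
 * Example of the index reversal above: for an inbound lookup
 * (direction = PF_IN), sidx selects state->key[1], the stack-side
 * key, so rsaddr/rsport and rdaddr/rdport report the translated
 * endpoints a redirected connection actually maps to.
 */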

static int
pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error = 0;
	PF_RULES_RLOCK_TRACKER;

#define ERROUT_IOCTL(target, x)						\
	do {								\
		error = (x);						\
		SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \
		goto target;						\
	} while (0)


	/* XXX keep in sync with switch() below */
	if (securelevel_gt(td->td_ucred, 2))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULENV:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATENV:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUSNV:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
#ifdef COMPAT_FREEBSD14
		case DIOCGETSTATES:
		case DIOCGETSTATESV2:
#endif
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETALTQV0:
		case DIOCGETALTQV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCGETSYNCOOKIES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV0:
		case DIOCGIFSPEEDV1:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
		case DIOCGETETHRULES:
		case DIOCGETETHRULE:
		case DIOCGETETHRULESETS:
		case DIOCGETETHRULESET:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATENV:
		case DIOCGETSTATUSNV:
#ifdef COMPAT_FREEBSD14
		case DIOCGETSTATES:
		case DIOCGETSTATESV2:
#endif
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQSV0:
		case DIOCGETALTQSV1:
		case DIOCGETALTQV0:
		case DIOCGETALTQV1:
		case DIOCGETQSTATSV0:
		case DIOCGETQSTATSV1:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCGETSYNCOOKIES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEEDV1:
		case DIOCGIFSPEEDV0:
		case DIOCGETRULENV:
		case DIOCGETETHRULES:
		case DIOCGETETHRULE:
		case DIOCGETETHRULESETS:
		case DIOCGETETHRULESET:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		default:
			return (EACCES);
		}

	CURVNET_SET(TD_TO_VNET(td));

	switch (cmd) {
#ifdef COMPAT_FREEBSD14
	case DIOCSTART:
		error = pf_start();
		break;

	case DIOCSTOP:
		error = pf_stop();
		break;
#endif

	case DIOCGETETHRULES: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl;
		void *packed;
		struct pf_keth_rule *tail;
		struct pf_keth_ruleset *rs;
		u_int32_t ticket, nr;
		const char *anchor = "";

		nvl = NULL;
		packed = NULL;

#define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULES_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		/* Copy the request in */
		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, packed, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(packed, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (!nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);

		anchor = nvlist_get_string(nvl, "anchor");

		rs = pf_find_keth_ruleset(anchor);

		nvlist_destroy(nvl);
		nvl = NULL;
		free(packed, M_NVLIST);
		packed = NULL;

		if (rs == NULL)
			ERROUT(ENOENT);

		/* Reply */
		nvl = nvlist_create(0);
		if (nvl == NULL)
			ERROUT(ENOMEM);

		PF_RULES_RLOCK();

		ticket = rs->active.ticket;
		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
		if (tail)
			nr = tail->nr + 1;
		else
			nr = 0;

		PF_RULES_RUNLOCK();

		nvlist_add_number(nvl, "ticket", ticket);
		nvlist_add_number(nvl, "nr", nr);

		packed = nvlist_pack(nvl, &nv->len);
		if (packed == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(packed, nv->data, nv->len);

#undef ERROUT
DIOCGETETHRULES_error:
		free(packed, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCGETETHRULE: {
		struct epoch_tracker et;
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL;
		struct pf_keth_ruleset *rs;
		u_int32_t ticket, nr;
		bool clear = false;
		const char *anchor;

#define ERROUT(x) ERROUT_IOCTL(DIOCGETETHRULE_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);
		if (!nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);
		ticket = nvlist_get_number(nvl, "ticket");
		if (!nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);
		anchor = nvlist_get_string(nvl, "anchor");

		if (nvlist_exists_bool(nvl, "clear"))
			clear = nvlist_get_bool(nvl, "clear");

		if (clear && !(flags & FWRITE))
			ERROUT(EACCES);

		if (!nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);
		nr = nvlist_get_number(nvl, "nr");

		PF_RULES_RLOCK();
		rs = pf_find_keth_ruleset(anchor);
		if (rs == NULL) {
			PF_RULES_RUNLOCK();
			ERROUT(ENOENT);
		}
		if (ticket != rs->active.ticket) {
			PF_RULES_RUNLOCK();
			ERROUT(EBUSY);
		}

		nvlist_destroy(nvl);
		nvl = NULL;
		free(nvlpacked, M_NVLIST);
		nvlpacked = NULL;

		rule = TAILQ_FIRST(rs->active.rules);
		while ((rule != NULL) && (rule->nr != nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_RUNLOCK();
			ERROUT(ENOENT);
		}
		/* Make sure rule can't go away. */
		NET_EPOCH_ENTER(et);
		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);
		if (nvl == NULL)
			ERROUT(ENOMEM);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);
		if (error == 0 && clear) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}

#undef ERROUT
DIOCGETETHRULE_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}
4081
4082
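	/*
	 * New Ethernet rules are staged on the ruleset's inactive list
	 * under the inactive ticket handed out by DIOCXBEGIN; they only
	 * take effect once the transaction is committed, which swaps the
	 * inactive and active rule lists.
	 */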
	case DIOCADDETHRULE: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL, *tail = NULL;
		struct pf_keth_ruleset *ruleset = NULL;
		struct pfi_kkif *kif = NULL, *bridge_to_kif = NULL;
		const char *anchor = "", *anchor_call = "";

#define	ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		ruleset = pf_find_keth_ruleset(anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    "ticket: %d != %d",
			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
			    ruleset->inactive.ticket);
			ERROUT(EBUSY);
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		rule->timestamp = NULL;

		error = pf_nveth_rule_to_keth_rule(nvl, rule);
		if (error != 0)
			ERROUT(error);

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		if (rule->bridge_to_name[0])
			bridge_to_kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}
		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
		    M_WAITOK | M_ZERO);

		PF_RULES_WLOCK();

		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;
		if (rule->bridge_to_name[0]) {
			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
			    rule->bridge_to_name);
			pfi_kkif_ref(rule->bridge_to);
		} else
			rule->bridge_to = NULL;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
				error = EBUSY;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag = pf_tagname2tag(
			    rule->match_tagname)) == 0)
				error = EBUSY;

		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);

		if (error) {
			pf_free_eth_rule(rule);
			PF_RULES_WUNLOCK();
			ERROUT(error);
		}

		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
			pf_free_eth_rule(rule);
			PF_RULES_WUNLOCK();
			ERROUT(EINVAL);
		}

		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);

		PF_RULES_WUNLOCK();

#undef ERROUT
DIOCADDETHRULE_error:
		nvlist_destroy(nvl);
		free(nvlpacked, M_NVLIST);
		break;
	}

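	/*
	 * Anchor (sub-ruleset) enumeration is a two-step protocol:
	 * DIOCGETETHRULESETS reports how many child anchors exist under a
	 * given path, and DIOCGETETHRULESET then fetches the name and path
	 * of the n-th child.  Both walk the anchor tree inside the net
	 * epoch instead of taking the rules lock.
	 */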
	case DIOCGETETHRULESETS: {
		struct epoch_tracker et;
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_ruleset *ruleset;
		struct pf_keth_anchor *anchor;
		int nr = 0;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);
		if (! nvlist_exists_string(nvl, "path"))
			ERROUT(EBADMSG);

		NET_EPOCH_ENTER(et);

		if ((ruleset = pf_find_keth_ruleset(
		    nvlist_get_string(nvl, "path"))) == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOENT);
		}

		if (ruleset->anchor == NULL) {
			RB_FOREACH(anchor, pf_keth_anchor_global,
			    &V_pf_keth_anchors)
				if (anchor->parent == NULL)
					nr++;
		} else {
			RB_FOREACH(anchor, pf_keth_anchor_node,
			    &ruleset->anchor->children)
				nr++;
		}

		NET_EPOCH_EXIT(et);

		nvlist_destroy(nvl);
		nvl = NULL;
		free(nvlpacked, M_NVLIST);
		nvlpacked = NULL;

		nvl = nvlist_create(0);
		if (nvl == NULL)
			ERROUT(ENOMEM);

		nvlist_add_number(nvl, "nr", nr);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
DIOCGETETHRULESETS_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCGETETHRULESET: {
		struct epoch_tracker et;
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_ruleset *ruleset;
		struct pf_keth_anchor *anchor;
		int nr = 0, req_nr = 0;
		bool found = false;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);
		if (! nvlist_exists_string(nvl, "path"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);

		req_nr = nvlist_get_number(nvl, "nr");

		NET_EPOCH_ENTER(et);

		if ((ruleset = pf_find_keth_ruleset(
		    nvlist_get_string(nvl, "path"))) == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOENT);
		}

		nvlist_destroy(nvl);
		nvl = NULL;
		free(nvlpacked, M_NVLIST);
		nvlpacked = NULL;

		nvl = nvlist_create(0);
		if (nvl == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}

		if (ruleset->anchor == NULL) {
			RB_FOREACH(anchor, pf_keth_anchor_global,
			    &V_pf_keth_anchors) {
				if (anchor->parent == NULL && nr++ == req_nr) {
					found = true;
					break;
				}
			}
		} else {
			RB_FOREACH(anchor, pf_keth_anchor_node,
			    &ruleset->anchor->children) {
				if (nr++ == req_nr) {
					found = true;
					break;
				}
			}
		}

		NET_EPOCH_EXIT(et);
		if (found) {
			nvlist_add_number(nvl, "nr", nr);
			nvlist_add_string(nvl, "name", anchor->name);
			if (ruleset->anchor)
				nvlist_add_string(nvl, "path",
				    ruleset->anchor->path);
			else
				nvlist_add_string(nvl, "path", "");
		} else {
			ERROUT(EBUSY);
		}

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
DIOCGETETHRULESET_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

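	/*
	 * nvlist flavour of DIOCADDRULE.  Note the ownership convention:
	 * once pf_ioctl_addrule() has been called it is responsible for
	 * freeing the rule on failure, so the success path breaks out
	 * before the error label, and only ERROUT() paths taken before the
	 * hand-off reach pf_krule_free().
	 */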
	case DIOCADDRULENV: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_krule *rule = NULL;
		const char *anchor = "", *anchor_call = "";
		uint32_t ticket = 0, pool_ticket = 0;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EINVAL);
		ticket = nvlist_get_number(nvl, "ticket");

		if (! nvlist_exists_number(nvl, "pool_ticket"))
			ERROUT(EINVAL);
		pool_ticket = nvlist_get_number(nvl, "pool_ticket");

		if (! nvlist_exists_nvlist(nvl, "rule"))
			ERROUT(EINVAL);

		rule = pf_krule_alloc();
		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
		    rule);
		if (error)
			ERROUT(error);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		if ((error = nvlist_error(nvl)))
			ERROUT(error);

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
		    anchor_call, td->td_ucred->cr_ruid,
		    td->td_proc ? td->td_proc->p_pid : 0);

		nvlist_destroy(nvl);
		free(nvlpacked, M_NVLIST);
		break;
#undef ERROUT
DIOCADDRULENV_error:
		pf_krule_free(rule);
		nvlist_destroy(nvl);
		free(nvlpacked, M_NVLIST);

		break;
	}
	case DIOCADDRULE: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_krule *rule;

		rule = pf_krule_alloc();
		error = pf_rule_to_krule(&pr->rule, rule);
		if (error != 0) {
			pf_krule_free(rule);
			goto fail;
		}

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';

		/* Frees rule on error */
		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
		    td->td_proc ? td->td_proc->p_pid : 0);
		break;
	}

	case DIOCGETRULES: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';

		error = pf_ioctl_getrules(pr);

		break;
	}

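	/*
	 * The lock mode below depends on the request: a plain read takes
	 * the rules read lock, but if the caller asked for the counters to
	 * be cleared ("clear_counter", which also requires FWRITE) the
	 * write lock is taken instead.  ERROUT_LOCKED() releases whichever
	 * lock is held before jumping to the cleanup label.
	 */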
	case DIOCGETRULENV: {
		PF_RULES_RLOCK_TRACKER;
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvrule = NULL;
		nvlist_t *nvl = NULL;
		struct pf_kruleset *ruleset;
		struct pf_krule *rule;
		void *nvlpacked = NULL;
		int rs_num, nr;
		bool clear_counter = false;

#define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
#define	ERROUT_LOCKED(x)	do {		\
		if (clear_counter)		\
			PF_RULES_WUNLOCK();	\
		else				\
			PF_RULES_RUNLOCK();	\
		ERROUT(x);			\
	} while (0)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		/* Copy the request in */
		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ruleset"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);

		if (nvlist_exists_bool(nvl, "clear_counter"))
			clear_counter = nvlist_get_bool(nvl, "clear_counter");

		if (clear_counter && !(flags & FWRITE))
			ERROUT(EACCES);

		nr = nvlist_get_number(nvl, "nr");

		if (clear_counter)
			PF_RULES_WLOCK();
		else
			PF_RULES_RLOCK();
		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
		if (ruleset == NULL)
			ERROUT_LOCKED(ENOENT);

		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl,
		    "ruleset"));
		if (rs_num >= PF_RULESET_MAX)
			ERROUT_LOCKED(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->rules[rs_num].active.ticket)
			ERROUT_LOCKED(EBUSY);

		if ((error = nvlist_error(nvl)))
			ERROUT_LOCKED(error);

		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL)
			ERROUT_LOCKED(EBUSY);

		nvrule = pf_krule_to_nvrule(rule);

		nvlist_destroy(nvl);
		nvl = nvlist_create(0);
		if (nvl == NULL)
			ERROUT_LOCKED(ENOMEM);
		nvlist_add_number(nvl, "nr", nr);
		nvlist_add_nvlist(nvl, "rule", nvrule);
		nvlist_destroy(nvrule);
		nvrule = NULL;
		if (pf_kanchor_nvcopyout(ruleset, rule, nvl))
			ERROUT_LOCKED(EBUSY);

		free(nvlpacked, M_NVLIST);
		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT_LOCKED(ENOMEM);

		if (nv->size == 0)
			ERROUT_LOCKED(0);
		else if (nv->size < nv->len)
			ERROUT_LOCKED(ENOSPC);

		if (clear_counter) {
			pf_krule_clear_counters(rule);
			PF_RULES_WUNLOCK();
		} else {
			PF_RULES_RUNLOCK();
		}

		error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT_LOCKED
#undef ERROUT
DIOCGETRULENV_error:
		free(nvlpacked, M_NVLIST);
		nvlist_destroy(nvrule);
		nvlist_destroy(nvl);

		break;
	}

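	/*
	 * DIOCCHANGERULE edits the active ruleset in place: depending on
	 * pcr->action it inserts a new rule at the head or tail, relative
	 * to an existing rule, or removes one, all under both the pf
	 * config lock and the rules write lock.  Rule numbers are then
	 * reassigned, the skip steps recalculated, and the active ticket
	 * bumped so concurrent enumerations notice the change.
	 */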
	case DIOCCHANGERULE: {
		struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
		struct pf_kruleset *ruleset;
		struct pf_krule *oldrule = NULL, *newrule = NULL;
		struct pfi_kkif *kif = NULL;
		struct pf_kpooladdr *pa;
		u_int32_t nr = 0;
		int rs_num;

		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			goto fail;
		}
		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			goto fail;
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pf_krule_alloc();
			error = pf_rule_to_krule(&pcr->rule, newrule);
			if (error != 0) {
				pf_krule_free(newrule);
				goto fail;
			}

			if ((error = pf_rule_checkaf(newrule))) {
				pf_krule_free(newrule);
				goto fail;
			}
			if (newrule->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_init(&newrule->packets[i],
				    M_WAITOK);
				pf_counter_u64_init(&newrule->bytes[i],
				    M_WAITOK);
			}
			newrule->states_cur = counter_u64_alloc(M_WAITOK);
			newrule->states_tot = counter_u64_alloc(M_WAITOK);
			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
			    sn_type++)
				newrule->src_nodes[sn_type] =
				    counter_u64_alloc(M_WAITOK);
			newrule->cuid = td->td_ucred->cr_ruid;
			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
			TAILQ_INIT(&newrule->nat.list);
			TAILQ_INIT(&newrule->rdr.list);
			TAILQ_INIT(&newrule->route.list);
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)

		PF_CONFIG_LOCK();
		PF_RULES_WLOCK();
#ifdef PF_WANT_32_TO_64_COUNTER
		if (newrule != NULL) {
			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule,
			    allrulelist);
			newrule->allrulelinked = true;
			V_pf_allrulecount++;
		}
#endif

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != V_ticket_pabuf)
			ERROUT(EBUSY);

		ruleset = pf_find_kruleset(pcr->anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX)
			ERROUT(EINVAL);

		/*
		 * XXXMJG: there is no guarantee that the ruleset was
		 * created by the usual route of calling DIOCXBEGIN.
		 * As a result it is possible the rule tree will not
		 * be allocated yet. Hack around it by doing it here.
		 * Note it is fine to let the tree persist in case of
		 * error as it will be freed down the road on future
		 * updates (if need be).
		 */
		if (ruleset->rules[rs_num].active.tree == NULL) {
			ruleset->rules[rs_num].active.tree =
			    pf_rule_tree_alloc(M_NOWAIT);
			if (ruleset->rules[rs_num].active.tree == NULL) {
				ERROUT(ENOMEM);
			}
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			ERROUT(0);
		} else if (pcr->ticket !=
		    ruleset->rules[rs_num].active.ticket)
			ERROUT(EINVAL);

		if (pcr->action != PF_CHANGE_REMOVE) {
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kkif_attach(kif,
				    newrule->ifname);
				kif = NULL;
				pfi_kkif_ref(newrule->kif);
			} else
				newrule->kif = NULL;

			if (newrule->rtableid > 0 &&
			    newrule->rtableid >= rt_numfibs)
				error = EBUSY;

#ifdef ALTQ
			/* set queue IDs */
			if (newrule->qname[0] != 0) {
				if ((newrule->qid =
				    pf_qname2qid(newrule->qname, true)) == 0)
					error = EBUSY;
				else if (newrule->pqname[0] != 0) {
					if ((newrule->pqid =
					    pf_qname2qid(newrule->pqname,
					    true)) == 0)
						error = EBUSY;
				} else
					newrule->pqid = newrule->qid;
			}
#endif /* ALTQ */
			if (newrule->tagname[0])
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0)
					error = EBUSY;
			if (newrule->match_tagname[0])
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0)
					error = EBUSY;
			if (newrule->rt && !newrule->direction)
				error = EINVAL;
			if (!newrule->log)
				newrule->logif = 0;
			if (pf_addr_setup(ruleset, &newrule->src.addr,
			    newrule->af))
				error = ENOMEM;
			if (pf_addr_setup(ruleset, &newrule->dst.addr,
			    newrule->af))
				error = ENOMEM;
			if (pf_kanchor_setup(newrule, ruleset,
			    pcr->anchor_call))
				error = EINVAL;
			for (int i = 0; i < 3; i++) {
				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
					if (pa->addr.type == PF_ADDR_TABLE) {
						pa->addr.p.tbl =
						    pfr_attach_table(ruleset,
						    pa->addr.v.tblname);
						if (pa->addr.p.tbl == NULL)
							error = ENOMEM;
					}
			}

			newrule->overload_tbl = NULL;
			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL)
					error = EINVAL;
				else
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
			}

			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
			if ((newrule->action == PF_NAT ||
			    newrule->action == PF_RDR ||
			    newrule->action == PF_BINAT ||
			    newrule->rt > PF_NOPFROUTE) &&
			    !newrule->anchor &&
			    TAILQ_FIRST(&newrule->rdr.list) == NULL)
				error = EINVAL;

			if (error) {
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				PF_CONFIG_UNLOCK();
				goto fail;
			}

			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
		}
		pf_empty_kpool(&V_pf_pabuf[0]);
		pf_empty_kpool(&V_pf_pabuf[1]);
		pf_empty_kpool(&V_pf_pabuf[2]);

		if (pcr->action == PF_CHANGE_ADD_HEAD)
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		else if (pcr->action == PF_CHANGE_ADD_TAIL)
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
		else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
				oldrule = TAILQ_NEXT(oldrule, entries);
			if (oldrule == NULL) {
				if (newrule != NULL)
					pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				PF_CONFIG_UNLOCK();
				error = EINVAL;
				goto fail;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
			    oldrule);
			RB_REMOVE(pf_krule_global,
			    ruleset->rules[rs_num].active.tree, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			pf_hash_rule(newrule);
			if (RB_INSERT(pf_krule_global,
			    ruleset->rules[rs_num].active.tree, newrule) !=
			    NULL) {
				pf_free_rule(newrule);
				PF_RULES_WUNLOCK();
				PF_CONFIG_UNLOCK();
				error = EEXIST;
				goto fail;
			}

			if (oldrule == NULL)
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			else
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
			oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_kruleset(ruleset);

		PF_RULES_WUNLOCK();
		PF_CONFIG_UNLOCK();
		break;

#undef ERROUT
DIOCCHANGERULE_error:
		PF_RULES_WUNLOCK();
		PF_CONFIG_UNLOCK();
		pf_krule_free(newrule);
		pf_kkif_free(kif);
		break;
	}

	case DIOCCLRSTATESNV: {
		error = pf_clearstates_nv((struct pfioc_nv *)addr);
		break;
	}

	case DIOCKILLSTATESNV: {
		error = pf_killstates_nv((struct pfioc_nv *)addr);
		break;
	}

	case DIOCADDSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pfsync_state_1301 *sp = &ps->state;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		if (V_pfsync_state_import_ptr != NULL) {
			PF_RULES_RLOCK();
			error = V_pfsync_state_import_ptr(
			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
			    PFSYNC_MSG_VERSION_1301);
			PF_RULES_RUNLOCK();
		} else
			error = EOPNOTSUPP;
		break;
	}

	case DIOCGETSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pf_kstate *s;

		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
		if (s == NULL) {
			error = ENOENT;
			goto fail;
		}

		pfsync_state_export_1301(&ps->state, s);
		PF_STATE_UNLOCK(s);
		break;
	}

	case DIOCGETSTATENV: {
		error = pf_getstate((struct pfioc_nv *)addr);
		break;
	}

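	/*
	 * The two compat state-dump handlers below use the same export
	 * strategy: walk every ID-hash row, count the linked states, and
	 * export them into a pre-sized slice buffer while the row stays
	 * locked.  If a row turns out to hold more states than the slice,
	 * the buffer is reallocated at twice the size and that row is
	 * retried, keeping the allocation out of the locked section.
	 */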
#ifdef COMPAT_FREEBSD14
	case DIOCGETSTATES: {
		struct pfioc_states *ps = (struct pfioc_states *)addr;
		struct pf_kstate *s;
		struct pfsync_state_1301 *pstore, *p;
		int i, nr;
		size_t slice_count = 16, count;
		void *out;

		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
			break;
		}

		out = ps->ps_states;
		pstore = mallocarray(slice_count,
		    sizeof(struct pfsync_state_1301), M_PF, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= V_pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATES_retry:
			p = pstore;

			if (LIST_EMPTY(&ih->states))
				continue;

			PF_HASHROW_LOCK(ih);
			count = 0;
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;
				count++;
			}

			if (count > slice_count) {
				PF_HASHROW_UNLOCK(ih);
				free(pstore, M_PF);
				slice_count = count * 2;
				pstore = mallocarray(slice_count,
				    sizeof(struct pfsync_state_1301), M_PF,
				    M_WAITOK | M_ZERO);
				goto DIOCGETSTATES_retry;
			}

			if ((nr + count) * sizeof(*p) > ps->ps_len) {
				PF_HASHROW_UNLOCK(ih);
				goto DIOCGETSTATES_full;
			}

			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;

				pfsync_state_export_1301(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
			error = copyout(pstore, out,
			    sizeof(struct pfsync_state_1301) * count);
			if (error) {
				free(pstore, M_PF);
				goto fail;
			}
			out = ps->ps_states + nr;
		}
DIOCGETSTATES_full:
		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
		free(pstore, M_PF);

		break;
	}

	case DIOCGETSTATESV2: {
		struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr;
		struct pf_kstate *s;
		struct pf_state_export *pstore, *p;
		int i, nr;
		size_t slice_count = 16, count;
		void *out;

		if (ps->ps_req_version > PF_STATE_VERSION) {
			error = ENOTSUP;
			goto fail;
		}

		if (ps->ps_len <= 0) {
			nr = uma_zone_get_cur(V_pf_state_z);
			ps->ps_len = sizeof(struct pf_state_export) * nr;
			break;
		}

		out = ps->ps_states;
		pstore = mallocarray(slice_count,
		    sizeof(struct pf_state_export), M_PF, M_WAITOK | M_ZERO);
		nr = 0;

		for (i = 0; i <= V_pf_hashmask; i++) {
			struct pf_idhash *ih = &V_pf_idhash[i];

DIOCGETSTATESV2_retry:
			p = pstore;

			if (LIST_EMPTY(&ih->states))
				continue;

			PF_HASHROW_LOCK(ih);
			count = 0;
			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;
				count++;
			}

			if (count > slice_count) {
				PF_HASHROW_UNLOCK(ih);
				free(pstore, M_PF);
				slice_count = count * 2;
				pstore = mallocarray(slice_count,
				    sizeof(struct pf_state_export), M_PF,
				    M_WAITOK | M_ZERO);
				goto DIOCGETSTATESV2_retry;
			}

			if ((nr + count) * sizeof(*p) > ps->ps_len) {
				PF_HASHROW_UNLOCK(ih);
				goto DIOCGETSTATESV2_full;
			}

			LIST_FOREACH(s, &ih->states, entry) {
				if (s->timeout == PFTM_UNLINKED)
					continue;

				pf_state_export(p, s);
				p++;
				nr++;
			}
			PF_HASHROW_UNLOCK(ih);
			error = copyout(pstore, out,
			    sizeof(struct pf_state_export) * count);
			if (error) {
				free(pstore, M_PF);
				goto fail;
			}
			out = ps->ps_states + nr;
		}
DIOCGETSTATESV2_full:
		ps->ps_len = nr * sizeof(struct pf_state_export);
		free(pstore, M_PF);

		break;
	}
#endif
	case DIOCGETSTATUSNV: {
		error = pf_getstatus((struct pfioc_nv *)addr);
		break;
	}

	case DIOCSETSTATUSIF: {
		struct pfioc_if *pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(V_pf_status.ifname, IFNAMSIZ);
			break;
		}
		PF_RULES_WLOCK();
		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname,
		    IFNAMSIZ);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRSTATUS: {
		pf_ioctl_clear_status();
		break;
	}

	case DIOCNATLOOK: {
		struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;

		error = pf_ioctl_natlook(pnl);
		break;
	}

	case DIOCSETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;

		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
		    &pt->seconds);
		break;
	}

	case DIOCGETTIMEOUT: {
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;

		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
		break;
	}

	case DIOCGETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;

		error = pf_ioctl_get_limit(pl->index, &pl->limit);
		break;
	}

	case DIOCSETLIMIT: {
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;
		unsigned int old_limit;

		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
		pl->limit = old_limit;
		break;
	}

	case DIOCSETDEBUG: {
		u_int32_t *level = (u_int32_t *)addr;

		PF_RULES_WLOCK();
		V_pf_status.debug = *level;
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_kruleset *ruleset = &pf_main_ruleset;
		struct pf_krule *rule;

		PF_RULES_WLOCK();
		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			pf_counter_u64_zero(&rule->evaluations);
			for (int i = 0; i < 2; i++) {
				pf_counter_u64_zero(&rule->packets[i]);
				pf_counter_u64_zero(&rule->bytes[i]);
			}
		}
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGIFSPEEDV0:
	case DIOCGIFSPEEDV1: {
		struct pf_ifspeed_v1 *psp = (struct pf_ifspeed_v1 *)addr;
		struct pf_ifspeed_v1 ps;
		struct ifnet *ifp;

		if (psp->ifname[0] == '\0') {
			error = EINVAL;
			goto fail;
		}

		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
		if (error != 0)
			goto fail;
		ifp = ifunit(ps.ifname);
		if (ifp != NULL) {
			psp->baudrate32 =
			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
			if (cmd == DIOCGIFSPEEDV1)
				psp->baudrate = ifp->if_baudrate;
		} else {
			error = EINVAL;
		}
		break;
	}

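	/*
	 * ALTQ configuration mirrors the rule transaction model: queues
	 * are staged on the inactive lists under V_ticket_altqs_inactive
	 * and promoted on commit, while DIOCSTARTALTQ/DIOCSTOPALTQ toggle
	 * the schedulers on every interface on the active list that has
	 * not been removed underneath pf.
	 */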
#ifdef ALTQ
	case DIOCSTARTALTQ: {
		struct pf_altq *altq;

		PF_RULES_WLOCK();
		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 1;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, "altq: started");
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq *altq;

		PF_RULES_WLOCK();
		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			V_pf_altq_running = 0;
		PF_RULES_WUNLOCK();
		DPFPRINTF(PF_DEBUG_MISC, "altq: stopped");
		break;
	}

	case DIOCADDALTQV0:
	case DIOCADDALTQV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq, *a;
		struct ifnet *ifp;

		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
		if (error)
			goto fail;
		altq->local_flags = 0;

		PF_RULES_WLOCK();
		if (pa->ticket != V_ticket_altqs_inactive) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			error = EBUSY;
			goto fail;
		}

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname,
			    true)) == 0) {
				PF_RULES_WUNLOCK();
				error = EBUSY;
				free(altq, M_PFALTQ);
				goto fail;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		if ((ifp = ifunit(altq->ifname)) == NULL)
			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
		else
			error = altq_add(ifp, altq);

		if (error) {
			PF_RULES_WUNLOCK();
			free(altq, M_PFALTQ);
			goto fail;
		}

		if (altq->qname[0] != 0)
			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
		else
			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq,
			    entries);
		/* version error check done on import above */
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCGETALTQSV0:
	case DIOCGETALTQSV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq;

		PF_RULES_RLOCK();
		pa->nr = 0;
		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
			pa->nr++;
		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = V_ticket_altqs_active;
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCGETALTQV0:
	case DIOCGETALTQV1: {
		struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr;
		struct pf_altq *altq;

		PF_RULES_RLOCK();
		if (pa->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			goto fail;
		}
		altq = pf_altq_get_nth_active(pa->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			goto fail;
		}
		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
		PF_RULES_RUNLOCK();
		break;
	}

	case DIOCCHANGEALTQV0:
	case DIOCCHANGEALTQV1:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATSV0:
	case DIOCGETQSTATSV1: {
		struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr;
		struct pf_altq *altq;
		int nbytes;
		u_int32_t version;

		PF_RULES_RLOCK();
		if (pq->ticket != V_ticket_altqs_active) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			goto fail;
		}
		nbytes = pq->nbytes;
		altq = pf_altq_get_nth_active(pq->nr);
		if (altq == NULL) {
			PF_RULES_RUNLOCK();
			error = EBUSY;
			goto fail;
		}

		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
			PF_RULES_RUNLOCK();
			error = ENXIO;
			goto fail;
		}
		PF_RULES_RUNLOCK();
		if (cmd == DIOCGETQSTATSV0)
			version = 0;	/* DIOCGETQSTATSV0 means stats struct v0 */
		else
			version = pq->version;
		error = altq_getqstats(altq, pq->buf, &nbytes, version);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
#endif /* ALTQ */

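	/*
	 * Pool address ioctls: DIOCBEGINADDRS resets the per-vnet address
	 * buffer and returns a pool ticket, DIOCADDADDR appends to that
	 * buffer, and the addresses are attached to a rule when one is
	 * later added under the same ticket.  The legacy struct
	 * pfioc_pooladdr is bridged into struct pf_nl_pooladdr here, with
	 * 'which' preset to PF_RDR (the only pool the old interface could
	 * express); the memcpy() only covers the legacy fields, so the
	 * preset survives it.
	 */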
	case DIOCBEGINADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		error = pf_ioctl_begin_addrs(&pp->ticket);
		break;
	}

	case DIOCADDADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_nl_pooladdr npp = {};

		npp.which = PF_RDR;
		memcpy(&npp, pp, sizeof(*pp));
		error = pf_ioctl_add_addr(&npp);
		break;
	}

	case DIOCGETADDRS: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_nl_pooladdr npp = {};

		npp.which = PF_RDR;
		memcpy(&npp, pp, sizeof(*pp));
		error = pf_ioctl_get_addrs(&npp);
		memcpy(pp, &npp, sizeof(*pp));

		break;
	}

	case DIOCGETADDR: {
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		struct pf_nl_pooladdr npp = {};

		npp.which = PF_RDR;
		memcpy(&npp, pp, sizeof(*pp));
		error = pf_ioctl_get_addr(&npp);
		memcpy(pp, &npp, sizeof(*pp));

		break;
	}

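	/*
	 * DIOCCHANGEADDR edits a rule's address pool in place, following
	 * the same head/tail/before/after/remove action scheme as
	 * DIOCCHANGERULE.  The replacement pool entry and its kif are
	 * allocated before the rules write lock is taken; on any failure
	 * the DIOCCHANGEADDR_error path releases them again.
	 */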
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_kpool *pool;
		struct pf_kpooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_kruleset *ruleset;
		struct pfi_kkif *kif = NULL;

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			goto fail;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			goto fail;
		}
		if (pca->addr.addr.p.dyn != NULL) {
			error = EINVAL;
			goto fail;
		}

		if (pca->action != PF_CHANGE_REMOVE) {
#ifndef INET
			if (pca->af == AF_INET) {
				error = EAFNOSUPPORT;
				goto fail;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				error = EAFNOSUPPORT;
				goto fail;
			}
#endif /* INET6 */
			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
			if (newpa->ifname[0])
				kif = pf_kkif_create(M_WAITOK);
			newpa->kif = NULL;
		}
#define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
		PF_RULES_WLOCK();
		ruleset = pf_find_kruleset(pca->anchor);
		if (ruleset == NULL)
			ERROUT(EBUSY);

		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
		if (pool == NULL)
			ERROUT(EBUSY);

		if (pca->action != PF_CHANGE_REMOVE) {
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kkif_attach(kif,
				    newpa->ifname);
				pfi_kkif_ref(newpa->kif);
				kif = NULL;
			}

			switch (newpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				error = pfi_dynaddr_setup(&newpa->addr,
				    pca->af);
				break;
			case PF_ADDR_TABLE:
				newpa->addr.p.tbl = pfr_attach_table(ruleset,
				    newpa->addr.v.tblname);
				if (newpa->addr.p.tbl == NULL)
					error = ENOMEM;
				break;
			}
			if (error)
				goto DIOCCHANGEADDR_error;
		}

		switch (pca->action) {
		case PF_CHANGE_ADD_HEAD:
			oldpa = TAILQ_FIRST(&pool->list);
			break;
		case PF_CHANGE_ADD_TAIL:
			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
			break;
		default:
			oldpa = TAILQ_FIRST(&pool->list);
			for (int i = 0; oldpa && i < pca->nr; i++)
				oldpa = TAILQ_NEXT(oldpa, entries);

			if (oldpa == NULL)
				ERROUT(EINVAL);
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			switch (oldpa->addr.type) {
			case PF_ADDR_DYNIFTL:
				pfi_dynaddr_remove(oldpa->addr.p.dyn);
				break;
			case PF_ADDR_TABLE:
				pfr_detach_table(oldpa->addr.p.tbl);
				break;
			}
			if (oldpa->kif)
				pfi_kkif_unref(oldpa->kif);
			free(oldpa, M_PFRULE);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		pf_addrcpy(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
		PF_RULES_WUNLOCK();
		break;

#undef ERROUT
DIOCCHANGEADDR_error:
		if (newpa != NULL) {
			if (newpa->kif)
				pfi_kkif_unref(newpa->kif);
			free(newpa, M_PFRULE);
		}
		PF_RULES_WUNLOCK();
		pf_kkif_free(kif);
		break;
	}

	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;

		pr->path[sizeof(pr->path) - 1] = '\0';

		error = pf_ioctl_get_rulesets(pr);
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;

		pr->path[sizeof(pr->path) - 1] = '\0';

		error = pf_ioctl_get_ruleset(pr);
		break;
	}

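	/*
	 * The DIOCR* table ioctls below all follow one template: validate
	 * that pfrio_esize matches the element type, bound pfrio_size by
	 * pf_ioctl_maxcount and check for multiplication overflow, copy
	 * the element array in (or allocate a zeroed one for get-style
	 * calls), call into pfr_*() with PFR_FLAG_USERIOCTL, and copy
	 * results back out when requested.
	 *
	 * Illustrative userland sketch of the convention, here for
	 * DIOCRADDTABLES (not part of this file; error handling omitted
	 * and the table name is an arbitrary example):
	 *
	 *	struct pfr_table tbl = { .pfrt_name = "blocklist" };
	 *	struct pfioc_table io = {
	 *		.pfrio_buffer = &tbl,
	 *		.pfrio_esize = sizeof(tbl),
	 *		.pfrio_size = 1,
	 *	};
	 *	ioctl(dev, DIOCRADDTABLES, &io);
	 *	// io.pfrio_nadd now holds the number of tables created
	 */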
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			goto fail;
		}
		if (strnlen(io->pfrio_table.pfrt_anchor, MAXPATHLEN)
		    == MAXPATHLEN) {
			error = EINVAL;
			goto fail;
		}
		if (strnlen(io->pfrio_table.pfrt_name, PF_TABLE_NAME_SIZE)
		    == PF_TABLE_NAME_SIZE) {
			error = EINVAL;
			goto fail;
		}

		PF_RULES_WLOCK();
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			goto fail;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_add_tables(pfrts, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_PF);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			error = ENOMEM;
			goto fail;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_del_tables(pfrts, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_PF);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			goto fail;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_table);

		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_PF, M_NOWAIT | M_ZERO);
		if (pfrts == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			goto fail;
		}
		error = pfr_get_tables(&io->pfrio_table, pfrts,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrts, io->pfrio_buffer, totlen);
		free(pfrts, M_PF);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_tstats *pfrtstats;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			goto fail;
		}
		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			error = EINVAL;
			goto fail;
		}
		io->pfrio_size = min(io->pfrio_size, n);

		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
		pfrtstats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_tstats), M_PF, M_NOWAIT | M_ZERO);
		if (pfrtstats == NULL) {
			error = ENOMEM;
			PF_RULES_RUNLOCK();
			PF_TABLE_STATS_UNLOCK();
			goto fail;
		}
		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		if (error == 0)
			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
		free(pfrtstats, M_PF);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}

		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
			/* We used to count tables and use the minimum required
			 * size, so we didn't fail on overly large requests.
			 * Keep doing so. */
			io->pfrio_size = pf_ioctl_maxcount;
			goto fail;
		}

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_PF);
			goto fail;
		}

		PF_TABLE_STATS_LOCK();
		PF_RULES_RLOCK();
		error = pfr_clr_tstats(pfrts, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		PF_TABLE_STATS_UNLOCK();
		free(pfrts, M_PF);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_table *pfrts;
		size_t totlen;
		int n;

		if (io->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			goto fail;
		}

		PF_RULES_RLOCK();
		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
		if (n < 0) {
			PF_RULES_RUNLOCK();
			error = EINVAL;
			goto fail;
		}

		io->pfrio_size = min(io->pfrio_size, n);
		PF_RULES_RUNLOCK();

		totlen = io->pfrio_size * sizeof(struct pfr_table);
		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfrts, totlen);
		if (error) {
			free(pfrts, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_set_tflags(pfrts, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfrts, M_PF);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			goto fail;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		io->pfrio_nadd = 0;
		error = pfr_add_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_PF);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			goto fail;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_del_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_PF);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen, count;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
			error = EINVAL;
			goto fail;
		}
		count = max(io->pfrio_size, io->pfrio_size2);
		if (count > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
			error = EINVAL;
			goto fail;
		}
		totlen = count * sizeof(struct pfr_addr);
		pfras = mallocarray(count, sizeof(struct pfr_addr), M_PF,
		    M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_set_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_START | PFR_FLAG_DONE | PFR_FLAG_USERIOCTL, 0);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_PF);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			goto fail;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_PF, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_addrs(&io->pfrio_table, pfras,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_PF);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_astats *pfrastats;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
			error = EINVAL;
			goto fail;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_astats);
		pfrastats = mallocarray(io->pfrio_size,
		    sizeof(struct pfr_astats), M_PF, M_WAITOK | M_ZERO);
		PF_RULES_RLOCK();
		error = pfr_get_astats(&io->pfrio_table, pfrastats,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfrastats, io->pfrio_buffer, totlen);
		free(pfrastats, M_PF);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			goto fail;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_clr_astats(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_PF);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			goto fail;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_PF);
			goto fail;
		}
		PF_RULES_RLOCK();
		error = pfr_tst_addrs(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		PF_RULES_RUNLOCK();
		if (error == 0)
			error = copyout(pfras, io->pfrio_buffer, totlen);
		free(pfras, M_PF);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		struct pfr_addr *pfras;
		size_t totlen;

		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			goto fail;
		}
		if (io->pfrio_size < 0 ||
		    io->pfrio_size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
			error = EINVAL;
			goto fail;
		}
		totlen = io->pfrio_size * sizeof(struct pfr_addr);
		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
		    M_PF, M_WAITOK);
		error = copyin(io->pfrio_buffer, pfras, totlen);
		if (error) {
			free(pfras, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		error = pfr_ina_define(&io->pfrio_table, pfras,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		PF_RULES_WUNLOCK();
		free(pfras, M_PF);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;

		PF_RULES_WLOCK();
		error = pf_osfp_add(io);
		PF_RULES_WUNLOCK();
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;

		PF_RULES_RLOCK();
		error = pf_osfp_get(io);
		PF_RULES_RUNLOCK();
		break;
	}

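	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement the transaction
	 * interface used by pfctl: a transaction is an array of
	 * pfioc_trans_e elements, one per ruleset type (eth, altq, table,
	 * or rule set), each carrying its own ticket.  Begin hands out
	 * tickets and resets the inactive sets, rollback discards them,
	 * and commit first revalidates every ticket and only then swaps
	 * all inactive sets in, so the whole update is applied atomically
	 * or not at all.
	 */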
	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioes, *ioe;
		size_t totlen;
		int i;

		if (io->esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		if (io->size < 0 ||
		    io->size > pf_ioctl_maxcount ||
		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
			error = EINVAL;
			goto fail;
		}
		totlen = sizeof(struct pfioc_trans_e) * io->size;
		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
		    M_PF, M_WAITOK);
		error = copyin(io->array, ioes, totlen);
		if (error) {
			free(ioes, M_PF);
			goto fail;
		}
		PF_RULES_WLOCK();
		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ETH:
				if ((error = pf_begin_eth(&ioe->ticket,
				    ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_PF);
					goto fail;
				}
				break;
#ifdef ALTQ
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					PF_RULES_WUNLOCK();
					free(ioes, M_PF);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_PF);
					goto fail;
				}
				break;
#endif /* ALTQ */
			case PF_RULESET_TABLE:
			    {
				struct pfr_table table;

				bzero(&table, sizeof(table));
				strlcpy(table.pfrt_anchor, ioe->anchor,
				    sizeof(table.pfrt_anchor));
				if ((error = pfr_ina_begin(&table,
				    &ioe->ticket, NULL, 0))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_PF);
					goto fail;
				}
				break;
			    }
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					PF_RULES_WUNLOCK();
					free(ioes, M_PF);
					goto fail;
				}
				break;
			}
		}
		PF_RULES_WUNLOCK();
		error = copyout(ioes, io->array, totlen);
		free(ioes, M_PF);
		break;
	}

case DIOCXROLLBACK: {
6174
struct pfioc_trans *io = (struct pfioc_trans *)addr;
6175
struct pfioc_trans_e *ioe, *ioes;
6176
size_t totlen;
6177
int i;
6178
6179
if (io->esize != sizeof(*ioe)) {
6180
error = ENODEV;
6181
goto fail;
6182
}
6183
if (io->size < 0 ||
6184
io->size > pf_ioctl_maxcount ||
6185
WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
6186
error = EINVAL;
6187
goto fail;
6188
}
6189
totlen = sizeof(struct pfioc_trans_e) * io->size;
6190
ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
6191
M_PF, M_WAITOK);
6192
error = copyin(io->array, ioes, totlen);
6193
if (error) {
6194
free(ioes, M_PF);
6195
goto fail;
6196
}
6197
PF_RULES_WLOCK();
6198
for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
6199
ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
6200
switch (ioe->rs_num) {
6201
case PF_RULESET_ETH:
6202
if ((error = pf_rollback_eth(ioe->ticket,
6203
ioe->anchor))) {
6204
PF_RULES_WUNLOCK();
6205
free(ioes, M_PF);
6206
goto fail; /* really bad */
6207
}
6208
break;
6209
#ifdef ALTQ
6210
case PF_RULESET_ALTQ:
6211
if (ioe->anchor[0]) {
6212
PF_RULES_WUNLOCK();
6213
free(ioes, M_PF);
6214
error = EINVAL;
6215
goto fail;
6216
}
6217
if ((error = pf_rollback_altq(ioe->ticket))) {
6218
PF_RULES_WUNLOCK();
6219
free(ioes, M_PF);
6220
goto fail; /* really bad */
6221
}
6222
break;
6223
#endif /* ALTQ */
6224
case PF_RULESET_TABLE:
6225
{
6226
struct pfr_table table;
6227
6228
bzero(&table, sizeof(table));
6229
strlcpy(table.pfrt_anchor, ioe->anchor,
6230
sizeof(table.pfrt_anchor));
6231
if ((error = pfr_ina_rollback(&table,
6232
ioe->ticket, NULL, 0))) {
6233
PF_RULES_WUNLOCK();
6234
free(ioes, M_PF);
6235
goto fail; /* really bad */
6236
}
6237
break;
6238
}
6239
default:
6240
if ((error = pf_rollback_rules(ioe->ticket,
6241
ioe->rs_num, ioe->anchor))) {
6242
PF_RULES_WUNLOCK();
6243
free(ioes, M_PF);
6244
goto fail; /* really bad */
6245
}
6246
break;
6247
}
6248
}
6249
PF_RULES_WUNLOCK();
6250
free(ioes, M_PF);
6251
break;
6252
}
6253
6254
case DIOCXCOMMIT: {
6255
struct pfioc_trans *io = (struct pfioc_trans *)addr;
6256
struct pfioc_trans_e *ioe, *ioes;
6257
struct pf_kruleset *rs;
6258
struct pf_keth_ruleset *ers;
6259
size_t totlen;
6260
int i;
6261
6262
if (io->esize != sizeof(*ioe)) {
6263
error = ENODEV;
6264
goto fail;
6265
}
6266
6267
if (io->size < 0 ||
6268
io->size > pf_ioctl_maxcount ||
6269
WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
6270
error = EINVAL;
6271
goto fail;
6272
}
6273
6274
totlen = sizeof(struct pfioc_trans_e) * io->size;
6275
ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
6276
M_PF, M_WAITOK);
6277
error = copyin(io->array, ioes, totlen);
6278
if (error) {
6279
free(ioes, M_PF);
6280
goto fail;
6281
}
6282
PF_RULES_WLOCK();
6283
/* First makes sure everything will succeed. */
6284
for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
6285
ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
6286
switch (ioe->rs_num) {
6287
case PF_RULESET_ETH:
6288
ers = pf_find_keth_ruleset(ioe->anchor);
6289
if (ers == NULL || ioe->ticket == 0 ||
6290
ioe->ticket != ers->inactive.ticket) {
6291
PF_RULES_WUNLOCK();
6292
free(ioes, M_PF);
6293
error = EINVAL;
6294
goto fail;
6295
}
6296
break;
6297
#ifdef ALTQ
6298
case PF_RULESET_ALTQ:
6299
if (ioe->anchor[0]) {
6300
PF_RULES_WUNLOCK();
6301
free(ioes, M_PF);
6302
error = EINVAL;
6303
goto fail;
6304
}
6305
if (!V_altqs_inactive_open || ioe->ticket !=
6306
V_ticket_altqs_inactive) {
6307
PF_RULES_WUNLOCK();
6308
free(ioes, M_PF);
6309
error = EBUSY;
6310
goto fail;
6311
}
6312
break;
6313
#endif /* ALTQ */
6314
case PF_RULESET_TABLE:
6315
rs = pf_find_kruleset(ioe->anchor);
6316
if (rs == NULL || !rs->topen || ioe->ticket !=
6317
rs->tticket) {
6318
PF_RULES_WUNLOCK();
6319
free(ioes, M_PF);
6320
error = EBUSY;
6321
goto fail;
6322
}
6323
break;
6324
default:
6325
if (ioe->rs_num < 0 || ioe->rs_num >=
6326
PF_RULESET_MAX) {
6327
PF_RULES_WUNLOCK();
6328
free(ioes, M_PF);
6329
error = EINVAL;
6330
goto fail;
6331
}
6332
rs = pf_find_kruleset(ioe->anchor);
6333
if (rs == NULL ||
6334
!rs->rules[ioe->rs_num].inactive.open ||
6335
rs->rules[ioe->rs_num].inactive.ticket !=
6336
ioe->ticket) {
6337
PF_RULES_WUNLOCK();
6338
free(ioes, M_PF);
6339
error = EBUSY;
6340
goto fail;
6341
}
6342
break;
6343
}
6344
}
6345
/* Now do the commit - no errors should happen here. */
6346
for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
6347
switch (ioe->rs_num) {
6348
case PF_RULESET_ETH:
6349
if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
6350
PF_RULES_WUNLOCK();
6351
free(ioes, M_PF);
6352
goto fail; /* really bad */
6353
}
6354
break;
6355
#ifdef ALTQ
6356
case PF_RULESET_ALTQ:
6357
if ((error = pf_commit_altq(ioe->ticket))) {
6358
PF_RULES_WUNLOCK();
6359
free(ioes, M_PF);
6360
goto fail; /* really bad */
6361
}
6362
break;
6363
#endif /* ALTQ */
6364
case PF_RULESET_TABLE:
6365
{
6366
struct pfr_table table;
6367
6368
bzero(&table, sizeof(table));
6369
(void)strlcpy(table.pfrt_anchor, ioe->anchor,
6370
sizeof(table.pfrt_anchor));
6371
if ((error = pfr_ina_commit(&table,
6372
ioe->ticket, NULL, NULL, 0))) {
6373
PF_RULES_WUNLOCK();
6374
free(ioes, M_PF);
6375
goto fail; /* really bad */
6376
}
6377
break;
6378
}
6379
default:
6380
if ((error = pf_commit_rules(ioe->ticket,
6381
ioe->rs_num, ioe->anchor))) {
6382
PF_RULES_WUNLOCK();
6383
free(ioes, M_PF);
6384
goto fail; /* really bad */
6385
}
6386
break;
6387
}
6388
}
6389
PF_RULES_WUNLOCK();
6390
6391
/* Only hook into EtherNet taffic if we've got rules for it. */
6392
if (! TAILQ_EMPTY(V_pf_keth->active.rules))
6393
hook_pf_eth();
6394
else
6395
dehook_pf_eth();
6396
6397
free(ioes, M_PF);
6398
break;
6399
}
6400
6401
case DIOCGETSRCNODES: {
6402
struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
6403
struct pf_srchash *sh;
6404
struct pf_ksrc_node *n;
6405
struct pf_src_node *p, *pstore;
6406
uint32_t i, nr = 0;
6407
6408
for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
6409
i++, sh++) {
6410
PF_HASHROW_LOCK(sh);
6411
LIST_FOREACH(n, &sh->nodes, entry)
6412
nr++;
6413
PF_HASHROW_UNLOCK(sh);
6414
}
6415
6416
psn->psn_len = min(psn->psn_len,
6417
sizeof(struct pf_src_node) * nr);
6418
6419
if (psn->psn_len == 0) {
6420
psn->psn_len = sizeof(struct pf_src_node) * nr;
6421
goto fail;
6422
}
6423
6424
nr = 0;
6425
6426
p = pstore = malloc(psn->psn_len, M_PF, M_WAITOK | M_ZERO);
6427
for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
6428
i++, sh++) {
6429
PF_HASHROW_LOCK(sh);
6430
LIST_FOREACH(n, &sh->nodes, entry) {
6431
6432
if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
6433
break;
6434
6435
pf_src_node_copy(n, p);
6436
6437
p++;
6438
nr++;
6439
}
6440
PF_HASHROW_UNLOCK(sh);
6441
}
6442
error = copyout(pstore, psn->psn_src_nodes,
6443
sizeof(struct pf_src_node) * nr);
6444
if (error) {
6445
free(pstore, M_PF);
6446
goto fail;
6447
}
6448
psn->psn_len = sizeof(struct pf_src_node) * nr;
6449
free(pstore, M_PF);
6450
break;
6451
}
6452
6453
case DIOCCLRSRCNODES: {
6454
pf_kill_srcnodes(NULL);
6455
break;
6456
}
6457
6458
case DIOCKILLSRCNODES:
6459
pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
6460
break;
6461
6462
#ifdef COMPAT_FREEBSD13
6463
case DIOCKEEPCOUNTERS_FREEBSD13:
6464
#endif
6465
case DIOCKEEPCOUNTERS:
6466
error = pf_keepcounters((struct pfioc_nv *)addr);
6467
break;
6468
6469
case DIOCGETSYNCOOKIES:
6470
error = pf_get_syncookies((struct pfioc_nv *)addr);
6471
break;
6472
6473
case DIOCSETSYNCOOKIES:
6474
error = pf_set_syncookies((struct pfioc_nv *)addr);
6475
break;
6476
6477
case DIOCSETHOSTID: {
6478
u_int32_t *hostid = (u_int32_t *)addr;
6479
6480
PF_RULES_WLOCK();
6481
if (*hostid == 0)
6482
V_pf_status.hostid = arc4random();
6483
else
6484
V_pf_status.hostid = *hostid;
6485
PF_RULES_WUNLOCK();
6486
break;
6487
}
6488
6489
case DIOCOSFPFLUSH:
6490
PF_RULES_WLOCK();
6491
pf_osfp_flush();
6492
PF_RULES_WUNLOCK();
6493
break;
6494
6495
case DIOCIGETIFACES: {
6496
struct pfioc_iface *io = (struct pfioc_iface *)addr;
6497
struct pfi_kif *ifstore;
6498
size_t bufsiz;
6499
6500
if (io->pfiio_esize != sizeof(struct pfi_kif)) {
6501
error = ENODEV;
6502
goto fail;
6503
}
6504
6505
if (io->pfiio_size < 0 ||
6506
io->pfiio_size > pf_ioctl_maxcount ||
6507
WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
6508
error = EINVAL;
6509
goto fail;
6510
}
6511
6512
io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
6513
6514
bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
6515
ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
6516
M_PF, M_WAITOK | M_ZERO);
6517
6518
PF_RULES_RLOCK();
6519
pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
6520
PF_RULES_RUNLOCK();
6521
error = copyout(ifstore, io->pfiio_buffer, bufsiz);
6522
free(ifstore, M_PF);
6523
break;
6524
}
6525
6526
case DIOCSETIFFLAG: {
6527
struct pfioc_iface *io = (struct pfioc_iface *)addr;
6528
6529
io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
6530
6531
PF_RULES_WLOCK();
6532
error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
6533
PF_RULES_WUNLOCK();
6534
break;
6535
}
6536
6537
case DIOCCLRIFFLAG: {
6538
struct pfioc_iface *io = (struct pfioc_iface *)addr;
6539
6540
io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
6541
6542
PF_RULES_WLOCK();
6543
error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
6544
PF_RULES_WUNLOCK();
6545
break;
6546
}
6547
6548
case DIOCSETREASS: {
6549
u_int32_t *reass = (u_int32_t *)addr;
6550
6551
V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
6552
/* Removal of DF flag without reassembly enabled is not a
6553
* valid combination. Disable reassembly in such case. */
6554
if (!(V_pf_status.reass & PF_REASS_ENABLED))
6555
V_pf_status.reass = 0;
6556
break;
6557
}
6558
6559
default:
6560
error = ENODEV;
6561
break;
6562
}
6563
fail:
6564
CURVNET_RESTORE();
6565
6566
#undef ERROUT_IOCTL
6567
6568
return (error);
6569
}
6570
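/*
 * Export a kernel state to the wire representation used by pfsync.
 * The fields common to all message versions are written through the
 * pfs_1301 view of the union; the version-specific fields are filled
 * in per msg_version below.
 */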
static void
pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
{
	const char *tagname;

	/* copy from state key */
	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];

	/* copy from state */
	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
	sp->pfs_1301.expire = pf_state_expires(st);
	if (sp->pfs_1301.expire <= time_uptime)
		sp->pfs_1301.expire = htonl(0);
	else
		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);

	switch (msg_version) {
	case PFSYNC_MSG_VERSION_1301:
		sp->pfs_1301.state_flags = st->state_flags;
		sp->pfs_1301.direction = st->direction;
		sp->pfs_1301.log = st->act.log;
		sp->pfs_1301.timeout = st->timeout;
		sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
		sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
		/*
		 * XXX Why do we bother pfsyncing source node information if source
		 * nodes are not synced? Showing users that there is source tracking
		 * when there is none seems useless.
		 */
		if (st->sns[PF_SN_LIMIT] != NULL)
			sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
		if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
			sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
		break;
	case PFSYNC_MSG_VERSION_1400:
		sp->pfs_1400.state_flags = htons(st->state_flags);
		sp->pfs_1400.direction = st->direction;
		sp->pfs_1400.log = st->act.log;
		sp->pfs_1400.timeout = st->timeout;
		sp->pfs_1400.proto = st->key[PF_SK_WIRE]->proto;
		sp->pfs_1400.af = st->key[PF_SK_WIRE]->af;
		sp->pfs_1400.qid = htons(st->act.qid);
		sp->pfs_1400.pqid = htons(st->act.pqid);
		sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
		sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
		sp->pfs_1400.rtableid = htonl(st->act.rtableid);
		sp->pfs_1400.min_ttl = st->act.min_ttl;
		sp->pfs_1400.set_tos = st->act.set_tos;
		sp->pfs_1400.max_mss = htons(st->act.max_mss);
		sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
		sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
		sp->pfs_1400.rt = st->act.rt;
		if (st->act.rt_kif)
			strlcpy(sp->pfs_1400.rt_ifname,
			    st->act.rt_kif->pfik_name,
			    sizeof(sp->pfs_1400.rt_ifname));
		/*
		 * XXX Why do we bother pfsyncing source node information if source
		 * nodes are not synced? Showing users that there is source tracking
		 * when there is none seems useless.
		 */
		if (st->sns[PF_SN_LIMIT] != NULL)
			sp->pfs_1400.sync_flags |= PFSYNC_FLAG_SRCNODE;
		if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
			sp->pfs_1400.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
		break;
	case PFSYNC_MSG_VERSION_1500:
		sp->pfs_1500.state_flags = htons(st->state_flags);
		sp->pfs_1500.direction = st->direction;
		sp->pfs_1500.log = st->act.log;
		sp->pfs_1500.timeout = st->timeout;
		sp->pfs_1500.wire_proto = st->key[PF_SK_WIRE]->proto;
		sp->pfs_1500.wire_af = st->key[PF_SK_WIRE]->af;
		sp->pfs_1500.stack_proto = st->key[PF_SK_STACK]->proto;
		sp->pfs_1500.stack_af = st->key[PF_SK_STACK]->af;
		sp->pfs_1500.qid = htons(st->act.qid);
		sp->pfs_1500.pqid = htons(st->act.pqid);
		sp->pfs_1500.dnpipe = htons(st->act.dnpipe);
		sp->pfs_1500.dnrpipe = htons(st->act.dnrpipe);
		sp->pfs_1500.rtableid = htonl(st->act.rtableid);
		sp->pfs_1500.min_ttl = st->act.min_ttl;
		sp->pfs_1500.set_tos = st->act.set_tos;
		sp->pfs_1500.max_mss = htons(st->act.max_mss);
		sp->pfs_1500.set_prio[0] = st->act.set_prio[0];
		sp->pfs_1500.set_prio[1] = st->act.set_prio[1];
		sp->pfs_1500.rt = st->act.rt;
		sp->pfs_1500.rt_af = st->act.rt_af;
		if (st->act.rt_kif)
			strlcpy(sp->pfs_1500.rt_ifname,
			    st->act.rt_kif->pfik_name,
			    sizeof(sp->pfs_1500.rt_ifname));
		strlcpy(sp->pfs_1500.orig_ifname,
		    st->orig_kif->pfik_name,
		    sizeof(sp->pfs_1500.orig_ifname));
		if ((tagname = pf_tag2tagname(st->tag)) != NULL)
			strlcpy(sp->pfs_1500.tagname, tagname,
			    sizeof(sp->pfs_1500.tagname));
		break;
	default:
		panic("%s: Unsupported pfsync_msg_version %d",
		    __func__, msg_version);
	}

	sp->pfs_1301.id = st->id;
	sp->pfs_1301.creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);

	if (st->rule == NULL)
		sp->pfs_1301.rule = htonl(-1);
	else
		sp->pfs_1301.rule = htonl(st->rule->nr);
	if (st->anchor == NULL)
		sp->pfs_1301.anchor = htonl(-1);
	else
		sp->pfs_1301.anchor = htonl(st->anchor->nr);
	if (st->nat_rule == NULL)
		sp->pfs_1301.nat_rule = htonl(-1);
	else
		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);

	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
}

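/*
 * Thin wrappers that zero the destination buffer and export a state
 * in one fixed pfsync message version each.
 */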
void
pfsync_state_export_1301(struct pfsync_state_1301 *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));
	pfsync_state_export((union pfsync_state_union *)sp, st,
	    PFSYNC_MSG_VERSION_1301);
}

void
pfsync_state_export_1400(struct pfsync_state_1400 *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));
	pfsync_state_export((union pfsync_state_union *)sp, st,
	    PFSYNC_MSG_VERSION_1400);
}

void
pfsync_state_export_1500(struct pfsync_state_1500 *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));
	pfsync_state_export((union pfsync_state_union *)sp, st,
	    PFSYNC_MSG_VERSION_1500);
}

void
pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
{
	bzero(sp, sizeof(*sp));

	sp->version = PF_STATE_VERSION;

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
	    sizeof(sp->orig_ifname));
	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - (st->creation / 1000));
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_uptime)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_uptime);

	sp->direction = st->direction;
	sp->log = st->act.log;
	sp->timeout = st->timeout;
	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
	sp->state_flags_compat = st->state_flags;
	sp->state_flags = htons(st->state_flags);
	if (st->sns[PF_SN_LIMIT] != NULL)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	sp->id = st->id;
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule->nr);
	if (st->anchor == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor->nr);
	if (st->nat_rule == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule->nr);

	sp->packets[0] = st->packets[0];
	sp->packets[1] = st->packets[1];
	sp->bytes[0] = st->bytes[0];
	sp->bytes[1] = st->bytes[1];

	sp->qid = htons(st->act.qid);
	sp->pqid = htons(st->act.pqid);
	sp->dnpipe = htons(st->act.dnpipe);
	sp->dnrpipe = htons(st->act.dnrpipe);
	sp->rtableid = htonl(st->act.rtableid);
	sp->min_ttl = st->act.min_ttl;
	sp->set_tos = st->act.set_tos;
	sp->max_mss = htons(st->act.max_mss);
	sp->rt = st->act.rt;
	if (st->act.rt_kif)
		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
		    sizeof(sp->rt_ifname));
	sp->set_prio[0] = st->act.set_prio[0];
	sp->set_prio[1] = st->act.set_prio[1];
}

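/*
 * Replace the kernel table pointer with something safe to copy out to
 * userspace: the address count of the table (or its root table, if the
 * table itself is not active), or -1 if no active table was found.
 */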
static void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt;

	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));

	kt = aw->p.tbl;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}

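/*
 * Pack an array of counters into a child nvlist with parallel
 * "counters"/"names"/"ids" arrays and attach it to nvl under the
 * given name.
 */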
static int
pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
    size_t number, char **names)
{
	nvlist_t *nvc;

	nvc = nvlist_create(0);
	if (nvc == NULL)
		return (ENOMEM);

	for (int i = 0; i < number; i++) {
		nvlist_append_number_array(nvc, "counters",
		    counter_u64_fetch(counters[i]));
		nvlist_append_string_array(nvc, "names",
		    names[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, name, nvc);
	nvlist_destroy(nvc);

	return (0);
}

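/*
 * Export the global pf status as a packed nvlist.  A request with
 * nv->size == 0 only reports the required buffer length in nv->len.
 */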
static int
pf_getstatus(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvc = NULL;
	void *nvlpacked = NULL;
	int error;
	struct pf_status s;
	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
	time_t since;

	PF_RULES_RLOCK_TRACKER;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	PF_RULES_RLOCK();

	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	since = time_second - (time_uptime - V_pf_status.since);

	nvlist_add_bool(nvl, "running", V_pf_status.running);
	nvlist_add_number(nvl, "since", since);
	nvlist_add_number(nvl, "debug", V_pf_status.debug);
	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
	nvlist_add_number(nvl, "states", V_pf_status.states);
	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
	nvlist_add_number(nvl, "reass", V_pf_status.reass);
	nvlist_add_bool(nvl, "syncookies_active",
	    V_pf_status.syncookies_active);
	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);

	/* counters */
	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
	    PFRES_MAX, pf_reasons);
	if (error != 0)
		ERROUT(error);

	/* lcounters */
	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
	    KLCNT_MAX, pf_lcounter);
	if (error != 0)
		ERROUT(error);

	/* fcounters */
	nvc = nvlist_create(0);
	if (nvc == NULL)
		ERROUT(ENOMEM);

	for (int i = 0; i < FCNT_MAX; i++) {
		nvlist_append_number_array(nvc, "counters",
		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
		nvlist_append_string_array(nvc, "names",
		    pf_fcounter[i]);
		nvlist_append_number_array(nvc, "ids",
		    i);
	}
	nvlist_add_nvlist(nvl, "fcounters", nvc);
	nvlist_destroy(nvc);
	nvc = NULL;

	/* scounters */
	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
	    SCNT_MAX, pf_fcounter);
	if (error != 0)
		ERROUT(error);

	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
	    PF_MD5_DIGEST_LENGTH);

	pfi_update_status(V_pf_status.ifname, &s);

	/* pcounters / bcounters */
	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			for (int k = 0; k < 2; k++) {
				nvlist_append_number_array(nvl, "pcounters",
				    s.pcounters[i][j][k]);
			}
			nvlist_append_number_array(nvl, "bcounters",
			    s.bcounters[i][j]);
		}
	}

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	PF_RULES_RUNLOCK();
	error = copyout(nvlpacked, nv->data, nv->len);
	goto done;

#undef ERROUT
errout:
	PF_RULES_RUNLOCK();
done:
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvc);
	nvlist_destroy(nvl);

	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */
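/*
 * Flush all states.  The walk restarts from the relock label after
 * each removal, re-taking the row lock, since removing a state may
 * drop it.
 */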
static void
pf_clear_all_states(void)
{
	struct epoch_tracker et;
	struct pf_kstate *s;
	u_int i;

	NET_EPOCH_ENTER(et);
	for (i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			s->timeout = PFTM_PURGE;
			/* Don't send out individual delete messages. */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_remove_state(s);
			goto relock;
		}
		PF_HASHROW_UNLOCK(ih);
	}
	NET_EPOCH_EXIT(et);
}

static int
pf_clear_tables(void)
{
	struct pfioc_table io;
	int error;

	bzero(&io, sizeof(io));
	io.pfrio_flags |= PFR_FLAG_ALLRSETS;

	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
	    io.pfrio_flags);

	return (error);
}

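/*
 * Kill source nodes in two passes: first unlink every matching node
 * (all of them when psnk is NULL) and mark it by setting expire to 1,
 * then clear the references that states still hold on the marked
 * nodes before the nodes are freed.
 */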
static void
pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
{
	struct pf_ksrc_node_list kill;
	u_int killed;

	LIST_INIT(&kill);
	for (int i = 0; i <= V_pf_srchashmask; i++) {
		struct pf_srchash *sh = &V_pf_srchash[i];
		struct pf_ksrc_node *sn, *tmp;

		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
			if (psnk == NULL ||
			    (pf_match_addr(psnk->psnk_src.neg,
			      &psnk->psnk_src.addr.v.a.addr,
			      &psnk->psnk_src.addr.v.a.mask,
			      &sn->addr, sn->af) &&
			    pf_match_addr(psnk->psnk_dst.neg,
			      &psnk->psnk_dst.addr.v.a.addr,
			      &psnk->psnk_dst.addr.v.a.mask,
			      &sn->raddr, sn->af))) {
				pf_unlink_src_node(sn);
				LIST_INSERT_HEAD(&kill, sn, entry);
				sn->expire = 1;
			}
		PF_HASHROW_UNLOCK(sh);
	}

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_kstate *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
			    sn_type++) {
				if (s->sns[sn_type] &&
				    s->sns[sn_type]->expire == 1) {
					s->sns[sn_type] = NULL;
				}
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}

	killed = pf_free_src_nodes(&kill);

	if (psnk != NULL)
		psnk->psnk_killed = killed;
}

static int
pf_keepcounters(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	if (! nvlist_exists_bool(nvl, "keep_counters"))
		ERROUT(EBADMSG);

	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

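/*
 * Remove all states matching the kill criteria.  When psk_kill_match
 * is set, a reversed key is built from each removed state so that the
 * state created from the opposite direction is killed as well.
 */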
unsigned int
pf_clear_states(const struct pf_kstate_kill *kill)
{
	struct pf_state_key_cmp match_key;
	struct pf_kstate *s;
	struct pfi_kkif *kif;
	int idx;
	unsigned int killed = 0, dir;

	NET_EPOCH_ASSERT();

	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];

relock_DIOCCLRSTATES:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			/* For floating states look at the original kif. */
			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;

			if (kill->psk_ifname[0] &&
			    strcmp(kill->psk_ifname,
			    kif->pfik_name))
				continue;

			if (kill->psk_kill_match) {
				bzero(&match_key, sizeof(match_key));

				if (s->direction == PF_OUT) {
					dir = PF_IN;
					idx = PF_SK_STACK;
				} else {
					dir = PF_OUT;
					idx = PF_SK_WIRE;
				}

				match_key.af = s->key[idx]->af;
				match_key.proto = s->key[idx]->proto;
				pf_addrcpy(&match_key.addr[0],
				    &s->key[idx]->addr[1], match_key.af);
				match_key.port[0] = s->key[idx]->port[1];
				pf_addrcpy(&match_key.addr[1],
				    &s->key[idx]->addr[0], match_key.af);
				match_key.port[1] = s->key[idx]->port[0];
			}

			/*
			 * Don't send out individual
			 * delete messages.
			 */
			s->state_flags |= PFSTATE_NOSYNC;
			pf_remove_state(s);
			killed++;

			if (kill->psk_kill_match)
				killed += pf_kill_matching_state(&match_key,
				    dir);

			goto relock_DIOCCLRSTATES;
		}
		PF_HASHROW_UNLOCK(ih);
	}

	if (V_pfsync_clear_states_ptr != NULL)
		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);

	return (killed);
}

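/*
 * Kill states either by id/creatorid, or by walking every hash row and
 * applying the kill filter via pf_killstates_row().
 */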
void
pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
{
	struct pf_kstate *s;

	NET_EPOCH_ASSERT();
	if (kill->psk_pfcmp.id) {
		if (kill->psk_pfcmp.creatorid == 0)
			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
		    kill->psk_pfcmp.creatorid))) {
			pf_remove_state(s);
			*killed = 1;
		}
		return;
	}

	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
}

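/*
 * nvlist variants of the state kill/clear ioctls: unpack the request,
 * perform the operation under the network epoch and return the number
 * of killed states in a freshly packed "killed" nvlist.
 */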
static int
pf_killstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	struct epoch_tracker et;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed = 0;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	NET_EPOCH_ENTER(et);
	pf_killstates(&kill, &killed);
	NET_EPOCH_EXIT(et);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_clearstates_nv(struct pfioc_nv *nv)
{
	struct pf_kstate_kill kill;
	struct epoch_tracker et;
	nvlist_t *nvl = NULL;
	void *nvlpacked = NULL;
	int error = 0;
	unsigned int killed;

#define ERROUT(x)	ERROUT_FUNCTION(on_error, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
	if (error)
		ERROUT(error);

	NET_EPOCH_ENTER(et);
	killed = pf_clear_states(&kill);
	NET_EPOCH_EXIT(et);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvlist_add_number(nvl, "killed", killed);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
on_error:
	nvlist_destroy(nvl);
	free(nvlpacked, M_NVLIST);
	return (error);
}

static int
pf_getstate(struct pfioc_nv *nv)
{
	nvlist_t *nvl = NULL, *nvls;
	void *nvlpacked = NULL;
	struct pf_kstate *s = NULL;
	int error = 0;
	uint64_t id, creatorid;

#define ERROUT(x)	ERROUT_FUNCTION(errout, x)

	if (nv->len > pf_ioctl_maxcount)
		ERROUT(ENOMEM);

	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, nvlpacked, nv->len);
	if (error)
		ERROUT(error);

	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
	if (nvl == NULL)
		ERROUT(EBADMSG);

	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));

	s = pf_find_state_byid(id, creatorid);
	if (s == NULL)
		ERROUT(ENOENT);

	free(nvlpacked, M_NVLIST);
	nvlpacked = NULL;
	nvlist_destroy(nvl);
	nvl = nvlist_create(0);
	if (nvl == NULL)
		ERROUT(ENOMEM);

	nvls = pf_state_to_nvstate(s);
	if (nvls == NULL)
		ERROUT(ENOMEM);

	nvlist_add_nvlist(nvl, "state", nvls);
	nvlist_destroy(nvls);

	nvlpacked = nvlist_pack(nvl, &nv->len);
	if (nvlpacked == NULL)
		ERROUT(ENOMEM);

	if (nv->size == 0)
		ERROUT(0);
	else if (nv->size < nv->len)
		ERROUT(ENOSPC);

	error = copyout(nvlpacked, nv->data, nv->len);

#undef ERROUT
errout:
	if (s != NULL)
		PF_STATE_UNLOCK(s);
	free(nvlpacked, M_NVLIST);
	nvlist_destroy(nvl);
	return (error);
}

/*
 * XXX - Check for version mismatch!!!
 */

/*
 * Duplicate pfctl -Fa operation to get rid of as much as we can.
 */
static int
shutdown_pf(void)
{
	int error = 0;
	u_int32_t t[5];
	char nn = '\0';
	struct pf_kanchor *anchor, *tmp_anchor;
	struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
	int rs_num;

	do {
		/* Unlink rules of all user defined anchors */
		RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
		    tmp_anchor) {
			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
				if ((error = pf_begin_rules(&t[rs_num], rs_num,
				    anchor->path)) != 0) {
					DPFPRINTF(PF_DEBUG_MISC, "%s: "
					    "anchor.path=%s rs_num=%d",
					    __func__, anchor->path, rs_num);
					goto error;	/* XXX: rollback? */
				}
			}
			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
				error = pf_commit_rules(t[rs_num], rs_num,
				    anchor->path);
				MPASS(error == 0);
			}
		}

		/* Unlink rules of all user defined ether anchors */
		RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
		    &V_pf_keth_anchors, tmp_eth_anchor) {
			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
			    != 0) {
				DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
				    "anchor.path=%s", __func__,
				    eth_anchor->path);
				goto error;
			}
			error = pf_commit_eth(t[0], eth_anchor->path);
			MPASS(error == 0);
		}

		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, "%s: SCRUB", __func__);
			break;
		}
		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, "%s: FILTER", __func__);
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, "%s: NAT", __func__);
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, "%s: BINAT", __func__);
			break;		/* XXX: rollback? */
		}
		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
		    != 0) {
			DPFPRINTF(PF_DEBUG_MISC, "%s: RDR", __func__);
			break;		/* XXX: rollback? */
		}

		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
		MPASS(error == 0);
		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
		MPASS(error == 0);

		if ((error = pf_clear_tables()) != 0)
			break;

		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, "%s: eth", __func__);
			break;
		}
		error = pf_commit_eth(t[0], &nn);
		MPASS(error == 0);

#ifdef ALTQ
		if ((error = pf_begin_altq(&t[0])) != 0) {
			DPFPRINTF(PF_DEBUG_MISC, "%s: ALTQ", __func__);
			break;
		}
		pf_commit_altq(t[0]);
#endif

		pf_clear_all_states();

		pf_kill_srcnodes(NULL);

		for (int i = 0; i < PF_RULESET_MAX; i++) {
			pf_rule_tree_free(pf_main_ruleset.rules[i].active.tree);
			pf_rule_tree_free(pf_main_ruleset.rules[i].inactive.tree);
		}

		/* status does not use malloced mem so no need to cleanup */
		/* fingerprints and interfaces have their own cleanup code */
	} while (0);

error:
	return (error);
}

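/*
 * Map a pf verdict onto a pfil(9) return value: PF_PASS with the mbuf
 * taken away means the packet was consumed; any other verdict drops
 * (and frees) the packet.
 */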
static pfil_return_t
pf_check_return(int chk, struct mbuf **m)
{

	switch (chk) {
	case PF_PASS:
		if (*m == NULL)
			return (PFIL_CONSUMED);
		else
			return (PFIL_PASS);
		break;
	default:
		if (*m != NULL) {
			m_freem(*m);
			*m = NULL;
		}
		return (PFIL_DROPPED);
	}
}

static pfil_return_t
pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_ASSERT_SET();

	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_ASSERT_SET();

	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);

	return (pf_check_return(chk, m));
}

#ifdef INET
static pfil_return_t
pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_ASSERT_SET();

	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_ASSERT_SET();

	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif

#ifdef INET6
static pfil_return_t
pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_ASSERT_SET();

	/*
	 * In case of loopback traffic IPv6 uses the real interface in
	 * order to support scoped addresses. In order to support stateful
	 * filtering we change this to lo0, as is the case with IPv4.
	 */
	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
	    m, inp, NULL);

	return (pf_check_return(chk, m));
}

static pfil_return_t
pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	int chk;

	CURVNET_ASSERT_SET();

	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);

	return (pf_check_return(chk, m));
}
#endif /* INET6 */

VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
#define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
#define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)

#ifdef INET
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
#define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
#define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
#endif
#ifdef INET6
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
#define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
#define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
#endif

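/*
 * Register the Ethernet-layer pf hooks with pfil(9) and link them to
 * the link-layer pfil head.  A no-op when already hooked.
 */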
static void
hook_pf_eth(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
		.pa_type = PFIL_TYPE_ETHERNET,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pha.pa_mbuf_chk = pf_eth_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "eth-in";
	V_pf_eth_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_eth_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "eth-out";
	V_pf_eth_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_link_pfil_head;
	pla.pa_hook = V_pf_eth_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);

	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
}

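/*
 * Register the IPv4/IPv6 pf hooks with pfil(9).  With pf_filter_local
 * enabled, the output hooks are additionally linked to the heads for
 * locally originated traffic.
 */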
static void
hook_pf(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "pf",
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
	};
	int ret __diagused;

	if (atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pha.pa_type = PFIL_TYPE_IP4;
	pha.pa_mbuf_chk = pf_check_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in";
	V_pf_ip4_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check_out;
	pha.pa_flags = PFIL_OUT;
	pha.pa_rulname = "default-out";
	V_pf_ip4_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet_pfil_head;
	pla.pa_hook = V_pf_ip4_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet_local_pfil_head;
		pla.pa_hook = V_pf_ip4_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif
#ifdef INET6
	pha.pa_type = PFIL_TYPE_IP6;
	pha.pa_mbuf_chk = pf_check6_in;
	pha.pa_flags = PFIL_IN;
	pha.pa_rulname = "default-in6";
	V_pf_ip6_in_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_in_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	pha.pa_mbuf_chk = pf_check6_out;
	pha.pa_rulname = "default-out6";
	pha.pa_flags = PFIL_OUT;
	V_pf_ip6_out_hook = pfil_add_hook(&pha);
	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
	pla.pa_head = V_inet6_pfil_head;
	pla.pa_hook = V_pf_ip6_out_hook;
	ret = pfil_link(&pla);
	MPASS(ret == 0);
	if (V_pf_filter_local) {
		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
		pla.pa_head = V_inet6_local_pfil_head;
		pla.pa_hook = V_pf_ip6_out_hook;
		ret = pfil_link(&pla);
		MPASS(ret == 0);
	}
#endif

	atomic_store_bool(&V_pf_pfil_hooked, true);
}

static void
dehook_pf_eth(void)
{

	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
		return;

	pfil_remove_hook(V_pf_eth_in_hook);
	pfil_remove_hook(V_pf_eth_out_hook);

	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
}

static void
dehook_pf(void)
{

	if (!atomic_load_bool(&V_pf_pfil_hooked))
		return;

#ifdef INET
	pfil_remove_hook(V_pf_ip4_in_hook);
	pfil_remove_hook(V_pf_ip4_out_hook);
#endif
#ifdef INET6
	pfil_remove_hook(V_pf_ip6_in_hook);
	pfil_remove_hook(V_pf_ip6_out_hook);
#endif

	atomic_store_bool(&V_pf_pfil_hooked, false);
}

static void
pf_load_vnet(void)
{
	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
	rm_init_flags(&V_pf_tags_lock, "pf tags and queues", RM_RECURSE);
	sx_init(&V_pf_ioctl_lock, "pf ioctl");

	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
#ifdef ALTQ
	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
#endif

	V_pf_keth = &V_pf_main_keth_anchor.ruleset;

	pfattach_vnet();
	V_pf_vnet_active = 1;
}

static int
pf_load(void)
{
	int error;

	sx_init(&pf_end_lock, "pf end thread");

	pf_mtag_initialize();

	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
	if (pf_dev == NULL)
		return (ENOMEM);

	pf_end_threads = 0;
	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
	if (error != 0)
		return (error);

	pfi_initialize();

	return (0);
}

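/*
 * Tear down per-vnet pf state: unhook from pfil, flush rules, tables
 * and states via shutdown_pf(), then destroy locks, zones and
 * counters (counters last, as shutdown still updates them).
 */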
static void
pf_unload_vnet(void)
{
	int ret __diagused;

	V_pf_vnet_active = 0;
	V_pf_status.running = 0;
	dehook_pf();
	dehook_pf_eth();

	PF_RULES_WLOCK();
	pf_syncookies_cleanup();
	shutdown_pf();
	PF_RULES_WUNLOCK();

	ret = swi_remove(V_pf_swi_cookie);
	MPASS(ret == 0);
	ret = intr_event_destroy(V_pf_swi_ie);
	MPASS(ret == 0);

	pf_unload_vnet_purge();

	pf_normalize_cleanup();
	PF_RULES_WLOCK();
	pfi_cleanup_vnet();
	PF_RULES_WUNLOCK();
	pfr_cleanup();
	pf_osfp_flush();
	pf_cleanup();
	if (IS_DEFAULT_VNET(curvnet))
		pf_mtag_cleanup();

	pf_cleanup_tagset(&V_pf_tags);
#ifdef ALTQ
	pf_cleanup_tagset(&V_pf_qids);
#endif
	uma_zdestroy(V_pf_tag_z);

#ifdef PF_WANT_32_TO_64_COUNTER
	PF_RULES_WLOCK();
	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);

	MPASS(LIST_EMPTY(&V_pf_allkiflist));
	MPASS(V_pf_allkifcount == 0);

	LIST_REMOVE(&V_pf_default_rule, allrulelist);
	V_pf_allrulecount--;
	LIST_REMOVE(V_pf_rulemarker, allrulelist);

	MPASS(LIST_EMPTY(&V_pf_allrulelist));
	MPASS(V_pf_allrulecount == 0);

	PF_RULES_WUNLOCK();

	free(V_pf_kifmarker, PFI_MTYPE);
	free(V_pf_rulemarker, M_PFRULE);
#endif

	/* Free counters last as we updated them during shutdown. */
	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
	}
	counter_u64_free(V_pf_default_rule.states_cur);
	counter_u64_free(V_pf_default_rule.states_tot);
	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);

	for (int i = 0; i < PFRES_MAX; i++)
		counter_u64_free(V_pf_status.counters[i]);
	for (int i = 0; i < KLCNT_MAX; i++)
		counter_u64_free(V_pf_status.lcounters[i]);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
	for (int i = 0; i < SCNT_MAX; i++)
		counter_u64_free(V_pf_status.scounters[i]);
	for (int i = 0; i < NCNT_MAX; i++)
		counter_u64_free(V_pf_status.ncounters[i]);

	rm_destroy(&V_pf_rules_lock);
	sx_destroy(&V_pf_ioctl_lock);
}

static void
pf_unload(void *dummy __unused)
{

	sx_xlock(&pf_end_lock);
	pf_end_threads = 1;
	while (pf_end_threads < 2) {
		wakeup_one(pf_purge_thread);
		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
	}
	sx_xunlock(&pf_end_lock);

	pf_nl_unregister();

	if (pf_dev != NULL)
		destroy_dev(pf_dev);

	pfi_cleanup();

	sx_destroy(&pf_end_lock);
}

static void
vnet_pf_init(void *unused __unused)
{

	pf_load_vnet();
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_init, NULL);

static void
vnet_pf_uninit(const void *unused __unused)
{

	pf_unload_vnet();
}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
    vnet_pf_uninit, NULL);

static int
pf_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pf_load();
		pf_nl_register();
		break;
	case MOD_UNLOAD:
		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
		 * the vnet_pf_uninit()s */
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pf_mod = {
	"pf",
	pf_modevent,
	0
};

DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
MODULE_DEPEND(pf, netlink, 1, 1, 1);
MODULE_DEPEND(pf, crypto, 1, 1, 1);
MODULE_VERSION(pf, PF_MODVER);