freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/netpfil/ipfw/ip_fw2.c
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
5
*
6
* Redistribution and use in source and binary forms, with or without
7
* modification, are permitted provided that the following conditions
8
* are met:
9
* 1. Redistributions of source code must retain the above copyright
10
* notice, this list of conditions and the following disclaimer.
11
* 2. Redistributions in binary form must reproduce the above copyright
12
* notice, this list of conditions and the following disclaimer in the
13
* documentation and/or other materials provided with the distribution.
14
*
15
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25
* SUCH DAMAGE.
26
*/
27
28
#include <sys/cdefs.h>
29
/*
30
* The FreeBSD IP packet firewall, main file
31
*/
32
33
#include "opt_ipfw.h"
34
#include "opt_ipdivert.h"
35
#include "opt_inet.h"
36
#ifndef INET
37
#error "IPFIREWALL requires INET"
38
#endif /* INET */
39
#include "opt_inet6.h"
40
41
#include <sys/param.h>
42
#include <sys/systm.h>
43
#include <sys/condvar.h>
44
#include <sys/counter.h>
45
#include <sys/eventhandler.h>
46
#include <sys/malloc.h>
47
#include <sys/mbuf.h>
48
#include <sys/kernel.h>
49
#include <sys/lock.h>
50
#include <sys/jail.h>
51
#include <sys/module.h>
52
#include <sys/priv.h>
53
#include <sys/proc.h>
54
#include <sys/rwlock.h>
55
#include <sys/rmlock.h>
56
#include <sys/sdt.h>
57
#include <sys/socket.h>
58
#include <sys/socketvar.h>
59
#include <sys/sysctl.h>
60
#include <sys/syslog.h>
61
#include <sys/ucred.h>
62
#include <net/ethernet.h> /* for ETHERTYPE_IP */
63
#include <net/if.h>
64
#include <net/if_var.h>
65
#include <net/if_private.h>
66
#include <net/route.h>
67
#include <net/route/nhop.h>
68
#include <net/pfil.h>
69
#include <net/vnet.h>
70
#include <net/if_gif.h>
71
#include <net/if_pfsync.h>
72
73
#include <netpfil/pf/pf_mtag.h>
74
75
#include <netinet/in.h>
76
#include <netinet/in_var.h>
77
#include <netinet/in_pcb.h>
78
#include <netinet/ip.h>
79
#include <netinet/ip_var.h>
80
#include <netinet/ip_icmp.h>
81
#include <netinet/ip_fw.h>
82
#include <netinet/ip_carp.h>
83
#include <netinet/pim.h>
84
#include <netinet/tcp_var.h>
85
#include <netinet/udp.h>
86
#include <netinet/udp_var.h>
87
#include <netinet/sctp.h>
88
#include <netinet/sctp_crc32.h>
89
#include <netinet/sctp_header.h>
90
91
#include <netinet/ip6.h>
92
#include <netinet/icmp6.h>
93
#include <netinet/in_fib.h>
94
#ifdef INET6
95
#include <netinet6/in6_fib.h>
96
#include <netinet6/in6_pcb.h>
97
#include <netinet6/scope6_var.h>
98
#include <netinet6/ip6_var.h>
99
#endif
100
101
#include <net/if_gre.h> /* for struct grehdr */
102
103
#include <netpfil/ipfw/ip_fw_private.h>
104
105
#include <machine/in_cksum.h> /* XXX for in_cksum */
106
107
#ifdef MAC
108
#include <security/mac/mac_framework.h>
109
#endif
110
111
#define IPFW_PROBE(probe, arg0, arg1, arg2, arg3, arg4, arg5) \
112
SDT_PROBE6(ipfw, , , probe, arg0, arg1, arg2, arg3, arg4, arg5)
113
114
SDT_PROVIDER_DEFINE(ipfw);
115
SDT_PROBE_DEFINE6(ipfw, , , rule__matched,
116
"int", /* retval */
117
"int", /* af */
118
"void *", /* src addr */
119
"void *", /* dst addr */
120
"struct ip_fw_args *", /* args */
121
"struct ip_fw *" /* rule */);
122
123
/*
124
* static variables followed by global ones.
125
* All ipfw global variables are here.
126
*/
127
128
VNET_DEFINE_STATIC(int, fw_deny_unknown_exthdrs);
129
#define V_fw_deny_unknown_exthdrs VNET(fw_deny_unknown_exthdrs)
130
131
VNET_DEFINE_STATIC(int, fw_permit_single_frag6) = 1;
132
#define V_fw_permit_single_frag6 VNET(fw_permit_single_frag6)
133
134
#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
135
static int default_to_accept = 1;
136
#else
137
static int default_to_accept;
138
#endif
139
140
VNET_DEFINE(int, autoinc_step);
141
VNET_DEFINE(int, fw_one_pass) = 1;
142
143
VNET_DEFINE(unsigned int, fw_tables_max);
144
VNET_DEFINE(unsigned int, fw_tables_sets) = 0; /* Don't use set-aware tables */
145
/* Use 128 tables by default */
146
static unsigned int default_fw_tables = IPFW_TABLES_DEFAULT;
147
148
#ifndef IPFIREWALL_LINEAR_SKIPTO
149
VNET_DEFINE(int, skipto_cache) = 0;
150
#else
151
VNET_DEFINE(int, skipto_cache) = 1;
152
#endif
153
154
static uint32_t jump(struct ip_fw_chain *chain, struct ip_fw *f,
155
uint32_t num, int tablearg, bool jump_backwards);
156
157
/*
158
* Each rule belongs to one of 32 different sets (0..31).
159
* The variable set_disable contains one bit per set.
160
* If the bit is set, all rules in the corresponding set
161
* are disabled. Set RESVD_SET(31) is reserved for the default rule
162
* and rules that are not deleted by the flush command,
163
* and CANNOT be disabled.
164
* Rules in set RESVD_SET can only be deleted individually.
165
*/
166
VNET_DEFINE(u_int32_t, set_disable);
167
#define V_set_disable VNET(set_disable)
168
169
VNET_DEFINE(int, fw_verbose);
170
/* counter for ipfw_log(NULL...) */
171
VNET_DEFINE(u_int64_t, norule_counter);
172
VNET_DEFINE(int, verbose_limit);
173
174
/* layer3_chain contains the list of rules for layer 3 */
175
VNET_DEFINE(struct ip_fw_chain, layer3_chain);
176
177
/* ipfw_vnet_ready controls when we are open for business */
178
VNET_DEFINE(int, ipfw_vnet_ready) = 0;
179
180
VNET_DEFINE(int, ipfw_nat_ready) = 0;
181
182
ipfw_nat_t *ipfw_nat_ptr = NULL;
183
struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int);
184
ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
185
ipfw_nat_cfg_t *ipfw_nat_del_ptr;
186
ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
187
ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
188
189
#ifdef SYSCTL_NODE
190
uint32_t dummy_def = IPFW_DEFAULT_RULE;
191
static int sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS);
192
static int sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS);
193
194
SYSBEGIN(f3)
195
196
SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
197
"Firewall");
198
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass,
199
CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_one_pass), 0,
200
"Only do a single pass through ipfw when using dummynet(4), ipfw_nat or other divert(4)-like interfaces");
201
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step,
202
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(autoinc_step), 0,
203
"Rule number auto-increment step");
204
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose,
205
CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_verbose), 0,
206
"Log matches to ipfw rules");
207
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit,
208
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(verbose_limit), 0,
209
"Set upper limit of matches of ipfw rules logged");
210
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, skipto_cache,
211
CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(skipto_cache), 0,
212
"Status of linear skipto cache: 1 - enabled, 0 - disabled.");
213
SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, default_rule, CTLFLAG_RD,
214
&dummy_def, 0,
215
"The default/max possible rule number.");
216
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_max,
217
CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
218
0, 0, sysctl_ipfw_table_num, "IU",
219
"Maximum number of concurrently used tables");
220
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_sets,
221
CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
222
0, 0, sysctl_ipfw_tables_sets, "IU",
223
"Use per-set namespace for tables");
224
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, default_to_accept, CTLFLAG_RDTUN,
225
&default_to_accept, 0,
226
"Make the default rule accept all packets.");
227
TUNABLE_INT("net.inet.ip.fw.tables_max", (int *)&default_fw_tables);
228
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count,
229
CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(layer3_chain.n_rules), 0,
230
"Number of static rules");
231
232
#ifdef INET6
233
SYSCTL_DECL(_net_inet6_ip6);
234
SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
235
"Firewall");
236
SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, deny_unknown_exthdrs,
237
CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE,
238
&VNET_NAME(fw_deny_unknown_exthdrs), 0,
239
"Deny packets with unknown IPv6 Extension Headers");
240
SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, permit_single_frag6,
241
CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE,
242
&VNET_NAME(fw_permit_single_frag6), 0,
243
"Permit single packet IPv6 fragments");
244
#endif /* INET6 */
245
246
SYSEND
247
248
#endif /* SYSCTL_NODE */
249
250
/*
251
* Some macros used in the various matching options.
252
* L3HDR maps an ipv4 pointer into a layer3 header pointer of type T
253
* Other macros just cast void * into the appropriate type
254
*/
255
#define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
256
#define TCP(p) ((struct tcphdr *)(p))
257
#define SCTP(p) ((struct sctphdr *)(p))
258
#define UDP(p) ((struct udphdr *)(p))
259
#define ICMP(p) ((struct icmphdr *)(p))
260
#define ICMP6(p) ((struct icmp6_hdr *)(p))
261
262
static __inline int
263
icmptype_match(struct icmphdr *icmp, ipfw_insn_u32 *cmd)
264
{
265
int type = icmp->icmp_type;
266
267
return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) );
268
}
269
270
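/* Bitmap of ICMP types considered queries: echo, router solicitation,
 * timestamp, information and address mask requests. */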
#define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \
271
(1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) )
272
273
static int
274
is_icmp_query(struct icmphdr *icmp)
275
{
276
int type = icmp->icmp_type;
277
278
return (type <= ICMP_MAXTYPE && (TT & (1<<type)) );
279
}
280
#undef TT
281
282
/*
283
* The following checks use two arrays of 8 or 16 bits to store the
284
* bits that we want set or clear, respectively. They are in the
285
* low and high half of cmd->arg1 or cmd->d[0].
286
*
287
* We scan options and store the bits we find set. We succeed if
288
*
289
* (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
290
*
291
* The code is sometimes optimized not to store additional variables.
292
*/
293
294
static int
295
flags_match(ipfw_insn *cmd, u_int8_t bits)
296
{
297
u_char want_clear;
298
bits = ~bits;
299
300
if ( ((cmd->arg1 & 0xff) & bits) != 0)
301
return 0; /* some bits we want set were clear */
302
want_clear = (cmd->arg1 >> 8) & 0xff;
303
if ( (want_clear & bits) != want_clear)
304
return 0; /* some bits we want clear were set */
305
return 1;
306
}
307
308
static int
309
ipopts_match(struct ip *ip, ipfw_insn *cmd)
310
{
311
int optlen, bits = 0;
312
u_char *cp = (u_char *)(ip + 1);
313
int x = (ip->ip_hl << 2) - sizeof (struct ip);
314
315
for (; x > 0; x -= optlen, cp += optlen) {
316
int opt = cp[IPOPT_OPTVAL];
317
318
if (opt == IPOPT_EOL)
319
break;
320
if (opt == IPOPT_NOP)
321
optlen = 1;
322
else {
323
optlen = cp[IPOPT_OLEN];
324
if (optlen <= 0 || optlen > x)
325
return 0; /* invalid or truncated */
326
}
327
switch (opt) {
328
default:
329
break;
330
331
case IPOPT_LSRR:
332
bits |= IP_FW_IPOPT_LSRR;
333
break;
334
335
case IPOPT_SSRR:
336
bits |= IP_FW_IPOPT_SSRR;
337
break;
338
339
case IPOPT_RR:
340
bits |= IP_FW_IPOPT_RR;
341
break;
342
343
case IPOPT_TS:
344
bits |= IP_FW_IPOPT_TS;
345
break;
346
}
347
}
348
return (flags_match(cmd, bits));
349
}
350
351
/*
352
* Parse TCP options. The logic is copied from tcp_dooptions().
353
*/
354
static int
355
tcpopts_parse(const struct tcphdr *tcp, uint16_t *mss)
356
{
357
const u_char *cp = (const u_char *)(tcp + 1);
358
int optlen, bits = 0;
359
int cnt = (tcp->th_off << 2) - sizeof(struct tcphdr);
360
361
for (; cnt > 0; cnt -= optlen, cp += optlen) {
362
int opt = cp[0];
363
if (opt == TCPOPT_EOL)
364
break;
365
if (opt == TCPOPT_NOP)
366
optlen = 1;
367
else {
368
if (cnt < 2)
369
break;
370
optlen = cp[1];
371
if (optlen < 2 || optlen > cnt)
372
break;
373
}
374
375
switch (opt) {
376
default:
377
break;
378
379
case TCPOPT_MAXSEG:
380
if (optlen != TCPOLEN_MAXSEG)
381
break;
382
bits |= IP_FW_TCPOPT_MSS;
383
if (mss != NULL)
384
*mss = be16dec(cp + 2);
385
break;
386
387
case TCPOPT_WINDOW:
388
if (optlen == TCPOLEN_WINDOW)
389
bits |= IP_FW_TCPOPT_WINDOW;
390
break;
391
392
case TCPOPT_SACK_PERMITTED:
393
if (optlen == TCPOLEN_SACK_PERMITTED)
394
bits |= IP_FW_TCPOPT_SACK;
395
break;
396
397
case TCPOPT_SACK:
398
if (optlen > 2 && (optlen - 2) % TCPOLEN_SACK == 0)
399
bits |= IP_FW_TCPOPT_SACK;
400
break;
401
402
case TCPOPT_TIMESTAMP:
403
if (optlen == TCPOLEN_TIMESTAMP)
404
bits |= IP_FW_TCPOPT_TS;
405
break;
406
}
407
}
408
return (bits);
409
}
410
411
static int
412
tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd)
413
{
414
415
return (flags_match(cmd, tcpopts_parse(tcp, NULL)));
416
}
417
418
static int
419
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd, struct ip_fw_chain *chain,
420
uint32_t *tablearg)
421
{
422
423
if (ifp == NULL) /* no iface with this packet, match fails */
424
return (0);
425
426
/* Check by name or by IP address */
427
if (cmd->name[0] != '\0') { /* match by name */
428
if (cmd->name[0] == '\1') /* use tablearg to match */
429
return ipfw_lookup_table(chain, cmd->p.kidx, 0,
430
&ifp->if_index, tablearg);
431
/* Check name */
432
if (cmd->p.glob) {
433
if (fnmatch(cmd->name, ifp->if_xname, 0) == 0)
434
return(1);
435
} else {
436
if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
437
return(1);
438
}
439
} else {
440
#if !defined(USERSPACE) && defined(__FreeBSD__) /* and OSX too ? */
441
struct ifaddr *ia;
442
443
NET_EPOCH_ASSERT();
444
445
CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
446
if (ia->ifa_addr->sa_family != AF_INET)
447
continue;
448
if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
449
(ia->ifa_addr))->sin_addr.s_addr)
450
return (1); /* match */
451
}
452
#endif /* __FreeBSD__ */
453
}
454
return(0); /* no match, fail ... */
455
}
456
457
/*
458
* The verify_path function checks if a route to the src exists and
459
* if it is reachable via ifp (when provided).
460
*
461
* The 'verrevpath' option checks that the interface that an IP packet
462
* arrives on is the same interface that traffic destined for the
463
* packet's source address would be routed out of.
464
* The 'versrcreach' option just checks that the source address is
465
* reachable via any route (except default) in the routing table.
466
* These two are a measure to block forged packets. This is also
467
* commonly known as "anti-spoofing" or Unicast Reverse Path
468
* Forwarding (Unicast RPF) in Cisco-ese. The name of the knobs
469
* is purposely reminiscent of the Cisco IOS command,
470
*
471
* ip verify unicast reverse-path
472
* ip verify unicast source reachable-via any
473
*
474
* which implements the same functionality. But note that the syntax
475
* is misleading, and the check may be performed on all IP packets
476
* whether unicast, multicast, or broadcast.
477
*/
478
static int
479
verify_path(struct in_addr src, struct ifnet *ifp, u_int fib)
480
{
481
#if defined(USERSPACE) || !defined(__FreeBSD__)
482
return 0;
483
#else
484
struct nhop_object *nh;
485
486
nh = fib4_lookup(fib, src, 0, NHR_NONE, 0);
487
if (nh == NULL)
488
return (0);
489
490
/*
491
* If ifp is provided, check for equality with rtentry.
492
* We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp,
493
* in order to pass packets injected back by if_simloop():
494
* routing entry (via lo0) for our own address
495
* may exist, so we need to handle routing asymmetry.
496
*/
497
if (ifp != NULL && ifp != nh->nh_aifp)
498
return (0);
499
500
/* if no ifp provided, check if rtentry is not default route */
501
if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0)
502
return (0);
503
504
/* or if this is a blackhole/reject route */
505
if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0)
506
return (0);
507
508
/* found valid route */
509
return 1;
510
#endif /* __FreeBSD__ */
511
}
512
513
/*
514
* Generate an SCTP packet containing an ABORT chunk. The verification tag
515
* is given by vtag. The T-bit is set in the ABORT chunk if and only if
516
* reflected is not 0.
517
*/
518
519
static struct mbuf *
520
ipfw_send_abort(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t vtag,
521
int reflected)
522
{
523
struct mbuf *m;
524
struct ip *ip;
525
#ifdef INET6
526
struct ip6_hdr *ip6;
527
#endif
528
struct sctphdr *sctp;
529
struct sctp_chunkhdr *chunk;
530
u_int16_t hlen, plen, tlen;
531
532
MGETHDR(m, M_NOWAIT, MT_DATA);
533
if (m == NULL)
534
return (NULL);
535
536
M_SETFIB(m, id->fib);
537
#ifdef MAC
538
if (replyto != NULL)
539
mac_netinet_firewall_reply(replyto, m);
540
else
541
mac_netinet_firewall_send(m);
542
#else
543
(void)replyto; /* don't warn about unused arg */
544
#endif
545
546
switch (id->addr_type) {
547
case 4:
548
hlen = sizeof(struct ip);
549
break;
550
#ifdef INET6
551
case 6:
552
hlen = sizeof(struct ip6_hdr);
553
break;
554
#endif
555
default:
556
/* XXX: log me?!? */
557
FREE_PKT(m);
558
return (NULL);
559
}
560
plen = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
561
tlen = hlen + plen;
562
m->m_data += max_linkhdr;
563
m->m_flags |= M_SKIP_FIREWALL;
564
m->m_pkthdr.len = m->m_len = tlen;
565
m->m_pkthdr.rcvif = NULL;
566
bzero(m->m_data, tlen);
567
568
switch (id->addr_type) {
569
case 4:
570
ip = mtod(m, struct ip *);
571
572
ip->ip_v = 4;
573
ip->ip_hl = sizeof(struct ip) >> 2;
574
ip->ip_tos = IPTOS_LOWDELAY;
575
ip->ip_len = htons(tlen);
576
ip->ip_id = htons(0);
577
ip->ip_off = htons(0);
578
ip->ip_ttl = V_ip_defttl;
579
ip->ip_p = IPPROTO_SCTP;
580
ip->ip_sum = 0;
581
ip->ip_src.s_addr = htonl(id->dst_ip);
582
ip->ip_dst.s_addr = htonl(id->src_ip);
583
584
sctp = (struct sctphdr *)(ip + 1);
585
break;
586
#ifdef INET6
587
case 6:
588
ip6 = mtod(m, struct ip6_hdr *);
589
590
ip6->ip6_vfc = IPV6_VERSION;
591
ip6->ip6_plen = htons(plen);
592
ip6->ip6_nxt = IPPROTO_SCTP;
593
ip6->ip6_hlim = IPV6_DEFHLIM;
594
ip6->ip6_src = id->dst_ip6;
595
ip6->ip6_dst = id->src_ip6;
596
597
sctp = (struct sctphdr *)(ip6 + 1);
598
break;
599
#endif
600
}
601
602
sctp->src_port = htons(id->dst_port);
603
sctp->dest_port = htons(id->src_port);
604
sctp->v_tag = htonl(vtag);
605
sctp->checksum = htonl(0);
606
607
chunk = (struct sctp_chunkhdr *)(sctp + 1);
608
chunk->chunk_type = SCTP_ABORT_ASSOCIATION;
609
chunk->chunk_flags = 0;
610
if (reflected != 0) {
611
chunk->chunk_flags |= SCTP_HAD_NO_TCB;
612
}
613
chunk->chunk_length = htons(sizeof(struct sctp_chunkhdr));
614
615
sctp->checksum = sctp_calculate_cksum(m, hlen);
616
617
return (m);
618
}
619
620
/*
621
* Generate a TCP packet, containing either a RST or a keepalive.
622
* When flags & TH_RST, we are sending a RST packet, because a
623
* "reset" action matched the packet.
624
* Otherwise we are sending a keepalive, and flags & TH_
625
* The 'replyto' mbuf is the mbuf being replied to, if any, and is required
626
* so that MAC can label the reply appropriately.
627
*/
628
struct mbuf *
629
ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
630
u_int32_t ack, int flags)
631
{
632
struct mbuf *m = NULL; /* stupid compiler */
633
struct ip *h = NULL; /* stupid compiler */
634
#ifdef INET6
635
struct ip6_hdr *h6 = NULL;
636
#endif
637
struct tcphdr *th = NULL;
638
int len, dir;
639
640
MGETHDR(m, M_NOWAIT, MT_DATA);
641
if (m == NULL)
642
return (NULL);
643
644
M_SETFIB(m, id->fib);
645
#ifdef MAC
646
if (replyto != NULL)
647
mac_netinet_firewall_reply(replyto, m);
648
else
649
mac_netinet_firewall_send(m);
650
#else
651
(void)replyto; /* don't warn about unused arg */
652
#endif
653
654
switch (id->addr_type) {
655
case 4:
656
len = sizeof(struct ip) + sizeof(struct tcphdr);
657
break;
658
#ifdef INET6
659
case 6:
660
len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
661
break;
662
#endif
663
default:
664
/* XXX: log me?!? */
665
FREE_PKT(m);
666
return (NULL);
667
}
668
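/*
 * dir != 0 keeps the flow id's orientation (id->src -> id->dst);
 * otherwise the addresses and ports below are swapped, e.g. for a
 * RST sent back towards the offending packet's source.
 */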
dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
669
670
m->m_data += max_linkhdr;
671
m->m_flags |= M_SKIP_FIREWALL;
672
m->m_pkthdr.len = m->m_len = len;
673
m->m_pkthdr.rcvif = NULL;
674
bzero(m->m_data, len);
675
676
switch (id->addr_type) {
677
case 4:
678
h = mtod(m, struct ip *);
679
680
/* prepare for checksum */
681
h->ip_p = IPPROTO_TCP;
682
h->ip_len = htons(sizeof(struct tcphdr));
683
if (dir) {
684
h->ip_src.s_addr = htonl(id->src_ip);
685
h->ip_dst.s_addr = htonl(id->dst_ip);
686
} else {
687
h->ip_src.s_addr = htonl(id->dst_ip);
688
h->ip_dst.s_addr = htonl(id->src_ip);
689
}
690
691
th = (struct tcphdr *)(h + 1);
692
break;
693
#ifdef INET6
694
case 6:
695
h6 = mtod(m, struct ip6_hdr *);
696
697
/* prepare for checksum */
698
h6->ip6_nxt = IPPROTO_TCP;
699
h6->ip6_plen = htons(sizeof(struct tcphdr));
700
if (dir) {
701
h6->ip6_src = id->src_ip6;
702
h6->ip6_dst = id->dst_ip6;
703
} else {
704
h6->ip6_src = id->dst_ip6;
705
h6->ip6_dst = id->src_ip6;
706
}
707
708
th = (struct tcphdr *)(h6 + 1);
709
break;
710
#endif
711
}
712
713
if (dir) {
714
th->th_sport = htons(id->src_port);
715
th->th_dport = htons(id->dst_port);
716
} else {
717
th->th_sport = htons(id->dst_port);
718
th->th_dport = htons(id->src_port);
719
}
720
th->th_off = sizeof(struct tcphdr) >> 2;
721
722
if (flags & TH_RST) {
723
if (flags & TH_ACK) {
724
th->th_seq = htonl(ack);
725
tcp_set_flags(th, TH_RST);
726
} else {
727
if (flags & TH_SYN)
728
seq++;
729
th->th_ack = htonl(seq);
730
tcp_set_flags(th, TH_RST | TH_ACK);
731
}
732
} else {
733
/*
734
* Keepalive - use caller provided sequence numbers
735
*/
736
th->th_seq = htonl(seq);
737
th->th_ack = htonl(ack);
738
tcp_set_flags(th, TH_ACK);
739
}
740
741
switch (id->addr_type) {
742
case 4:
743
th->th_sum = in_cksum(m, len);
744
745
/* finish the ip header */
746
h->ip_v = 4;
747
h->ip_hl = sizeof(*h) >> 2;
748
h->ip_tos = IPTOS_LOWDELAY;
749
h->ip_off = htons(0);
750
h->ip_len = htons(len);
751
h->ip_ttl = V_ip_defttl;
752
h->ip_sum = 0;
753
break;
754
#ifdef INET6
755
case 6:
756
th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
757
sizeof(struct tcphdr));
758
759
/* finish the ip6 header */
760
h6->ip6_vfc |= IPV6_VERSION;
761
h6->ip6_hlim = IPV6_DEFHLIM;
762
break;
763
#endif
764
}
765
766
return (m);
767
}
768
769
#ifdef INET6
770
/*
771
* ipv6 specific rules here...
772
*/
773
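/* cmd->d[] is a bitmap of allowed ICMPv6 types, 32 types per 32-bit word. */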
static __inline int
774
icmp6type_match(int type, ipfw_insn_u32 *cmd)
775
{
776
return (type <= ICMP6_MAXTYPE && (cmd->d[type/32] & (1<<(type%32)) ) );
777
}
778
779
static int
780
flow6id_match(int curr_flow, ipfw_insn_u32 *cmd)
781
{
782
int i;
783
for (i=0; i <= cmd->o.arg1; ++i)
784
if (curr_flow == cmd->d[i])
785
return 1;
786
return 0;
787
}
788
789
/* support for IP6_*_ME opcodes */
790
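/* Mask that ignores bytes 2-3 of a link-local address, where the kernel
 * embeds the scope zone id. */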
static const struct in6_addr lla_mask = {{{
791
0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
792
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
793
}}};
794
795
static int
796
ipfw_localip6(struct in6_addr *in6)
797
{
798
struct rm_priotracker in6_ifa_tracker;
799
struct in6_ifaddr *ia;
800
801
if (IN6_IS_ADDR_MULTICAST(in6))
802
return (0);
803
804
if (!IN6_IS_ADDR_LINKLOCAL(in6))
805
return (in6_localip(in6));
806
807
IN6_IFADDR_RLOCK(&in6_ifa_tracker);
808
CK_STAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
809
if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
810
continue;
811
if (IN6_ARE_MASKED_ADDR_EQUAL(&ia->ia_addr.sin6_addr,
812
in6, &lla_mask)) {
813
IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
814
return (1);
815
}
816
}
817
IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
818
return (0);
819
}
820
821
static int
822
verify_path6(struct in6_addr *src, struct ifnet *ifp, u_int fib)
823
{
824
struct nhop_object *nh;
825
826
if (IN6_IS_SCOPE_LINKLOCAL(src))
827
return (1);
828
829
nh = fib6_lookup(fib, src, 0, NHR_NONE, 0);
830
if (nh == NULL)
831
return (0);
832
833
/* If ifp is provided, check for equality with route table. */
834
if (ifp != NULL && ifp != nh->nh_aifp)
835
return (0);
836
837
/* if no ifp provided, check if rtentry is not default route */
838
if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0)
839
return (0);
840
841
/* or if this is a blackhole/reject route */
842
if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0)
843
return (0);
844
845
/* found valid route */
846
return 1;
847
}
848
849
static int
850
is_icmp6_query(int icmp6_type)
851
{
852
if ((icmp6_type <= ICMP6_MAXTYPE) &&
853
(icmp6_type == ICMP6_ECHO_REQUEST ||
854
icmp6_type == ICMP6_MEMBERSHIP_QUERY ||
855
icmp6_type == ICMP6_WRUREQUEST ||
856
icmp6_type == ICMP6_FQDN_QUERY ||
857
icmp6_type == ICMP6_NI_QUERY))
858
return (1);
859
860
return (0);
861
}
862
863
static int
864
map_icmp_unreach(int code)
865
{
866
867
/* RFC 7915 p4.2 */
868
switch (code) {
869
case ICMP_UNREACH_NET:
870
case ICMP_UNREACH_HOST:
871
case ICMP_UNREACH_SRCFAIL:
872
case ICMP_UNREACH_NET_UNKNOWN:
873
case ICMP_UNREACH_HOST_UNKNOWN:
874
case ICMP_UNREACH_TOSNET:
875
case ICMP_UNREACH_TOSHOST:
876
return (ICMP6_DST_UNREACH_NOROUTE);
877
case ICMP_UNREACH_PORT:
878
return (ICMP6_DST_UNREACH_NOPORT);
879
default:
880
/*
881
* Map the rest of the codes into admin prohibited.
882
* XXX: unreach proto should be mapped into ICMPv6
883
* parameter problem, but we use only unreach type.
884
*/
885
return (ICMP6_DST_UNREACH_ADMIN);
886
}
887
}
888
889
static void
890
send_reject6(struct ip_fw_args *args, int code, u_int hlen,
891
const struct ip6_hdr *ip6)
892
{
893
struct mbuf *m;
894
895
m = args->m;
896
if (code == ICMP6_UNREACH_RST && args->f_id.proto == IPPROTO_TCP) {
897
const struct tcphdr * tcp;
898
tcp = (const struct tcphdr *)((const char *)ip6 + hlen);
899
900
if ((tcp_get_flags(tcp) & TH_RST) == 0) {
901
struct mbuf *m0;
902
m0 = ipfw_send_pkt(args->m, &(args->f_id),
903
ntohl(tcp->th_seq), ntohl(tcp->th_ack),
904
tcp_get_flags(tcp) | TH_RST);
905
if (m0 != NULL)
906
ip6_output(m0, NULL, NULL, 0, NULL, NULL,
907
NULL);
908
}
909
FREE_PKT(m);
910
} else if (code == ICMP6_UNREACH_ABORT &&
911
args->f_id.proto == IPPROTO_SCTP) {
912
struct mbuf *m0;
913
const struct sctphdr *sctp;
914
u_int32_t v_tag;
915
int reflected;
916
917
sctp = (const struct sctphdr *)((const char *)ip6 + hlen);
918
reflected = 1;
919
v_tag = ntohl(sctp->v_tag);
920
/* Investigate the first chunk header if available */
921
if (m->m_len >= hlen + sizeof(struct sctphdr) +
922
sizeof(struct sctp_chunkhdr)) {
923
const struct sctp_chunkhdr *chunk;
924
925
chunk = (const struct sctp_chunkhdr *)(sctp + 1);
926
switch (chunk->chunk_type) {
927
case SCTP_INITIATION:
928
/*
929
* Packets containing an INIT chunk MUST have
930
* a zero v-tag.
931
*/
932
if (v_tag != 0) {
933
v_tag = 0;
934
break;
935
}
936
/* INIT chunk MUST NOT be bundled */
937
if (m->m_pkthdr.len >
938
hlen + sizeof(struct sctphdr) +
939
ntohs(chunk->chunk_length) + 3) {
940
break;
941
}
942
/* Use the initiate tag if available */
943
if ((m->m_len >= hlen + sizeof(struct sctphdr) +
944
sizeof(struct sctp_chunkhdr) +
945
offsetof(struct sctp_init, a_rwnd))) {
946
const struct sctp_init *init;
947
948
init = (const struct sctp_init *)(chunk + 1);
949
v_tag = ntohl(init->initiate_tag);
950
reflected = 0;
951
}
952
break;
953
case SCTP_ABORT_ASSOCIATION:
954
/*
955
* If the packet contains an ABORT chunk, don't
956
* reply.
957
* XXX: We should search through all chunks,
958
* but do not do that to avoid attacks.
959
*/
960
v_tag = 0;
961
break;
962
}
963
}
964
if (v_tag == 0) {
965
m0 = NULL;
966
} else {
967
m0 = ipfw_send_abort(args->m, &(args->f_id), v_tag,
968
reflected);
969
}
970
if (m0 != NULL)
971
ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
972
FREE_PKT(m);
973
} else if (code != ICMP6_UNREACH_RST && code != ICMP6_UNREACH_ABORT) {
974
/* Send an ICMPv6 unreach. */
975
#if 0
976
/*
977
* Unlike above, the mbufs need to line up with the ip6 hdr,
978
* as the contents are read. We need to m_adj() the
979
* needed amount.
980
* The mbuf will however be thrown away so we can adjust it.
981
* Remember we did an m_pullup on it already so we
982
* can make some assumptions about contiguousness.
983
*/
984
if (args->L3offset)
985
m_adj(m, args->L3offset);
986
#endif
987
icmp6_error(m, ICMP6_DST_UNREACH, code, 0);
988
} else
989
FREE_PKT(m);
990
991
args->m = NULL;
992
}
993
994
#endif /* INET6 */
995
996
/*
997
* sends a reject message, consuming the mbuf passed as an argument.
998
*/
999
static void
1000
send_reject(struct ip_fw_args *args, int code, uint16_t mtu, int iplen,
1001
const struct ip *ip)
1002
{
1003
#if 0
1004
/* XXX When ip is not guaranteed to be at mtod() we will
* need to account for this.
* The mbuf will however be thrown away so we can adjust it.
* Remember we did an m_pullup on it already so we
* can make some assumptions about contiguousness.
*/
1010
if (args->L3offset)
1011
m_adj(m, args->L3offset);
1012
#endif
1013
if (code != ICMP_REJECT_RST && code != ICMP_REJECT_ABORT) {
1014
/* Send an ICMP unreach */
1015
icmp_error(args->m, ICMP_UNREACH, code, 0L, mtu);
1016
} else if (code == ICMP_REJECT_RST && args->f_id.proto == IPPROTO_TCP) {
1017
struct tcphdr *const tcp =
1018
L3HDR(struct tcphdr, mtod(args->m, struct ip *));
1019
if ( (tcp_get_flags(tcp) & TH_RST) == 0) {
1020
struct mbuf *m;
1021
m = ipfw_send_pkt(args->m, &(args->f_id),
1022
ntohl(tcp->th_seq), ntohl(tcp->th_ack),
1023
tcp_get_flags(tcp) | TH_RST);
1024
if (m != NULL)
1025
ip_output(m, NULL, NULL, 0, NULL, NULL);
1026
}
1027
FREE_PKT(args->m);
1028
} else if (code == ICMP_REJECT_ABORT &&
1029
args->f_id.proto == IPPROTO_SCTP) {
1030
struct mbuf *m;
1031
struct sctphdr *sctp;
1032
struct sctp_chunkhdr *chunk;
1033
struct sctp_init *init;
1034
u_int32_t v_tag;
1035
int reflected;
1036
1037
sctp = L3HDR(struct sctphdr, mtod(args->m, struct ip *));
1038
reflected = 1;
1039
v_tag = ntohl(sctp->v_tag);
1040
if (iplen >= (ip->ip_hl << 2) + sizeof(struct sctphdr) +
1041
sizeof(struct sctp_chunkhdr)) {
1042
/* Look at the first chunk header if available */
1043
chunk = (struct sctp_chunkhdr *)(sctp + 1);
1044
switch (chunk->chunk_type) {
1045
case SCTP_INITIATION:
1046
/*
1047
* Packets containing an INIT chunk MUST have
1048
* a zero v-tag.
1049
*/
1050
if (v_tag != 0) {
1051
v_tag = 0;
1052
break;
1053
}
1054
/* INIT chunk MUST NOT be bundled */
1055
if (iplen >
1056
(ip->ip_hl << 2) + sizeof(struct sctphdr) +
1057
ntohs(chunk->chunk_length) + 3) {
1058
break;
1059
}
1060
/* Use the initiate tag if available */
1061
if ((iplen >= (ip->ip_hl << 2) +
1062
sizeof(struct sctphdr) +
1063
sizeof(struct sctp_chunkhdr) +
1064
offsetof(struct sctp_init, a_rwnd))) {
1065
init = (struct sctp_init *)(chunk + 1);
1066
v_tag = ntohl(init->initiate_tag);
1067
reflected = 0;
1068
}
1069
break;
1070
case SCTP_ABORT_ASSOCIATION:
1071
/*
1072
* If the packet contains an ABORT chunk, don't
1073
* reply.
1074
* XXX: We should search through all chunks,
1075
* but do not do that to avoid attacks.
1076
*/
1077
v_tag = 0;
1078
break;
1079
}
1080
}
1081
if (v_tag == 0) {
1082
m = NULL;
1083
} else {
1084
m = ipfw_send_abort(args->m, &(args->f_id), v_tag,
1085
reflected);
1086
}
1087
if (m != NULL)
1088
ip_output(m, NULL, NULL, 0, NULL, NULL);
1089
FREE_PKT(args->m);
1090
} else
1091
FREE_PKT(args->m);
1092
args->m = NULL;
1093
}
1094
1095
/*
1096
* Support for uid/gid/jail lookup. These tests are expensive
1097
* (because we may need to look into the list of active sockets)
1098
* so we cache the results. ugid_lookupp is 0 if we have not
1099
* yet done a lookup, 1 if we succeeded, and -1 if we tried
1100
* and failed. The function always returns the match value.
1101
* We could actually spare the variable and use *uc, setting
1102
* it to '(void *)check_uidgid' if we have no info, NULL if
1103
* we tried and failed, or any other value if successful.
1104
*/
1105
static int
1106
check_uidgid(ipfw_insn_u32 *insn, struct ip_fw_args *args, int *ugid_lookupp,
1107
struct ucred **uc)
1108
{
1109
#if defined(USERSPACE)
1110
return 0; // not supported in userspace
1111
#else
1112
#ifndef __FreeBSD__
1113
/* XXX */
1114
return cred_check(insn, proto, oif,
1115
dst_ip, dst_port, src_ip, src_port,
1116
(struct bsd_ucred *)uc, ugid_lookupp, ((struct mbuf *)inp)->m_skb);
1117
#else /* FreeBSD */
1118
struct in_addr src_ip, dst_ip;
1119
struct inpcbinfo *pi;
1120
struct ipfw_flow_id *id;
1121
struct inpcb *pcb, *inp;
1122
int lookupflags;
1123
int match;
1124
1125
id = &args->f_id;
1126
inp = args->inp;
1127
1128
/*
1129
* Check to see if the UDP or TCP stack supplied us with
1130
* the PCB. If so, rather than holding a lock and looking
1131
* up the PCB, we can use the one that was supplied.
1132
*/
1133
if (inp && *ugid_lookupp == 0) {
1134
INP_LOCK_ASSERT(inp);
1135
if (inp->inp_socket != NULL) {
1136
*uc = crhold(inp->inp_cred);
1137
*ugid_lookupp = 1;
1138
} else
1139
*ugid_lookupp = -1;
1140
}
1141
/*
1142
* If we have already been here and the packet has no
1143
* PCB entry associated with it, then we can safely
1144
* assume that this is a no match.
1145
*/
1146
if (*ugid_lookupp == -1)
1147
return (0);
1148
if (id->proto == IPPROTO_TCP) {
1149
lookupflags = 0;
1150
pi = &V_tcbinfo;
1151
} else if (id->proto == IPPROTO_UDP) {
1152
lookupflags = INPLOOKUP_WILDCARD;
1153
pi = &V_udbinfo;
1154
} else if (id->proto == IPPROTO_UDPLITE) {
1155
lookupflags = INPLOOKUP_WILDCARD;
1156
pi = &V_ulitecbinfo;
1157
} else
1158
return 0;
1159
lookupflags |= INPLOOKUP_RLOCKPCB;
1160
match = 0;
1161
if (*ugid_lookupp == 0) {
1162
if (id->addr_type == 6) {
1163
#ifdef INET6
1164
if (args->flags & IPFW_ARGS_IN)
1165
pcb = in6_pcblookup_mbuf(pi,
1166
&id->src_ip6, htons(id->src_port),
1167
&id->dst_ip6, htons(id->dst_port),
1168
lookupflags, NULL, args->m);
1169
else
1170
pcb = in6_pcblookup_mbuf(pi,
1171
&id->dst_ip6, htons(id->dst_port),
1172
&id->src_ip6, htons(id->src_port),
1173
lookupflags, args->ifp, args->m);
1174
#else
1175
*ugid_lookupp = -1;
1176
return (0);
1177
#endif
1178
} else {
1179
src_ip.s_addr = htonl(id->src_ip);
1180
dst_ip.s_addr = htonl(id->dst_ip);
1181
if (args->flags & IPFW_ARGS_IN)
1182
pcb = in_pcblookup_mbuf(pi,
1183
src_ip, htons(id->src_port),
1184
dst_ip, htons(id->dst_port),
1185
lookupflags, NULL, args->m);
1186
else
1187
pcb = in_pcblookup_mbuf(pi,
1188
dst_ip, htons(id->dst_port),
1189
src_ip, htons(id->src_port),
1190
lookupflags, args->ifp, args->m);
1191
}
1192
if (pcb != NULL) {
1193
INP_RLOCK_ASSERT(pcb);
1194
*uc = crhold(pcb->inp_cred);
1195
*ugid_lookupp = 1;
1196
INP_RUNLOCK(pcb);
1197
}
1198
if (*ugid_lookupp == 0) {
1199
/*
1200
* We tried and failed, set the variable to -1
1201
* so we will not try again on this packet.
1202
*/
1203
*ugid_lookupp = -1;
1204
return (0);
1205
}
1206
}
1207
if (insn->o.opcode == O_UID)
1208
match = ((*uc)->cr_uid == (uid_t)insn->d[0]);
1209
else if (insn->o.opcode == O_GID)
1210
match = groupmember((gid_t)insn->d[0], *uc);
1211
else if (insn->o.opcode == O_JAIL)
1212
match = ((*uc)->cr_prison->pr_id == (int)insn->d[0]);
1213
return (match);
1214
#endif /* __FreeBSD__ */
1215
#endif /* not supported in userspace */
1216
}
1217
1218
/*
1219
* Helper function to set args with info on the rule after the matching
1220
* one. slot is precise, whereas we guess rule_id as they are
1221
* assigned sequentially.
1222
*/
1223
static inline void
1224
set_match(struct ip_fw_args *args, int slot,
1225
struct ip_fw_chain *chain)
1226
{
1227
args->rule.chain_id = chain->id;
1228
args->rule.slot = slot + 1; /* we use 0 as a marker */
1229
args->rule.rule_id = 1 + chain->map[slot]->id;
1230
args->rule.rulenum = chain->map[slot]->rulenum;
1231
args->flags |= IPFW_ARGS_REF;
1232
}
1233
1234
static uint32_t
1235
jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, uint32_t num,
1236
int tablearg, bool jump_backwards)
1237
{
1238
int f_pos, i;
1239
1240
/*
1241
* Make sure we do not jump backward.
1242
*/
1243
i = IP_FW_ARG_TABLEARG(chain, num, skipto);
1244
if (!jump_backwards && i <= f->rulenum)
1245
i = f->rulenum + 1;
1246
1247
if (V_skipto_cache == 0)
1248
f_pos = ipfw_find_rule(chain, i, 0);
1249
else {
1250
/*
1251
* Make sure we do not do out of bounds access.
1252
*/
1253
if (i >= IPFW_DEFAULT_RULE)
1254
i = IPFW_DEFAULT_RULE - 1;
1255
f_pos = chain->idxmap[i];
1256
}
1257
1258
return (f_pos);
1259
}
1260
1261
static uint32_t
1262
jump(struct ip_fw_chain *chain, struct ip_fw *f, uint32_t num,
1263
int tablearg, bool jump_backwards)
1264
{
1265
int f_pos;
1266
1267
/* Can't use cache with IP_FW_TARG */
1268
if (num == IP_FW_TARG)
1269
return jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
1270
1271
/*
1272
* If possible use cached f_pos (in f->cache.pos),
1273
* whose version is written in f->cache.id (horrible hacks
1274
* to avoid changing the ABI).
1275
*
1276
* Multiple threads can execute the same rule simultaneously,
1277
* we need to ensure that cache.pos is updated before cache.id.
1278
*/
1279
1280
#ifdef __LP64__
1281
struct ip_fw_jump_cache cache;
1282
1283
cache.raw_value = f->cache.raw_value;
1284
if (cache.id == chain->id)
1285
return (cache.pos);
1286
1287
f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
1288
1289
cache.pos = f_pos;
1290
cache.id = chain->id;
1291
f->cache.raw_value = cache.raw_value;
1292
#else
1293
if (f->cache.id == chain->id) {
1294
/* Load pos after id */
1295
atomic_thread_fence_acq();
1296
return (f->cache.pos);
1297
}
1298
1299
f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
1300
1301
f->cache.pos = f_pos;
1302
/* Store id after pos */
1303
atomic_thread_fence_rel();
1304
f->cache.id = chain->id;
1305
#endif /* !__LP64__ */
1306
return (f_pos);
1307
}
1308
1309
#define TARG(k, f) IP_FW_ARG_TABLEARG(chain, k, f)
1310
1311
static inline int
1312
tvalue_match(struct ip_fw_chain *ch, const ipfw_insn_table *cmd,
1313
uint32_t tablearg)
1314
{
1315
uint32_t tvalue;
1316
1317
switch (IPFW_TVALUE_TYPE(&cmd->o)) {
1318
case TVALUE_PIPE:
1319
tvalue = TARG_VAL(ch, tablearg, pipe);
1320
break;
1321
case TVALUE_DIVERT:
1322
tvalue = TARG_VAL(ch, tablearg, divert);
1323
break;
1324
case TVALUE_SKIPTO:
1325
tvalue = TARG_VAL(ch, tablearg, skipto);
1326
break;
1327
case TVALUE_NETGRAPH:
1328
tvalue = TARG_VAL(ch, tablearg, netgraph);
1329
break;
1330
case TVALUE_FIB:
1331
tvalue = TARG_VAL(ch, tablearg, fib);
1332
break;
1333
case TVALUE_NAT:
1334
tvalue = TARG_VAL(ch, tablearg, nat);
1335
break;
1336
case TVALUE_NH4:
1337
tvalue = TARG_VAL(ch, tablearg, nh4);
1338
break;
1339
case TVALUE_DSCP:
1340
tvalue = TARG_VAL(ch, tablearg, dscp);
1341
break;
1342
case TVALUE_LIMIT:
1343
tvalue = TARG_VAL(ch, tablearg, limit);
1344
break;
1345
case TVALUE_MARK:
1346
tvalue = TARG_VAL(ch, tablearg, mark);
1347
break;
1348
case TVALUE_TAG:
1349
default:
1350
tvalue = TARG_VAL(ch, tablearg, tag);
1351
break;
1352
}
1353
return (tvalue == cmd->value);
1354
}
1355
1356
/*
1357
* The main check routine for the firewall.
1358
*
1359
* All arguments are in args so we can modify them and return them
1360
* back to the caller.
1361
*
1362
* Parameters:
1363
*
1364
* args->m (in/out) The packet; we set to NULL when/if we nuke it.
1365
* Starts with the IP header.
1366
* args->L3offset Number of bytes bypassed if we came from L2.
1367
* e.g. often sizeof(eh) ** NOTYET **
1368
* args->ifp Incoming or outgoing interface.
1369
* args->divert_rule (in/out)
1370
* Skip up to the first rule past this rule number;
1371
* upon return, non-zero port number for divert or tee.
1372
*
1373
* args->rule Pointer to the last matching rule (in/out)
1374
* args->next_hop Socket we are forwarding to (out).
1375
* args->next_hop6 IPv6 next hop we are forwarding to (out).
1376
* args->f_id Addresses grabbed from the packet (out)
1377
* args->rule.info a cookie depending on rule action
1378
*
1379
* Return value:
1380
*
1381
* IP_FW_PASS the packet must be accepted
1382
* IP_FW_DENY the packet must be dropped
1383
* IP_FW_DIVERT divert packet, port in m_tag
1384
* IP_FW_TEE tee packet, port in m_tag
1385
* IP_FW_DUMMYNET to dummynet, pipe in args->cookie
1386
* IP_FW_NETGRAPH into netgraph, cookie args->cookie
1387
* args->rule contains the matching rule,
1388
* args->rule.info has additional information.
1389
*
1390
*/
1391
int
1392
ipfw_chk(struct ip_fw_args *args)
1393
{
1394
1395
/*
1396
* Local variables holding state while processing a packet:
1397
*
1398
* IMPORTANT NOTE: to speed up the processing of rules, there
1399
* are some assumptions about the values of the variables, which
1400
* are documented here. Should you change them, please check
1401
* the implementation of the various instructions to make sure
1402
* that they still work.
1403
*
1404
* m | args->m Pointer to the mbuf, as received from the caller.
1405
* It may change if ipfw_chk() does an m_pullup, or if it
1406
* consumes the packet because it calls send_reject().
1407
* XXX This has to change, so that ipfw_chk() never modifies
1408
* or consumes the buffer.
1409
* OR
1410
* args->mem Pointer to contiguous memory chunk.
1411
* ip Is the beginning of the ip(4 or 6) header.
1412
* eh Ethernet header in case if input is Layer2.
1413
*/
1414
struct mbuf *m;
1415
struct ip *ip;
1416
struct ether_header *eh;
1417
1418
/*
1419
* For rules which contain uid/gid or jail constraints, cache
1420
* a copy of the users credentials after the pcb lookup has been
1421
* executed. This will speed up the processing of rules with
1422
* these types of constraints, as well as decrease contention
1423
* on pcb related locks.
1424
*/
1425
#ifndef __FreeBSD__
1426
struct bsd_ucred ucred_cache;
1427
#else
1428
struct ucred *ucred_cache = NULL;
1429
#endif
1430
uint32_t f_pos = 0; /* index of current rule in the array */
1431
int ucred_lookup = 0;
1432
int retval = 0;
1433
struct ifnet *oif, *iif;
1434
1435
/*
1436
* hlen The length of the IP header.
1437
*/
1438
u_int hlen = 0; /* hlen >0 means we have an IP pkt */
1439
1440
/*
1441
* offset The offset of a fragment. offset != 0 means that
1442
* we have a fragment at this offset of an IPv4 packet.
1443
* offset == 0 means that (if this is an IPv4 packet)
1444
* this is the first or only fragment.
1445
* For IPv6 offset|ip6f_mf == 0 means there is no Fragment Header
1446
* or there is a single packet fragment (fragment header added
1447
* without being needed). We will treat a single packet fragment as if
1448
* there was no fragment header (or log/block depending on the
1449
* V_fw_permit_single_frag6 sysctl setting).
1450
*/
1451
u_short offset = 0;
1452
u_short ip6f_mf = 0;
1453
1454
/*
1455
* Local copies of addresses. They are only valid if we have
1456
* an IP packet.
1457
*
1458
* proto The protocol. Set to 0 for non-ip packets,
1459
* or to the protocol read from the packet otherwise.
1460
* proto != 0 means that we have an IPv4 packet.
1461
*
1462
* src_port, dst_port port numbers, in HOST format. Only
1463
* valid for TCP and UDP packets.
1464
*
1465
* src_ip, dst_ip ip addresses, in NETWORK format.
1466
* Only valid for IPv4 packets.
1467
*/
1468
uint8_t proto;
1469
uint16_t src_port, dst_port; /* NOTE: host format */
1470
struct in_addr src_ip, dst_ip; /* NOTE: network format */
1471
int iplen = 0;
1472
int pktlen;
1473
1474
struct ipfw_dyn_info dyn_info;
1475
struct ip_fw *q = NULL;
1476
struct ip_fw_chain *chain = &V_layer3_chain;
1477
1478
/*
1479
* We store in ulp a pointer to the upper layer protocol header.
1480
* In the ipv4 case this is easy to determine from the header,
1481
* but for ipv6 we might have some additional headers in the middle.
1482
* ulp is NULL if not found.
1483
*/
1484
void *ulp = NULL; /* upper layer protocol pointer. */
1485
1486
/* XXX ipv6 variables */
1487
int is_ipv6 = 0;
1488
#ifdef INET6
1489
uint8_t icmp6_type = 0;
1490
#endif
1491
uint16_t ext_hd = 0; /* bits vector for extension header filtering */
1492
/* end of ipv6 variables */
1493
1494
int is_ipv4 = 0;
1495
1496
int done = 0; /* flag to exit the outer loop */
1497
IPFW_RLOCK_TRACKER;
1498
bool mem;
1499
bool need_send_reject = false;
1500
int reject_code;
1501
uint16_t reject_mtu;
1502
1503
if ((mem = (args->flags & IPFW_ARGS_LENMASK))) {
1504
if (args->flags & IPFW_ARGS_ETHER) {
1505
eh = (struct ether_header *)args->mem;
1506
if (eh->ether_type == htons(ETHERTYPE_VLAN))
1507
ip = (struct ip *)
1508
((struct ether_vlan_header *)eh + 1);
1509
else
1510
ip = (struct ip *)(eh + 1);
1511
} else {
1512
eh = NULL;
1513
ip = (struct ip *)args->mem;
1514
}
1515
pktlen = IPFW_ARGS_LENGTH(args->flags);
1516
args->f_id.fib = args->ifp->if_fib; /* best guess */
1517
} else {
1518
m = args->m;
1519
if (m->m_flags & M_SKIP_FIREWALL || (! V_ipfw_vnet_ready))
1520
return (IP_FW_PASS); /* accept */
1521
if (args->flags & IPFW_ARGS_ETHER) {
1522
/* We need some amount of data to be contiguous. */
1523
if (m->m_len < min(m->m_pkthdr.len, max_protohdr) &&
1524
(args->m = m = m_pullup(m, min(m->m_pkthdr.len,
1525
max_protohdr))) == NULL)
1526
goto pullup_failed;
1527
eh = mtod(m, struct ether_header *);
1528
ip = (struct ip *)(eh + 1);
1529
} else {
1530
eh = NULL;
1531
ip = mtod(m, struct ip *);
1532
}
1533
pktlen = m->m_pkthdr.len;
1534
args->f_id.fib = M_GETFIB(m); /* mbuf not altered */
1535
}
1536
1537
dst_ip.s_addr = 0; /* make sure it is initialized */
1538
src_ip.s_addr = 0; /* make sure it is initialized */
1539
src_port = dst_port = 0;
1540
1541
DYN_INFO_INIT(&dyn_info);
1542
/*
1543
* PULLUP_TO(len, p, T) makes sure that len + sizeof(T) is contiguous,
1544
* then it sets p to point at the offset "len" in the mbuf. WARNING: the
1545
* pointer might become stale after other pullups (but we never use it
1546
* this way).
1547
*/
1548
#define PULLUP_TO(_len, p, T) PULLUP_LEN(_len, p, sizeof(T))
1549
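/* EHLEN: length of the link-layer header in front of the IP header
 * (0 when the packet starts at the IP header). */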
#define EHLEN (eh != NULL ? ((char *)ip - (char *)eh) : 0)
1550
#define _PULLUP_LOCKED(_len, p, T, unlock) \
1551
do { \
1552
int x = (_len) + T + EHLEN; \
1553
if (mem) { \
1554
if (__predict_false(pktlen < x)) { \
1555
unlock; \
1556
goto pullup_failed; \
1557
} \
1558
p = (char *)args->mem + (_len) + EHLEN; \
1559
} else { \
1560
if (__predict_false((m)->m_len < x)) { \
1561
args->m = m = m_pullup(m, x); \
1562
if (m == NULL) { \
1563
unlock; \
1564
goto pullup_failed; \
1565
} \
1566
} \
1567
p = mtod(m, char *) + (_len) + EHLEN; \
1568
} \
1569
} while (0)
1570
1571
#define PULLUP_LEN(_len, p, T) _PULLUP_LOCKED(_len, p, T, )
1572
#define PULLUP_LEN_LOCKED(_len, p, T) \
1573
_PULLUP_LOCKED(_len, p, T, IPFW_PF_RUNLOCK(chain)); \
1574
UPDATE_POINTERS()
1575
/*
1576
* In case pointers got stale after pullups, update them.
1577
*/
1578
#define UPDATE_POINTERS() \
1579
do { \
1580
if (!mem) { \
1581
if (eh != NULL) { \
1582
eh = mtod(m, struct ether_header *); \
1583
ip = (struct ip *)(eh + 1); \
1584
} else \
1585
ip = mtod(m, struct ip *); \
1586
args->m = m; \
1587
} \
1588
} while (0)
1589
1590
/* Identify IP packets and fill up variables. */
1591
if (pktlen >= sizeof(struct ip6_hdr) &&
1592
(eh == NULL || eh->ether_type == htons(ETHERTYPE_IPV6)) &&
1593
ip->ip_v == 6) {
1594
struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
1595
1596
is_ipv6 = 1;
1597
args->flags |= IPFW_ARGS_IP6;
1598
hlen = sizeof(struct ip6_hdr);
1599
proto = ip6->ip6_nxt;
1600
/* Search extension headers to find upper layer protocols */
1601
while (ulp == NULL && offset == 0) {
1602
switch (proto) {
1603
case IPPROTO_ICMPV6:
1604
PULLUP_TO(hlen, ulp, struct icmp6_hdr);
1605
#ifdef INET6
1606
icmp6_type = ICMP6(ulp)->icmp6_type;
1607
#endif
1608
break;
1609
1610
case IPPROTO_TCP:
1611
PULLUP_TO(hlen, ulp, struct tcphdr);
1612
dst_port = TCP(ulp)->th_dport;
1613
src_port = TCP(ulp)->th_sport;
1614
/* save flags for dynamic rules */
1615
args->f_id._flags = tcp_get_flags(TCP(ulp));
1616
break;
1617
1618
case IPPROTO_SCTP:
1619
if (pktlen >= hlen + sizeof(struct sctphdr) +
1620
sizeof(struct sctp_chunkhdr) +
1621
offsetof(struct sctp_init, a_rwnd))
1622
PULLUP_LEN(hlen, ulp,
1623
sizeof(struct sctphdr) +
1624
sizeof(struct sctp_chunkhdr) +
1625
offsetof(struct sctp_init, a_rwnd));
1626
else if (pktlen >= hlen + sizeof(struct sctphdr))
1627
PULLUP_LEN(hlen, ulp, pktlen - hlen);
1628
else
1629
PULLUP_LEN(hlen, ulp,
1630
sizeof(struct sctphdr));
1631
src_port = SCTP(ulp)->src_port;
1632
dst_port = SCTP(ulp)->dest_port;
1633
break;
1634
1635
case IPPROTO_UDP:
1636
case IPPROTO_UDPLITE:
1637
PULLUP_TO(hlen, ulp, struct udphdr);
1638
dst_port = UDP(ulp)->uh_dport;
1639
src_port = UDP(ulp)->uh_sport;
1640
break;
1641
1642
case IPPROTO_HOPOPTS: /* RFC 2460 */
1643
PULLUP_TO(hlen, ulp, struct ip6_hbh);
1644
ext_hd |= EXT_HOPOPTS;
1645
hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
1646
proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
1647
ulp = NULL;
1648
break;
1649
1650
case IPPROTO_ROUTING: /* RFC 2460 */
1651
PULLUP_TO(hlen, ulp, struct ip6_rthdr);
1652
switch (((struct ip6_rthdr *)ulp)->ip6r_type) {
1653
case 0:
1654
ext_hd |= EXT_RTHDR0;
1655
break;
1656
case 2:
1657
ext_hd |= EXT_RTHDR2;
1658
break;
1659
default:
1660
if (V_fw_verbose)
1661
printf("IPFW2: IPV6 - Unknown "
1662
"Routing Header type(%d)\n",
1663
((struct ip6_rthdr *)
1664
ulp)->ip6r_type);
1665
if (V_fw_deny_unknown_exthdrs)
1666
return (IP_FW_DENY);
1667
break;
1668
}
1669
ext_hd |= EXT_ROUTING;
1670
hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3;
1671
proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt;
1672
ulp = NULL;
1673
break;
1674
1675
case IPPROTO_FRAGMENT: /* RFC 2460 */
1676
PULLUP_TO(hlen, ulp, struct ip6_frag);
1677
ext_hd |= EXT_FRAGMENT;
1678
hlen += sizeof (struct ip6_frag);
1679
proto = ((struct ip6_frag *)ulp)->ip6f_nxt;
1680
offset = ((struct ip6_frag *)ulp)->ip6f_offlg &
1681
IP6F_OFF_MASK;
1682
ip6f_mf = ((struct ip6_frag *)ulp)->ip6f_offlg &
1683
IP6F_MORE_FRAG;
1684
if (V_fw_permit_single_frag6 == 0 &&
1685
offset == 0 && ip6f_mf == 0) {
1686
if (V_fw_verbose)
1687
printf("IPFW2: IPV6 - Invalid "
1688
"Fragment Header\n");
1689
if (V_fw_deny_unknown_exthdrs)
1690
return (IP_FW_DENY);
1691
break;
1692
}
1693
args->f_id.extra =
1694
ntohl(((struct ip6_frag *)ulp)->ip6f_ident);
1695
ulp = NULL;
1696
break;
1697
1698
case IPPROTO_DSTOPTS: /* RFC 2460 */
1699
PULLUP_TO(hlen, ulp, struct ip6_hbh);
1700
ext_hd |= EXT_DSTOPTS;
1701
hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
1702
proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
1703
ulp = NULL;
1704
break;
1705
1706
case IPPROTO_AH: /* RFC 2402 */
1707
PULLUP_TO(hlen, ulp, struct ip6_ext);
1708
ext_hd |= EXT_AH;
1709
hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2;
1710
proto = ((struct ip6_ext *)ulp)->ip6e_nxt;
1711
ulp = NULL;
1712
break;
1713
1714
case IPPROTO_ESP: /* RFC 2406 */
1715
PULLUP_TO(hlen, ulp, uint32_t); /* SPI, Seq# */
1716
/* Anything past Seq# is variable length and
1717
* data past this ext. header is encrypted. */
1718
ext_hd |= EXT_ESP;
1719
break;
1720
1721
case IPPROTO_NONE: /* RFC 2460 */
1722
/*
1723
* Packet ends here, and IPv6 header has
1724
* already been pulled up. If ip6e_len!=0
1725
* then octets must be ignored.
1726
*/
1727
ulp = ip; /* non-NULL to get out of loop. */
1728
break;
1729
1730
case IPPROTO_OSPFIGP:
1731
/* XXX OSPF header check? */
1732
PULLUP_TO(hlen, ulp, struct ip6_ext);
1733
break;
1734
1735
case IPPROTO_PIM:
1736
/* XXX PIM header check? */
1737
PULLUP_TO(hlen, ulp, struct pim);
1738
break;
1739
1740
case IPPROTO_GRE: /* RFC 1701 */
1741
/* XXX GRE header check? */
1742
PULLUP_TO(hlen, ulp, struct grehdr);
1743
break;
1744
1745
case IPPROTO_CARP:
1746
PULLUP_TO(hlen, ulp, offsetof(
1747
struct carp_header, carp_counter));
1748
if (CARP_ADVERTISEMENT !=
1749
((struct carp_header *)ulp)->carp_type)
1750
return (IP_FW_DENY);
1751
break;
1752
1753
case IPPROTO_IPV6: /* RFC 2893 */
1754
PULLUP_TO(hlen, ulp, struct ip6_hdr);
1755
break;
1756
1757
case IPPROTO_IPV4: /* RFC 2893 */
1758
PULLUP_TO(hlen, ulp, struct ip);
1759
break;
1760
1761
case IPPROTO_ETHERIP: /* RFC 3378 */
1762
PULLUP_LEN(hlen, ulp,
1763
sizeof(struct etherip_header) +
1764
sizeof(struct ether_header));
1765
break;
1766
1767
case IPPROTO_PFSYNC:
1768
PULLUP_TO(hlen, ulp, struct pfsync_header);
1769
break;
1770
1771
default:
1772
if (V_fw_verbose)
1773
printf("IPFW2: IPV6 - Unknown "
1774
"Extension Header(%d), ext_hd=%x\n",
1775
proto, ext_hd);
1776
if (V_fw_deny_unknown_exthdrs)
1777
return (IP_FW_DENY);
1778
PULLUP_TO(hlen, ulp, struct ip6_ext);
1779
break;
1780
} /*switch */
1781
}
1782
UPDATE_POINTERS();
1783
ip6 = (struct ip6_hdr *)ip;
1784
args->f_id.addr_type = 6;
1785
args->f_id.src_ip6 = ip6->ip6_src;
1786
args->f_id.dst_ip6 = ip6->ip6_dst;
1787
args->f_id.flow_id6 = ntohl(ip6->ip6_flow);
1788
iplen = ntohs(ip6->ip6_plen) + sizeof(*ip6);
1789
} else if (pktlen >= sizeof(struct ip) &&
1790
(eh == NULL || eh->ether_type == htons(ETHERTYPE_IP)) &&
1791
ip->ip_v == 4) {
1792
is_ipv4 = 1;
1793
args->flags |= IPFW_ARGS_IP4;
1794
hlen = ip->ip_hl << 2;
1795
/*
1796
* Collect parameters into local variables for faster
1797
* matching.
1798
*/
1799
proto = ip->ip_p;
1800
src_ip = ip->ip_src;
1801
dst_ip = ip->ip_dst;
1802
offset = ntohs(ip->ip_off) & IP_OFFMASK;
1803
iplen = ntohs(ip->ip_len);
1804
1805
if (offset == 0) {
1806
switch (proto) {
1807
case IPPROTO_TCP:
1808
PULLUP_TO(hlen, ulp, struct tcphdr);
1809
dst_port = TCP(ulp)->th_dport;
1810
src_port = TCP(ulp)->th_sport;
1811
/* save flags for dynamic rules */
1812
args->f_id._flags = tcp_get_flags(TCP(ulp));
1813
break;
1814
1815
case IPPROTO_SCTP:
1816
if (pktlen >= hlen + sizeof(struct sctphdr) +
1817
sizeof(struct sctp_chunkhdr) +
1818
offsetof(struct sctp_init, a_rwnd))
1819
PULLUP_LEN(hlen, ulp,
1820
sizeof(struct sctphdr) +
1821
sizeof(struct sctp_chunkhdr) +
1822
offsetof(struct sctp_init, a_rwnd));
1823
else if (pktlen >= hlen + sizeof(struct sctphdr))
1824
PULLUP_LEN(hlen, ulp, pktlen - hlen);
1825
else
1826
PULLUP_LEN(hlen, ulp,
1827
sizeof(struct sctphdr));
1828
src_port = SCTP(ulp)->src_port;
1829
dst_port = SCTP(ulp)->dest_port;
1830
break;
1831
1832
case IPPROTO_UDP:
1833
case IPPROTO_UDPLITE:
1834
PULLUP_TO(hlen, ulp, struct udphdr);
1835
dst_port = UDP(ulp)->uh_dport;
1836
src_port = UDP(ulp)->uh_sport;
1837
break;
1838
1839
case IPPROTO_ICMP:
1840
PULLUP_TO(hlen, ulp, struct icmphdr);
1841
//args->f_id.flags = ICMP(ulp)->icmp_type;
1842
break;
1843
1844
default:
1845
break;
1846
}
1847
} else {
1848
if (offset == 1 && proto == IPPROTO_TCP) {
1849
/* RFC 3128 */
1850
goto pullup_failed;
1851
}
1852
}
1853
1854
UPDATE_POINTERS();
1855
args->f_id.addr_type = 4;
1856
args->f_id.src_ip = ntohl(src_ip.s_addr);
1857
args->f_id.dst_ip = ntohl(dst_ip.s_addr);
1858
} else {
1859
proto = 0;
1860
dst_ip.s_addr = src_ip.s_addr = 0;
1861
1862
args->f_id.addr_type = 1; /* XXX */
1863
}
1864
#undef PULLUP_TO
1865
pktlen = iplen < pktlen ? iplen: pktlen;
1866
1867
/* Properly initialize the rest of f_id */
1868
args->f_id.proto = proto;
1869
args->f_id.src_port = src_port = ntohs(src_port);
1870
args->f_id.dst_port = dst_port = ntohs(dst_port);
1871
1872
IPFW_PF_RLOCK(chain);
1873
if (! V_ipfw_vnet_ready) { /* shutting down, leave NOW. */
1874
IPFW_PF_RUNLOCK(chain);
1875
return (IP_FW_PASS); /* accept */
1876
}
1877
if (args->flags & IPFW_ARGS_REF) {
1878
/*
1879
* Packet has already been tagged as a result of a previous
1880
* match on rule args->rule aka args->rule_id (PIPE, QUEUE,
1881
* REASS, NETGRAPH, DIVERT/TEE...)
1882
* Validate the slot and continue from the next one
1883
* if still present, otherwise do a lookup.
1884
*/
1885
f_pos = (args->rule.chain_id == chain->id) ?
1886
args->rule.slot :
1887
ipfw_find_rule(chain, args->rule.rulenum,
1888
args->rule.rule_id);
1889
} else {
1890
f_pos = 0;
1891
}
1892
1893
if (args->flags & IPFW_ARGS_IN) {
1894
iif = args->ifp;
1895
oif = NULL;
1896
} else {
1897
MPASS(args->flags & IPFW_ARGS_OUT);
1898
iif = mem ? NULL : m_rcvif(m);
1899
oif = args->ifp;
1900
}
1901
1902
/*
1903
* Now scan the rules, and parse microinstructions for each rule.
1904
* We have two nested loops and an inner switch. Sometimes we
1905
* need to break out of one or both loops, or re-enter one of
1906
* the loops with updated variables. Loop variables are:
1907
*
1908
* f_pos (outer loop) points to the current rule.
1909
* On output it points to the matching rule.
1910
* done (outer loop) is used as a flag to break the loop.
1911
* l (inner loop) residual length of current rule.
1912
* cmd points to the current microinstruction.
1913
*
1914
* We break the inner loop by setting l=0 and possibly
1915
* cmdlen=0 if we don't want to advance cmd.
1916
* We break the outer loop by setting done=1
1917
* We can restart the inner loop by setting l>0 and f_pos, f, cmd
1918
* as needed.
1919
*/
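/*
* As a rough illustration (assuming standard ipfw(8) syntax), a
* user-level rule such as
*
*	ipfw add 100 allow tcp from any to me 22
*
* is compiled into a sequence of microinstructions along the lines
* of O_PROTO(tcp), O_IP_DST_ME, O_IP_DSTPORT(22) followed by the
* action O_ACCEPT; the loops below walk exactly such sequences.
* The precise opcode layout is up to the userland compiler and is
* only sketched here.
*/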
1920
for (; f_pos < chain->n_rules; f_pos++) {
1921
ipfw_insn *cmd;
1922
uint32_t tablearg = 0;
1923
int l, cmdlen, skip_or; /* skip rest of OR block */
1924
struct ip_fw *f;
1925
1926
f = chain->map[f_pos];
1927
if (V_set_disable & (1 << f->set) )
1928
continue;
1929
1930
skip_or = 0;
1931
for (l = f->cmd_len, cmd = f->cmd ; l > 0 ;
1932
l -= cmdlen, cmd += cmdlen) {
1933
int match;
1934
1935
/*
1936
* check_body is a jump target used when we find a
1937
* CHECK_STATE, and need to jump to the body of
1938
* the target rule.
1939
*/
1940
1941
/* check_body: */
1942
cmdlen = F_LEN(cmd);
1943
/*
1944
* An OR block (insn_1 || .. || insn_n) has the
1945
* F_OR bit set in all but the last instruction.
1946
* The first match will set "skip_or", and cause
1947
* the following instructions to be skipped until
1948
* past the one with the F_OR bit clear.
1949
*/
1950
if (skip_or) { /* skip this instruction */
1951
if ((cmd->len & F_OR) == 0)
1952
skip_or = 0; /* next one is good */
1953
continue;
1954
}
1955
match = 0; /* set to 1 if we succeed */
1956
1957
switch (cmd->opcode) {
1958
/*
1959
* The first set of opcodes compares the packet's
1960
* fields with some pattern, setting 'match' if a
1961
* match is found. At the end of the loop there is
1962
* logic to deal with F_NOT and F_OR flags associated
1963
* with the opcode.
1964
*/
1965
case O_NOP:
1966
match = 1;
1967
break;
1968
1969
case O_FORWARD_MAC:
1970
printf("ipfw: opcode %d unimplemented\n",
1971
cmd->opcode);
1972
break;
1973
1974
case O_GID:
1975
case O_UID:
1976
case O_JAIL:
1977
/*
1978
* We only check offset == 0 && proto != 0,
1979
* as this ensures that we have a
1980
* packet with the ports info.
1981
*/
1982
if (offset != 0)
1983
break;
1984
if (proto == IPPROTO_TCP ||
1985
proto == IPPROTO_UDP ||
1986
proto == IPPROTO_UDPLITE)
1987
match = check_uidgid(
1988
(ipfw_insn_u32 *)cmd,
1989
args, &ucred_lookup,
1990
#ifdef __FreeBSD__
1991
&ucred_cache);
1992
#else
1993
(void *)&ucred_cache);
1994
#endif
1995
break;
1996
1997
case O_RECV:
1998
match = iface_match(iif, (ipfw_insn_if *)cmd,
1999
chain, &tablearg);
2000
break;
2001
2002
case O_XMIT:
2003
match = iface_match(oif, (ipfw_insn_if *)cmd,
2004
chain, &tablearg);
2005
break;
2006
2007
case O_VIA:
2008
match = iface_match(args->ifp,
2009
(ipfw_insn_if *)cmd, chain, &tablearg);
2010
break;
2011
2012
case O_MACADDR2:
2013
if (args->flags & IPFW_ARGS_ETHER) {
2014
u_int32_t *want = (u_int32_t *)
2015
((ipfw_insn_mac *)cmd)->addr;
2016
u_int32_t *mask = (u_int32_t *)
2017
((ipfw_insn_mac *)cmd)->mask;
2018
u_int32_t *hdr = (u_int32_t *)eh;
2019
2020
match =
2021
( want[0] == (hdr[0] & mask[0]) &&
2022
want[1] == (hdr[1] & mask[1]) &&
2023
want[2] == (hdr[2] & mask[2]) );
2024
}
2025
break;
2026
2027
case O_MAC_TYPE:
2028
if (args->flags & IPFW_ARGS_ETHER) {
2029
u_int16_t *p =
2030
((ipfw_insn_u16 *)cmd)->ports;
2031
int i;
2032
2033
for (i = cmdlen - 1; !match && i>0;
2034
i--, p += 2)
2035
match =
2036
(ntohs(eh->ether_type) >=
2037
p[0] &&
2038
ntohs(eh->ether_type) <=
2039
p[1]);
2040
}
2041
break;
2042
2043
case O_FRAG:
2044
if (is_ipv4) {
2045
/*
2046
* Since flags_match() works with
2047
* uint8_t we pack ip_off into 8 bits.
2048
* For this match offset is a boolean.
2049
*/
2050
match = flags_match(cmd,
2051
((ntohs(ip->ip_off) & ~IP_OFFMASK)
2052
>> 8) | (offset != 0));
2053
} else {
2054
/*
2055
* Compatibility: historically bare
2056
* "frag" would match IPv6 fragments.
2057
*/
2058
match = (cmd->arg1 == 0x1 &&
2059
(offset != 0));
2060
}
2061
break;
2062
2063
case O_IN: /* "out" is "not in" */
2064
match = (oif == NULL);
2065
break;
2066
2067
case O_LAYER2:
2068
match = (args->flags & IPFW_ARGS_ETHER);
2069
break;
2070
2071
case O_DIVERTED:
2072
if ((args->flags & IPFW_ARGS_REF) == 0)
2073
break;
2074
/*
2075
* For diverted packets, args->rule.info
* contains the divert port (in host format),
* reason and direction.
2078
*/
2079
match = ((args->rule.info & IPFW_IS_MASK) ==
2080
IPFW_IS_DIVERT) && (
2081
((args->rule.info & IPFW_INFO_IN) ?
2082
1: 2) & cmd->arg1);
2083
break;
2084
2085
case O_PROTO:
2086
/*
2087
* We do not allow an arg of 0 so the
2088
* check of "proto" only suffices.
2089
*/
2090
match = (proto == cmd->arg1);
2091
break;
2092
2093
case O_IP_SRC:
2094
match = is_ipv4 &&
2095
(((ipfw_insn_ip *)cmd)->addr.s_addr ==
2096
src_ip.s_addr);
2097
break;
2098
2099
case O_IP_DST_LOOKUP:
2100
if (IPFW_LOOKUP_TYPE(cmd) != LOOKUP_NONE) {
2101
void *pkey = NULL;
2102
uint32_t key, vidx;
2103
uint16_t keylen = 0; /* zero if can't match the packet */
2104
uint8_t lookup_type;
2105
2106
lookup_type = IPFW_LOOKUP_TYPE(cmd);
2107
2108
switch (lookup_type) {
2109
case LOOKUP_DST_IP:
2110
case LOOKUP_SRC_IP:
2111
if (is_ipv4) {
2112
keylen = sizeof(in_addr_t);
2113
if (lookup_type == LOOKUP_DST_IP)
2114
pkey = &dst_ip;
2115
else
2116
pkey = &src_ip;
2117
} else if (is_ipv6) {
2118
keylen = sizeof(struct in6_addr);
2119
if (lookup_type == LOOKUP_DST_IP)
2120
pkey = &args->f_id.dst_ip6;
2121
else
2122
pkey = &args->f_id.src_ip6;
2123
} else /* only for L3 */
2124
break;
2125
case LOOKUP_DSCP:
2126
if (is_ipv4)
2127
key = ip->ip_tos >> 2;
2128
else if (is_ipv6)
2129
key = IPV6_DSCP(
2130
(struct ip6_hdr *)ip) >> 2;
2131
else
2132
break; /* only for L3 */
2133
2134
key &= 0x3f;
2135
if (cmdlen == F_INSN_SIZE(ipfw_insn_table))
2136
key &= insntod(cmd, table)->value;
2137
pkey = &key;
2138
keylen = sizeof(key);
2139
break;
2140
case LOOKUP_DST_PORT:
2141
case LOOKUP_SRC_PORT:
2142
/* only for L3 */
2143
if (is_ipv6 == 0 && is_ipv4 == 0) {
2144
break;
2145
}
2146
/* Skip fragments */
2147
if (offset != 0) {
2148
break;
2149
}
2150
/* Skip proto without ports */
2151
if (proto != IPPROTO_TCP &&
2152
proto != IPPROTO_UDP &&
2153
proto != IPPROTO_UDPLITE &&
2154
proto != IPPROTO_SCTP)
2155
break;
2156
if (lookup_type == LOOKUP_DST_PORT)
2157
key = dst_port;
2158
else
2159
key = src_port;
2160
pkey = &key;
2161
if (cmdlen == F_INSN_SIZE(ipfw_insn_table))
2162
key &= insntod(cmd, table)->value;
2163
keylen = sizeof(key);
2164
break;
2165
case LOOKUP_DST_MAC:
2166
case LOOKUP_SRC_MAC:
2167
/* only for L2 */
2168
if ((args->flags & IPFW_ARGS_ETHER) == 0)
2169
break;
2170
2171
pkey = lookup_type == LOOKUP_DST_MAC ?
2172
eh->ether_dhost : eh->ether_shost;
2173
keylen = ETHER_ADDR_LEN;
2174
break;
2175
#ifndef USERSPACE
2176
case LOOKUP_UID:
2177
case LOOKUP_JAIL:
2178
check_uidgid(insntod(cmd, u32),
2179
args, &ucred_lookup,
2180
#ifdef __FreeBSD__
2181
&ucred_cache);
2182
if (lookup_type == LOOKUP_UID)
2183
key = ucred_cache->cr_uid;
2184
else if (lookup_type == LOOKUP_JAIL)
2185
key = ucred_cache->cr_prison->pr_id;
2186
#else /* !__FreeBSD__ */
2187
(void *)&ucred_cache);
2188
if (lookup_type == LOOKUP_UID)
2189
key = ucred_cache.uid;
2190
else if (lookup_type == LOOKUP_JAIL)
2191
key = ucred_cache.xid;
2192
#endif /* !__FreeBSD__ */
2193
pkey = &key;
2194
if (cmdlen == F_INSN_SIZE(ipfw_insn_table))
2195
key &= insntod(cmd, table)->value;
2196
keylen = sizeof(key);
2197
break;
2198
#endif /* !USERSPACE */
2199
case LOOKUP_MARK:
2200
key = args->rule.pkt_mark;
2201
if (cmdlen == F_INSN_SIZE(ipfw_insn_table))
2202
key &= insntod(cmd, table)->value;
2203
pkey = &key;
2204
keylen = sizeof(key);
2205
break;
2206
case LOOKUP_RULENUM:
2207
key = f->rulenum;
2208
if (cmdlen == F_INSN_SIZE(ipfw_insn_table))
2209
key &= insntod(cmd, table)->value;
2210
pkey = &key;
2211
keylen = sizeof(key);
2212
break;
2213
}
2214
/* unknown key type */
2215
if (keylen == 0)
2216
break;
2217
match = ipfw_lookup_table(chain,
2218
insntod(cmd, kidx)->kidx, keylen,
2219
pkey, &vidx);
2220
2221
if (match)
2222
tablearg = vidx;
2223
break;
2224
}
2225
/* LOOKUP_NONE */
2226
/* FALLTHROUGH */
2227
case O_IP_SRC_LOOKUP:
2228
{
2229
void *pkey;
2230
uint32_t vidx;
2231
uint16_t keylen;
2232
2233
if (is_ipv4) {
2234
keylen = sizeof(in_addr_t);
2235
if (cmd->opcode == O_IP_DST_LOOKUP)
2236
pkey = &dst_ip;
2237
else
2238
pkey = &src_ip;
2239
} else if (is_ipv6) {
2240
keylen = sizeof(struct in6_addr);
2241
if (cmd->opcode == O_IP_DST_LOOKUP)
2242
pkey = &args->f_id.dst_ip6;
2243
else
2244
pkey = &args->f_id.src_ip6;
2245
} else
2246
break;
2247
match = ipfw_lookup_table(chain,
2248
insntod(cmd, kidx)->kidx,
2249
keylen, pkey, &vidx);
2250
if (!match)
2251
break;
2252
if (cmdlen == F_INSN_SIZE(ipfw_insn_table)) {
2253
match = tvalue_match(chain,
2254
insntod(cmd, table), vidx);
2255
if (!match)
2256
break;
2257
}
2258
tablearg = vidx;
2259
break;
2260
}
2261
2262
case O_MAC_SRC_LOOKUP:
2263
case O_MAC_DST_LOOKUP:
2264
{
2265
void *pkey;
2266
uint32_t vidx;
2267
uint16_t keylen = ETHER_ADDR_LEN;
2268
2269
/* Need ether frame */
2270
if ((args->flags & IPFW_ARGS_ETHER) == 0)
2271
break;
2272
2273
if (cmd->opcode == O_MAC_DST_LOOKUP)
2274
pkey = eh->ether_dhost;
2275
else
2276
pkey = eh->ether_shost;
2277
2278
match = ipfw_lookup_table(chain,
2279
insntod(cmd, kidx)->kidx,
2280
keylen, pkey, &vidx);
2281
if (!match)
2282
break;
2283
if (cmdlen == F_INSN_SIZE(ipfw_insn_table)) {
2284
match = tvalue_match(chain,
2285
insntod(cmd, table), vidx);
2286
if (!match)
2287
break;
2288
}
2289
tablearg = vidx;
2290
break;
2291
}
2292
2293
case O_IP_FLOW_LOOKUP:
2294
{
2295
uint32_t vidx = 0;
2296
2297
match = ipfw_lookup_table(chain,
2298
insntod(cmd, kidx)->kidx, 0,
2299
&args->f_id, &vidx);
2300
if (!match)
2301
break;
2302
if (cmdlen == F_INSN_SIZE(ipfw_insn_table))
2303
match = tvalue_match(chain,
2304
insntod(cmd, table), vidx);
2305
if (match)
2306
tablearg = vidx;
2307
break;
2308
}
2309
2310
case O_IP_SRC_MASK:
2311
case O_IP_DST_MASK:
2312
if (is_ipv4) {
2313
uint32_t a =
2314
(cmd->opcode == O_IP_DST_MASK) ?
2315
dst_ip.s_addr : src_ip.s_addr;
2316
uint32_t *p = ((ipfw_insn_u32 *)cmd)->d;
2317
int i = cmdlen-1;
2318
2319
for (; !match && i>0; i-= 2, p+= 2)
2320
match = (p[0] == (a & p[1]));
2321
}
2322
break;
2323
2324
case O_IP_SRC_ME:
2325
if (is_ipv4) {
2326
match = in_localip(src_ip);
2327
break;
2328
}
2329
#ifdef INET6
2330
/* FALLTHROUGH */
2331
case O_IP6_SRC_ME:
2332
match = is_ipv6 &&
2333
ipfw_localip6(&args->f_id.src_ip6);
2334
#endif
2335
break;
2336
2337
case O_IP_DST_SET:
2338
case O_IP_SRC_SET:
2339
if (is_ipv4) {
2340
u_int32_t *d = (u_int32_t *)(cmd+1);
2341
u_int32_t addr =
2342
cmd->opcode == O_IP_DST_SET ?
2343
args->f_id.dst_ip :
2344
args->f_id.src_ip;
2345
2346
if (addr < d[0])
2347
break;
2348
addr -= d[0]; /* subtract base */
2349
match = (addr < cmd->arg1) &&
2350
( d[ 1 + (addr>>5)] &
2351
(1<<(addr & 0x1f)) );
2352
}
2353
break;
2354
2355
case O_IP_DST:
2356
match = is_ipv4 &&
2357
(((ipfw_insn_ip *)cmd)->addr.s_addr ==
2358
dst_ip.s_addr);
2359
break;
2360
2361
case O_IP_DST_ME:
2362
if (is_ipv4) {
2363
match = in_localip(dst_ip);
2364
break;
2365
}
2366
#ifdef INET6
2367
/* FALLTHROUGH */
2368
case O_IP6_DST_ME:
2369
match = is_ipv6 &&
2370
ipfw_localip6(&args->f_id.dst_ip6);
2371
#endif
2372
break;
2373
2374
case O_IP_SRCPORT:
2375
case O_IP_DSTPORT:
2376
/*
2377
* offset == 0 && proto != 0 is enough
2378
* to guarantee that we have a
2379
* packet with port info.
2380
*/
2381
if ((proto == IPPROTO_UDP ||
2382
proto == IPPROTO_UDPLITE ||
2383
proto == IPPROTO_TCP ||
2384
proto == IPPROTO_SCTP) && offset == 0) {
2385
u_int16_t x =
2386
(cmd->opcode == O_IP_SRCPORT) ?
2387
src_port : dst_port ;
2388
u_int16_t *p =
2389
((ipfw_insn_u16 *)cmd)->ports;
2390
int i;
2391
2392
for (i = cmdlen - 1; !match && i>0;
2393
i--, p += 2)
2394
match = (x>=p[0] && x<=p[1]);
2395
}
2396
break;
2397
2398
case O_ICMPTYPE:
2399
match = (offset == 0 && proto==IPPROTO_ICMP &&
2400
icmptype_match(ICMP(ulp), (ipfw_insn_u32 *)cmd) );
2401
break;
2402
2403
#ifdef INET6
2404
case O_ICMP6TYPE:
2405
match = is_ipv6 && offset == 0 &&
2406
proto==IPPROTO_ICMPV6 &&
2407
icmp6type_match(
2408
ICMP6(ulp)->icmp6_type,
2409
(ipfw_insn_u32 *)cmd);
2410
break;
2411
#endif /* INET6 */
2412
2413
case O_IPOPT:
2414
match = (is_ipv4 &&
2415
ipopts_match(ip, cmd) );
2416
break;
2417
2418
case O_IPVER:
2419
match = ((is_ipv4 || is_ipv6) &&
2420
cmd->arg1 == ip->ip_v);
2421
break;
2422
2423
case O_IPID:
2424
case O_IPTTL:
2425
if (!is_ipv4)
2426
break;
2427
case O_IPLEN:
2428
{ /* only for IP packets */
2429
uint16_t x;
2430
uint16_t *p;
2431
int i;
2432
2433
if (cmd->opcode == O_IPLEN)
2434
x = iplen;
2435
else if (cmd->opcode == O_IPTTL)
2436
x = ip->ip_ttl;
2437
else /* must be IPID */
2438
x = ntohs(ip->ip_id);
2439
if (cmdlen == 1) {
2440
match = (cmd->arg1 == x);
2441
break;
2442
}
2443
/* otherwise we have ranges */
2444
p = ((ipfw_insn_u16 *)cmd)->ports;
2445
i = cmdlen - 1;
2446
for (; !match && i>0; i--, p += 2)
2447
match = (x >= p[0] && x <= p[1]);
2448
}
2449
break;
2450
2451
case O_IPPRECEDENCE:
2452
match = (is_ipv4 &&
2453
(cmd->arg1 == (ip->ip_tos & 0xe0)) );
2454
break;
2455
2456
case O_IPTOS:
2457
match = (is_ipv4 &&
2458
flags_match(cmd, ip->ip_tos));
2459
break;
2460
2461
case O_DSCP:
2462
{
2463
uint32_t *p;
2464
uint16_t x;
2465
2466
p = ((ipfw_insn_u32 *)cmd)->d;
2467
2468
if (is_ipv4)
2469
x = ip->ip_tos >> 2;
2470
else if (is_ipv6) {
2471
x = IPV6_DSCP(
2472
(struct ip6_hdr *)ip) >> 2;
2473
x &= 0x3f;
2474
} else
2475
break;
2476
2477
/* DSCP bitmask is stored as low_u32 high_u32 */
2478
if (x >= 32)
2479
match = *(p + 1) & (1 << (x - 32));
2480
else
2481
match = *p & (1 << x);
2482
}
2483
break;
2484
2485
case O_TCPDATALEN:
2486
if (proto == IPPROTO_TCP && offset == 0) {
2487
struct tcphdr *tcp;
2488
uint16_t x;
2489
uint16_t *p;
2490
int i;
2491
#ifdef INET6
2492
if (is_ipv6) {
2493
struct ip6_hdr *ip6;
2494
2495
ip6 = (struct ip6_hdr *)ip;
2496
if (ip6->ip6_plen == 0) {
2497
/*
2498
* Jumbo payload is not
2499
* supported by this
2500
* opcode.
2501
*/
2502
break;
2503
}
2504
x = iplen - hlen;
2505
} else
2506
#endif /* INET6 */
2507
x = iplen - (ip->ip_hl << 2);
2508
tcp = TCP(ulp);
2509
x -= tcp->th_off << 2;
2510
if (cmdlen == 1) {
2511
match = (cmd->arg1 == x);
2512
break;
2513
}
2514
/* otherwise we have ranges */
2515
p = ((ipfw_insn_u16 *)cmd)->ports;
2516
i = cmdlen - 1;
2517
for (; !match && i>0; i--, p += 2)
2518
match = (x >= p[0] && x <= p[1]);
2519
}
2520
break;
2521
2522
case O_TCPFLAGS:
2523
/*
* Note that this is currently only set up to
* match the lower 8 TCP header flag bits, not
* the full complement of all 12 flags.
*/
2528
match = (proto == IPPROTO_TCP && offset == 0 &&
2529
flags_match(cmd, tcp_get_flags(TCP(ulp))));
2530
break;
2531
2532
case O_TCPOPTS:
2533
if (proto == IPPROTO_TCP && offset == 0 && ulp){
2534
PULLUP_LEN_LOCKED(hlen, ulp,
2535
(TCP(ulp)->th_off << 2));
2536
match = tcpopts_match(TCP(ulp), cmd);
2537
}
2538
break;
2539
2540
case O_TCPSEQ:
2541
match = (proto == IPPROTO_TCP && offset == 0 &&
2542
((ipfw_insn_u32 *)cmd)->d[0] ==
2543
TCP(ulp)->th_seq);
2544
break;
2545
2546
case O_TCPACK:
2547
match = (proto == IPPROTO_TCP && offset == 0 &&
2548
((ipfw_insn_u32 *)cmd)->d[0] ==
2549
TCP(ulp)->th_ack);
2550
break;
2551
2552
case O_TCPMSS:
2553
if (proto == IPPROTO_TCP &&
2554
(args->f_id._flags & TH_SYN) != 0 &&
2555
ulp != NULL) {
2556
uint16_t mss, *p;
2557
int i;
2558
2559
PULLUP_LEN_LOCKED(hlen, ulp,
2560
(TCP(ulp)->th_off << 2));
2561
if ((tcpopts_parse(TCP(ulp), &mss) &
2562
IP_FW_TCPOPT_MSS) == 0)
2563
break;
2564
if (cmdlen == 1) {
2565
match = (cmd->arg1 == mss);
2566
break;
2567
}
2568
/* Otherwise we have ranges. */
2569
p = ((ipfw_insn_u16 *)cmd)->ports;
2570
i = cmdlen - 1;
2571
for (; !match && i > 0; i--, p += 2)
2572
match = (mss >= p[0] &&
2573
mss <= p[1]);
2574
}
2575
break;
2576
2577
case O_TCPWIN:
2578
if (proto == IPPROTO_TCP && offset == 0) {
2579
uint16_t x;
2580
uint16_t *p;
2581
int i;
2582
2583
x = ntohs(TCP(ulp)->th_win);
2584
if (cmdlen == 1) {
2585
match = (cmd->arg1 == x);
2586
break;
2587
}
2588
/* Otherwise we have ranges. */
2589
p = ((ipfw_insn_u16 *)cmd)->ports;
2590
i = cmdlen - 1;
2591
for (; !match && i > 0; i--, p += 2)
2592
match = (x >= p[0] && x <= p[1]);
2593
}
2594
break;
2595
2596
case O_ESTAB:
2597
/* reject packets which have SYN only */
2598
/* XXX should i also check for TH_ACK ? */
2599
match = (proto == IPPROTO_TCP && offset == 0 &&
2600
(tcp_get_flags(TCP(ulp)) &
2601
(TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
2602
break;
2603
2604
case O_ALTQ: {
2605
struct pf_mtag *at;
2606
struct m_tag *mtag;
2607
ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd;
2608
2609
/*
2610
* ALTQ uses mbuf tags from another
2611
* packet filtering system - pf(4).
2612
* We allocate a tag in its format
2613
* and fill it in, pretending to be pf(4).
2614
*/
2615
match = 1;
2616
at = pf_find_mtag(m);
2617
if (at != NULL && at->qid != 0)
2618
break;
2619
mtag = m_tag_get(PACKET_TAG_PF,
2620
sizeof(struct pf_mtag), M_NOWAIT | M_ZERO);
2621
if (mtag == NULL) {
2622
/*
2623
* Let the packet fall back to the
2624
* default ALTQ.
2625
*/
2626
break;
2627
}
2628
m_tag_prepend(m, mtag);
2629
at = (struct pf_mtag *)(mtag + 1);
2630
at->qid = altq->qid;
2631
at->hdr = ip;
2632
break;
2633
}
2634
2635
case O_LOG:
2636
ipfw_log(chain, f, hlen, args,
2637
offset | ip6f_mf, tablearg, ip, eh);
2638
match = 1;
2639
break;
2640
2641
case O_PROB:
2642
match = (random()<((ipfw_insn_u32 *)cmd)->d[0]);
2643
break;
2644
2645
case O_VERREVPATH:
2646
/* Outgoing packets automatically pass/match */
2647
match = (args->flags & IPFW_ARGS_OUT ||
2648
(
2649
#ifdef INET6
2650
is_ipv6 ?
2651
verify_path6(&(args->f_id.src_ip6),
2652
iif, args->f_id.fib) :
2653
#endif
2654
verify_path(src_ip, iif, args->f_id.fib)));
2655
break;
2656
2657
case O_VERSRCREACH:
2658
/* Outgoing packets automatically pass/match */
2659
match = (hlen > 0 && ((oif != NULL) || (
2660
#ifdef INET6
2661
is_ipv6 ?
2662
verify_path6(&(args->f_id.src_ip6),
2663
NULL, args->f_id.fib) :
2664
#endif
2665
verify_path(src_ip, NULL, args->f_id.fib))));
2666
break;
2667
2668
case O_ANTISPOOF:
2669
/* Outgoing packets automatically pass/match */
2670
if (oif == NULL && hlen > 0 &&
2671
( (is_ipv4 && in_localaddr(src_ip))
2672
#ifdef INET6
2673
|| (is_ipv6 &&
2674
in6_localaddr(&(args->f_id.src_ip6)))
2675
#endif
2676
))
2677
match =
2678
#ifdef INET6
2679
is_ipv6 ? verify_path6(
2680
&(args->f_id.src_ip6), iif,
2681
args->f_id.fib) :
2682
#endif
2683
verify_path(src_ip, iif,
2684
args->f_id.fib);
2685
else
2686
match = 1;
2687
break;
2688
2689
case O_IPSEC:
2690
match = (m_tag_find(m,
2691
PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL);
2692
/* otherwise no match */
2693
break;
2694
2695
#ifdef INET6
2696
case O_IP6_SRC:
2697
match = is_ipv6 &&
2698
IN6_ARE_ADDR_EQUAL(&args->f_id.src_ip6,
2699
&((ipfw_insn_ip6 *)cmd)->addr6);
2700
break;
2701
2702
case O_IP6_DST:
2703
match = is_ipv6 &&
2704
IN6_ARE_ADDR_EQUAL(&args->f_id.dst_ip6,
2705
&((ipfw_insn_ip6 *)cmd)->addr6);
2706
break;
2707
case O_IP6_SRC_MASK:
2708
case O_IP6_DST_MASK:
2709
if (is_ipv6) {
2710
int i = cmdlen - 1;
2711
struct in6_addr p;
2712
struct in6_addr *d =
2713
&((ipfw_insn_ip6 *)cmd)->addr6;
2714
2715
for (; !match && i > 0; d += 2,
2716
i -= F_INSN_SIZE(struct in6_addr)
2717
* 2) {
2718
p = (cmd->opcode ==
2719
O_IP6_SRC_MASK) ?
2720
args->f_id.src_ip6:
2721
args->f_id.dst_ip6;
2722
APPLY_MASK(&p, &d[1]);
2723
match =
2724
IN6_ARE_ADDR_EQUAL(&d[0],
2725
&p);
2726
}
2727
}
2728
break;
2729
2730
case O_FLOW6ID:
2731
match = is_ipv6 &&
2732
flow6id_match(args->f_id.flow_id6,
2733
(ipfw_insn_u32 *) cmd);
2734
break;
2735
2736
case O_EXT_HDR:
2737
match = is_ipv6 &&
2738
(ext_hd & ((ipfw_insn *) cmd)->arg1);
2739
break;
2740
2741
case O_IP6:
2742
match = is_ipv6;
2743
break;
2744
#endif
2745
2746
case O_IP4:
2747
match = is_ipv4;
2748
break;
2749
2750
case O_TAG: {
2751
struct m_tag *mtag;
2752
uint32_t tag = TARG(cmd->arg1, tag);
2753
2754
/* Packet is already tagged with this tag? */
2755
mtag = m_tag_locate(m, MTAG_IPFW, tag, NULL);
2756
2757
/* The `untag' action is selected when the F_NOT flag is
* present: remove this mtag from the mbuf and reset
* `match' to zero (`match' is inverted later).
* Otherwise allocate a new mtag, if one is not already
* present, and prepend it to the mbuf.
*/
2764
if (cmd->len & F_NOT) { /* `untag' action */
2765
if (mtag != NULL)
2766
m_tag_delete(m, mtag);
2767
match = 0;
2768
} else {
2769
if (mtag == NULL) {
2770
mtag = m_tag_alloc( MTAG_IPFW,
2771
tag, 0, M_NOWAIT);
2772
if (mtag != NULL)
2773
m_tag_prepend(m, mtag);
2774
}
2775
match = 1;
2776
}
2777
break;
2778
}
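/*
* Illustration (assuming standard ipfw(8) syntax; rule numbers,
* tag values and addresses are arbitrary): tag state is usually
* driven by rules such as
*
*	ipfw add 100 count tag 42 ip from 10.0.0.0/8 to any
*	ipfw add 200 count untag 42 ip from any to 192.0.2.0/24
*	ipfw add 300 deny tagged 42
*
* where `untag' is this opcode with F_NOT set and `tagged' is
* handled by O_TAGGED below.
*/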
2779
2780
case O_FIB: /* try match the specified fib */
2781
if (args->f_id.fib == cmd->arg1)
2782
match = 1;
2783
break;
2784
2785
case O_SOCKARG: {
2786
#ifndef USERSPACE /* not supported in userspace */
2787
struct inpcb *inp = args->inp;
2788
struct inpcbinfo *pi;
2789
bool inp_locked = false;
2790
2791
if (proto == IPPROTO_TCP)
2792
pi = &V_tcbinfo;
2793
else if (proto == IPPROTO_UDP)
2794
pi = &V_udbinfo;
2795
else if (proto == IPPROTO_UDPLITE)
2796
pi = &V_ulitecbinfo;
2797
else
2798
break;
2799
2800
/*
2801
* XXXRW: so_user_cookie should almost
2802
* certainly be inp_user_cookie?
2803
*/
2804
2805
/*
2806
* For incoming packet lookup the inpcb
2807
* using the src/dest ip/port tuple.
2808
*/
2809
if (is_ipv4 && inp == NULL) {
2810
inp = in_pcblookup(pi,
2811
src_ip, htons(src_port),
2812
dst_ip, htons(dst_port),
2813
INPLOOKUP_RLOCKPCB, NULL);
2814
inp_locked = true;
2815
}
2816
#ifdef INET6
2817
if (is_ipv6 && inp == NULL) {
2818
inp = in6_pcblookup(pi,
2819
&args->f_id.src_ip6,
2820
htons(src_port),
2821
&args->f_id.dst_ip6,
2822
htons(dst_port),
2823
INPLOOKUP_RLOCKPCB, NULL);
2824
inp_locked = true;
2825
}
2826
#endif /* INET6 */
2827
if (inp != NULL) {
2828
if (inp->inp_socket) {
2829
tablearg =
2830
inp->inp_socket->so_user_cookie;
2831
if (tablearg)
2832
match = 1;
2833
}
2834
if (inp_locked)
2835
INP_RUNLOCK(inp);
2836
}
2837
#endif /* !USERSPACE */
2838
break;
2839
}
2840
2841
case O_TAGGED: {
2842
struct m_tag *mtag;
2843
uint32_t tag = TARG(cmd->arg1, tag);
2844
2845
if (cmdlen == 1) {
2846
match = m_tag_locate(m, MTAG_IPFW,
2847
tag, NULL) != NULL;
2848
break;
2849
}
2850
2851
/* we have ranges */
2852
for (mtag = m_tag_first(m);
2853
mtag != NULL && !match;
2854
mtag = m_tag_next(m, mtag)) {
2855
uint16_t *p;
2856
int i;
2857
2858
if (mtag->m_tag_cookie != MTAG_IPFW)
2859
continue;
2860
2861
p = ((ipfw_insn_u16 *)cmd)->ports;
2862
i = cmdlen - 1;
2863
for(; !match && i > 0; i--, p += 2)
2864
match =
2865
mtag->m_tag_id >= p[0] &&
2866
mtag->m_tag_id <= p[1];
2867
}
2868
break;
2869
}
2870
2871
case O_MARK: {
2872
uint32_t mark;
2873
if (cmd->arg1 == IP_FW_TARG)
2874
mark = TARG_VAL(chain, tablearg, mark);
2875
else
2876
mark = insntoc(cmd, u32)->d[0];
2877
match =
2878
(args->rule.pkt_mark &
2879
insntoc(cmd, u32)->d[1]) ==
2880
(mark & insntoc(cmd, u32)->d[1]);
2881
break;
2882
}
2883
2884
/*
2885
* The second set of opcodes represents 'actions',
2886
* i.e. the terminal part of a rule once the packet
2887
* matches all previous patterns.
2888
* Typically there is only one action for each rule,
2889
* and the opcode is stored at the end of the rule
2890
* (but there are exceptions -- see below).
2891
*
2892
* In general, here we set retval and terminate the
2893
* outer loop (would be a 'break 3' in some language,
2894
* but we need to set l=0, done=1)
2895
*
2896
* Exceptions:
2897
* O_COUNT and O_SKIPTO actions:
2898
* instead of terminating, we jump to the next rule
2899
* (setting l=0), or to the SKIPTO target (setting
2900
* f/f_len, cmd and l as needed), respectively.
2901
*
2902
* O_TAG, O_LOG and O_ALTQ action parameters:
2903
* perform some action and set match = 1;
2904
*
2905
* O_LIMIT and O_KEEP_STATE: these opcodes are
2906
* not real 'actions', and are stored right
2907
* before the 'action' part of the rule (one
2908
* exception is O_SKIP_ACTION, which may sit
* between these opcodes and the 'action' one).
2910
* These opcodes try to install an entry in the
2911
* state tables; if successful, we continue with
2912
* the next opcode (match=1; break;), otherwise
2913
* the packet must be dropped (set retval,
2914
* break loops with l=0, done=1)
2915
*
2916
* O_PROBE_STATE and O_CHECK_STATE: these opcodes
2917
* cause a lookup of the state table, and a jump
2918
* to the 'action' part of the parent rule
2919
* if an entry is found, or
2920
* (CHECK_STATE only) a jump to the next rule if
2921
* the entry is not found.
2922
* The result of the lookup is cached so that
2923
* further instances of these opcodes become NOPs.
2924
* The jump to the next rule is done by setting
2925
* l=0, cmdlen=0.
2926
*
2927
* O_SKIP_ACTION: this opcode is not a real 'action'
2928
* either, and is stored right before the 'action'
2929
* part of the rule, right after the O_KEEP_STATE
2930
* opcode. It forces a match failure, so the real
* 'action' can be executed only when the rule is
* reached via a dynamic rule from the state
* table, in which case execution starts
* directly at the true 'action' opcode.
2935
*
2936
*/
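/*
* Illustration (assuming standard ipfw(8) syntax; rule numbers are
* arbitrary): the keep-state/check-state pairing described above
* corresponds to rulesets such as
*
*	ipfw add 100 check-state
*	ipfw add 200 allow tcp from me to any setup keep-state
*
* where rule 200 installs a dynamic state entry and the check-state
* rule jumps to rule 200's action for packets matching that state.
*/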
2937
case O_LIMIT:
2938
case O_KEEP_STATE:
2939
if (ipfw_dyn_install_state(chain, f,
2940
(ipfw_insn_limit *)cmd, args, ulp,
2941
pktlen, &dyn_info, tablearg)) {
2942
/* error or limit violation */
2943
retval = IP_FW_DENY;
2944
l = 0; /* exit inner loop */
2945
done = 1; /* exit outer loop */
2946
}
2947
match = 1;
2948
break;
2949
2950
case O_PROBE_STATE:
2951
case O_CHECK_STATE:
2952
/*
2953
* dynamic rules are checked at the first
2954
* keep-state or check-state occurrence,
2955
* with the result being stored in dyn_info.
2956
* The compiler introduces a PROBE_STATE
2957
* instruction for us when we have a
2958
* KEEP_STATE (because PROBE_STATE needs
2959
* to be run first).
2960
*/
2961
if (DYN_LOOKUP_NEEDED(&dyn_info, cmd) &&
2962
(q = ipfw_dyn_lookup_state(args, ulp,
2963
pktlen, cmd, &dyn_info)) != NULL) {
2964
/*
2965
* Found dynamic entry, jump to the
2966
* 'action' part of the parent rule
2967
* by setting f, cmd, l and clearing
2968
* cmdlen.
2969
*/
2970
f = q;
2971
f_pos = dyn_info.f_pos;
2972
cmd = ACTION_PTR(f);
2973
l = f->cmd_len - f->act_ofs;
2974
cmdlen = 0;
2975
continue;
2976
}
2977
/*
2978
* Dynamic entry not found. If CHECK_STATE,
2979
* skip to next rule, if PROBE_STATE just
2980
* ignore and continue with next opcode.
2981
*/
2982
if (cmd->opcode == O_CHECK_STATE)
2983
l = 0; /* exit inner loop */
2984
match = 1;
2985
break;
2986
2987
case O_SKIP_ACTION:
2988
match = 0; /* skip to the next rule */
2989
l = 0; /* exit inner loop */
2990
break;
2991
2992
case O_ACCEPT:
2993
retval = 0; /* accept */
2994
l = 0; /* exit inner loop */
2995
done = 1; /* exit outer loop */
2996
break;
2997
2998
case O_PIPE:
2999
case O_QUEUE:
3000
set_match(args, f_pos, chain);
3001
args->rule.info = TARG(cmd->arg1, pipe);
3002
if (cmd->opcode == O_PIPE)
3003
args->rule.info |= IPFW_IS_PIPE;
3004
if (V_fw_one_pass)
3005
args->rule.info |= IPFW_ONEPASS;
3006
retval = IP_FW_DUMMYNET;
3007
l = 0; /* exit inner loop */
3008
done = 1; /* exit outer loop */
3009
break;
3010
3011
case O_DIVERT:
3012
case O_TEE:
3013
if (args->flags & IPFW_ARGS_ETHER)
3014
break; /* not on layer 2 */
3015
/* otherwise this is terminal */
3016
l = 0; /* exit inner loop */
3017
done = 1; /* exit outer loop */
3018
retval = (cmd->opcode == O_DIVERT) ?
3019
IP_FW_DIVERT : IP_FW_TEE;
3020
set_match(args, f_pos, chain);
3021
args->rule.info = TARG(cmd->arg1, divert);
3022
break;
3023
3024
case O_COUNT:
3025
IPFW_INC_RULE_COUNTER(f, pktlen);
3026
l = 0; /* exit inner loop */
3027
break;
3028
3029
case O_SKIPTO:
3030
IPFW_INC_RULE_COUNTER(f, pktlen);
3031
f_pos = jump(chain, f,
3032
insntod(cmd, u32)->d[0], tablearg, false);
3033
/*
3034
* Skip disabled rules, and re-enter
3035
* the inner loop with the correct
3036
* f_pos, f, l and cmd.
3037
* Also clear cmdlen and skip_or
3038
*/
3039
for (; f_pos < chain->n_rules - 1 &&
3040
(V_set_disable &
3041
(1 << chain->map[f_pos]->set));
3042
f_pos++)
3043
;
3044
/* Re-enter the inner loop at the skipto rule. */
3045
f = chain->map[f_pos];
3046
l = f->cmd_len;
3047
cmd = f->cmd;
3048
match = 1;
3049
cmdlen = 0;
3050
skip_or = 0;
3051
continue;
3052
break; /* not reached */
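/*
* Illustration (assuming standard ipfw(8) syntax; interface, table
* and rule numbers are arbitrary): O_SKIPTO is generated for rules
* such as
*
*	ipfw add 100 skipto 1000 ip from any to any via em0
*	ipfw add 100 skipto tablearg ip from table(1) to any
*
* where the tablearg form resolves the target through the table
* value in jump() above.
*/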
3053
3054
case O_CALLRETURN: {
3055
/*
3056
* Implementation of `subroutine' call/return,
3057
* in the stack carried in an mbuf tag. This
3058
* is different from `skipto' in that any call
3059
* address is possible (`skipto' must prevent
3060
* backward jumps to avoid endless loops).
3061
* We have `return' action when F_NOT flag is
3062
* present. The `m_tag_id' field is used as
3063
* stack pointer.
3064
*/
3065
struct m_tag *mtag;
3066
uint32_t jmpto, *stack;
3067
3068
#define IS_CALL ((cmd->len & F_NOT) == 0)
3069
#define IS_RETURN ((cmd->len & F_NOT) != 0)
3070
/*
3071
* Hand-rolled version of m_tag_locate() with
3072
* wildcard `type'.
3073
* If not already tagged, allocate new tag.
3074
*/
3075
mtag = m_tag_first(m);
3076
while (mtag != NULL) {
3077
if (mtag->m_tag_cookie ==
3078
MTAG_IPFW_CALL)
3079
break;
3080
mtag = m_tag_next(m, mtag);
3081
}
3082
3083
/*
* We keep the ruleset id in the first element of the
* stack. If it doesn't match chain->id, the information
* in the stack cannot be trusted because the rules have
* changed. Reset the stack pointer so the tag can be
* reused if it is needed again.
*/
3091
if (mtag != NULL) {
3092
stack = (uint32_t *)(mtag + 1);
3093
if (stack[0] != chain->id) {
3094
stack[0] = chain->id;
3095
mtag->m_tag_id = 0;
3096
}
3097
}
3098
3099
/*
3100
* If there is no mtag or stack is empty,
3101
* `return` continues with next rule.
3102
*/
3103
if (IS_RETURN && (mtag == NULL ||
3104
mtag->m_tag_id == 0)) {
3105
l = 0; /* exit inner loop */
3106
break;
3107
}
3108
3109
if (mtag == NULL) {
3110
MPASS(IS_CALL);
3111
mtag = m_tag_alloc(MTAG_IPFW_CALL, 0,
3112
IPFW_CALLSTACK_SIZE *
3113
sizeof(uint32_t), M_NOWAIT);
3114
if (mtag != NULL) {
3115
m_tag_prepend(m, mtag);
3116
stack = (uint32_t *)(mtag + 1);
3117
stack[0] = chain->id;
3118
}
3119
}
3120
3121
if (mtag == NULL) {
3122
printf("ipfw: rule %u: failed to "
3123
"allocate call stack. "
3124
"Denying packet.\n",
3125
f->rulenum);
3126
l = 0; /* exit inner loop */
3127
done = 1; /* exit outer loop */
3128
retval = IP_FW_DENY; /* drop packet */
3129
break;
3130
}
3131
3132
if (IS_CALL && mtag->m_tag_id >=
3133
IPFW_CALLSTACK_SIZE - 1) {
3134
printf("ipfw: rule %u: call stack "
3135
"overflow. Denying packet.\n",
3136
f->rulenum);
3137
l = 0; /* exit inner loop */
3138
done = 1; /* exit outer loop */
3139
retval = IP_FW_DENY; /* drop packet */
3140
break;
3141
}
3142
3143
MPASS(stack == (uint32_t *)(mtag + 1));
3144
IPFW_INC_RULE_COUNTER(f, pktlen);
3145
3146
if (IS_CALL) {
3147
stack[++mtag->m_tag_id] = f_pos;
3148
f_pos = jump(chain, f,
3149
insntod(cmd, u32)->d[0],
3150
tablearg, true);
3151
} else { /* `return' action */
3152
jmpto = stack[mtag->m_tag_id--];
3153
if (cmd->arg1 == RETURN_NEXT_RULE)
3154
f_pos = jmpto + 1;
3155
else /* RETURN_NEXT_RULENUM */
3156
f_pos = ipfw_find_rule(chain,
3157
chain->map[
3158
jmpto]->rulenum + 1, 0);
3159
}
3160
3161
/*
3162
* Skip disabled rules, and re-enter
3163
* the inner loop with the correct
3164
* f_pos, f, l and cmd.
3165
* Also clear cmdlen and skip_or
3166
*/
3167
MPASS(f_pos < chain->n_rules - 1);
3168
for (; f_pos < chain->n_rules - 1 &&
3169
(V_set_disable &
3170
(1 << chain->map[f_pos]->set)); f_pos++)
3171
;
3172
/*
3173
* Re-enter the inner loop at the dest
3174
* rule.
3175
*/
3176
f = chain->map[f_pos];
3177
l = f->cmd_len;
3178
cmd = f->cmd;
3179
cmdlen = 0;
3180
skip_or = 0;
3181
continue;
3182
break; /* NOTREACHED */
3183
}
3184
#undef IS_CALL
3185
#undef IS_RETURN
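/*
* Illustration (assuming standard ipfw(8) syntax; rule numbers are
* arbitrary): the call/return mechanism above is driven by rules
* such as
*
*	ipfw add 100 call 1000 ip from any to any in
*	ipfw add 1000 count ip from any to any
*	ipfw add 1010 return
*
* so that the `return' resumes processing after rule 100, using the
* per-packet stack kept in the mbuf tag.
*/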
3186
3187
case O_REJECT:
3188
/*
3189
* Drop the packet and send a reject notice
3190
* if the packet is not ICMP (or is an ICMP
3191
* query), and it is not multicast/broadcast.
3192
*/
3193
if (hlen > 0 && is_ipv4 && offset == 0 &&
3194
(proto != IPPROTO_ICMP ||
3195
is_icmp_query(ICMP(ulp))) &&
3196
!(m->m_flags & (M_BCAST|M_MCAST)) &&
3197
!IN_MULTICAST(ntohl(dst_ip.s_addr))) {
3198
KASSERT(!need_send_reject,
3199
("o_reject - need_send_reject was set previously"));
3200
if ((reject_code = cmd->arg1) == ICMP_UNREACH_NEEDFRAG &&
3201
cmd->len == F_INSN_SIZE(ipfw_insn_u16)) {
3202
reject_mtu =
3203
((ipfw_insn_u16 *)cmd)->ports[0];
3204
} else {
3205
reject_mtu = 0;
3206
}
3207
need_send_reject = true;
3208
}
3209
/* FALLTHROUGH */
3210
#ifdef INET6
3211
case O_UNREACH6:
3212
if (hlen > 0 && is_ipv6 &&
3213
((offset & IP6F_OFF_MASK) == 0) &&
3214
(proto != IPPROTO_ICMPV6 ||
3215
(is_icmp6_query(icmp6_type) == 1)) &&
3216
!(m->m_flags & (M_BCAST|M_MCAST)) &&
3217
!IN6_IS_ADDR_MULTICAST(
3218
&args->f_id.dst_ip6)) {
3219
KASSERT(!need_send_reject,
3220
("o_unreach6 - need_send_reject was set previously"));
3221
reject_code = cmd->arg1;
3222
if (cmd->opcode == O_REJECT) {
3223
reject_code =
3224
map_icmp_unreach(reject_code);
3225
}
3226
need_send_reject = true;
3227
}
3228
/* FALLTHROUGH */
3229
#endif
3230
case O_DENY:
3231
retval = IP_FW_DENY;
3232
l = 0; /* exit inner loop */
3233
done = 1; /* exit outer loop */
3234
break;
3235
3236
case O_FORWARD_IP:
3237
if (args->flags & IPFW_ARGS_ETHER)
3238
break; /* not valid on layer2 pkts */
3239
if (q != f ||
3240
dyn_info.direction == MATCH_FORWARD) {
3241
struct sockaddr_in *sa;
3242
3243
sa = &(((ipfw_insn_sa *)cmd)->sa);
3244
if (sa->sin_addr.s_addr == INADDR_ANY) {
3245
#ifdef INET6
3246
/*
3247
* The O_FORWARD_IP opcode is used for
* fwd rules with tablearg, but tables
* now support IPv6 addresses. When we
* are inspecting an IPv6 packet, the
* nh6 field from table_value can be
* used as the next_hop6 address.
3253
*/
3254
if (is_ipv6) {
3255
struct ip_fw_nh6 *nh6;
3256
3257
args->flags |= IPFW_ARGS_NH6;
3258
nh6 = &args->hopstore6;
3259
nh6->sin6_addr = TARG_VAL(
3260
chain, tablearg, nh6);
3261
nh6->sin6_port = sa->sin_port;
3262
nh6->sin6_scope_id = TARG_VAL(
3263
chain, tablearg, zoneid);
3264
} else
3265
#endif
3266
{
3267
args->flags |= IPFW_ARGS_NH4;
3268
args->hopstore.sin_port =
3269
sa->sin_port;
3270
sa = &args->hopstore;
3271
sa->sin_family = AF_INET;
3272
sa->sin_len = sizeof(*sa);
3273
sa->sin_addr.s_addr = htonl(
3274
TARG_VAL(chain, tablearg,
3275
nh4));
3276
}
3277
} else {
3278
args->flags |= IPFW_ARGS_NH4PTR;
3279
args->next_hop = sa;
3280
}
3281
}
3282
retval = IP_FW_PASS;
3283
l = 0; /* exit inner loop */
3284
done = 1; /* exit outer loop */
3285
break;
3286
3287
#ifdef INET6
3288
case O_FORWARD_IP6:
3289
if (args->flags & IPFW_ARGS_ETHER)
3290
break; /* not valid on layer2 pkts */
3291
if (q != f ||
3292
dyn_info.direction == MATCH_FORWARD) {
3293
struct sockaddr_in6 *sin6;
3294
3295
sin6 = &(((ipfw_insn_sa6 *)cmd)->sa);
3296
args->flags |= IPFW_ARGS_NH6PTR;
3297
args->next_hop6 = sin6;
3298
}
3299
retval = IP_FW_PASS;
3300
l = 0; /* exit inner loop */
3301
done = 1; /* exit outer loop */
3302
break;
3303
#endif
3304
3305
case O_NETGRAPH:
3306
case O_NGTEE:
3307
set_match(args, f_pos, chain);
3308
args->rule.info = TARG(cmd->arg1, netgraph);
3309
if (V_fw_one_pass)
3310
args->rule.info |= IPFW_ONEPASS;
3311
retval = (cmd->opcode == O_NETGRAPH) ?
3312
IP_FW_NETGRAPH : IP_FW_NGTEE;
3313
l = 0; /* exit inner loop */
3314
done = 1; /* exit outer loop */
3315
break;
3316
3317
case O_SETFIB: {
3318
uint32_t fib;
3319
3320
IPFW_INC_RULE_COUNTER(f, pktlen);
3321
fib = TARG(cmd->arg1, fib) & 0x7FFF;
3322
if (fib >= rt_numfibs)
3323
fib = 0;
3324
M_SETFIB(m, fib);
3325
args->f_id.fib = fib; /* XXX */
3326
l = 0; /* exit inner loop */
3327
break;
3328
}
3329
3330
case O_SETDSCP: {
3331
uint16_t code;
3332
3333
code = TARG(cmd->arg1, dscp) & 0x3F;
3334
l = 0; /* exit inner loop */
3335
if (is_ipv4) {
3336
uint16_t old;
3337
3338
old = *(uint16_t *)ip;
3339
ip->ip_tos = (code << 2) |
3340
(ip->ip_tos & 0x03);
3341
ip->ip_sum = cksum_adjust(ip->ip_sum,
3342
old, *(uint16_t *)ip);
3343
} else if (is_ipv6) {
3344
/* update cached value */
3345
args->f_id.flow_id6 =
3346
ntohl(*(uint32_t *)ip) & ~0x0FC00000;
3347
args->f_id.flow_id6 |= code << 22;
3348
3349
*((uint32_t *)ip) =
3350
htonl(args->f_id.flow_id6);
3351
} else
3352
break;
3353
3354
IPFW_INC_RULE_COUNTER(f, pktlen);
3355
break;
3356
}
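/*
* Illustration (assuming standard ipfw(8) syntax; addresses and
* codepoint are arbitrary): O_SETDSCP corresponds to rules such as
*
*	ipfw add 100 setdscp ef ip from 10.0.0.1 to any
*
* which rewrite the DSCP bits (adjusting the IPv4 checksum or the
* cached IPv6 flow word as done above) and then continue with the
* next rule.
*/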
3357
3358
case O_NAT:
3359
l = 0; /* exit inner loop */
3360
done = 1; /* exit outer loop */
3361
/*
3362
* Ensure that we do not invoke the NAT handler for
* non-IPv4 packets; libalias expects only IPv4.
3364
*/
3365
if (!is_ipv4 || !IPFW_NAT_LOADED) {
3366
retval = IP_FW_DENY;
3367
break;
3368
}
3369
3370
struct cfg_nat *t;
3371
int nat_id;
3372
3373
args->rule.info = 0;
3374
set_match(args, f_pos, chain);
3375
/* Check if this is 'global' nat rule */
3376
if (cmd->arg1 == IP_FW_NAT44_GLOBAL) {
3377
retval = ipfw_nat_ptr(args, NULL, m);
3378
break;
3379
}
3380
t = ((ipfw_insn_nat *)cmd)->nat;
3381
if (t == NULL) {
3382
nat_id = TARG(cmd->arg1, nat);
3383
t = (*lookup_nat_ptr)(&chain->nat, nat_id);
3384
3385
if (t == NULL) {
3386
retval = IP_FW_DENY;
3387
break;
3388
}
3389
if (cmd->arg1 != IP_FW_TARG)
3390
((ipfw_insn_nat *)cmd)->nat = t;
3391
}
3392
retval = ipfw_nat_ptr(args, t, m);
3393
break;
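/*
* Illustration (assuming standard ipfw(8) syntax; instance number
* and interface are arbitrary): the O_NAT action is normally
* configured and referenced from userland as, e.g.,
*
*	ipfw nat 1 config if em0
*	ipfw add 100 nat 1 ip from any to any via em0
*
* with `nat global' and `nat tablearg' mapping to the
* IP_FW_NAT44_GLOBAL and IP_FW_TARG cases handled above.
*/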
3394
3395
case O_REASS: {
3396
int ip_off;
3397
3398
l = 0; /* in any case exit inner loop */
3399
if (is_ipv6) /* IPv6 is not supported yet */
3400
break;
3401
IPFW_INC_RULE_COUNTER(f, pktlen);
3402
ip_off = ntohs(ip->ip_off);
3403
3404
/* if not fragmented, go to next rule */
3405
if ((ip_off & (IP_MF | IP_OFFMASK)) == 0)
3406
break;
3407
3408
args->m = m = ip_reass(m);
3409
3410
/*
3411
* do IP header checksum fixup.
3412
*/
3413
if (m == NULL) { /* fragment got swallowed */
3414
retval = IP_FW_DENY;
3415
} else { /* good, packet complete */
3416
int hlen;
3417
3418
ip = mtod(m, struct ip *);
3419
hlen = ip->ip_hl << 2;
3420
ip->ip_sum = 0;
3421
if (hlen == sizeof(struct ip))
3422
ip->ip_sum = in_cksum_hdr(ip);
3423
else
3424
ip->ip_sum = in_cksum(m, hlen);
3425
retval = IP_FW_REASS;
3426
args->rule.info = 0;
3427
set_match(args, f_pos, chain);
3428
}
3429
done = 1; /* exit outer loop */
3430
break;
3431
}
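/*
* Illustration (assuming standard ipfw(8) syntax; the rule number is
* arbitrary): reassembly is typically requested early in a ruleset,
* e.g.
*
*	ipfw add 50 reass all from any to any in
*
* so that later rules see whole IPv4 datagrams; as coded above,
* IPv6 packets simply fall through to the next rule.
*/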
3432
3433
case O_SETMARK: {
3434
l = 0; /* exit inner loop */
3435
args->rule.pkt_mark = (
3436
(cmd->arg1 == IP_FW_TARG) ?
3437
TARG_VAL(chain, tablearg, mark) :
3438
insntoc(cmd, u32)->d[0]);
3439
3440
IPFW_INC_RULE_COUNTER(f, pktlen);
3441
break;
3442
}
3443
3444
case O_EXTERNAL_ACTION:
3445
l = 0; /* in any case exit inner loop */
3446
retval = ipfw_run_eaction(chain, args,
3447
cmd, &done);
3448
/*
3449
* If both @retval and @done are zero,
3450
* consider this as rule matching and
3451
* update counters.
3452
*/
3453
if (retval == 0 && done == 0) {
3454
IPFW_INC_RULE_COUNTER(f, pktlen);
3455
/*
3456
* Reset the result of the last
3457
* dynamic state lookup.
3458
* External action can change
3459
* @args content, and it may be
3460
* used for new state lookup later.
3461
*/
3462
DYN_INFO_INIT(&dyn_info);
3463
}
3464
break;
3465
3466
default:
3467
panic("ipfw: rule %u: unknown opcode %d\n",
3468
f->rulenum, cmd->opcode);
3469
} /* end of switch() on opcodes */
3470
/*
3471
* if we get here with l=0, then match is irrelevant.
3472
*/
3473
3474
if (cmd->len & F_NOT)
3475
match = !match;
3476
3477
if (match) {
3478
if (cmd->len & F_OR)
3479
skip_or = 1;
3480
} else {
3481
if (!(cmd->len & F_OR)) /* not an OR block, */
3482
break; /* try next rule */
3483
}
3484
3485
} /* end of inner loop, scan opcodes */
3486
#undef PULLUP_LEN
3487
#undef PULLUP_LEN_LOCKED
3488
3489
if (done)
3490
break;
3491
3492
/* next_rule:; */ /* try next rule */
3493
3494
} /* end of outer for, scan rules */
3495
3496
if (done) {
3497
struct ip_fw *rule = chain->map[f_pos];
3498
/* Update statistics */
3499
IPFW_INC_RULE_COUNTER(rule, pktlen);
3500
IPFW_PROBE(rule__matched, retval,
3501
is_ipv4 ? AF_INET : AF_INET6,
3502
is_ipv4 ? (uintptr_t)&src_ip :
3503
(uintptr_t)&args->f_id.src_ip6,
3504
is_ipv4 ? (uintptr_t)&dst_ip :
3505
(uintptr_t)&args->f_id.dst_ip6,
3506
args, rule);
3507
} else {
3508
retval = IP_FW_DENY;
3509
printf("ipfw: ouch!, skip past end of rules, denying packet\n");
3510
}
3511
IPFW_PF_RUNLOCK(chain);
3512
if (need_send_reject) {
3513
#ifdef INET6
3514
if (is_ipv6)
3515
send_reject6(args, reject_code, hlen,
3516
(struct ip6_hdr *)ip);
3517
else
3518
#endif
3519
send_reject(args, reject_code, reject_mtu,
3520
iplen, ip);
3521
}
3522
#ifdef __FreeBSD__
3523
if (ucred_cache != NULL)
3524
crfree(ucred_cache);
3525
#endif
3526
return (retval);
3527
3528
pullup_failed:
3529
if (V_fw_verbose)
3530
printf("ipfw: pullup failed\n");
3531
return (IP_FW_DENY);
3532
}
3533
3534
/*
3535
* Set maximum number of tables that can be used in given VNET ipfw instance.
3536
*/
3537
#ifdef SYSCTL_NODE
3538
static int
3539
sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS)
3540
{
3541
int error;
3542
unsigned int ntables;
3543
3544
ntables = V_fw_tables_max;
3545
3546
error = sysctl_handle_int(oidp, &ntables, 0, req);
3547
/* Read operation or some error */
3548
if ((error != 0) || (req->newptr == NULL))
3549
return (error);
3550
3551
return (ipfw_resize_tables(&V_layer3_chain, ntables));
3552
}
3553
3554
/*
3555
* Switches table namespace between global and per-set.
3556
*/
3557
static int
3558
sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS)
3559
{
3560
int error;
3561
unsigned int sets;
3562
3563
sets = V_fw_tables_sets;
3564
3565
error = sysctl_handle_int(oidp, &sets, 0, req);
3566
/* Read operation or some error */
3567
if ((error != 0) || (req->newptr == NULL))
3568
return (error);
3569
3570
return (ipfw_switch_tables_namespace(&V_layer3_chain, sets));
3571
}
3572
#endif
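/*
* Illustration (sysctl names assumed from the usual ipfw sysctl
* tree, not verified here): the handlers above back run-time knobs
* typically tuned as
*
*	sysctl net.inet.ip.fw.tables_max=1024
*	sysctl net.inet.ip.fw.tables_sets=1
*
* the first resizing the per-VNET table space, the second switching
* between the global and per-set table namespaces.
*/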
3573
3574
/*
3575
* Module and VNET glue
3576
*/
3577
3578
/*
3579
* Stuff that must be initialised only on boot or module load
3580
*/
3581
static int
3582
ipfw_init(void)
3583
{
3584
int error = 0;
3585
3586
/*
3587
* Only print out this stuff the first time around,
3588
* when called from the sysinit code.
3589
*/
3590
printf("ipfw2 "
3591
#ifdef INET6
3592
"(+ipv6) "
3593
#endif
3594
"initialized, divert %s, nat %s, "
3595
"default to %s, logging ",
3596
#ifdef IPDIVERT
3597
"enabled",
3598
#else
3599
"loadable",
3600
#endif
3601
#ifdef IPFIREWALL_NAT
3602
"enabled",
3603
#else
3604
"loadable",
3605
#endif
3606
default_to_accept ? "accept" : "deny");
3607
3608
/*
3609
* Note: V_xxx variables can be accessed here but the vnet specific
3610
* initializer may not have been called yet for the VIMAGE case.
3611
* Tuneables will have been processed. We will print out values for
3612
* the default vnet.
3613
* XXX This should all be rationalized AFTER 8.0
3614
*/
3615
if (V_fw_verbose == 0)
3616
printf("disabled\n");
3617
else if (V_verbose_limit == 0)
3618
printf("unlimited\n");
3619
else
3620
printf("limited to %d packets/entry by default\n",
3621
V_verbose_limit);
3622
3623
/* Check user-supplied table count for validity */
3624
if (default_fw_tables > IPFW_TABLES_MAX)
3625
default_fw_tables = IPFW_TABLES_MAX;
3626
3627
ipfw_init_sopt_handler();
3628
ipfw_init_obj_rewriter();
3629
ipfw_iface_init();
3630
return (error);
3631
}
3632
3633
/*
3634
* Called for the removal of the last instance only on module unload.
3635
*/
3636
static void
3637
ipfw_destroy(void)
3638
{
3639
3640
ipfw_iface_destroy();
3641
ipfw_destroy_sopt_handler();
3642
ipfw_destroy_obj_rewriter();
3643
printf("IP firewall unloaded\n");
3644
}
3645
3646
/*
3647
* Stuff that must be initialized for every instance
3648
* (including the first of course).
3649
*/
3650
static int
3651
vnet_ipfw_init(const void *unused)
3652
{
3653
int error, first;
3654
struct ip_fw *rule = NULL;
3655
struct ip_fw_chain *chain;
3656
3657
chain = &V_layer3_chain;
3658
3659
first = IS_DEFAULT_VNET(curvnet) ? 1 : 0;
3660
3661
/* First set up some values that are compile time options */
3662
V_autoinc_step = 100; /* bounded to 1..1000 in add_rule() */
3663
V_fw_deny_unknown_exthdrs = 1;
3664
#ifdef IPFIREWALL_VERBOSE
3665
V_fw_verbose = 1;
3666
#endif
3667
#ifdef IPFIREWALL_VERBOSE_LIMIT
3668
V_verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
3669
#endif
3670
#ifdef IPFIREWALL_NAT
3671
LIST_INIT(&chain->nat);
3672
#endif
3673
3674
/* Init shared services hash table */
3675
ipfw_init_srv(chain);
3676
3677
ipfw_init_counters();
3678
/* Set initial number of tables */
3679
V_fw_tables_max = default_fw_tables;
3680
error = ipfw_init_tables(chain, first);
3681
if (error) {
3682
printf("ipfw2: setting up tables failed\n");
3683
free(chain->map, M_IPFW);
3684
free(rule, M_IPFW);
3685
return (ENOSPC);
3686
}
3687
3688
IPFW_LOCK_INIT(chain);
3689
3690
ipfw_dyn_init(chain);
3691
/* fill and insert the default rule */
3692
rule = ipfw_alloc_rule(chain, sizeof(struct ip_fw));
3693
rule->flags |= IPFW_RULE_NOOPT;
3694
rule->cmd_len = 1;
3695
rule->cmd[0].len = 1;
3696
rule->cmd[0].opcode = default_to_accept ? O_ACCEPT : O_DENY;
3697
chain->default_rule = rule;
3698
ipfw_add_protected_rule(chain, rule, 0);
3699
3700
ipfw_eaction_init(chain, first);
3701
ipfw_init_skipto_cache(chain);
3702
ipfw_bpf_init(first);
3703
3704
/* First set up some values that are compile time options */
3705
V_ipfw_vnet_ready = 1; /* Open for business */
3706
3707
/*
3708
* Hook the sockopt handler and pfil hooks for ipv4 and ipv6.
3709
* Even if the latter two fail we still keep the module alive
3710
* because the sockopt and layer2 paths are still useful.
3711
* ipfw[6]_hook return 0 on success, ENOENT on failure,
3712
* so we can ignore the exact return value and just set a flag.
3713
*
3714
* Note that V_fw[6]_enable are manipulated by a SYSCTL_PROC so
3715
* changes in the underlying (per-vnet) variables trigger
3716
* immediate hook()/unhook() calls.
3717
* In layer2 we have the same behaviour, except that V_ether_ipfw
3718
* is checked on each packet because there are no pfil hooks.
3719
*/
3720
V_ip_fw_ctl_ptr = ipfw_ctl3;
3721
error = ipfw_attach_hooks();
3722
return (error);
3723
}
3724
3725
/*
3726
* Called for the removal of each instance.
3727
*/
3728
static int
3729
vnet_ipfw_uninit(const void *unused)
3730
{
3731
struct ip_fw *reap;
3732
struct ip_fw_chain *chain = &V_layer3_chain;
3733
int i, last;
3734
3735
V_ipfw_vnet_ready = 0; /* tell new callers to go away */
3736
/*
* Disconnect from ipv4, ipv6, layer2 and sockopt.
* Then grab, release and grab again the WLOCK so we make
* sure the update is propagated and no caller is still
* inside the firewall.
*/
3741
ipfw_detach_hooks();
3742
V_ip_fw_ctl_ptr = NULL;
3743
3744
last = IS_DEFAULT_VNET(curvnet) ? 1 : 0;
3745
3746
IPFW_UH_WLOCK(chain);
3747
IPFW_UH_WUNLOCK(chain);
3748
3749
ipfw_dyn_uninit(0); /* run the callout_drain */
3750
3751
IPFW_UH_WLOCK(chain);
3752
3753
reap = NULL;
3754
IPFW_WLOCK(chain);
3755
for (i = 0; i < chain->n_rules; i++)
3756
ipfw_reap_add(chain, &reap, chain->map[i]);
3757
free(chain->map, M_IPFW);
3758
ipfw_destroy_skipto_cache(chain);
3759
IPFW_WUNLOCK(chain);
3760
IPFW_UH_WUNLOCK(chain);
3761
ipfw_destroy_tables(chain, last);
3762
ipfw_eaction_uninit(chain, last);
3763
if (reap != NULL)
3764
ipfw_reap_rules(reap);
3765
vnet_ipfw_iface_destroy(chain);
3766
ipfw_destroy_srv(chain);
3767
IPFW_LOCK_DESTROY(chain);
3768
ipfw_dyn_uninit(1); /* free the remaining parts */
3769
ipfw_destroy_counters();
3770
ipfw_bpf_uninit(last);
3771
return (0);
3772
}
3773
3774
/*
3775
* Module event handler.
3776
* In general we have the choice of handling most of these events by the
3777
* event handler or by the (VNET_)SYS(UN)INIT handlers. I have chosen to
3778
* use the SYSINIT handlers as they are more capable of expressing the
3779
* flow of control during module and vnet operations, so this is just
3780
* a skeleton. Note there is no SYSINIT equivalent of the module
3781
* SHUTDOWN handler, but we don't have anything to do in that case anyhow.
3782
*/
3783
static int
3784
ipfw_modevent(module_t mod, int type, void *unused)
3785
{
3786
int err = 0;
3787
3788
switch (type) {
3789
case MOD_LOAD:
3790
/* Called once at module load or
3791
* system boot if compiled in. */
3792
break;
3793
case MOD_QUIESCE:
3794
/* Called before unload. May veto unloading. */
3795
break;
3796
case MOD_UNLOAD:
3797
/* Called during unload. */
3798
break;
3799
case MOD_SHUTDOWN:
3800
/* Called during system shutdown. */
3801
break;
3802
default:
3803
err = EOPNOTSUPP;
3804
break;
3805
}
3806
return err;
3807
}
3808
3809
static moduledata_t ipfwmod = {
3810
"ipfw",
3811
ipfw_modevent,
3812
0
3813
};
3814
3815
/* Define startup order. */
3816
#define IPFW_SI_SUB_FIREWALL SI_SUB_PROTO_FIREWALL
3817
#define IPFW_MODEVENT_ORDER (SI_ORDER_ANY - 255) /* On boot slot in here. */
3818
#define IPFW_MODULE_ORDER (IPFW_MODEVENT_ORDER + 1) /* A little later. */
3819
#define IPFW_VNET_ORDER (IPFW_MODEVENT_ORDER + 2) /* Later still. */
3820
3821
DECLARE_MODULE(ipfw, ipfwmod, IPFW_SI_SUB_FIREWALL, IPFW_MODEVENT_ORDER);
3822
FEATURE(ipfw_ctl3, "ipfw new sockopt calls");
3823
MODULE_VERSION(ipfw, 3);
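/*
* Illustration: with the declarations above the firewall can be
* loaded and inspected as a kernel module, e.g.
*
*	kldload ipfw
*	kldstat -v | grep ipfw
*	kldunload ipfw
*
* subject to the MOD_QUIESCE/MOD_UNLOAD handling in ipfw_modevent().
*/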
3824
/* should declare some dependencies here */
3825
3826
/*
3827
* Starting up. Done in order after ipfwmod() has been called.
3828
* VNET_SYSINIT is also called for each existing vnet and each new vnet.
3829
*/
3830
SYSINIT(ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER,
3831
ipfw_init, NULL);
3832
VNET_SYSINIT(vnet_ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER,
3833
vnet_ipfw_init, NULL);
3834
3835
/*
3836
* Closing up shop. These are done in REVERSE ORDER, but still
3837
* after ipfwmod() has been called. Not called on reboot.
3838
* VNET_SYSUNINIT is also called for each exiting vnet as it exits,
* or when the module is unloaded.
3840
*/
3841
SYSUNINIT(ipfw_destroy, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER,
3842
ipfw_destroy, NULL);
3843
VNET_SYSUNINIT(vnet_ipfw_uninit, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER,
3844
vnet_ipfw_uninit, NULL);
3845
/* end of file */