#include <sys/cdefs.h>
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/ck.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <net/vnet.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#ifdef NEW_AQM
#include <netpfil/ipfw/dn_aqm.h>
#endif
#include <netpfil/ipfw/dn_sched.h>
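/* flags selecting the objects emitted by copy_data_helper() */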
#define DN_C_LINK 0x01
#define DN_C_SCH 0x02
#define DN_C_FLOW 0x04
#define DN_C_FS 0x08
#define DN_C_QUEUE 0x10
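/* argument for schk_new(), passed via dn_ht_find() */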
struct schk_new_arg {
struct dn_alg *fp;
struct dn_sch *sch;
};
static struct callout dn_timeout;
static int dn_tasks_started = 0;
static int dn_gone;
static struct task dn_task;
static struct taskqueue *dn_tq = NULL;
struct mtx sched_mtx;
CK_LIST_HEAD(, dn_alg) schedlist;
#ifdef NEW_AQM
CK_LIST_HEAD(, dn_aqm) aqmlist;
#endif
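/*
 * Callout handler, invoked every tick: hand the periodic work
 * (dn_task) over to the taskqueue.
 */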
static void
dummynet(void *arg)
{
(void)arg;
taskqueue_enqueue(dn_tq, &dn_task);
}
void
dummynet_sched_lock(void)
{
mtx_lock(&sched_mtx);
}
void
dummynet_sched_unlock(void)
{
mtx_unlock(&sched_mtx);
}
void
dn_reschedule(void)
{
if (dn_gone != 0)
return;
callout_reset_sbt(&dn_timeout, tick_sbt, 0, dummynet, NULL,
C_HARDCLOCK | C_DIRECT_EXEC);
}
#ifdef NEW_AQM
static struct dn_aqm *
find_aqm_type(int type, char *name)
{
struct dn_aqm *d;
NET_EPOCH_ASSERT();
CK_LIST_FOREACH(d, &aqmlist, next) {
if (d->type == type || (name && !strcasecmp(d->name, name)))
return d;
}
return NULL;
}
#endif
static struct dn_alg *
find_sched_type(int type, char *name)
{
struct dn_alg *d;
NET_EPOCH_ASSERT();
CK_LIST_FOREACH(d, &schedlist, next) {
if (d->type == type || (name && !strcasecmp(d->name, name)))
return d;
}
return NULL;
}
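/*
 * Clamp *v into [lo, hi]: values below lo are bumped to the (bounded)
 * default, values above hi are clamped to hi.  The result is returned
 * and, with bootverbose and a non-NULL msg, the change is logged, e.g.
 *
 *	ipdn_bound_var((int *)&nfs->buckets, V_dn_cfg.hash_size,
 *	    1, V_dn_cfg.max_hash_size, "flowset buckets");
 */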
int
ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg)
{
int oldv = *v;
const char *op = NULL;
if (dflt < lo)
dflt = lo;
if (dflt > hi)
dflt = hi;
if (oldv < lo) {
*v = dflt;
op = "Bump";
} else if (oldv > hi) {
*v = hi;
op = "Clamp";
} else
return *v;
if (op && msg && bootverbose)
printf("%s %s to %d (was %d)\n", op, msg, *v, oldv);
return *v;
}
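/* mask a flow_id in place and return a pointer to it */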
static struct ipfw_flow_id *
flow_id_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id)
{
int is_v6 = IS_IP6_FLOW_ID(id);
id->dst_port &= mask->dst_port;
id->src_port &= mask->src_port;
id->proto &= mask->proto;
id->extra &= mask->extra;
if (is_v6) {
APPLY_MASK(&id->dst_ip6, &mask->dst_ip6);
APPLY_MASK(&id->src_ip6, &mask->src_ip6);
id->flow_id6 &= mask->flow_id6;
} else {
id->dst_ip &= mask->dst_ip;
id->src_ip &= mask->src_ip;
}
return id;
}
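/*
 * OR the bits of "src" into "dst"; used to fold the scheduler mask
 * into a flowset mask.
 */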
static struct ipfw_flow_id *
flow_id_or(struct ipfw_flow_id *src, struct ipfw_flow_id *dst)
{
int is_v6 = IS_IP6_FLOW_ID(dst);
dst->dst_port |= src->dst_port;
dst->src_port |= src->src_port;
dst->proto |= src->proto;
dst->extra |= src->extra;
if (is_v6) {
#define OR_MASK(_d, _s) \
(_d)->__u6_addr.__u6_addr32[0] |= (_s)->__u6_addr.__u6_addr32[0]; \
(_d)->__u6_addr.__u6_addr32[1] |= (_s)->__u6_addr.__u6_addr32[1]; \
(_d)->__u6_addr.__u6_addr32[2] |= (_s)->__u6_addr.__u6_addr32[2]; \
(_d)->__u6_addr.__u6_addr32[3] |= (_s)->__u6_addr.__u6_addr32[3];
OR_MASK(&dst->dst_ip6, &src->dst_ip6);
OR_MASK(&dst->src_ip6, &src->src_ip6);
#undef OR_MASK
dst->flow_id6 |= src->flow_id6;
} else {
dst->dst_ip |= src->dst_ip;
dst->src_ip |= src->src_ip;
}
return dst;
}
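/* return non-zero if any field of the mask is set */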
static int
nonzero_mask(struct ipfw_flow_id *m)
{
if (m->dst_port || m->src_port || m->proto || m->extra)
return 1;
if (IS_IP6_FLOW_ID(m)) {
return
m->dst_ip6.__u6_addr.__u6_addr32[0] ||
m->dst_ip6.__u6_addr.__u6_addr32[1] ||
m->dst_ip6.__u6_addr.__u6_addr32[2] ||
m->dst_ip6.__u6_addr.__u6_addr32[3] ||
m->src_ip6.__u6_addr.__u6_addr32[0] ||
m->src_ip6.__u6_addr.__u6_addr32[1] ||
m->src_ip6.__u6_addr.__u6_addr32[2] ||
m->src_ip6.__u6_addr.__u6_addr32[3] ||
m->flow_id6;
} else {
return m->dst_ip || m->src_ip;
}
}
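/* XOR-fold the flow_id fields into a 32-bit hash value */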
static uint32_t
flow_id_hash(struct ipfw_flow_id *id)
{
uint32_t i;
if (IS_IP6_FLOW_ID(id)) {
uint32_t *d = (uint32_t *)&id->dst_ip6;
uint32_t *s = (uint32_t *)&id->src_ip6;
i = (d[0]) ^ (d[1]) ^ (d[2]) ^ (d[3]) ^
(d[0] >> 15) ^ (d[1] >> 15) ^
(d[2] >> 15) ^ (d[3] >> 15) ^
(s[0] << 1) ^ (s[1] << 1) ^
(s[2] << 1) ^ (s[3] << 1) ^
(s[0] << 16) ^ (s[1] << 16) ^
(s[2] << 16) ^ (s[3] << 16) ^
(id->dst_port << 1) ^ (id->src_port) ^
(id->extra) ^
(id->proto) ^ (id->flow_id6);
} else {
i = (id->dst_ip) ^ (id->dst_ip >> 15) ^
(id->src_ip << 1) ^ (id->src_ip >> 16) ^
(id->extra) ^
(id->dst_port << 1) ^ (id->src_port) ^ (id->proto);
}
return i;
}
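/* compare two flow_ids, return 0 on match (memcmp-like convention) */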
static int
flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2)
{
int is_v6 = IS_IP6_FLOW_ID(id1);
if (!is_v6) {
if (IS_IP6_FLOW_ID(id2))
return 1;
return (id1->dst_ip == id2->dst_ip &&
id1->src_ip == id2->src_ip &&
id1->dst_port == id2->dst_port &&
id1->src_port == id2->src_port &&
id1->proto == id2->proto &&
id1->extra == id2->extra) ? 0 : 1;
}
return (
!bcmp(&id1->dst_ip6,&id2->dst_ip6, sizeof(id1->dst_ip6)) &&
!bcmp(&id1->src_ip6,&id2->src_ip6, sizeof(id1->src_ip6)) &&
id1->dst_port == id2->dst_port &&
id1->src_port == id2->src_port &&
id1->proto == id2->proto &&
id1->extra == id2->extra &&
id1->flow_id6 == id2->flow_id6) ? 0 : 1;
}
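/*
 * Hash-table callbacks for queues: depending on DNHT_KEY_IS_OBJ the
 * key is either a queue (use its flow id) or a flow id itself.
 */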
static uint32_t
q_hash(uintptr_t key, int flags, void *arg)
{
struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
&((struct dn_queue *)key)->ni.fid :
(struct ipfw_flow_id *)key;
return flow_id_hash(id);
}
static int
q_match(void *obj, uintptr_t key, int flags, void *arg)
{
struct dn_queue *o = (struct dn_queue *)obj;
struct ipfw_flow_id *id2;
if (flags & DNHT_KEY_IS_OBJ) {
id2 = &((struct dn_queue *)key)->ni.fid;
} else {
id2 = (struct ipfw_flow_id *)key;
}
return (0 == flow_id_cmp(&o->ni.fid, id2));
}
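/*
 * Create a new queue; "arg" is a template queue carrying the flowset
 * and the scheduler instance the new queue belongs to.
 */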
static void *
q_new(uintptr_t key, int flags, void *arg)
{
struct dn_queue *q, *template = arg;
struct dn_fsk *fs = template->fs;
int size = sizeof(*q) + fs->sched->fp->q_datalen;
q = malloc(size, M_DUMMYNET, M_NOWAIT | M_ZERO);
if (q == NULL) {
D("no memory for new queue");
return NULL;
}
set_oid(&q->ni.oid, DN_QUEUE, size);
if (fs->fs.flags & DN_QHT_HASH)
q->ni.fid = *(struct ipfw_flow_id *)key;
q->fs = fs;
q->_si = template->_si;
q->_si->q_count++;
if (fs->sched->fp->new_queue)
fs->sched->fp->new_queue(q);
#ifdef NEW_AQM
if (fs->aqmfp && fs->aqmfp->init)
if (fs->aqmfp->init(q))
D("unable to init AQM for fs %d", fs->fs.fs_nr);
#endif
V_dn_cfg.queue_count++;
return q;
}
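/*
 * Notify the scheduler that a queue is going away; with DN_DESTROY
 * also drop the pending packets and free the queue itself.
 * q_delete_cb() is the hash-table callback wrapper around it.
 */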
static void
dn_delete_queue(struct dn_queue *q, int flags)
{
struct dn_fsk *fs = q->fs;
#ifdef NEW_AQM
if (fs && fs->aqmfp && fs->aqmfp->cleanup)
fs->aqmfp->cleanup(q);
#endif
if (fs && fs->sched->fp->free_queue)
fs->sched->fp->free_queue(q);
q->_si->q_count--;
q->_si = NULL;
if (flags & DN_DESTROY) {
if (q->mq.head)
dn_free_pkts(q->mq.head);
bzero(q, sizeof(*q));
free(q, M_DUMMYNET);
V_dn_cfg.queue_count--;
}
}
static int
q_delete_cb(void *q, void *arg)
{
int flags = (int)(uintptr_t)arg;
dn_delete_queue(q, flags);
return (flags & DN_DESTROY) ? DNHT_SCAN_DEL : 0;
}
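/*
 * Delete all queues of a flowset; qht is either a hash table
 * (DN_QHT_HASH) or a single queue.
 */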
static void
qht_delete(struct dn_fsk *fs, int flags)
{
ND("fs %d start flags %d qht %p",
fs->fs.fs_nr, flags, fs->qht);
if (!fs->qht)
return;
if (fs->fs.flags & DN_QHT_HASH) {
dn_ht_scan(fs->qht, q_delete_cb, (void *)(uintptr_t)flags);
if (flags & DN_DESTROY) {
dn_ht_free(fs->qht, 0);
fs->qht = NULL;
}
} else {
dn_delete_queue((struct dn_queue *)(fs->qht), flags);
if (flags & DN_DESTROY)
fs->qht = NULL;
}
}
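/*
 * Find or create the queue for the (flowset, instance, flow id)
 * triplet, creating the queue hash table on first use.
 */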
struct dn_queue *
ipdn_q_find(struct dn_fsk *fs, struct dn_sch_inst *si,
struct ipfw_flow_id *id)
{
struct dn_queue template;
template._si = si;
template.fs = fs;
if (fs->fs.flags & DN_QHT_HASH) {
struct ipfw_flow_id masked_id;
if (fs->qht == NULL) {
fs->qht = dn_ht_init(NULL, fs->fs.buckets,
offsetof(struct dn_queue, q_next),
q_hash, q_match, q_new);
if (fs->qht == NULL)
return NULL;
}
masked_id = *id;
flow_id_mask(&fs->fsk_mask, &masked_id);
return dn_ht_find(fs->qht, (uintptr_t)&masked_id,
DNHT_INSERT, &template);
} else {
if (fs->qht == NULL)
fs->qht = q_new(0, 0, &template);
return (struct dn_queue *)fs->qht;
}
}
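/* hash-table callbacks for scheduler instances, analogous to q_hash/q_match */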
static uint32_t
si_hash(uintptr_t key, int flags, void *arg)
{
struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
&((struct dn_sch_inst *)key)->ni.fid :
(struct ipfw_flow_id *)key;
return flow_id_hash(id);
}
static int
si_match(void *obj, uintptr_t key, int flags, void *arg)
{
struct dn_sch_inst *o = obj;
struct ipfw_flow_id *id2;
id2 = (flags & DNHT_KEY_IS_OBJ) ?
&((struct dn_sch_inst *)key)->ni.fid :
(struct ipfw_flow_id *)key;
return flow_id_cmp(&o->ni.fid, id2) == 0;
}
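/*
 * Create a scheduler instance together with its delay line;
 * "arg" is the scheduler template (struct dn_schk).
 */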
static void *
si_new(uintptr_t key, int flags, void *arg)
{
struct dn_schk *s = arg;
struct dn_sch_inst *si;
int l = sizeof(*si) + s->fp->si_datalen;
si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
if (si == NULL)
goto error;
set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow));
set_oid(&(si->dline.oid), DN_DELAY_LINE,
sizeof(struct delay_line));
si->ni.oid.id = si->dline.oid.id = -1;
si->sched = s;
si->dline.si = si;
if (s->fp->new_sched && s->fp->new_sched(si)) {
D("new_sched error");
goto error;
}
if (s->sch.flags & DN_HAVE_MASK)
si->ni.fid = *(struct ipfw_flow_id *)key;
#ifdef NEW_AQM
if (!(s->fp->flags & DN_MULTIQUEUE))
if (s->fs->aqmfp && s->fs->aqmfp->init)
if (s->fs->aqmfp->init((struct dn_queue *)(si + 1))) {
D("unable to init AQM for fs %d", s->fs->fs.fs_nr);
goto error;
}
#endif
V_dn_cfg.si_count++;
return si;
error:
if (si) {
bzero(si, sizeof(*si));
free(si, M_DUMMYNET);
}
return NULL;
}
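/*
 * Callback to destroy a scheduler instance: extract it and its delay
 * line from the event heap, drop queued packets, and free the memory.
 */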
static int
si_destroy(void *_si, void *arg)
{
struct dn_sch_inst *si = _si;
struct dn_schk *s = si->sched;
struct delay_line *dl = &si->dline;
if (dl->oid.subtype)
heap_extract(&V_dn_cfg.evheap, dl);
dn_free_pkts(dl->mq.head);
if (si->kflags & DN_ACTIVE)
heap_extract(&V_dn_cfg.evheap, si);
#ifdef NEW_AQM
if (!(s->fp->flags & DN_MULTIQUEUE)) {
struct dn_queue *q = (struct dn_queue *)(si + 1);
if (q->aqm_status && q->fs->aqmfp)
if (q->fs->aqmfp->cleanup)
q->fs->aqmfp->cleanup(q);
}
#endif
if (s->fp->free_sched)
s->fp->free_sched(si);
bzero(si, sizeof(*si));
free(si, M_DUMMYNET);
V_dn_cfg.si_count--;
return DNHT_SCAN_DEL;
}
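/* find or create the scheduler instance for flow id "id" */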
struct dn_sch_inst *
ipdn_si_find(struct dn_schk *s, struct ipfw_flow_id *id)
{
if (s->sch.flags & DN_HAVE_MASK) {
struct ipfw_flow_id id_t = *id;
flow_id_mask(&s->sch.sched_mask, &id_t);
return dn_ht_find(s->siht, (uintptr_t)&id_t,
DNHT_INSERT, s);
}
if (!s->siht)
s->siht = si_new(0, 0, s);
return (struct dn_sch_inst *)s->siht;
}
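/* reset the credit of scheduler instances after a link reconfiguration */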
static int
si_reset_credit(void *_si, void *arg)
{
struct dn_sch_inst *si = _si;
struct dn_link *p = &si->sched->link;
si->credit = p->burst + (V_dn_cfg.io_fast ? p->bandwidth : 0);
return 0;
}
static void
schk_reset_credit(struct dn_schk *s)
{
if (s->sch.flags & DN_HAVE_MASK)
dn_ht_scan(s->siht, si_reset_credit, NULL);
else if (s->siht)
si_reset_credit(s->siht, NULL);
}
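/*
 * Hash-table callbacks for flowsets, keyed by flowset number.
 * New flowsets start on the "unlinked" list V_dn_cfg.fsu until
 * they are attached to a scheduler.
 */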
static uint32_t
fsk_hash(uintptr_t key, int flags, void *arg)
{
uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
((struct dn_fsk *)key)->fs.fs_nr;
return ((i >> 8) ^ (i >> 4) ^ i);
}
static int
fsk_match(void *obj, uintptr_t key, int flags, void *arg)
{
struct dn_fsk *fs = obj;
int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
((struct dn_fsk *)key)->fs.fs_nr;
return (fs->fs.fs_nr == i);
}
static void *
fsk_new(uintptr_t key, int flags, void *arg)
{
struct dn_fsk *fs;
fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO);
if (fs) {
set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs));
V_dn_cfg.fsk_count++;
fs->drain_bucket = 0;
SLIST_INSERT_HEAD(&V_dn_cfg.fsu, fs, sch_chain);
}
return fs;
}
#ifdef NEW_AQM
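/*
 * Helpers to release the AQM state of all queues of a flowset and to
 * deconfigure the AQM itself (aqm_cleanup_deconfig_fs()).
 */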
static int
si_cleanup_q(void *_si, void *arg)
{
struct dn_sch_inst *si = _si;
if (!(si->sched->fp->flags & DN_MULTIQUEUE)) {
if (si->sched->fs->aqmfp && si->sched->fs->aqmfp->cleanup)
si->sched->fs->aqmfp->cleanup((struct dn_queue *) (si+1));
}
return 0;
}
static int
q_cleanup_q(void *_q, void *arg)
{
struct dn_queue *q = _q;
q->fs->aqmfp->cleanup(q);
return 0;
}
static void
aqm_cleanup_deconfig_fs(struct dn_fsk *fs)
{
struct dn_sch_inst *si;
if (fs->fs.fs_nr > DN_MAX_ID) {
if (fs->sched && !(fs->sched->fp->flags & DN_MULTIQUEUE)) {
if (fs->sched->sch.flags & DN_HAVE_MASK)
dn_ht_scan(fs->sched->siht, si_cleanup_q, NULL);
else {
si = (struct dn_sch_inst *) fs->sched->siht;
if (si && fs->aqmfp && fs->aqmfp->cleanup)
fs->aqmfp->cleanup((struct dn_queue *) (si+1));
}
}
}
if (fs->sched && (fs->sched->fp->flags & DN_MULTIQUEUE) && fs->qht) {
if (fs->fs.flags & DN_QHT_HASH)
dn_ht_scan(fs->qht, q_cleanup_q, NULL);
else
fs->aqmfp->cleanup((struct dn_queue *)(fs->qht));
}
if (fs->aqmcfg && fs->aqmfp && fs->aqmfp->deconfig)
fs->aqmfp->deconfig(fs);
}
#endif
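/*
 * Detach a flowset from its scheduler and/or destroy it:
 * DN_DETACH removes it from the parent list, DN_DESTROY frees the
 * queues, DN_DELETE_FS (which implies DN_DESTROY) also frees the
 * flowset itself; otherwise the flowset returns to the unlinked list.
 */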
static void
fsk_detach(struct dn_fsk *fs, int flags)
{
if (flags & DN_DELETE_FS)
flags |= DN_DESTROY;
ND("fs %d from sched %d flags %s %s %s",
fs->fs.fs_nr, fs->fs.sched_nr,
(flags & DN_DELETE_FS) ? "DEL_FS":"",
(flags & DN_DESTROY) ? "DEL":"",
(flags & DN_DETACH) ? "DET":"");
if (flags & DN_DETACH) {
struct dn_fsk_head *h;
h = fs->sched ? &fs->sched->fsk_list : &V_dn_cfg.fsu;
SLIST_REMOVE(h, fs, dn_fsk, sch_chain);
}
free(fs->w_q_lookup, M_DUMMYNET);
fs->w_q_lookup = NULL;
qht_delete(fs, flags);
#ifdef NEW_AQM
aqm_cleanup_deconfig_fs(fs);
#endif
if (fs->sched && fs->sched->fp->free_fsk)
fs->sched->fp->free_fsk(fs);
fs->sched = NULL;
if (flags & DN_DELETE_FS) {
bzero(fs, sizeof(*fs));
free(fs, M_DUMMYNET);
V_dn_cfg.fsk_count--;
} else {
SLIST_INSERT_HEAD(&V_dn_cfg.fsu, fs, sch_chain);
}
}
static void
fsk_detach_list(struct dn_fsk_head *h, int flags)
{
struct dn_fsk *fs;
int n __unused = 0;
ND("head %p flags %x", h, flags);
while ((fs = SLIST_FIRST(h))) {
SLIST_REMOVE_HEAD(h, sch_chain);
n++;
fsk_detach(fs, flags);
}
ND("done %d flowsets", n);
}
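/* remove flowset i from the hash table and destroy it; takes the BH lock unless "locked" */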
static int
delete_fs(int i, int locked)
{
struct dn_fsk *fs;
int err = 0;
if (!locked)
DN_BH_WLOCK();
fs = dn_ht_find(V_dn_cfg.fshash, i, DNHT_REMOVE, NULL);
ND("fs %d found %p", i, fs);
if (fs) {
fsk_detach(fs, DN_DETACH | DN_DELETE_FS);
err = 0;
} else
err = EINVAL;
if (!locked)
DN_BH_WUNLOCK();
return err;
}
static uint32_t
schk_hash(uintptr_t key, int flags, void *_arg)
{
uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
((struct dn_schk *)key)->sch.sched_nr;
return ((i >> 8) ^ (i >> 4) ^ i);
}
static int
schk_match(void *obj, uintptr_t key, int flags, void *_arg)
{
struct dn_schk *s = (struct dn_schk *)obj;
int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
((struct dn_schk *)key)->sch.sched_nr;
return (s->sch.sched_nr == i);
}
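/*
 * Create a scheduler template; for masked schedulers, also create
 * the hash table of instances.
 */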
static void *
schk_new(uintptr_t key, int flags, void *arg)
{
struct schk_new_arg *a = arg;
struct dn_schk *s;
int l = sizeof(*s) + a->fp->schk_datalen;
s = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
if (s == NULL)
return NULL;
set_oid(&s->link.oid, DN_LINK, sizeof(s->link));
s->sch = *a->sch;
s->link.link_nr = s->sch.sched_nr;
SLIST_INIT(&s->fsk_list);
s->fp = a->fp; /* si_new() may need this */
s->drain_bucket = 0;
if (s->sch.flags & DN_HAVE_MASK) {
s->siht = dn_ht_init(NULL, s->sch.buckets,
offsetof(struct dn_sch_inst, si_next),
si_hash, si_match, si_new);
if (s->siht == NULL) {
free(s, M_DUMMYNET);
return NULL;
}
}
s->fp = NULL; /* mark as a new scheduler, see config_sched() */
V_dn_cfg.schk_count++;
return s;
}
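/*
 * Callback to delete a scheduler template: detach its flowsets,
 * destroy all instances and the profile, then free the memory.
 */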
static int
schk_delete_cb(void *obj, void *arg)
{
struct dn_schk *s = obj;
#if 0
int a = (int)arg;
ND("sched %d arg %s%s",
s->sch.sched_nr,
a&DN_DESTROY ? "DEL ":"",
a&DN_DELETE_FS ? "DEL_FS":"");
#endif
fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0);
if (s->sch.flags & DN_HAVE_MASK) {
dn_ht_scan(s->siht, si_destroy, NULL);
dn_ht_free(s->siht, 0);
} else if (s->siht)
si_destroy(s->siht, NULL);
free(s->profile, M_DUMMYNET);
s->profile = NULL;
s->siht = NULL;
if (s->fp->destroy)
s->fp->destroy(s);
bzero(s, sizeof(*s));
free(obj, M_DUMMYNET);
V_dn_cfg.schk_count--;
return DNHT_SCAN_DEL;
}
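/*
 * Remove scheduler i from the hash table and destroy it together
 * with its internal flowset (numbered i + DN_MAX_ID).
 */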
static int
delete_schk(int i)
{
struct dn_schk *s;
s = dn_ht_find(V_dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
ND("%d %p", i, s);
if (!s)
return EINVAL;
delete_fs(i + DN_MAX_ID, 1);
schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY);
return 0;
}
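/*
 * Copy an object to the userland buffer at *start, advancing the
 * pointer.  Links and schedulers need massaging: units are converted
 * back to the userland representation, and the number of instances
 * is reported in oid.id.  Returns non-zero if the object does not fit.
 */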
static int
copy_obj(char **start, char *end, void *_o, const char *msg, int i)
{
struct dn_id o;
union {
struct dn_link l;
struct dn_schk s;
} dn;
int have = end - *start;
memcpy(&o, _o, sizeof(o));
if (have < o.len || o.len == 0 || o.type == 0) {
D("(WARN) type %d %s %d have %d need %d",
o.type, msg, i, have, o.len);
return 1;
}
ND("type %d %s %d len %d", o.type, msg, i, o.len);
if (o.type == DN_LINK) {
memcpy(&dn.l, _o, sizeof(dn.l));
/* undo the scaling applied by config_link() */
dn.l.burst = div64(dn.l.burst, 8 * hz);
dn.l.delay = dn.l.delay * 1000 / hz; /* ticks -> ms */
memcpy(*start, &dn.l, sizeof(dn.l));
} else if (o.type == DN_SCH) {
memcpy(&dn.s, _o, sizeof(dn.s));
dn.s.sch.oid.id = (dn.s.sch.flags & DN_HAVE_MASK) ?
dn_ht_entries(dn.s.siht) : (dn.s.siht ? 1 : 0);
memcpy(*start, &dn.s, sizeof(dn.s));
} else
memcpy(*start, _o, o.len);
*start += o.len;
return 0;
}
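/*
 * Copy only the dn_flow part of a queue to userland (enough for
 * statistics); the emitted object is relabeled with the shorter length.
 */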
static int
copy_obj_q(char **start, char *end, void *_o, const char *msg, int i)
{
struct dn_id *o = _o;
int have = end - *start;
int len = sizeof(struct dn_flow);
if (have < len || o->len == 0 || o->type != DN_QUEUE) {
D("ERROR type %d %s %d have %d need %d",
o->type, msg, i, have, len);
return 1;
}
ND("type %d %s %d len %d", o->type, msg, i, len);
memcpy(*start, _o, len);
((struct dn_id*)(*start))->len = len;
*start += len;
return 0;
}
static int
copy_q_cb(void *obj, void *arg)
{
struct dn_queue *q = obj;
struct copy_args *a = arg;
struct dn_flow *ni = (struct dn_flow *)(*a->start);
if (copy_obj_q(a->start, a->end, &q->ni, "queue", -1))
return DNHT_SCAN_END;
ni->oid.type = DN_FLOW;
ni->oid.id = si_hash((uintptr_t)&ni->fid, 0, NULL);
return 0;
}
static int
copy_q(struct copy_args *a, struct dn_fsk *fs, int flags)
{
if (!fs->qht)
return 0;
if (fs->fs.flags & DN_QHT_HASH)
dn_ht_scan(fs->qht, copy_q_cb, a);
else
copy_q_cb(fs->qht, a);
return 0;
}
static int
copy_profile(struct copy_args *a, struct dn_profile *p)
{
int have = a->end - *a->start;
int profile_len = sizeof(struct dn_profile) -
ED_MAX_SAMPLES_NO*sizeof(int);
if (p == NULL)
return 0;
if (have < profile_len) {
D("error have %d need %d", have, profile_len);
return 1;
}
memcpy(*a->start, p, profile_len);
((struct dn_id *)(*a->start))->len = profile_len;
*a->start += profile_len;
return 0;
}
static int
copy_flowset(struct copy_args *a, struct dn_fsk *fs, int flags)
{
struct dn_fs *ufs = (struct dn_fs *)(*a->start);
if (!fs)
return 0;
ND("flowset %d", fs->fs.fs_nr);
if (copy_obj(a->start, a->end, &fs->fs, "flowset", fs->fs.fs_nr))
return DNHT_SCAN_END;
ufs->oid.id = (fs->fs.flags & DN_QHT_HASH) ?
dn_ht_entries(fs->qht) : (fs->qht ? 1 : 0);
if (flags) {
copy_q(a, fs, 0);
}
return 0;
}
static int
copy_si_cb(void *obj, void *arg)
{
struct dn_sch_inst *si = obj;
struct copy_args *a = arg;
struct dn_flow *ni = (struct dn_flow *)(*a->start);
if (copy_obj(a->start, a->end, &si->ni, "inst",
si->sched->sch.sched_nr))
return DNHT_SCAN_END;
ni->oid.type = DN_FLOW;
ni->oid.id = si_hash((uintptr_t)si, DNHT_KEY_IS_OBJ, NULL);
return 0;
}
static int
copy_si(struct copy_args *a, struct dn_schk *s, int flags)
{
if (s->sch.flags & DN_HAVE_MASK)
dn_ht_scan(s->siht, copy_si_cb, a);
else if (s->siht)
copy_si_cb(s->siht, a);
return 0;
}
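/*
 * Emit the list of flowset numbers attached to a scheduler as a
 * DN_TEXT object containing an array of uint32_t.
 */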
static int
copy_fsk_list(struct copy_args *a, struct dn_schk *s, int flags)
{
struct dn_fsk *fs;
struct dn_id *o;
uint32_t *p;
int n = 0, space = sizeof(*o);
SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
if (fs->fs.fs_nr < DN_MAX_ID)
n++;
}
space += n * sizeof(uint32_t);
DX(3, "sched %d has %d flowsets", s->sch.sched_nr, n);
if (a->end - *(a->start) < space)
return DNHT_SCAN_END;
o = (struct dn_id *)(*(a->start));
o->len = space;
*a->start += o->len;
o->type = DN_TEXT;
p = (uint32_t *)(o+1);
SLIST_FOREACH(fs, &s->fsk_list, sch_chain)
if (fs->fs.fs_nr < DN_MAX_ID)
*p++ = fs->fs.fs_nr;
return 0;
}
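/*
 * Callback for the "show" scans.  a->extra contains pairs of numbers
 * delimiting the ranges to report.  Scheduler entries numbered below
 * DN_MAX_ID are user schedulers; the ones above are the per-pipe
 * shadows, reported as links.
 */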
static int
copy_data_helper(void *_o, void *_arg)
{
struct copy_args *a = _arg;
uint32_t *r = a->extra->r;
uint32_t *lim;
int n;
lim = (uint32_t *)((char *)(a->extra) + a->extra->o.len);
if (a->type == DN_LINK || a->type == DN_SCH) {
struct dn_schk *s = _o;
n = s->sch.sched_nr;
if (a->type == DN_SCH && n >= DN_MAX_ID)
return 0;
if (a->type == DN_LINK && n <= DN_MAX_ID)
return 0;
for (; r < lim; r += 2) {
if (n < r[0] || n > r[1])
continue;
if (a->flags & DN_C_LINK) {
if (copy_obj(a->start, a->end,
&s->link, "link", n))
return DNHT_SCAN_END;
if (copy_profile(a, s->profile))
return DNHT_SCAN_END;
if (copy_flowset(a, s->fs, 0))
return DNHT_SCAN_END;
}
if (a->flags & DN_C_SCH) {
if (copy_obj(a->start, a->end,
&s->sch, "sched", n))
return DNHT_SCAN_END;
if (copy_fsk_list(a, s, 0))
return DNHT_SCAN_END;
}
if (a->flags & DN_C_FLOW)
copy_si(a, s, 0);
break;
}
} else if (a->type == DN_FS) {
struct dn_fsk *fs = _o;
n = fs->fs.fs_nr;
if (n >= DN_MAX_ID)
return 0;
for (; r < lim; r += 2) {
if (n < r[0] || n > r[1])
continue;
if (copy_flowset(a, fs, 0))
return DNHT_SCAN_END;
copy_q(a, fs, 0);
break;
}
}
return 0;
}
static inline struct dn_schk *
locate_scheduler(int i)
{
return dn_ht_find(V_dn_cfg.schedhash, i, 0, NULL);
}
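/*
 * Precompute the RED drop-probability constants for a flowset (see
 * Floyd and Jacobson, "Random Early Detection Gateways").  Between
 * min_th and max_th the drop probability is the line
 *	p = c_1 * avg - c_2
 * and, for gentle RED above max_th,
 *	p = c_3 * avg - c_4.
 * w_q_lookup[] caches powers of (1 - w_q) so the average queue size
 * can be decayed cheaply after an idle period.  All values use the
 * fixed-point SCALE() representation.
 */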
static int
config_red(struct dn_fsk *fs)
{
int64_t s, idle, weight, w0;
int t, i;
fs->w_q = fs->fs.w_q;
fs->max_p = fs->fs.max_p;
ND("called");
i = fs->sched->link.bandwidth;
s = (i <= 0) ? 0 :
hz * V_dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i;
idle = div64((s * 3), fs->w_q);
fs->lookup_step = div64(idle, V_dn_cfg.red_lookup_depth);
if (!fs->lookup_step)
fs->lookup_step = 1;
w0 = weight = SCALE(1) - fs->w_q;
for (t = fs->lookup_step; t > 1; --t)
weight = SCALE_MUL(weight, w0);
fs->lookup_weight = (int)(weight);
fs->min_th = SCALE(fs->fs.min_th);
fs->max_th = SCALE(fs->fs.max_th);
if (fs->fs.max_th == fs->fs.min_th)
fs->c_1 = fs->max_p;
else
fs->c_1 = SCALE((int64_t)(fs->max_p)) / (fs->fs.max_th - fs->fs.min_th);
fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th));
if (fs->fs.flags & DN_IS_GENTLE_RED) {
fs->c_3 = (SCALE(1) - fs->max_p) / fs->fs.max_th;
fs->c_4 = SCALE(1) - 2 * fs->max_p;
}
free(fs->w_q_lookup, M_DUMMYNET);
fs->w_q_lookup = NULL;
if (V_dn_cfg.red_lookup_depth == 0) {
printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
"must be > 0\n");
fs->fs.flags &= ~DN_IS_RED;
fs->fs.flags &= ~DN_IS_GENTLE_RED;
return (EINVAL);
}
fs->lookup_depth = V_dn_cfg.red_lookup_depth;
fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int),
M_DUMMYNET, M_NOWAIT);
if (fs->w_q_lookup == NULL) {
printf("dummynet: sorry, cannot allocate red lookup table\n");
fs->fs.flags &= ~DN_IS_RED;
fs->fs.flags &= ~DN_IS_GENTLE_RED;
return (ENOSPC);
}
fs->w_q_lookup[0] = SCALE(1) - fs->w_q;
for (i = 1; i < fs->lookup_depth; i++)
fs->w_q_lookup[i] =
SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight);
if (V_dn_cfg.red_avg_pkt_size < 1)
V_dn_cfg.red_avg_pkt_size = 512;
fs->avg_pkt_size = V_dn_cfg.red_avg_pkt_size;
if (V_dn_cfg.red_max_pkt_size < 1)
V_dn_cfg.red_max_pkt_size = 1500;
fs->max_pkt_size = V_dn_cfg.red_max_pkt_size;
ND("exit");
return 0;
}
static void
update_red(struct dn_schk *s)
{
struct dn_fsk *fs;
SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
if (fs && (fs->fs.flags & DN_IS_RED))
config_red(fs);
}
}
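/*
 * Attach flowset fs to scheduler s: move it off the unlinked list,
 * combine the masks, and recompute RED parameters if needed.
 */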
static void
fsk_attach(struct dn_fsk *fs, struct dn_schk *s)
{
ND("remove fs %d from fsunlinked, link to sched %d",
fs->fs.fs_nr, s->sch.sched_nr);
SLIST_REMOVE(&V_dn_cfg.fsu, fs, dn_fsk, sch_chain);
fs->sched = s;
SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain);
if (s->fp->new_fsk)
s->fp->new_fsk(fs);
fs->fsk_mask = fs->fs.flow_mask;
if (fs->sched->sch.flags & DN_HAVE_MASK)
flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask);
if (fs->qht) {
D("XXX TODO requeue from fs %d to sch %d",
fs->fs.fs_nr, s->sch.sched_nr);
fs->qht = NULL;
}
if (nonzero_mask(&fs->fsk_mask))
fs->fs.flags |= DN_QHT_HASH;
else
fs->fs.flags &= ~DN_QHT_HASH;
if (fs->fs.flags & DN_IS_RED)
config_red(fs);
}
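/* attach all matching unlinked flowsets to a (re)configured scheduler */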
static void
update_fs(struct dn_schk *s)
{
struct dn_fsk *fs, *tmp;
SLIST_FOREACH_SAFE(fs, &V_dn_cfg.fsu, sch_chain, tmp) {
if (s->sch.sched_nr != fs->fs.sched_nr) {
D("fs %d for sch %d not %d still unlinked",
fs->fs.fs_nr, fs->fs.sched_nr,
s->sch.sched_nr);
continue;
}
fsk_attach(fs, s);
}
}
#ifdef NEW_AQM
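/* sockopt helper: copy a flowset's AQM parameters out to userland */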
static int
get_aqm_parms(struct sockopt *sopt)
{
struct dn_extra_parms *ep;
struct dn_fsk *fs;
size_t sopt_valsize;
int l, err = 0;
sopt_valsize = sopt->sopt_valsize;
l = sizeof(*ep);
if (sopt->sopt_valsize < l) {
D("bad len sopt->sopt_valsize %d len %d",
(int)sopt->sopt_valsize, l);
return EINVAL;
}
ep = malloc(l, M_DUMMYNET, M_NOWAIT);
if (ep == NULL)
return ENOMEM;
do {
err = sooptcopyin(sopt, ep, l, l);
if (err)
break;
/* sooptcopyin() consumes sopt_valsize, restore it for the copyout */
sopt->sopt_valsize = sopt_valsize;
if (ep->oid.len < l) {
err = EINVAL;
break;
}
fs = dn_ht_find(V_dn_cfg.fshash, ep->nr, 0, NULL);
if (fs == NULL) {
D("fs %d not found", ep->nr);
err = EINVAL;
break;
}
if (fs->aqmfp && fs->aqmfp->getconfig) {
if (fs->aqmfp->getconfig(fs, ep)) {
D("Error while trying to get AQM params");
err = EINVAL;
break;
}
ep->oid.len = l;
err = sooptcopyout(sopt, ep, l);
}
} while (0);
free(ep, M_DUMMYNET);
return err;
}
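/* sockopt helper: copy the scheduler parameters out to userland */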
static int
get_sched_parms(struct sockopt *sopt)
{
struct dn_extra_parms *ep;
struct dn_schk *schk;
size_t sopt_valsize;
int l, err = 0;
sopt_valsize = sopt->sopt_valsize;
l = sizeof(*ep);
if (sopt->sopt_valsize < l) {
D("bad len sopt->sopt_valsize %d len %d",
(int)sopt->sopt_valsize, l);
return EINVAL;
}
ep = malloc(l, M_DUMMYNET, M_NOWAIT);
if (ep == NULL)
return ENOMEM;
do {
err = sooptcopyin(sopt, ep, l, l);
if (err)
break;
/* sooptcopyin() consumes sopt_valsize, restore it for the copyout */
sopt->sopt_valsize = sopt_valsize;
if (ep->oid.len < l) {
err = EINVAL;
break;
}
schk = locate_scheduler(ep->nr);
if (schk == NULL) {
D("sched %d not found", ep->nr);
err = EINVAL;
break;
}
if (schk->fp && schk->fp->getconfig) {
if (schk->fp->getconfig(schk, ep)) {
D("Error while trying to get sched params");
err = EINVAL;
break;
}
ep->oid.len = l;
err = sooptcopyout(sopt, ep, l);
}
} while (0);
free(ep, M_DUMMYNET);
return err;
}
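/*
 * Configure the AQM of flowset fs from the parameters in ep; fails
 * with EINVAL if the flowset is busy (its scheduler already has
 * instances).
 */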
static int
config_aqm(struct dn_fsk *fs, struct dn_extra_parms *ep, int busy)
{
int err = 0;
NET_EPOCH_ASSERT();
do {
if (!ep) {
err = 0;
break;
}
if (!strcmp(ep->name,"")) {
err = 0;
break;
}
if (ep->oid.len < sizeof(*ep)) {
D("short aqm len %d", ep->oid.len);
err = EINVAL;
break;
}
if (busy) {
D("Unable to configure flowset, flowset busy!");
err = EINVAL;
break;
}
if (fs->aqmcfg && fs->aqmfp && fs->aqmfp->deconfig) {
aqm_cleanup_deconfig_fs(fs);
}
if (!(fs->aqmfp = find_aqm_type(0, ep->name))) {
D("AQM functions not found for type %s!", ep->name);
fs->fs.flags &= ~DN_IS_AQM;
err = EINVAL;
break;
} else
fs->fs.flags |= DN_IS_AQM;
if (ep->oid.subtype != DN_AQM_PARAMS) {
D("Wrong subtype");
err = EINVAL;
break;
}
if (fs->aqmfp->config) {
err = fs->aqmfp->config(fs, ep, ep->oid.len);
if (err) {
D("Unable to configure AQM for FS %d", fs->fs.fs_nr );
fs->fs.flags &= ~DN_IS_AQM;
fs->aqmfp = NULL;
break;
}
}
} while (0);
return err;
}
#endif
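/*
 * Configure a link; the parameters are applied both to the scheduler
 * and to its shadow at link_nr + DN_MAX_ID.  Delay arrives in ms,
 * burst in bytes; both are rescaled to the units used internally.
 */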
static int
config_link(struct dn_link *p, struct dn_id *arg)
{
int i;
if (p->oid.len != sizeof(*p)) {
D("invalid pipe len %d", p->oid.len);
return EINVAL;
}
i = p->link_nr;
if (i <= 0 || i >= DN_MAX_ID)
return EINVAL;
p->delay = (p->delay * hz) / 1000; /* ms -> ticks */
p->burst *= 8 * hz; /* bytes -> bits * hz */
DN_BH_WLOCK();
for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
struct dn_schk *s = locate_scheduler(i);
if (s == NULL) {
DN_BH_WUNLOCK();
D("sched %d not found", i);
return EINVAL;
}
free(s->profile, M_DUMMYNET);
s->profile = NULL;
s->link.oid = p->oid;
s->link.link_nr = i;
s->link.delay = p->delay;
if (s->link.bandwidth != p->bandwidth) {
s->link.bandwidth = p->bandwidth;
update_red(s);
}
s->link.burst = p->burst;
schk_reset_credit(s);
}
V_dn_cfg.id++;
DN_BH_WUNLOCK();
return 0;
}
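/*
 * Configure a flowset: bound the user-supplied parameters, then find
 * or create the kernel flowset, detach it from the old scheduler if
 * the configuration changed, and reattach it.  Returns the flowset
 * on success, NULL on error.
 */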
static struct dn_fsk *
config_fs(struct dn_fs *nfs, struct dn_id *arg, int locked)
{
int i;
struct dn_fsk *fs;
#ifdef NEW_AQM
struct dn_extra_parms *ep;
#endif
if (nfs->oid.len != sizeof(*nfs)) {
D("invalid flowset len %d", nfs->oid.len);
return NULL;
}
i = nfs->fs_nr;
if (i <= 0 || i >= 3*DN_MAX_ID)
return NULL;
#ifdef NEW_AQM
ep = NULL;
if (arg != NULL) {
ep = malloc(sizeof(*ep), M_TEMP, M_NOWAIT);
if (ep == NULL)
return (NULL);
memcpy(ep, arg, sizeof(*ep));
}
#endif
ND("flowset %d", i);
if (nfs->flags & DN_QSIZE_BYTES) {
ipdn_bound_var(&nfs->qsize, 16384,
1500, V_dn_cfg.byte_limit, NULL);
} else {
ipdn_bound_var(&nfs->qsize, 50,
1, V_dn_cfg.slot_limit, NULL);
}
if (nfs->flags & DN_HAVE_MASK) {
ipdn_bound_var((int *)&nfs->buckets, V_dn_cfg.hash_size,
1, V_dn_cfg.max_hash_size, "flowset buckets");
} else {
nfs->buckets = 1;
}
if (!locked)
DN_BH_WLOCK();
do {
struct dn_schk *s;
int flags = nfs->sched_nr ? DNHT_INSERT : 0;
int j;
int oldc = V_dn_cfg.fsk_count;
fs = dn_ht_find(V_dn_cfg.fshash, i, flags, NULL);
if (fs == NULL) {
D("missing sched for flowset %d", i);
break;
}
if (nfs->sched_nr == 0)
nfs->sched_nr = fs->fs.sched_nr;
for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) {
if (nfs->par[j] == -1)
nfs->par[j] = fs->fs.par[j];
}
if (bcmp(&fs->fs, nfs, sizeof(*nfs)) == 0) {
ND("flowset %d unchanged", i);
#ifdef NEW_AQM
if (ep != NULL) {
s = locate_scheduler(nfs->sched_nr);
config_aqm(fs, ep, s != NULL && s->siht != NULL);
}
#endif
break;
}
if (oldc != V_dn_cfg.fsk_count)
V_dn_cfg.id++;
s = locate_scheduler(nfs->sched_nr);
DX(2, "fs %d changed sched %d@%p to %d@%p",
fs->fs.fs_nr,
fs->fs.sched_nr, fs->sched, nfs->sched_nr, s);
if (fs->sched) { /* detach from the old scheduler */
int flags = s ? DN_DETACH : (DN_DETACH | DN_DESTROY);
flags |= DN_DESTROY; /* XXX forced, makes the ternary above moot */
fsk_detach(fs, flags);
}
}
fs->fs = *nfs;
#ifdef NEW_AQM
fs->aqmfp = NULL;
if (ep != NULL)
config_aqm(fs, ep, s != NULL &&
s->siht != NULL);
#endif
if (s != NULL)
fsk_attach(fs, s);
} while (0);
if (!locked)
DN_BH_WUNLOCK();
#ifdef NEW_AQM
free(ep, M_TEMP);
#endif
return fs;
}
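/*
 * Configure a scheduler, plus the FIFO shadow at sched_nr + DN_MAX_ID
 * used when the pipe is referenced directly.  A change of type or
 * parameters destroys and re-creates the scheduler, preserving the
 * link and profile across the switch.
 */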
static int
config_sched(struct dn_sch *_nsch, struct dn_id *arg)
{
struct dn_schk *s;
struct schk_new_arg a;
int i;
struct dn_link p;
struct dn_profile *pf = NULL;
struct ipfw_flow_id new_mask;
int new_buckets = 0;
int new_flags = 0;
int pipe_cmd;
int err = ENOMEM;
NET_EPOCH_ASSERT();
a.sch = _nsch;
if (a.sch->oid.len != sizeof(*a.sch)) {
D("bad sched len %d", a.sch->oid.len);
return EINVAL;
}
i = a.sch->sched_nr;
if (i <= 0 || i >= DN_MAX_ID)
return EINVAL;
if (a.sch->flags & DN_HAVE_MASK)
ipdn_bound_var((int *)&a.sch->buckets, V_dn_cfg.hash_size,
1, V_dn_cfg.max_hash_size, "sched buckets");
bzero(&p, sizeof(p));
pipe_cmd = a.sch->flags & DN_PIPE_CMD;
a.sch->flags &= ~DN_PIPE_CMD;
if (pipe_cmd) {
new_mask = a.sch->sched_mask;
new_buckets = a.sch->buckets;
new_flags = a.sch->flags;
}
DN_BH_WLOCK();
again:
V_dn_cfg.id++;
a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name);
if (a.fp != NULL) {
s = dn_ht_find(V_dn_cfg.schedhash, i, DNHT_INSERT, &a);
} else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) {
s = dn_ht_find(V_dn_cfg.schedhash, i, 0, &a);
if (s != NULL) {
a.fp = s->fp;
if (pipe_cmd)
goto next;
} else {
if (pipe_cmd) {
bzero(&a.sch->sched_mask, sizeof(new_mask));
a.sch->buckets = 0;
a.sch->flags &= ~DN_HAVE_MASK;
}
a.sch->oid.subtype = DN_SCHED_WF2QP;
goto again;
}
} else {
D("invalid scheduler type %d %s",
a.sch->oid.subtype, a.sch->name);
err = EINVAL;
goto error;
}
a.sch->oid.subtype = a.fp->type;
bzero(a.sch->name, sizeof(a.sch->name));
strlcpy(a.sch->name, a.fp->name, sizeof(a.sch->name));
if (s == NULL) {
D("cannot allocate scheduler %d", i);
goto error;
}
if (p.link_nr) { /* reuse link and profile saved across re-creation */
s->link = p;
if (!pf || pf->link_nr != p.link_nr) { /* no saved profile */
s->profile = NULL;
} else {
s->profile = malloc(sizeof(struct dn_profile),
M_DUMMYNET, M_NOWAIT | M_ZERO);
if (s->profile == NULL) {
D("cannot allocate profile");
goto error;
}
memcpy(s->profile, pf, sizeof(*pf));
}
}
p.link_nr = 0;
if (s->fp == NULL) { /* new entry: schk_new() left fp NULL */
DX(2, "sched %d new type %s", i, a.fp->name);
} else if (s->fp != a.fp ||
bcmp(a.sch, &s->sch, sizeof(*a.sch))) {
DX(2, "sched %d type changed from %s to %s",
i, s->fp->name, a.fp->name);
DX(4, " type/sub %d/%d -> %d/%d",
s->sch.oid.type, s->sch.oid.subtype,
a.sch->oid.type, a.sch->oid.subtype);
if (s->link.link_nr == 0)
D("XXX WARNING link 0 for sched %d", i);
p = s->link;
if (s->profile) {
if (!pf)
pf = malloc(sizeof(*pf),
M_DUMMYNET, M_NOWAIT | M_ZERO);
if (pf)
memcpy(pf, s->profile, sizeof(*pf));
}
dn_ht_find(V_dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
schk_delete_cb(s, (void *)DN_DESTROY);
goto again;
} else {
DX(4, "sched %d unchanged type %s", i, a.fp->name);
}
s->sch = *a.sch;
s->fp = a.fp;
s->cfg = arg;
if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) {
s->fs = dn_ht_find(V_dn_cfg.fshash, i, 0, NULL);
if (!s->fs) {
struct dn_fs fs;
bzero(&fs, sizeof(fs));
set_oid(&fs.oid, DN_FS, sizeof(fs));
fs.fs_nr = i + DN_MAX_ID;
fs.sched_nr = i;
s->fs = config_fs(&fs, NULL, 1 /* locked */);
}
if (!s->fs) {
schk_delete_cb(s, (void *)DN_DESTROY);
D("error creating internal fs for %d", i);
goto error;
}
}
if (s->fp->config)
s->fp->config(s);
update_fs(s);
next:
if (i < DN_MAX_ID) { /* now configure the FIFO instance */
i += DN_MAX_ID;
if (pipe_cmd) {
/* restore the mask parameters for the FIFO */
a.sch->sched_mask = new_mask;
a.sch->buckets = new_buckets;
a.sch->flags = new_flags;
} else {
/* the FIFO already exists, don't touch it */
if (dn_ht_find(V_dn_cfg.schedhash, i, 0, &a) != NULL) {
err = 0; /* not an error */
goto error;
}
}
a.sch->sched_nr = i;
a.sch->oid.subtype = DN_SCHED_FIFO;
bzero(a.sch->name, sizeof(a.sch->name));
goto again;
}
err = 0;
error:
DN_BH_WUNLOCK();
free(pf, M_DUMMYNET);
return err;
}
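/*
 * Attach a delay profile to a link (and its shadow scheduler);
 * samples_no == 0 removes an existing profile.
 */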
static int
config_profile(struct dn_profile *pf, struct dn_id *arg)
{
struct dn_schk *s;
int i, olen, err = 0;
if (pf->oid.len < sizeof(*pf)) {
D("short profile len %d", pf->oid.len);
return EINVAL;
}
i = pf->link_nr;
if (i <= 0 || i >= DN_MAX_ID)
return EINVAL;
DN_BH_WLOCK();
for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
s = locate_scheduler(i);
if (s == NULL) {
err = EINVAL;
break;
}
V_dn_cfg.id++;
if (s->profile && (pf->samples_no == 0 ||
s->profile->oid.len < pf->oid.len)) {
free(s->profile, M_DUMMYNET);
s->profile = NULL;
}
if (pf->samples_no == 0)
continue;
if (s->profile == NULL)
s->profile = malloc(pf->oid.len,
M_DUMMYNET, M_NOWAIT | M_ZERO);
if (s->profile == NULL) {
D("no memory for profile %d", i);
err = ENOMEM;
break;
}
olen = s->profile->oid.len;
if (olen < pf->oid.len)
olen = pf->oid.len;
memcpy(s->profile, pf, pf->oid.len);
s->profile->oid.len = olen;
}
DN_BH_WUNLOCK();
return err;
}
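/* delete all objects, called with the BH lock held */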
static void
dummynet_flush(void)
{
dn_ht_scan(V_dn_cfg.schedhash, schk_delete_cb,
(void *)(uintptr_t)DN_DELETE_FS);
DX(4, "still %d unlinked fs", V_dn_cfg.fsk_count);
dn_ht_free(V_dn_cfg.fshash, DNHT_REMOVE);
fsk_detach_list(&V_dn_cfg.fsu, DN_DELETE_FS);
heap_init(&V_dn_cfg.evheap, 16, offsetof(struct dn_id, id));
}
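/*
 * Main configuration handler: walk the chain of objects in the user
 * buffer (each starts with a struct dn_id) and dispatch on the type.
 * A DN_TEXT object is saved and passed as "arg" to the command that
 * follows it.
 */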
int
do_config(void *p, size_t l)
{
struct dn_id o;
union {
struct dn_profile profile;
struct dn_fs fs;
struct dn_link link;
struct dn_sch sched;
} *dn;
struct dn_id *arg;
uintptr_t a;
int err = 0, err2, off;
memcpy(&o, p, sizeof(o));
if (o.id != DN_API_VERSION) {
D("invalid api version got %d need %d", o.id, DN_API_VERSION);
return EINVAL;
}
arg = NULL;
dn = NULL;
off = 0;
while (l >= sizeof(o)) {
memcpy(&o, (char *)p + off, sizeof(o));
if (o.len < sizeof(o) || l < o.len) {
D("bad len o.len %d len %zu", o.len, l);
err = EINVAL;
break;
}
l -= o.len;
err = 0;
switch (o.type) {
default:
D("cmd %d not implemented", o.type);
break;
#ifdef EMULATE_SYSCTL
case DN_SYSCTL_SET:
err = kesysctl_emu_set(p, l);
return err;
#endif
case DN_CMD_CONFIG:
break;
case DN_CMD_DELETE:
if (o.len < sizeof(o) + sizeof(a)) {
err = EINVAL;
break;
}
memcpy(&a, (char *)p + off + sizeof(o), sizeof(a));
switch (o.subtype) {
case DN_LINK:
DN_BH_WLOCK();
err = delete_schk(a);
err2 = delete_schk(a + DN_MAX_ID);
DN_BH_WUNLOCK();
if (!err)
err = err2;
break;
default:
D("invalid delete type %d", o.subtype);
err = EINVAL;
break;
case DN_FS:
err = (a < 1 || a >= DN_MAX_ID) ?
EINVAL : delete_fs(a, 0);
break;
}
break;
case DN_CMD_FLUSH:
DN_BH_WLOCK();
dummynet_flush();
DN_BH_WUNLOCK();
break;
case DN_TEXT:
free(arg, M_TEMP);
arg = malloc(o.len, M_TEMP, M_NOWAIT);
if (arg == NULL) {
err = ENOMEM;
break;
}
memcpy(arg, (char *)p + off, o.len);
break;
case DN_LINK:
if (dn == NULL)
dn = malloc(sizeof(*dn), M_TEMP, M_NOWAIT);
if (dn == NULL) {
err = ENOMEM;
break;
}
memcpy(&dn->link, (char *)p + off, sizeof(dn->link));
err = config_link(&dn->link, arg);
break;
case DN_PROFILE:
if (dn == NULL)
dn = malloc(sizeof(*dn), M_TEMP, M_NOWAIT);
if (dn == NULL) {
err = ENOMEM;
break;
}
memcpy(&dn->profile, (char *)p + off,
sizeof(dn->profile));
err = config_profile(&dn->profile, arg);
break;
case DN_SCH:
if (dn == NULL)
dn = malloc(sizeof(*dn), M_TEMP, M_NOWAIT);
if (dn == NULL) {
err = ENOMEM;
break;
}
memcpy(&dn->sched, (char *)p + off,
sizeof(dn->sched));
err = config_sched(&dn->sched, arg);
break;
case DN_FS:
if (dn == NULL)
dn = malloc(sizeof(*dn), M_TEMP, M_NOWAIT);
if (dn == NULL) {
err = ENOMEM;
break;
}
memcpy(&dn->fs, (char *)p + off, sizeof(dn->fs));
err = (NULL == config_fs(&dn->fs, arg, 0));
break;
}
if (err != 0)
break;
off += o.len;
}
free(arg, M_TEMP);
free(dn, M_TEMP);
return err;
}
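/*
 * Compute a (pessimistic) estimate of the space needed to answer a
 * "show" request.  schk_count counts both the user schedulers and
 * their per-pipe shadow FIFOs, hence the divisions by 2.
 */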
static int
compute_space(struct dn_id *cmd, struct copy_args *a)
{
int x = 0, need = 0;
int profile_size = sizeof(struct dn_profile) -
ED_MAX_SAMPLES_NO*sizeof(int);
switch (cmd->subtype) {
default:
return -1;
case DN_LINK:
x = DN_C_LINK | DN_C_SCH | DN_C_FLOW;
need += V_dn_cfg.schk_count *
(sizeof(struct dn_fs) + profile_size) / 2;
need += V_dn_cfg.fsk_count * sizeof(uint32_t);
break;
case DN_SCH:
need += V_dn_cfg.schk_count *
(sizeof(struct dn_fs) + profile_size) / 2;
need += V_dn_cfg.fsk_count * sizeof(uint32_t);
x = DN_C_SCH | DN_C_LINK | DN_C_FLOW;
break;
case DN_FS:
x = DN_C_FS | DN_C_QUEUE;
break;
case DN_GET_COMPAT:
need = dn_compat_calc_size();
break;
}
a->flags = x;
if (x & DN_C_SCH) {
need += V_dn_cfg.schk_count * sizeof(struct dn_sch) / 2;
need += V_dn_cfg.schk_count * sizeof(struct dn_id) / 2;
}
if (x & DN_C_FS)
need += V_dn_cfg.fsk_count * sizeof(struct dn_fs);
if (x & DN_C_LINK) {
need += V_dn_cfg.schk_count * sizeof(struct dn_link) / 2;
}
if (x & DN_C_QUEUE)
need += V_dn_cfg.queue_count * sizeof(struct dn_flow);
if (x & DN_C_FLOW)
need += V_dn_cfg.si_count * (sizeof(struct dn_flow));
return need;
}
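/*
 * Handler for "show" requests: estimate the space needed, allocate a
 * buffer, then copy the objects out under the BH lock, retrying a few
 * times since the configuration may change between the estimate and
 * the copy.  If the user buffer is too small, only the header is
 * returned, with the required space reported in its id field.
 */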
int
dummynet_get(struct sockopt *sopt, void **compat)
{
int have, i, need, error;
char *start = NULL, *buf;
size_t sopt_valsize;
struct dn_id *cmd;
struct copy_args a;
struct copy_range r;
int l = sizeof(struct dn_id);
bzero(&a, sizeof(a));
bzero(&r, sizeof(r));
sopt_valsize = sopt->sopt_valsize;
cmd = &r.o;
if (!compat) {
error = sooptcopyin(sopt, cmd, sizeof(r), sizeof(*cmd));
sopt->sopt_valsize = sopt_valsize;
if (error)
goto done;
l = cmd->len;
#ifdef EMULATE_SYSCTL
if (cmd->type == DN_SYSCTL_GET)
return kesysctl_emu_get(sopt);
#endif
if (l > sizeof(r)) {
cmd = malloc(l, M_DUMMYNET, M_NOWAIT);
if (cmd == NULL) {
error = ENOMEM;
goto done;
}
error = sooptcopyin(sopt, cmd, l, l);
sopt->sopt_valsize = sopt_valsize;
if (error)
goto done;
}
} else {
error = 0;
cmd->type = DN_CMD_GET;
cmd->len = sizeof(struct dn_id);
cmd->subtype = DN_GET_COMPAT;
D("compatibility mode");
}
#ifdef NEW_AQM
if (cmd->subtype == DN_AQM_PARAMS) {
error = get_aqm_parms(sopt);
goto done;
} else if (cmd->subtype == DN_SCH_PARAMS) {
error = get_sched_parms(sopt);
goto done;
}
#endif
a.extra = (struct copy_range *)cmd;
if (cmd->len == sizeof(*cmd)) {
uint32_t *rp = (uint32_t *)(cmd + 1);
cmd->len += 2 * sizeof(uint32_t);
rp[0] = 1;
rp[1] = DN_MAX_ID - 1;
if (cmd->subtype == DN_LINK) {
rp[0] += DN_MAX_ID;
rp[1] += DN_MAX_ID;
}
}
for (have = 0, i = 0; i < 10; i++) {
DN_BH_WLOCK();
need = compute_space(cmd, &a);
if (l > sizeof(*cmd))
need = sopt_valsize - sizeof(*cmd);
if (need < 0) {
DN_BH_WUNLOCK();
error = EINVAL;
goto done;
}
need += sizeof(*cmd);
cmd->id = need;
if (have >= need)
break;
DN_BH_WUNLOCK();
free(start, M_DUMMYNET);
start = NULL;
if (need > sopt_valsize)
break;
have = need;
start = malloc(have, M_DUMMYNET, M_NOWAIT | M_ZERO);
}
if (start == NULL) {
if (compat) {
*compat = NULL;
error = 1;
} else {
error = sooptcopyout(sopt, cmd, sizeof(*cmd));
}
goto done;
}
ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, "
"%d:%d si %d, %d:%d queues %d",
V_dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH,
V_dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK,
V_dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS,
V_dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I,
V_dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE);
sopt->sopt_valsize = sopt_valsize;
a.type = cmd->subtype;
if (compat == NULL) {
memcpy(start, cmd, sizeof(*cmd));
((struct dn_id*)(start))->len = sizeof(struct dn_id);
buf = start + sizeof(*cmd);
} else
buf = start;
a.start = &buf;
a.end = start + have;
if (compat) {
a.type = DN_COMPAT_PIPE;
dn_ht_scan(V_dn_cfg.schedhash, copy_data_helper_compat, &a);
a.type = DN_COMPAT_QUEUE;
dn_ht_scan(V_dn_cfg.fshash, copy_data_helper_compat, &a);
} else if (a.type == DN_FS) {
dn_ht_scan(V_dn_cfg.fshash, copy_data_helper, &a);
} else {
dn_ht_scan(V_dn_cfg.schedhash, copy_data_helper, &a);
}
DN_BH_WUNLOCK();
if (compat) {
*compat = start;
sopt->sopt_valsize = buf - start;
start = NULL;
} else {
error = sooptcopyout(sopt, start, buf - start);
}
done:
if (cmd != &r.o)
free(cmd, M_DUMMYNET);
free(start, M_DUMMYNET);
return error;
}
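/*
 * Drain routines, invoked periodically to reclaim idle objects:
 * destroy scheduler instances that are not in the event heap and
 * have no pending traffic.  For single-queue schedulers the queue
 * is embedded right after the instance, hence the (si + 1) checks.
 */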
static int
drain_scheduler_cb(void *_si, void *arg)
{
struct dn_sch_inst *si = _si;
if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL)
return 0;
if (si->sched->fp->flags & DN_MULTIQUEUE) {
if (si->q_count == 0)
return si_destroy(si, NULL);
else
return 0;
} else {
if ((si+1)->ni.length == 0)
return si_destroy(si, NULL);
else
return 0;
}
return 0;
}
static int
drain_scheduler_sch_cb(void *_s, void *arg)
{
struct dn_schk *s = _s;
if (s->sch.flags & DN_HAVE_MASK) {
dn_ht_scan_bucket(s->siht, &s->drain_bucket,
drain_scheduler_cb, NULL);
s->drain_bucket++;
} else {
if (s->siht) {
if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL)
s->siht = NULL;
}
}
return 0;
}
void
dn_drain_scheduler(void)
{
dn_ht_scan_bucket(V_dn_cfg.schedhash, &V_dn_cfg.drain_sch,
drain_scheduler_sch_cb, NULL);
V_dn_cfg.drain_sch++;
}
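/* as above, but reclaim the empty queues of each flowset */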
static int
drain_queue_cb(void *_q, void *arg)
{
struct dn_queue *q = _q;
if (q->ni.length == 0) {
dn_delete_queue(q, DN_DESTROY);
return DNHT_SCAN_DEL;
}
return 0;
}
static int
drain_queue_fs_cb(void *_fs, void *arg)
{
struct dn_fsk *fs = _fs;
if (fs->fs.flags & DN_QHT_HASH) {
dn_ht_scan_bucket(fs->qht, &fs->drain_bucket,
drain_queue_cb, NULL);
fs->drain_bucket++;
} else {
if (fs->qht) {
if (drain_queue_cb(fs->qht, NULL) == DNHT_SCAN_DEL)
fs->qht = NULL;
}
}
return 0;
}
void
dn_drain_queue(void)
{
dn_ht_scan_bucket(V_dn_cfg.fshash, &V_dn_cfg.drain_fs,
drain_queue_fs_cb, NULL);
V_dn_cfg.drain_fs++;
}
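/*
 * Sockopt handler, the userland entry point.  Requires
 * PRIV_NETINET_DUMMYNET and, for SET operations, securelevel below 3.
 */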
static int
ip_dn_ctl(struct sockopt *sopt)
{
struct epoch_tracker et;
void *p = NULL;
size_t l;
int error;
error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET);
if (error)
return (error);
if (sopt->sopt_dir == SOPT_SET) {
error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
if (error)
return (error);
}
NET_EPOCH_ENTER(et);
switch (sopt->sopt_name) {
default:
D("dummynet: unknown option %d", sopt->sopt_name);
error = EINVAL;
break;
case IP_DUMMYNET_FLUSH:
case IP_DUMMYNET_CONFIGURE:
case IP_DUMMYNET_DEL:
case IP_DUMMYNET_GET:
D("dummynet: compat option %d", sopt->sopt_name);
error = ip_dummynet_compat(sopt);
break;
case IP_DUMMYNET3:
if (sopt->sopt_dir == SOPT_GET) {
error = dummynet_get(sopt, NULL);
break;
}
l = sopt->sopt_valsize;
if (l < sizeof(struct dn_id) || l > 12000) {
D("argument len %zu invalid", l);
error = EINVAL; /* previously fell through reporting success */
break;
}
p = malloc(l, M_TEMP, M_NOWAIT);
if (p == NULL) {
error = ENOMEM;
break;
}
error = sooptcopyin(sopt, p, l, l);
if (error == 0)
error = do_config(p, l);
break;
}
free(p, M_TEMP);
NET_EPOCH_EXIT(et);
return error;
}
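/* per-vnet initialization: defaults, hash tables and event heap */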
static void
ip_dn_vnet_init(void)
{
if (V_dn_cfg.init_done)
return;
V_dn_cfg.slot_limit = 100;
V_dn_cfg.byte_limit = 1024 * 1024;
V_dn_cfg.expire = 1;
V_dn_cfg.red_lookup_depth = 256;
V_dn_cfg.red_avg_pkt_size = 512;
V_dn_cfg.red_max_pkt_size = 1500;
V_dn_cfg.max_hash_size = 65536;
V_dn_cfg.hash_size = 64;
V_dn_cfg.schedhash = dn_ht_init(NULL, V_dn_cfg.hash_size,
offsetof(struct dn_schk, schk_next),
schk_hash, schk_match, schk_new);
V_dn_cfg.fshash = dn_ht_init(NULL, V_dn_cfg.hash_size,
offsetof(struct dn_fsk, fsk_next),
fsk_hash, fsk_match, fsk_new);
V_dn_cfg.drain_fs = 0;
V_dn_cfg.drain_sch = 0;
heap_init(&V_dn_cfg.evheap, 16, offsetof(struct dn_id, id));
SLIST_INIT(&V_dn_cfg.fsu);
DN_LOCK_INIT();
getmicrouptime(&V_dn_cfg.prev_t);
V_dn_cfg.init_done = 1;
}
static void
ip_dn_vnet_destroy(void)
{
DN_BH_WLOCK();
dummynet_flush();
DN_BH_WUNLOCK();
dn_ht_free(V_dn_cfg.schedhash, 0);
dn_ht_free(V_dn_cfg.fshash, 0);
heap_free(&V_dn_cfg.evheap);
DN_LOCK_DESTROY();
}
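/* one-time global initialization: lock, taskqueue and tick callout */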
static void
ip_dn_init(void)
{
if (dn_tasks_started)
return;
mtx_init(&sched_mtx, "dn_sched", NULL, MTX_DEF);
dn_tasks_started = 1;
TASK_INIT(&dn_task, 0, dummynet_task, NULL);
dn_tq = taskqueue_create_fast("dummynet", M_WAITOK,
taskqueue_thread_enqueue, &dn_tq);
taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
CK_LIST_INIT(&schedlist);
callout_init(&dn_timeout, 1);
dn_reschedule();
}
static void
ip_dn_destroy(int last)
{
dn_gone = 1;
if (last) {
ND("removing last instance\n");
ip_dn_ctl_ptr = NULL;
ip_dn_io_ptr = NULL;
}
callout_drain(&dn_timeout);
taskqueue_drain(dn_tq, &dn_task);
taskqueue_free(dn_tq);
}
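/* module load/unload handler */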
static int
dummynet_modevent(module_t mod, int type, void *data)
{
if (type == MOD_LOAD) {
if (ip_dn_io_ptr) {
printf("DUMMYNET already loaded\n");
return EEXIST ;
}
ip_dn_init();
ip_dn_ctl_ptr = ip_dn_ctl;
ip_dn_io_ptr = dummynet_io;
return 0;
} else if (type == MOD_UNLOAD) {
ip_dn_destroy(1 /* last */);
return 0;
} else
return EOPNOTSUPP;
}
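/*
 * Register a scheduler algorithm; duplicates (by name) are rejected.
 * Returns 0 on success, 1 otherwise.
 */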
static int
load_dn_sched(struct dn_alg *d)
{
struct dn_alg *s;
if (d == NULL)
return 1;
ip_dn_init();
if (d->enqueue == NULL || d->dequeue == NULL) {
D("missing enqueue or dequeue for %s", d->name);
return 1;
}
mtx_lock(&sched_mtx);
CK_LIST_FOREACH(s, &schedlist, next) {
if (strcmp(s->name, d->name) == 0) {
D("%s already loaded", d->name);
break;
}
}
if (s == NULL)
CK_LIST_INSERT_HEAD(&schedlist, d, next);
mtx_unlock(&sched_mtx);
D("dn_sched %s %sloaded", d->name, s ? "not ":"");
return s ? 1 : 0;
}
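/*
 * Unregister a scheduler algorithm; EBUSY if still referenced,
 * EINVAL if not found.  NET_EPOCH_WAIT() ensures no epoch reader
 * still sees the removed entry before we return.
 */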
static int
unload_dn_sched(struct dn_alg *s)
{
struct dn_alg *tmp, *r;
int err = EINVAL;
ND("called for %s", s->name);
mtx_lock(&sched_mtx);
CK_LIST_FOREACH_SAFE(r, &schedlist, next, tmp) {
if (strcmp(s->name, r->name) != 0)
continue;
ND("ref_count = %d", r->ref_count);
err = (r->ref_count != 0) ? EBUSY : 0;
if (err == 0)
CK_LIST_REMOVE(r, next);
break;
}
mtx_unlock(&sched_mtx);
NET_EPOCH_WAIT();
D("dn_sched %s %sunloaded", s->name, err ? "not ":"");
return err;
}
int
dn_sched_modevent(module_t mod, int cmd, void *arg)
{
struct dn_alg *sch = arg;
if (cmd == MOD_LOAD)
return load_dn_sched(sch);
else if (cmd == MOD_UNLOAD)
return unload_dn_sched(sch);
else
return EINVAL;
}
static moduledata_t dummynet_mod = {
"dummynet", dummynet_modevent, NULL
};
#define DN_SI_SUB SI_SUB_PROTO_FIREWALL
#define DN_MODEV_ORD (SI_ORDER_ANY - 128)
DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD);
MODULE_VERSION(dummynet, 3);
VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_vnet_init, NULL);
VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_vnet_destroy, NULL);
#ifdef NEW_AQM
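/* AQM module glue, mirroring the scheduler load/unload code above */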
static int
load_dn_aqm(struct dn_aqm *d)
{
struct dn_aqm *aqm = NULL;
if (d == NULL)
return 1;
ip_dn_init();
if (d->enqueue == NULL || d->dequeue == NULL) {
D("missing enqueue or dequeue for %s", d->name);
return 1;
}
mtx_lock(&sched_mtx);
CK_LIST_FOREACH(aqm, &aqmlist, next) {
if (strcmp(aqm->name, d->name) == 0) {
D("%s already loaded", d->name);
break;
}
}
if (aqm == NULL)
CK_LIST_INSERT_HEAD(&aqmlist, d, next);
mtx_unlock(&sched_mtx);
D("dn_aqm %s %sloaded", d->name, aqm ? "not ":"");
return aqm ? 1 : 0;
}
static int
fs_cleanup(void *_fs, void *arg)
{
struct dn_fsk *fs = _fs;
uint32_t type = *(uint32_t *)arg;
if (fs->aqmfp && fs->aqmfp->type == type)
aqm_cleanup_deconfig_fs(fs);
return 0;
}
static int
unload_dn_aqm(struct dn_aqm *aqm)
{
struct dn_aqm *tmp, *r;
int err = 0;
ND("called for %s", aqm->name);
dn_ht_scan(V_dn_cfg.fshash, fs_cleanup, &aqm->type);
mtx_lock(&sched_mtx);
CK_LIST_FOREACH_SAFE(r, &aqmlist, next, tmp) {
if (strcmp(aqm->name, r->name) != 0)
continue;
ND("ref_count = %d", r->ref_count);
err = (r->ref_count != 0 || r->cfg_ref_count != 0) ? EBUSY : 0;
if (err == 0)
CK_LIST_REMOVE(r, next);
break;
}
mtx_unlock(&sched_mtx);
NET_EPOCH_WAIT();
D("%s %sunloaded", aqm->name, err ? "not ":"");
if (err)
D("ref_count=%d, cfg_ref_count=%d", r->ref_count, r->cfg_ref_count);
return err;
}
int
dn_aqm_modevent(module_t mod, int cmd, void *arg)
{
struct dn_aqm *aqm = arg;
if (cmd == MOD_LOAD)
return load_dn_aqm(aqm);
else if (cmd == MOD_UNLOAD)
return unload_dn_aqm(aqm);
else
return EINVAL;
}
#endif