Path: blob/main/sys/compat/linuxkpi/common/src/linux_work.c
/*-
 * Copyright (c) 2017-2019 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/compat.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/irq_work.h>

#include <sys/kernel.h>

/*
 * Define all work struct states
 */
enum {
        WORK_ST_IDLE,           /* idle - not started */
        WORK_ST_TIMER,          /* timer is being started */
        WORK_ST_TASK,           /* taskqueue is being queued */
        WORK_ST_EXEC,           /* callback is being called */
        WORK_ST_CANCEL,         /* cancel is being requested */
        WORK_ST_MAX,
};

/*
 * Define global workqueues
 */
static struct workqueue_struct *linux_system_short_wq;
static struct workqueue_struct *linux_system_long_wq;

struct workqueue_struct *system_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_power_efficient_wq;

struct taskqueue *linux_irq_work_tq;

static int linux_default_wq_cpus = 4;

static void linux_delayed_work_timer_fn(void *);

/*
 * This function atomically updates the work state and returns the
 * previous state at the time of update.
 */
static uint8_t
linux_update_state(atomic_t *v, const uint8_t *pstate)
{
        int c, old;

        c = v->counter;

        while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
                c = old;

        return (c);
}
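
/*
 * All state changes below go through linux_update_state().  Each entry
 * point supplies its own WORK_ST_MAX sized transition table, so a single
 * lock-free update both reads the current state and moves the work item
 * to whatever state that entry point needs.  As a rough sketch, plain
 * work moves IDLE -> TASK -> EXEC -> IDLE and delayed work moves
 * IDLE -> TIMER -> TASK -> EXEC -> IDLE, with CANCEL reachable from
 * TIMER and TASK while a cancel request is in flight.
 */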

/*
 * A LinuxKPI task is allowed to free itself inside the callback function
 * and cannot safely be referred to after the callback function has
 * completed.  This function gives the linux_work_fn() function a hint
 * that the task is not going away and can have its state checked
 * again.  Without this extra hint LinuxKPI tasks cannot be serialized
 * across multiple worker threads.
 */
static bool
linux_work_exec_unblock(struct work_struct *work)
{
        struct workqueue_struct *wq;
        struct work_exec *exec;
        bool retval = false;

        wq = work->work_queue;
        if (unlikely(wq == NULL))
                goto done;

        WQ_EXEC_LOCK(wq);
        TAILQ_FOREACH(exec, &wq->exec_head, entry) {
                if (exec->target == work) {
                        exec->target = NULL;
                        retval = true;
                        break;
                }
        }
        WQ_EXEC_UNLOCK(wq);
done:
        return (retval);
}

static void
linux_delayed_work_enqueue(struct delayed_work *dwork)
{
        struct taskqueue *tq;

        tq = dwork->work.work_queue->taskqueue;
        taskqueue_enqueue(tq, &dwork->work.work_task);
}

/*
 * This function queues the given work structure on the given
 * workqueue.  It returns non-zero if the work was successfully
 * [re-]queued.  Else the work is already pending for completion.
 */
bool
linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
    struct work_struct *work)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_TASK,          /* start queuing task */
                [WORK_ST_TIMER] = WORK_ST_TIMER,        /* NOP */
                [WORK_ST_TASK] = WORK_ST_TASK,          /* NOP */
                [WORK_ST_EXEC] = WORK_ST_TASK,          /* queue task another time */
                [WORK_ST_CANCEL] = WORK_ST_TASK,        /* start queuing task again */
        };

        if (atomic_read(&wq->draining) != 0)
                return (!work_pending(work));

        switch (linux_update_state(&work->state, states)) {
        case WORK_ST_EXEC:
        case WORK_ST_CANCEL:
                if (linux_work_exec_unblock(work) != 0)
                        return (true);
                /* FALLTHROUGH */
        case WORK_ST_IDLE:
                work->work_queue = wq;
                taskqueue_enqueue(wq->taskqueue, &work->work_task);
                return (true);
        default:
                return (false);         /* already on a queue */
        }
}
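
/*
 * Drivers normally reach this function through the queue_work() and
 * schedule_work() wrappers in <linux/workqueue.h>.  A minimal consumer
 * side sketch, assuming the usual LinuxKPI macro names and a
 * hypothetical softc and callback:
 *
 *      static void my_task_fn(struct work_struct *work);
 *
 *      INIT_WORK(&sc->my_work, my_task_fn);
 *      queue_work(system_wq, &sc->my_work);
 *
 * Requeueing an item that is still pending is a NOP and returns false,
 * matching the semantics implemented above.
 */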

/*
 * Callback function for linux_queue_rcu_work().
 */
static void
rcu_work_func(struct rcu_head *rcu)
{
        struct rcu_work *rwork;

        rwork = container_of(rcu, struct rcu_work, rcu);
        linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
}

/*
 * This function queues the given work after an RCU grace period.
 * If the work was already pending it returns false, otherwise it
 * calls call_rcu() and returns true.
 */
bool
linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
{

        if (!linux_work_pending(&rwork->work)) {
                rwork->wq = wq;
                linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func);
                return (true);
        }
        return (false);
}

/*
 * This function waits for the last execution of the work and then
 * flushes the work.
 * It returns true if the work was pending and we waited, and false
 * otherwise.
 */
bool
linux_flush_rcu_work(struct rcu_work *rwork)
{

        if (linux_work_pending(&rwork->work)) {
                linux_rcu_barrier(RCU_TYPE_REGULAR);
                linux_flush_work(&rwork->work);
                return (true);
        }
        return (linux_flush_work(&rwork->work));
}

/*
 * This function queues the given work structure on the given
 * workqueue after a given delay in ticks.  It returns true if the
 * work was successfully [re-]queued.  Else the work is already pending
 * for completion.
 */
bool
linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
    struct delayed_work *dwork, unsigned long delay)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_TIMER,         /* start timeout */
                [WORK_ST_TIMER] = WORK_ST_TIMER,        /* NOP */
                [WORK_ST_TASK] = WORK_ST_TASK,          /* NOP */
                [WORK_ST_EXEC] = WORK_ST_TIMER,         /* start timeout */
                [WORK_ST_CANCEL] = WORK_ST_TIMER,       /* start timeout */
        };
        bool res;

        if (atomic_read(&wq->draining) != 0)
                return (!work_pending(&dwork->work));

        /*
         * Clamp the delay to a valid ticks value, some consumers pass
         * MAX_SCHEDULE_TIMEOUT.
         */
        if (delay > INT_MAX)
                delay = INT_MAX;

        mtx_lock(&dwork->timer.mtx);
        switch (linux_update_state(&dwork->work.state, states)) {
        case WORK_ST_EXEC:
        case WORK_ST_CANCEL:
                if (delay == 0 && linux_work_exec_unblock(&dwork->work)) {
                        dwork->timer.expires = jiffies;
                        res = true;
                        goto out;
                }
                /* FALLTHROUGH */
        case WORK_ST_IDLE:
                dwork->work.work_queue = wq;
                dwork->timer.expires = jiffies + delay;

                if (delay == 0) {
                        linux_delayed_work_enqueue(dwork);
                } else if (unlikely(cpu != WORK_CPU_UNBOUND)) {
                        callout_reset_on(&dwork->timer.callout, delay,
                            &linux_delayed_work_timer_fn, dwork, cpu);
                } else {
                        callout_reset(&dwork->timer.callout, delay,
                            &linux_delayed_work_timer_fn, dwork);
                }
                res = true;
                break;
        default:
                res = false;
                break;
        }
out:
        mtx_unlock(&dwork->timer.mtx);
        return (res);
}
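
/*
 * The delay is expressed in ticks (jiffies), so callers typically
 * convert from milliseconds first.  A minimal consumer side sketch,
 * assuming the usual <linux/workqueue.h> wrappers and a hypothetical
 * softc:
 *
 *      INIT_DELAYED_WORK(&sc->my_dwork, my_task_fn);
 *      queue_delayed_work(system_wq, &sc->my_dwork, msecs_to_jiffies(100));
 *
 * A zero delay skips the callout and enqueues the taskqueue task
 * directly, as handled above.
 */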

void
linux_work_fn(void *context, int pending)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_IDLE,          /* NOP */
                [WORK_ST_TIMER] = WORK_ST_EXEC,         /* delayed work w/o timeout */
                [WORK_ST_TASK] = WORK_ST_EXEC,          /* call callback */
                [WORK_ST_EXEC] = WORK_ST_IDLE,          /* complete callback */
                [WORK_ST_CANCEL] = WORK_ST_EXEC,        /* failed to cancel */
        };
        struct work_struct *work;
        struct workqueue_struct *wq;
        struct work_exec exec;
        struct task_struct *task;

        task = current;

        /* setup local variables */
        work = context;
        wq = work->work_queue;

        /* store target pointer */
        exec.target = work;

        /* insert executor into list */
        WQ_EXEC_LOCK(wq);
        TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry);
        while (1) {
                switch (linux_update_state(&work->state, states)) {
                case WORK_ST_TIMER:
                case WORK_ST_TASK:
                case WORK_ST_CANCEL:
                        WQ_EXEC_UNLOCK(wq);

                        /* set current work structure */
                        task->work = work;

                        /* call work function */
                        work->func(work);

                        /* clear current work structure */
                        task->work = NULL;

                        WQ_EXEC_LOCK(wq);
                        /* check if unblocked */
                        if (exec.target != work) {
                                /* reapply block */
                                exec.target = work;
                                break;
                        }
                        /* FALLTHROUGH */
                default:
                        goto done;
                }
        }
done:
        /* remove executor from list */
        TAILQ_REMOVE(&wq->exec_head, &exec, entry);
        WQ_EXEC_UNLOCK(wq);
}

void
linux_delayed_work_fn(void *context, int pending)
{
        struct delayed_work *dwork = context;

        /*
         * Make sure the timer belonging to the delayed work gets
         * drained before invoking the work function.  Else the timer
         * mutex may still be in use which can lead to use-after-free
         * situations, because the work function might free the work
         * structure before returning.
         */
        callout_drain(&dwork->timer.callout);

        linux_work_fn(&dwork->work, pending);
}

static void
linux_delayed_work_timer_fn(void *arg)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_IDLE,          /* NOP */
                [WORK_ST_TIMER] = WORK_ST_TASK,         /* start queueing task */
                [WORK_ST_TASK] = WORK_ST_TASK,          /* NOP */
                [WORK_ST_EXEC] = WORK_ST_EXEC,          /* NOP */
                [WORK_ST_CANCEL] = WORK_ST_TASK,        /* failed to cancel */
        };
        struct delayed_work *dwork = arg;

        switch (linux_update_state(&dwork->work.state, states)) {
        case WORK_ST_TIMER:
        case WORK_ST_CANCEL:
                linux_delayed_work_enqueue(dwork);
                break;
        default:
                break;
        }
}
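
/*
 * Delayed work is thus dispatched in two hops: the callout above moves
 * the item from TIMER to TASK and enqueues the taskqueue task, and the
 * taskqueue then runs linux_delayed_work_fn(), which drains the callout
 * before handing the item to linux_work_fn() for execution.
 */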

/*
 * This function cancels the given work structure in a
 * non-blocking fashion.  It returns non-zero if the work was
 * successfully cancelled.  Else the work may still be busy or already
 * cancelled.
 */
bool
linux_cancel_work(struct work_struct *work)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_IDLE,          /* NOP */
                [WORK_ST_TIMER] = WORK_ST_TIMER,        /* can't happen */
                [WORK_ST_TASK] = WORK_ST_IDLE,          /* cancel */
                [WORK_ST_EXEC] = WORK_ST_EXEC,          /* NOP */
                [WORK_ST_CANCEL] = WORK_ST_IDLE,        /* can't happen */
        };
        struct taskqueue *tq;

        MPASS(atomic_read(&work->state) != WORK_ST_TIMER);
        MPASS(atomic_read(&work->state) != WORK_ST_CANCEL);

        switch (linux_update_state(&work->state, states)) {
        case WORK_ST_TASK:
                tq = work->work_queue->taskqueue;
                if (taskqueue_cancel(tq, &work->work_task, NULL) == 0)
                        return (true);
                /* FALLTHROUGH */
        default:
                return (false);
        }
}

/*
 * This function cancels the given work structure in a synchronous
 * fashion.  It returns non-zero if the work was successfully
 * cancelled.  Else the work was already cancelled.
 */
bool
linux_cancel_work_sync(struct work_struct *work)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_IDLE,          /* NOP */
                [WORK_ST_TIMER] = WORK_ST_TIMER,        /* can't happen */
                [WORK_ST_TASK] = WORK_ST_IDLE,          /* cancel and drain */
                [WORK_ST_EXEC] = WORK_ST_IDLE,          /* too late, drain */
                [WORK_ST_CANCEL] = WORK_ST_IDLE,        /* cancel and drain */
        };
        struct taskqueue *tq;
        bool retval = false;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "linux_cancel_work_sync() might sleep");
retry:
        switch (linux_update_state(&work->state, states)) {
        case WORK_ST_IDLE:
        case WORK_ST_TIMER:
                return (retval);
        case WORK_ST_EXEC:
                tq = work->work_queue->taskqueue;
                if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
                        taskqueue_drain(tq, &work->work_task);
                goto retry;     /* work may have restarted itself */
        default:
                tq = work->work_queue->taskqueue;
                if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
                        taskqueue_drain(tq, &work->work_task);
                retval = true;
                goto retry;
        }
}

/*
 * This function atomically stops the timer and callback.  The timer
 * callback will not be called after this function returns.  This
 * function returns true when the timeout was cancelled.  Else the
 * timeout was not started or has already been called.
 */
static inline bool
linux_cancel_timer(struct delayed_work *dwork, bool drain)
{
        bool cancelled;

        mtx_lock(&dwork->timer.mtx);
        cancelled = (callout_stop(&dwork->timer.callout) == 1);
        mtx_unlock(&dwork->timer.mtx);

        /* check if we should drain */
        if (drain)
                callout_drain(&dwork->timer.callout);
        return (cancelled);
}

/*
 * This function cancels the given delayed work structure in a
 * non-blocking fashion.  It returns non-zero if the work was
 * successfully cancelled.  Else the work may still be busy or already
 * cancelled.
 */
bool
linux_cancel_delayed_work(struct delayed_work *dwork)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_IDLE,          /* NOP */
                [WORK_ST_TIMER] = WORK_ST_CANCEL,       /* try to cancel */
                [WORK_ST_TASK] = WORK_ST_CANCEL,        /* try to cancel */
                [WORK_ST_EXEC] = WORK_ST_EXEC,          /* NOP */
                [WORK_ST_CANCEL] = WORK_ST_CANCEL,      /* NOP */
        };
        struct taskqueue *tq;
        bool cancelled;

        mtx_lock(&dwork->timer.mtx);
        switch (linux_update_state(&dwork->work.state, states)) {
        case WORK_ST_TIMER:
        case WORK_ST_CANCEL:
                cancelled = (callout_stop(&dwork->timer.callout) == 1);
                if (cancelled) {
                        atomic_cmpxchg(&dwork->work.state,
                            WORK_ST_CANCEL, WORK_ST_IDLE);
                        mtx_unlock(&dwork->timer.mtx);
                        return (true);
                }
                /* FALLTHROUGH */
        case WORK_ST_TASK:
                tq = dwork->work.work_queue->taskqueue;
                if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
                        atomic_cmpxchg(&dwork->work.state,
                            WORK_ST_CANCEL, WORK_ST_IDLE);
                        mtx_unlock(&dwork->timer.mtx);
                        return (true);
                }
                /* FALLTHROUGH */
        default:
                mtx_unlock(&dwork->timer.mtx);
                return (false);
        }
}

/*
 * This function cancels the given delayed work structure in a
 * synchronous fashion.  It returns true if the work was successfully
 * cancelled.  Else the work was already cancelled.
 */
static bool
linux_cancel_delayed_work_sync_int(struct delayed_work *dwork)
{
        static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
                [WORK_ST_IDLE] = WORK_ST_IDLE,          /* NOP */
                [WORK_ST_TIMER] = WORK_ST_IDLE,         /* cancel and drain */
                [WORK_ST_TASK] = WORK_ST_IDLE,          /* cancel and drain */
                [WORK_ST_EXEC] = WORK_ST_IDLE,          /* too late, drain */
                [WORK_ST_CANCEL] = WORK_ST_IDLE,        /* cancel and drain */
        };
        struct taskqueue *tq;
        int ret, state;
        bool cancelled;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "linux_cancel_delayed_work_sync() might sleep");
        mtx_lock(&dwork->timer.mtx);

        state = linux_update_state(&dwork->work.state, states);
        switch (state) {
        case WORK_ST_IDLE:
                mtx_unlock(&dwork->timer.mtx);
                return (false);
        case WORK_ST_TIMER:
        case WORK_ST_CANCEL:
                cancelled = (callout_stop(&dwork->timer.callout) == 1);

                tq = dwork->work.work_queue->taskqueue;
                ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
                mtx_unlock(&dwork->timer.mtx);

                callout_drain(&dwork->timer.callout);
                taskqueue_drain(tq, &dwork->work.work_task);
                return (cancelled || (ret != 0));
        default:
                tq = dwork->work.work_queue->taskqueue;
                ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
                mtx_unlock(&dwork->timer.mtx);
                if (ret != 0)
                        taskqueue_drain(tq, &dwork->work.work_task);
                return (ret != 0);
        }
}

bool
linux_cancel_delayed_work_sync(struct delayed_work *dwork)
{
        bool res;

        res = false;
        while (linux_cancel_delayed_work_sync_int(dwork))
                res = true;
        return (res);
}
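
/*
 * The _sync variant loops because a work callback may requeue itself;
 * every pass cancels and drains both the callout and the taskqueue task
 * until the item is found idle.  A typical consumer side use, assuming
 * the usual <linux/workqueue.h> wrapper and a hypothetical softc, is a
 * detach path that must quiesce the work before freeing its container:
 *
 *      cancel_delayed_work_sync(&sc->my_dwork);
 */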

/*
 * This function waits until the given work structure is completed.
 * It returns non-zero if the work was successfully
 * waited for.  Else the work was not waited for.
 */
bool
linux_flush_work(struct work_struct *work)
{
        struct taskqueue *tq;
        bool retval;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "linux_flush_work() might sleep");

        switch (atomic_read(&work->state)) {
        case WORK_ST_IDLE:
                return (false);
        default:
                tq = work->work_queue->taskqueue;
                retval = taskqueue_poll_is_busy(tq, &work->work_task);
                taskqueue_drain(tq, &work->work_task);
                return (retval);
        }
}

/*
 * This function waits until the given delayed work structure is
 * completed.  It returns non-zero if the work was successfully waited
 * for.  Else the work was not waited for.
 */
bool
linux_flush_delayed_work(struct delayed_work *dwork)
{
        struct taskqueue *tq;
        bool retval;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "linux_flush_delayed_work() might sleep");

        switch (atomic_read(&dwork->work.state)) {
        case WORK_ST_IDLE:
                return (false);
        case WORK_ST_TIMER:
                if (linux_cancel_timer(dwork, 1))
                        linux_delayed_work_enqueue(dwork);
                /* FALLTHROUGH */
        default:
                tq = dwork->work.work_queue->taskqueue;
                retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task);
                taskqueue_drain(tq, &dwork->work.work_task);
                return (retval);
        }
}

/*
 * This function returns true if the given work is pending, and not
 * yet executing:
 */
bool
linux_work_pending(struct work_struct *work)
{
        switch (atomic_read(&work->state)) {
        case WORK_ST_TIMER:
        case WORK_ST_TASK:
        case WORK_ST_CANCEL:
                return (true);
        default:
                return (false);
        }
}

/*
 * This function returns true if the given work is busy.
 */
bool
linux_work_busy(struct work_struct *work)
{
        struct taskqueue *tq;

        switch (atomic_read(&work->state)) {
        case WORK_ST_IDLE:
                return (false);
        case WORK_ST_EXEC:
                tq = work->work_queue->taskqueue;
                return (taskqueue_poll_is_busy(tq, &work->work_task));
        default:
                return (true);
        }
}

struct workqueue_struct *
linux_create_workqueue_common(const char *name, int cpus)
{
        struct workqueue_struct *wq;

        /*
         * If zero CPUs are specified use the default number of CPUs:
         */
        if (cpus == 0)
                cpus = linux_default_wq_cpus;

        wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO);
        wq->taskqueue = taskqueue_create(name, M_WAITOK,
            taskqueue_thread_enqueue, &wq->taskqueue);
        atomic_set(&wq->draining, 0);
        taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
        TAILQ_INIT(&wq->exec_head);
        mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF);

        return (wq);
}

void
linux_destroy_workqueue(struct workqueue_struct *wq)
{
        atomic_inc(&wq->draining);
        drain_workqueue(wq);
        taskqueue_free(wq->taskqueue);
        mtx_destroy(&wq->exec_mtx);
        kfree(wq);
}

void
linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
{
        memset(dwork, 0, sizeof(*dwork));
        dwork->work.func = func;
        TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork);
        mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL,
            MTX_DEF | MTX_NOWITNESS);
        callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0);
}

struct work_struct *
linux_current_work(void)
{
        return (current->work);
}
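
/*
 * The global workqueue pointers declared near the top of this file are
 * populated at SYSINIT(9) time below and cleared again at SYSUNINIT
 * time.  Only two backing queues are created: system_long_wq gets its
 * own queue, while the remaining system_*_wq pointers all share the
 * short workqueue.
 */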

static void
linux_work_init(void *arg)
{
        int max_wq_cpus = mp_ncpus + 1;

        /* avoid deadlock when there are too few threads */
        if (max_wq_cpus < 4)
                max_wq_cpus = 4;

        /* set default number of CPUs */
        linux_default_wq_cpus = max_wq_cpus;

        linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus);
        linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus);

        /* populate the workqueue pointers */
        system_long_wq = linux_system_long_wq;
        system_wq = linux_system_short_wq;
        system_power_efficient_wq = linux_system_short_wq;
        system_unbound_wq = linux_system_short_wq;
        system_highpri_wq = linux_system_short_wq;
}
SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL);

static void
linux_work_uninit(void *arg)
{
        destroy_workqueue(linux_system_short_wq);
        destroy_workqueue(linux_system_long_wq);

        /* clear workqueue pointers */
        system_long_wq = NULL;
        system_wq = NULL;
        system_power_efficient_wq = NULL;
        system_unbound_wq = NULL;
        system_highpri_wq = NULL;
}
SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL);

void
linux_irq_work_fn(void *context, int pending)
{
        struct irq_work *irqw = context;

        irqw->func(irqw);
}

static void
linux_irq_work_init_fn(void *context, int pending)
{
        /*
         * LinuxKPI performs lazy allocation of memory structures required by
         * current on the first access to it.  As some irq_work clients read
         * it with a spin lock held, we have to preallocate td_lkpi_task
         * before the first call to irq_work_queue().  As irq_work uses a
         * single thread, it is enough to read current once at SYSINIT stage.
         */
        if (current == NULL)
                panic("irq_work taskqueue is not initialized");
}
static struct task linux_irq_work_init_task =
    TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task);

static void
linux_irq_work_init(void *arg)
{
        linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq",
            M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq);
        taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT,
            "linuxkpi_irq_wq");
        taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task);
}
SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_init, NULL);

static void
linux_irq_work_uninit(void *arg)
{
        taskqueue_drain_all(linux_irq_work_tq);
        taskqueue_free(linux_irq_work_tq);
}
SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND,
    linux_irq_work_uninit, NULL);
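
/*
 * The linuxkpi_irq_wq taskqueue above backs the LinuxKPI irq_work API.
 * A minimal consumer side sketch, assuming the usual <linux/irq_work.h>
 * helpers and a hypothetical callback:
 *
 *      init_irq_work(&sc->my_irq_work, my_irq_work_fn);
 *      irq_work_queue(&sc->my_irq_work);
 *
 * The callback then runs from the single linuxkpi_irq_wq thread via
 * linux_irq_work_fn() above.
 */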