Path: sys/compat/linuxkpi/common/src/linux_kthread.c
/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

enum {
	KTHREAD_SHOULD_STOP_MASK = (1 << 0),
	KTHREAD_SHOULD_PARK_MASK = (1 << 1),
	KTHREAD_IS_PARKED_MASK = (1 << 2),
};

bool
linux_kthread_should_stop_task(struct task_struct *task)
{

	return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
}

bool
linux_kthread_should_stop(void)
{

	return (atomic_read(&current->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
}

int
linux_kthread_stop(struct task_struct *task)
{
	int retval;

	/*
	 * Assume task is still alive else caller should not call
	 * kthread_stop():
	 */
	atomic_or(KTHREAD_SHOULD_STOP_MASK, &task->kthread_flags);
	kthread_unpark(task);
	wake_up_process(task);
	wait_for_completion(&task->exited);

	/*
	 * Get return code and free task structure:
	 */
	retval = task->task_ret;
	put_task_struct(task);

	return (retval);
}

int
linux_kthread_park(struct task_struct *task)
{

	atomic_or(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
	wake_up_process(task);
	wait_for_completion(&task->parked);
	return (0);
}

/*
 * Called from the kthread itself: acknowledge a pending park request by
 * completing "parked" and sleep until kthread_unpark() clears the request.
 */
void
linux_kthread_parkme(void)
{
	struct task_struct *task;

	task = current;
	set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
	while (linux_kthread_should_park()) {
		while ((atomic_fetch_or(KTHREAD_IS_PARKED_MASK,
		    &task->kthread_flags) & KTHREAD_IS_PARKED_MASK) == 0)
			complete(&task->parked);
		schedule();
		set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
	}
	atomic_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags);
	set_task_state(task, TASK_RUNNING);
}

bool
linux_kthread_should_park(void)
{
	struct task_struct *task;

	task = current;
	return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_PARK_MASK);
}

/*
 * Clear the park request and wake the thread if it has already parked
 * itself.
 */
void
linux_kthread_unpark(struct task_struct *task)
{

	atomic_andnot(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
	if ((atomic_fetch_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags) &
	    KTHREAD_IS_PARKED_MASK) != 0)
		wake_up_state(task, TASK_PARKED);
}

/*
 * Attach the Linux task state to a newly created FreeBSD thread and put
 * the thread on the run queue.
 */
struct task_struct *
linux_kthread_setup_and_run(struct thread *td, linux_task_fn_t *task_fn, void *arg)
{
	struct task_struct *task;

	linux_set_current(td);

	task = td->td_lkpi_task;
	task->task_fn = task_fn;
	task->task_data = arg;

	thread_lock(td);
	/* make sure the scheduler priority is raised */
	sched_prio(td, PI_SWI(SWI_NET));
	/* put thread into run-queue */
	sched_add(td, SRQ_BORING);

	return (task);
}

/*
 * Common kthread body: run the task function unless a stop was already
 * requested, then notify a pending kthread_stop() before exiting.
 */
void
linux_kthread_fn(void *arg __unused)
{
	struct task_struct *task = current;

	if (linux_kthread_should_stop_task(task) == 0)
		task->task_ret = task->task_fn(task->task_data);

	if (linux_kthread_should_stop_task(task) != 0) {
		struct thread *td = curthread;

		/* let kthread_stop() free data */
		td->td_lkpi_task = NULL;

		/* wakeup kthread_stop() */
		complete(&task->exited);
	}
	kthread_exit();
}

/* Taskqueue callback which executes a queued struct kthread_work item. */
void
lkpi_kthread_work_fn(void *context, int pending __unused)
{
	struct kthread_work *work = context;

	work->func(work);
}

/* Taskqueue callback which records the worker's backing task_struct. */
void
lkpi_kthread_worker_init_fn(void *context, int pending __unused)
{
	struct kthread_worker *worker = context;

	worker->task = current;
}
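
#if 0
/*
 * Illustrative sketch only, not part of the upstream file: a minimal
 * example of how a LinuxKPI consumer reaches the shims above through the
 * regular <linux/kthread.h> wrappers.  The names example_kthread_fn and
 * example_usage are hypothetical, and error handling for kthread_run()
 * is omitted.
 */
static int
example_kthread_fn(void *data __unused)
{

	while (!kthread_should_stop()) {
		/* park requests block in linux_kthread_parkme() above */
		if (kthread_should_park())
			kthread_parkme();
		/* ... perform one unit of work, then yield the CPU ... */
		schedule();
	}
	/* the return value is collected by linux_kthread_stop() */
	return (0);
}

static void
example_usage(void)
{
	struct task_struct *example_task;

	/* thread creation ends up in linux_kthread_setup_and_run() */
	example_task = kthread_run(example_kthread_fn, NULL, "example");

	/* request stop, wake the thread and wait in linux_kthread_stop() */
	kthread_stop(example_task);
}
#endif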