/*
 * linux/kernel/context.c
 *
 * Mechanism for running arbitrary tasks in process context
 *
 * dwmw2@redhat.com: Genesis
 *
 * andrewm@uow.edu.au: 2.4.0-test12
 * - Child reaping
 * - Support for tasks which re-add themselves
 * - flush_scheduled_tasks.
 */

#define __KERNEL_SYSCALLS__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/signal.h>

static DECLARE_TASK_QUEUE(tq_context);
static DECLARE_WAIT_QUEUE_HEAD(context_task_wq);
static DECLARE_WAIT_QUEUE_HEAD(context_task_done);
static int keventd_running;
static struct task_struct *keventd_task;

static int need_keventd(const char *who)
{
	if (keventd_running == 0)
		printk(KERN_ERR "%s(): keventd has not started\n", who);
	return keventd_running;
}

int current_is_keventd(void)
{
	int ret = 0;

	if (need_keventd(__FUNCTION__))
		ret = (current == keventd_task);
	return ret;
}

/**
 * schedule_task - schedule a function for subsequent execution in process context.
 * @task: pointer to a &tq_struct which defines the function to be scheduled.
 *
 * May be called from interrupt context.  The scheduled function is run at some
 * time in the near future by the keventd kernel thread.  If it can sleep, it
 * should be designed to do so for the minimum possible time, as it will be
 * stalling all other scheduled tasks.
 *
 * schedule_task() returns non-zero if the task was successfully scheduled.
 * If @task is already residing on a task queue then schedule_task() fails
 * to schedule your task and returns zero.
 */
int schedule_task(struct tq_struct *task)
{
	int ret;

	need_keventd(__FUNCTION__);
	ret = queue_task(task, &tq_context);
	wake_up(&context_task_wq);
	return ret;
}
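/*
 * Illustrative sketch (not part of the original file, not compiled): one
 * way a 2.4-era driver might use schedule_task() to defer work from its
 * interrupt handler into keventd's process context.  The identifiers
 * example_handler, example_task and example_interrupt are hypothetical.
 */
#if 0
static void example_handler(void *data)
{
	/* Runs in keventd's process context: sleeping is permitted here,
	 * but should be brief, since it stalls all other scheduled tasks. */
}

static struct tq_struct example_task = {
	routine:	example_handler,
	data:		NULL,
};

static void example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Safe from interrupt context.  Returns non-zero on success and
	 * zero if example_task was already queued and waiting to run. */
	schedule_task(&example_task);
}
#endif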
static int context_thread(void *dummy)
{
	struct task_struct *curtask = current;
	DECLARE_WAITQUEUE(wait, curtask);
	struct k_sigaction sa;

	daemonize();
	strcpy(curtask->comm, "keventd");
	keventd_running = 1;
	keventd_task = curtask;

	spin_lock_irq(&curtask->sigmask_lock);
	siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	/* Install a handler so SIGCHLD is delivered */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	/*
	 * If one of the functions on a task queue re-adds itself
	 * to the task queue we call schedule() in state TASK_RUNNING
	 */
	for (;;) {
		set_task_state(curtask, TASK_INTERRUPTIBLE);
		add_wait_queue(&context_task_wq, &wait);
		if (TQ_ACTIVE(tq_context))
			set_task_state(curtask, TASK_RUNNING);
		schedule();
		remove_wait_queue(&context_task_wq, &wait);
		run_task_queue(&tq_context);
		wake_up(&context_task_done);
		if (signal_pending(curtask)) {
			while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
				;
			spin_lock_irq(&curtask->sigmask_lock);
			flush_signals(curtask);
			recalc_sigpending(curtask);
			spin_unlock_irq(&curtask->sigmask_lock);
		}
	}
}

/**
 * flush_scheduled_tasks - ensure that any scheduled tasks have run to completion.
 *
 * Forces execution of the schedule_task() queue and blocks until its completion.
 *
 * If a kernel subsystem uses schedule_task() and wishes to flush any pending
 * tasks, it should use this function.  This is typically used in driver
 * shutdown handlers.
 *
 * The caller should hold no spinlocks and should hold no semaphores which
 * could cause the scheduled tasks to block.
 */
static struct tq_struct dummy_task;

void flush_scheduled_tasks(void)
{
	int count;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * Do it twice.  It's possible, albeit highly unlikely, that
	 * the caller queued a task immediately before calling us,
	 * and that the keventd thread was already past the run_task_queue()
	 * but not yet into wake_up(), so it woke us up before completing
	 * the caller's queued task or our new dummy task.
	 */
	add_wait_queue(&context_task_done, &wait);
	for (count = 0; count < 2; count++) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Queue a dummy task to make sure we get kicked */
		schedule_task(&dummy_task);

		/* Wait for it to complete */
		schedule();
	}
	remove_wait_queue(&context_task_done, &wait);
}

int start_context_thread(void)
{
	kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES);
	return 0;
}

EXPORT_SYMBOL(schedule_task);
EXPORT_SYMBOL(flush_scheduled_tasks);
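/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * hypothetical driver shutdown path of the kind the flush_scheduled_tasks()
 * comment above describes.  The driver first stops queueing new work, then
 * flushes, so no deferred function can touch its data after the free.
 * struct example_device, example_stop() and example_shutdown() are
 * assumptions, not part of this file.
 */
#if 0
static void example_shutdown(struct example_device *dev)
{
	example_stop(dev);		/* prevent further schedule_task() calls */
	flush_scheduled_tasks();	/* wait for already-queued tasks to run */
	kfree(dev);			/* now safe: no task can reference dev */
}
#endif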