/*
       * linux/kernel/context.c
       *
       * Mechanism for running arbitrary tasks in process context
       *
       * dwmw2@redhat.com:		Genesis
       *
       * andrewm@uow.edu.au:		2.4.0-test12
       *	- Child reaping
       *	- Support for tasks which re-add themselves
       *	- flush_scheduled_tasks.
       */
      
      #define __KERNEL_SYSCALLS__
      
      #include <linux/module.h>
      #include <linux/kernel.h>
      #include <linux/sched.h>
      #include <linux/init.h>
      #include <linux/unistd.h>
      #include <linux/signal.h>
      
      static DECLARE_TASK_QUEUE(tq_context);
      static DECLARE_WAIT_QUEUE_HEAD(context_task_wq);
      static DECLARE_WAIT_QUEUE_HEAD(context_task_done);
      static int keventd_running;
      static struct task_struct *keventd_task;
      
  29  static int need_keventd(const char *who)
      {
  31  	if (keventd_running == 0)
      		printk(KERN_ERR "%s(): keventd has not started\n", who);
  33  	return keventd_running;
      }
      	
  36  int current_is_keventd(void)
      {
      	int ret = 0;
  39  	if (need_keventd(__FUNCTION__))
      		ret = (current == keventd_task);
  41  	return ret;
      }
      
      /**
       * schedule_task - schedule a function for subsequent execution in process context.
       * @task: pointer to a &tq_struct which defines the function to be scheduled.
       *
       * May be called from interrupt context.  The scheduled function is run at some
       * time in the near future by the keventd kernel thread.  If it can sleep, it
       * should be designed to do so for the minimum possible time, as it will be
       * stalling all other scheduled tasks.
       *
       * schedule_task() returns non-zero if the task was successfully scheduled.
       * If @task is already residing on a task queue then schedule_task() fails
       * to schedule your task and returns zero.
       */
  57  int schedule_task(struct tq_struct *task)
      {
      	int ret;
      	need_keventd(__FUNCTION__);
      	ret = queue_task(task, &tq_context);
      	wake_up(&context_task_wq);
  63  	return ret;
      }
      
/*
 * keventd: kernel thread that drains tq_context in process context.
 * Never returns.  @dummy is unused.
 */
static int context_thread(void *dummy)
{
	struct task_struct *curtask = current;
	DECLARE_WAITQUEUE(wait, curtask);
	struct k_sigaction sa;

	/* Detach from user resources and take the well-known name */
	daemonize();
	strcpy(curtask->comm, "keventd");
	/* Publish ourselves so need_keventd()/current_is_keventd() work */
	keventd_running = 1;
	keventd_task = curtask;

	/* Block every signal except SIGCHLD, which we need for child reaping */
	spin_lock_irq(&curtask->sigmask_lock);
	siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	/* Install a handler so SIGCLD is delivered */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	/*
	 * If one of the functions on a task queue re-adds itself
	 * to the task queue we call schedule() in state TASK_RUNNING
	 */
	for (;;) {
		/*
		 * Set INTERRUPTIBLE *before* testing TQ_ACTIVE: if
		 * schedule_task() queued work after the test we would
		 * otherwise sleep through its wake_up() (lost wakeup).
		 */
		set_task_state(curtask, TASK_INTERRUPTIBLE);
		add_wait_queue(&context_task_wq, &wait);
		if (TQ_ACTIVE(tq_context))
			set_task_state(curtask, TASK_RUNNING);
		schedule();
		remove_wait_queue(&context_task_wq, &wait);
		/* Run every queued function, in process context */
		run_task_queue(&tq_context);
		/* Notify flush_scheduled_tasks() waiters that a pass finished */
		wake_up(&context_task_done);
		if (signal_pending(curtask)) {
			/* SIGCHLD arrived: reap all exited children */
			while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
				;
			flush_signals(curtask);
			recalc_sigpending(curtask);
		}
	}
}
      
      /**
       * flush_scheduled_tasks - ensure that any scheduled tasks have run to completion.
       *
       * Forces execution of the schedule_task() queue and blocks until its completion.
       *
       * If a kernel subsystem uses schedule_task() and wishes to flush any pending
       * tasks, it should use this function.  This is typically used in driver shutdown
       * handlers.
       *
       * The caller should hold no spinlocks and should hold no semaphores which could
       * cause the scheduled tasks to block.
       */
      static struct tq_struct dummy_task;
      
void flush_scheduled_tasks(void)
{
	int count;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * Do it twice. It's possible, albeit highly unlikely, that
	 * the caller queued a task immediately before calling us,
	 * and that the eventd thread was already past the run_task_queue()
	 * but not yet into wake_up(), so it woke us up before completing
	 * the caller's queued task or our new dummy task.
	 */
	/*
	 * Register on the done queue *before* queueing work so keventd's
	 * wake_up(&context_task_done) cannot be missed.
	 */
	add_wait_queue(&context_task_done, &wait);
	for (count = 0; count < 2; count++) {
		/* Must change state before schedule_task(), same race rule */
		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Queue a dummy task to make sure we get kicked */
		schedule_task(&dummy_task);

		/* Wait for it to complete */
		schedule();
	}
	remove_wait_queue(&context_task_done, &wait);
}
      	
 149  int start_context_thread(void)
      {
      	kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
 152  	return 0;
      }
      
      EXPORT_SYMBOL(schedule_task);
      EXPORT_SYMBOL(flush_scheduled_tasks);