/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * active_softirqs -- per cpu, a mask of softirqs that are being handled,
 * with the expectation that approximate answers are acceptable and therefore
 * no synchronization.
 */
DEFINE_PER_CPU(__u32, active_softirqs);
const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
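/*
 * Illustrative note (added, not part of the original file): with the
 * offsets above, in_softirq() tests the whole SOFTIRQ_MASK, so it is true
 * both while a handler runs and while bottom halves are merely disabled,
 * whereas in_serving_softirq() tests SOFTIRQ_OFFSET only, so it is true
 * only while a softirq handler is actually executing. For example:
 *
 *	local_bh_disable();
 *	WARN_ON(!in_softirq());		(counts as softirq context)
 *	WARN_ON(in_serving_softirq());	(no handler is running yet)
 *	local_bh_enable();
 */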
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
#endif
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
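/*
 * Illustrative note (added, not part of the original file): the public
 * local_bh_disable()/local_bh_enable() helpers in linux/bottom_half.h are
 * thin wrappers around the *_ip variants above, passing
 * SOFTIRQ_DISABLE_OFFSET, e.g.:
 *
 *	local_bh_disable();
 *	(touch data shared with a softirq handler)
 *	local_bh_enable();	(pending softirqs may run here)
 */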
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();
	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();
	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
#define softirq_deferred_for_rt(pending)		\
({							\
	__u32 deferred = 0;				\
	if (cpupri_check_rt()) {			\
		deferred = pending & LONG_SOFTIRQ_MASK;	\
		pending &= ~LONG_SOFTIRQ_MASK;		\
	}						\
	deferred;					\
})
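/*
 * Added interpretation (not from the original source): when an RT task is
 * runnable on this CPU (cpupri_check_rt()), the softirqs in
 * LONG_SOFTIRQ_MASK are split out of the pending set here; __do_softirq()
 * below leaves them marked pending and wakes ksoftirqd instead of running
 * them inline, so they do not delay the RT task in this context.
 */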
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 deferred;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	deferred = softirq_deferred_for_rt(pending);
	account_irq_enter_time(current);
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(deferred);
	__this_cpu_write(active_softirqs, pending);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	__this_cpu_write(active_softirqs, 0);
	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	deferred = softirq_deferred_for_rt(pending);

	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;
	}

	if (pending | deferred)
		wakeup_softirqd();

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}
static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
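/*
 * Illustrative usage (added note, not part of this file): subsystems
 * register their handler once at init time and raise the softirq later
 * from interrupt context, e.g. net/core/dev.c does
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *
 * and later __netif_schedule() ends up calling
 * raise_softirq_irqoff(NET_TX_SOFTIRQ).
 */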
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
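/*
 * Added note: each per-CPU list above is a singly linked list with a tail
 * pointer, so a tasklet can be appended in O(1) with interrupts disabled
 * and the whole list can be detached in O(1) by tasklet_action() below.
 */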
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
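/*
 * Illustrative driver-side usage (added note, not part of this file;
 * my_tasklet and my_func are hypothetical names):
 *
 *	static void my_func(unsigned long data);
 *	static DECLARE_TASKLET(my_tasklet, my_func, 0);
 *
 *	interrupt handler:	tasklet_schedule(&my_tasklet);
 *	device teardown:	tasklet_kill(&my_tasklet);
 */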
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
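/*
 * Illustrative usage (added note, not part of this file; my_cb and
 * my_ttimer are hypothetical names):
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t);
 *	static struct tasklet_hrtimer my_ttimer;
 *
 *	tasklet_hrtimer_init(&my_ttimer, my_cb, CLOCK_MONOTONIC,
 *			     HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */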
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
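/*
 * Added note: only HI_SOFTIRQ and TASKLET_SOFTIRQ are opened here; the
 * other softirqs are registered by their owning subsystems (timers,
 * networking, block, scheduler, RCU) via open_softirq().
 */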
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}