From: Eric Dumazet
Date: Wed, 29 Oct 2014 01:05:13 +0000 (-0700)
Subject: net: introduce napi_schedule_irqoff()
X-Git-Tag: v3.19-rc1~118^2~321
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=bc9ad166e38ae1cdcb5323a8aa45dff834d68bfa;p=uclinux-h8%2Flinux.git

net: introduce napi_schedule_irqoff()

napi_schedule() can be called from any context and has to mask hard
irqs. Add a variant that can only be called from hard interrupt
handlers or when irqs are already masked.

Many NIC drivers can use it from their hard IRQ handler instead of the
generic variant.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 74fd5d37f15a..c85e06512246 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -386,6 +386,7 @@ typedef enum rx_handler_result rx_handler_result_t;
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 void __napi_schedule(struct napi_struct *n);
+void __napi_schedule_irqoff(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -420,6 +421,18 @@ static inline void napi_schedule(struct napi_struct *n)
 		__napi_schedule(n);
 }
 
+/**
+ * napi_schedule_irqoff - schedule NAPI poll
+ * @n: napi context
+ *
+ * Variant of napi_schedule(), assuming hard irqs are masked.
+ */
+static inline void napi_schedule_irqoff(struct napi_struct *n)
+{
+	if (napi_schedule_prep(n))
+		__napi_schedule_irqoff(n);
+}
+
 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
 static inline bool napi_reschedule(struct napi_struct *napi)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
index b793e3521a36..759940cdf896 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4372,7 +4372,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
  * __napi_schedule - schedule for receive
  * @n: entry to schedule
  *
- * The entry's receive function will be scheduled to run
+ * The entry's receive function will be scheduled to run.
+ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
  */
 void __napi_schedule(struct napi_struct *n)
 {
@@ -4384,6 +4385,18 @@ void __napi_schedule(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule);
 
+/**
+ * __napi_schedule_irqoff - schedule for receive
+ * @n: entry to schedule
+ *
+ * Variant of __napi_schedule() assuming hard irqs are masked
+ */
+void __napi_schedule_irqoff(struct napi_struct *n)
+{
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
+}
+EXPORT_SYMBOL(__napi_schedule_irqoff);
+
 void __napi_complete(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
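
A minimal usage sketch, not part of the patch: how a NIC driver's hard
IRQ handler might call the new helper. The driver names (mydrv_*) and
the private structure are hypothetical; only napi_schedule_irqoff()
itself comes from this commit.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical driver-private state; napi is registered elsewhere
 * with netif_napi_add().
 */
struct mydrv_priv {
	struct napi_struct napi;
};

static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	/* In a hard IRQ handler, hard irqs are already masked, so the
	 * irqoff variant avoids the local_irq_save()/local_irq_restore()
	 * pair that napi_schedule() would otherwise perform.
	 */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}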