We have never been able to track down and address the underlying
cause of the performance issues with workqueue-based service
support. svo_enqueue_xprt is called multiple times per RPC, so
it adds instruction path length, but always ends up at the same
function: svc_xprt_do_enqueue(). We do not anticipate needing
this flexibility for dynamic nfsd thread management support.
As a micro-optimization, remove .svo_enqueue_xprt because
Spectre/Meltdown makes virtual function calls more costly.
This change essentially reverts commit b9e13cdfac70 ("nfsd/sunrpc:
turn enqueueing a svc_xprt into a svc_serv operation").
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
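
For readers who want the shape of the change without the tree in front
of them, here is a minimal user-space sketch (not kernel code; every
name in it is invented for the example) of the pattern being removed:
an ops-table hook that only ever has one implementation, versus the
direct call that replaces it. Under retpoline-style Spectre/Meltdown
mitigations, the indirect call is the expensive one.

/* Illustrative sketch only; all names are made up. */
#include <stdio.h>

struct xprt { int id; };

static void do_enqueue(struct xprt *x)
{
        printf("enqueue xprt %d\n", x->id);
}

/* Before the patch: an ops table whose hook has one implementation. */
struct serv_ops {
        void (*enqueue_xprt)(struct xprt *);
};

static const struct serv_ops ops = {
        .enqueue_xprt = do_enqueue,  /* every user wires up the same target */
};

int main(void)
{
        struct xprt x = { 1 };

        ops.enqueue_xprt(&x);  /* indirect call: costlier with mitigations on */
        do_enqueue(&x);        /* after the patch: a plain direct call */
        return 0;
}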
static const struct svc_serv_ops lockd_sv_ops = {
.svo_shutdown = svc_rpcb_cleanup,
.svo_function = lockd,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
.svo_module = THIS_MODULE,
};
static const struct svc_serv_ops nfs40_cb_sv_ops = {
.svo_function = nfs4_callback_svc,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
.svo_module = THIS_MODULE,
};
#if defined(CONFIG_NFS_V4_1)
static const struct svc_serv_ops nfs41_cb_sv_ops = {
.svo_function = nfs41_callback_svc,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
.svo_module = THIS_MODULE,
};
static const struct svc_serv_ops nfsd_thread_sv_ops = {
.svo_shutdown = nfsd_last_thread,
.svo_function = nfsd,
- .svo_enqueue_xprt = svc_xprt_do_enqueue,
.svo_module = THIS_MODULE,
};
/* function for service threads to run */
int (*svo_function)(void *);
- /* queue up a transport for servicing */
- void (*svo_enqueue_xprt)(struct svc_xprt *);
-
/* optional module to count when adding threads.
* Thread function must call module_put_and_kthread_exit() to exit.
*/
const int, const unsigned short, int,
const struct cred *);
void svc_xprt_received(struct svc_xprt *xprt);
-void svc_xprt_do_enqueue(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(struct timer_list *t);
static void svc_delete_xprt(struct svc_xprt *xprt);
+static void svc_xprt_do_enqueue(struct svc_xprt *xprt);
/* apparently the "standard" is that clients close
* idle connections after 5 minutes, servers after
}
/* As soon as we clear busy, the xprt could be closed and
- * 'put', so we need a reference to call svc_enqueue_xprt with:
+ * 'put', so we need a reference to call svc_xprt_do_enqueue with:
*/
svc_xprt_get(xprt);
smp_mb__before_atomic();
clear_bit(XPT_BUSY, &xprt->xpt_flags);
- xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
+ svc_xprt_do_enqueue(xprt);
svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);
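
The ordering in svc_xprt_received() above is the interesting part. A
rough user-space model of it follows (C11 atomics; xprt_get(),
xprt_put(), and do_enqueue() are hypothetical stand-ins, and the real
kernel code additionally needs smp_mb__before_atomic() to order the
flag update): the reference must be taken before the busy flag is
cleared, because the moment busy is cleared another thread may close
and release the transport.

/*
 * User-space model of the svc_xprt_received() ordering; all names
 * here are hypothetical stand-ins, not the kernel API.
 */
#include <stdatomic.h>
#include <stdio.h>

struct xprt {
        atomic_int refcount;
        atomic_bool busy;
};

static void xprt_get(struct xprt *x)
{
        atomic_fetch_add(&x->refcount, 1);
}

static void xprt_put(struct xprt *x)
{
        if (atomic_fetch_sub(&x->refcount, 1) == 1)
                printf("last reference dropped: free the xprt\n");
}

static void do_enqueue(struct xprt *x)
{
        printf("enqueued; refcount=%d\n", atomic_load(&x->refcount));
}

static void xprt_received(struct xprt *x)
{
        xprt_get(x);                    /* pin before clearing busy */
        atomic_store(&x->busy, false);  /* others may now close the xprt */
        do_enqueue(x);                  /* safe: we still hold a reference */
        xprt_put(x);
}

int main(void)
{
        struct xprt x;

        atomic_init(&x.refcount, 1);
        atomic_init(&x.busy, true);
        xprt_received(&x);
        xprt_put(&x);                   /* drop the original reference */
        return 0;
}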
return false;
}
-void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
struct svc_pool *pool;
struct svc_rqst *rqstp = NULL;
put_cpu();
trace_svc_xprt_enqueue(xprt, rqstp);
}
-EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
if (test_bit(XPT_BUSY, &xprt->xpt_flags))
return;
- xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
+ svc_xprt_do_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
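
And svc_xprt_enqueue() itself stays a thin guard: a transport already
marked busy is owned by some thread, so a second enqueue would be
redundant work. A toy model of that test-then-enqueue shape (user
space, invented names; the kernel's XPT_BUSY handshake is more
involved than a plain load):

/* Toy model of the guard in svc_xprt_enqueue(); names are invented. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool busy;

static void do_enqueue(void)
{
        printf("handed to a pool thread\n");
}

static void enqueue_if_idle(void)
{
        if (atomic_load(&busy))
                return;         /* already being serviced: skip */
        do_enqueue();           /* direct call, no ops-table hop */
}

int main(void)
{
        enqueue_if_idle();              /* enqueues */
        atomic_store(&busy, true);
        enqueue_if_idle();              /* skipped */
        return 0;
}

With the hook gone, both svc_xprt_received() and svc_xprt_enqueue()
reach svc_xprt_do_enqueue() through ordinary direct calls.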