static inline unsigned long cfs_time_shift(int seconds)
{
- return cfs_time_add(cfs_time_current(), seconds * HZ);
+ return cfs_time_add(jiffies, seconds * HZ);
}
/*
* Generic kernel stuff
*/
-static inline unsigned long cfs_time_current(void)
-{
- return jiffies;
-}
-
static inline long cfs_duration_sec(long d)
{
return d / msecs_to_jiffies(MSEC_PER_SEC);
}
-#define cfs_time_current_64 get_jiffies_64
-
static inline u64 cfs_time_add_64(u64 t, u64 d)
{
return t + d;
static inline u64 cfs_time_shift_64(int seconds)
{
- return cfs_time_add_64(cfs_time_current_64(),
+ return cfs_time_add_64(get_jiffies_64(),
seconds * HZ);
}
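/*
 * Illustrative context, not part of this patch: the cfs_time_* helpers
 * that remain in the hunks below are thin wrappers around plain jiffies
 * arithmetic and the kernel's wrap-safe time_after()/time_after_eq()
 * macros.  Roughly (simplified from libcfs; signatures approximate):
 */
static inline unsigned long cfs_time_add(unsigned long t, long d)
{
	return t + d;
}

static inline unsigned long cfs_time_sub(unsigned long t, unsigned long d)
{
	return t - d;
}

static inline int cfs_time_after(unsigned long t1, unsigned long t2)
{
	return time_after(t1, t2);	/* "t1 is later than t2", wrap-safe */
}

static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
{
	return time_after_eq(t1, t2);
}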
unsigned long *when)
{
unsigned long last_alive = 0;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
rwlock_t *glock = &kiblnd_data.kib_global_lock;
struct kib_peer *peer;
unsigned long flags;
LIST_HEAD(zombies);
struct kib_fmr_pool *fpo = fmr->fmr_pool;
struct kib_fmr_poolset *fps;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
struct kib_fmr_pool *tmp;
int rc;
goto again;
}
- if (time_before(cfs_time_current(), fps->fps_next_retry)) {
+ if (time_before(jiffies, fps->fps_next_retry)) {
/* someone failed recently */
spin_unlock(&fps->fps_lock);
return -EAGAIN;
LIST_HEAD(zombies);
struct kib_poolset *ps = pool->po_owner;
struct kib_pool *tmp;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
spin_lock(&ps->ps_lock);
goto again;
}
- if (time_before(cfs_time_current(), ps->ps_next_retry)) {
+ if (time_before(jiffies, ps->ps_next_retry)) {
/* someone failed recently */
spin_unlock(&ps->ps_lock);
return NULL;
spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
- time_before = cfs_time_current();
+ time_before = jiffies;
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
- cfs_time_current() - time_before);
+ jiffies - time_before);
spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
static void
kiblnd_peer_alive(struct kib_peer *peer)
{
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer->ibp_last_alive = cfs_time_current();
+ /* This is racy, but everyone's only writing jiffies */
+ peer->ibp_last_alive = jiffies;
mb();
}
if (timedout) {
CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
libcfs_nid2str(peer->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
+ cfs_duration_sec(jiffies -
peer->ibp_last_alive),
conn->ibc_credits,
conn->ibc_outstanding_credits,
list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
ibd_fail_list) {
- if (time_before(cfs_time_current(),
+ if (time_before(jiffies,
dev->ibd_next_failover))
continue;
do_failover = 1;
}
conn->ksnc_peer = peer; /* conn takes my ref on peer */
- peer->ksnp_last_alive = cfs_time_current();
+ peer->ksnp_last_alive = jiffies;
peer->ksnp_send_keepalive = 0;
peer->ksnp_error = 0;
sched->kss_nconns++;
conn->ksnc_scheduler = sched;
- conn->ksnc_tx_last_post = cfs_time_current();
+ conn->ksnc_tx_last_post = jiffies;
/* Set the deadline for the outgoing HELLO to drain */
conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port,
iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left,
- cfs_duration_sec(cfs_time_sub(cfs_time_current(),
- last_rcv)));
+ cfs_duration_sec(cfs_time_sub(jiffies, last_rcv)));
lnet_finalize(conn->ksnc_peer->ksnp_ni,
conn->ksnc_cookie, -EIO);
break;
{
int connect = 1;
unsigned long last_alive = 0;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
struct ksock_peer *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
struct lnet_process_id id = {
*/
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = bufnob;
mb();
}
/* received something... */
nob = rc;
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_rx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
mb(); /* order with setting rx_started */
LASSERT(conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+ if (!cfs_time_aftereq(cfs_time_add(jiffies,
SOCKNAL_ENOMEM_RETRY),
ksocknal_data.ksnd_reaper_waketime))
wake_up(&ksocknal_data.ksnd_reaper_waitq);
conn = (typed) ? typed : fallback;
if (conn)
- conn->ksnc_tx_last_post = cfs_time_current();
+ conn->ksnc_tx_last_post = jiffies;
return conn;
}
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = 0;
mb(); /* order with adding to tx_queue */
}
struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
{
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
struct list_head *tmp;
struct ksock_route *route;
int retry_later = 0;
int rc = 0;
- deadline = cfs_time_add(cfs_time_current(),
+ deadline = cfs_time_add(jiffies,
*ksocknal_tunables.ksnd_timeout * HZ);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- if (cfs_time_aftereq(cfs_time_current(), deadline)) {
+ if (cfs_time_aftereq(jiffies, deadline)) {
rc = -ETIMEDOUT;
lnet_connect_console_error(rc, peer->ksnp_id.nid,
route->ksnr_ipaddr,
*/
route->ksnr_retry_interval =
*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000;
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
+ route->ksnr_timeout = cfs_time_add(jiffies,
route->ksnr_retry_interval);
}
(long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000);
LASSERT(route->ksnr_retry_interval);
- route->ksnr_timeout = cfs_time_add(cfs_time_current(),
+ route->ksnr_timeout = cfs_time_add(jiffies,
route->ksnr_retry_interval);
if (!list_empty(&peer->ksnp_tx_queue) &&
struct ksock_route *route;
unsigned long now;
- now = cfs_time_current();
+ now = jiffies;
/* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
}
if (conn->ksnc_rx_started &&
- cfs_time_aftereq(cfs_time_current(),
+ cfs_time_aftereq(jiffies,
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued) &&
- cfs_time_aftereq(cfs_time_current(),
+ cfs_time_aftereq(jiffies,
conn->ksnc_tx_deadline)) {
/*
* Timed out messages queued for sending or
write_lock_bh(&ksocknal_data.ksnd_global_lock);
list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
- if (!cfs_time_aftereq(cfs_time_current(),
+ if (!cfs_time_aftereq(jiffies,
tx->tx_deadline))
break;
return 0;
if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
- time_before(cfs_time_current(),
+ time_before(jiffies,
cfs_time_add(peer->ksnp_last_alive,
*ksocknal_tunables.ksnd_keepalive * HZ)))
return 0;
- if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
+ if (time_before(jiffies, peer->ksnp_send_keepalive))
return 0;
/*
tx = list_entry(peer->ksnp_tx_queue.next,
struct ksock_tx, tx_list);
- if (cfs_time_aftereq(cfs_time_current(),
+ if (cfs_time_aftereq(jiffies,
tx->tx_deadline)) {
ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
tx_stale = NULL;
spin_lock(&peer->ksnp_lock);
list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
- if (!cfs_time_aftereq(cfs_time_current(),
+ if (!cfs_time_aftereq(jiffies,
tx->tx_deadline))
break;
/* ignore the TX if connection is being closed */
CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale,
- cfs_duration_sec(cfs_time_current() - deadline),
+ cfs_duration_sec(jiffies - deadline),
resid, conn->ksnc_sock->sk->sk_wmem_queued);
ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
long timeout;
int i;
int peer_index = 0;
- unsigned long deadline = cfs_time_current();
+ unsigned long deadline = jiffies;
INIT_LIST_HEAD(&enomem_conns);
init_waitqueue_entry(&wait, current);
/* careful with the jiffy wrap... */
while ((timeout = cfs_time_sub(deadline,
- cfs_time_current())) <= 0) {
+ jiffies)) <= 0) {
const int n = 4;
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
timeout = SOCKNAL_ENOMEM_RETRY;
}
ksocknal_data.ksnd_reaper_waketime =
- cfs_time_add(cfs_time_current(), timeout);
+ cfs_time_add(jiffies, timeout);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
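/*
 * Illustrative only, not part of this patch: the "careful with the
 * jiffy wrap" comment above works because time_before()/time_after()
 * compare jiffies values via signed subtraction, so the tests stay
 * correct when the unsigned counter wraps.  deadline_expired() below
 * is a hypothetical helper restating the idiom used by the deadline
 * checks in these hunks.
 */
#include <linux/jiffies.h>

static inline bool deadline_expired(unsigned long deadline)
{
	/* equivalent to time_after_eq(jiffies, deadline) */
	return (long)(jiffies - deadline) >= 0;
}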
if (cdls) {
if (libcfs_console_ratelimit &&
cdls->cdls_next && /* not first time ever */
- !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
+ !cfs_time_after(jiffies, cdls->cdls_next)) {
/* skipping a console message */
cdls->cdls_count++;
if (tcd)
return 1;
}
- if (cfs_time_after(cfs_time_current(),
+ if (cfs_time_after(jiffies,
cdls->cdls_next + libcfs_console_max_delay +
10 * HZ)) {
/* last timeout was a long time ago */
cdls->cdls_delay = libcfs_console_max_delay;
/* ensure cdls_next is never zero after it's been seen */
- cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
+ cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
}
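/*
 * Illustrative only, not part of this patch: cdls_next == 0 is the
 * "first time ever" sentinel tested above, so the next deadline is
 * forced odd with "| 1".  Even if jiffies + cdls_delay happens to wrap
 * to 0, the stored value stays non-zero, at the cost of at most one
 * jiffy of extra delay.  next_ratelimit_deadline() is a hypothetical
 * helper restating the idiom:
 */
static inline unsigned long next_ratelimit_deadline(unsigned long delay)
{
	return (jiffies + delay) | 1;	/* never 0, so never "unseen" */
}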
if (tcd) {
ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
lnet_net_lock(lp->lp_cpt);
- lp->lp_last_query = cfs_time_current();
+ lp->lp_last_query = jiffies;
if (last_alive) /* NI has updated timestamp */
lp->lp_last_alive = last_alive;
static int
lnet_peer_alive_locked(struct lnet_peer *lp)
{
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
if (!lnet_peer_aliveness_enabled(lp))
return -ENODEV;
/* match this rule, check drop rate now */
spin_lock(&rule->dr_lock);
if (rule->dr_drop_time) { /* time based drop */
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
rule->dr_stat.fs_count++;
drop = cfs_time_aftereq(now, rule->dr_drop_time);
/* match this rule, check delay rate now */
spin_lock(&rule->dl_lock);
if (rule->dl_delay_time) { /* time based delay */
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
rule->dl_stat.fs_count++;
delay = cfs_time_aftereq(now, rule->dl_delay_time);
{
struct lnet_msg *msg;
struct lnet_msg *tmp;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
if (!all && rule->dl_msg_send > now)
return;
lp->lp_alive_count = 0;
lp->lp_timestamp = 0;
lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */
- lp->lp_last_alive = cfs_time_current(); /* assumes alive */
+ lp->lp_last_alive = jiffies; /* assumes alive */
lp->lp_last_query = 0; /* haven't asked NI yet */
lp->lp_ping_timestamp = 0;
lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
* we ping alive routers to try to detect router death before
* apps get burned).
*/
- lnet_notify_locked(lp, 1, !event->status, cfs_time_current());
+ lnet_notify_locked(lp, 1, !event->status, jiffies);
/*
* The router checker will wake up very shortly and do the
lnet_ping_router_locked(struct lnet_peer *rtr)
{
struct lnet_rc_data *rcd = NULL;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
int secs;
lnet_peer_addref_locked(rtr);
lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, unsigned long when)
{
struct lnet_peer *lp = NULL;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
int cpt = lnet_cpt_of_nid(nid);
LASSERT(!in_interrupt());
if (peer) {
lnet_nid_t nid = peer->lp_nid;
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
unsigned long deadline = peer->lp_ping_deadline;
int nrefs = peer->lp_refcount;
int nrtrrefs = peer->lp_rtr_refcount;
aliveness = peer->lp_alive ? "up" : "down";
if (lnet_peer_aliveness_enabled(peer)) {
- unsigned long now = cfs_time_current();
+ unsigned long now = jiffies;
long delta;
delta = cfs_time_sub(now, peer->lp_last_alive);
/* not aborted */
LASSERT(!crpc->crp_status);
- crpc->crp_stamp = cfs_time_current();
+ crpc->crp_stamp = jiffies;
crpc->crp_status = rpc->crpc_status;
}
if (!crpc->crp_posted || /* not posted */
crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) {
- crpc->crp_stamp = cfs_time_current();
+ crpc->crp_stamp = jiffies;
crpc->crp_status = -EINTR;
}
spin_unlock(&rpc->crpc_lock);
continue;
}
- crpc->crp_stamp = cfs_time_current();
+ crpc->crp_stamp = jiffies;
crpc->crp_status = error;
spin_unlock(&rpc->crpc_lock);
ndl->ndl_node->nd_ref = 1;
ndl->ndl_node->nd_id = id;
- ndl->ndl_node->nd_stamp = cfs_time_current();
+ ndl->ndl_node->nd_stamp = jiffies;
ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
ndl->ndl_node->nd_timeout = 0;
memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
LNetGetId(1, &id);
sid->ses_nid = id.nid;
- sid->ses_stamp = cfs_time_current();
+ sid->ses_stamp = jiffies;
}
int
sn->sn_id = sid;
sn->sn_features = features;
sn->sn_timeout = session_timeout;
- sn->sn_started = cfs_time_current();
+ sn->sn_started = jiffies;
timer->stt_data = sn;
timer->stt_func = sfw_session_expired;
if (req->rq_delay_limit != 0 &&
time_before(cfs_time_add(req->rq_queued_time,
req->rq_delay_limit * HZ),
- cfs_time_current())) {
+ jiffies)) {
return 1;
}
return 0;
/*
* @max_age is the oldest time in jiffies that we accept using cached data.
* If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
+ * target. Use a value of "jiffies + HZ" to guarantee freshness.
*/
static inline int obd_statfs_async(struct obd_export *exp,
struct obd_info *oinfo,
/*
* @max_age is the oldest time in jiffies that we accept using cached data.
* If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
+ * target. Use a value of "jiffies + HZ" to guarantee freshness.
*/
static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_statfs *osfs, __u64 max_age,
if (rc == 0) {
spin_lock(&obd->obd_osfs_lock);
memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
- obd->obd_osfs_age = cfs_time_current_64();
+ obd->obd_osfs_age = get_jiffies_64();
spin_unlock(&obd->obd_osfs_lock);
}
} else {
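/*
 * Illustrative only, not part of this patch: per the @max_age comment
 * above, a caller passes "now minus the staleness it will tolerate"
 * (or "now + HZ" to force a refresh).  With cfs_time_current_64() gone
 * this is plain get_jiffies_64() arithmetic.  example_statfs_refresh()
 * is hypothetical, the one-second tolerance is an arbitrary value, and
 * the flags type is assumed to be __u32.
 */
static int example_statfs_refresh(const struct lu_env *env,
				  struct obd_export *exp,
				  struct obd_statfs *osfs, __u32 flags)
{
	/* accept cached statfs data that is at most one second old */
	__u64 max_age = get_jiffies_64() - 1 * HZ;

	return obd_statfs(env, exp, osfs, max_age, flags);
}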
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- lock->l_last_used = cfs_time_current();
+ lock->l_last_used = jiffies;
LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
list_add_tail(&lock->l_lru, &ns->ns_unused_list);
lock_res_and_lock(lock);
if (lock->l_granted_mode == LCK_PW &&
!lock->l_readers && !lock->l_writers &&
- cfs_time_after(cfs_time_current(),
+ cfs_time_after(jiffies,
cfs_time_add(lock->l_last_used,
10 * HZ))) {
unlock_res_and_lock(lock);
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() -
lock->l_last_activity));
- if (cfs_time_after(cfs_time_current(), next_dump)) {
+ if (cfs_time_after(jiffies, next_dump)) {
last_dump = next_dump;
next_dump = cfs_time_shift(300);
ldlm_namespace_dump(D_DLMTRACE,
int unused, int added,
int count)
{
- unsigned long cur = cfs_time_current();
+ unsigned long cur = jiffies;
struct ldlm_pool *pl = &ns->ns_pool;
__u64 slv, lvf, lv;
unsigned long la;
* Despite the LV, it doesn't make sense to keep a lock which
* has been unused for ns_max_age time.
*/
- if (cfs_time_after(cfs_time_current(),
+ if (cfs_time_after(jiffies,
cfs_time_add(lock->l_last_used, ns->ns_max_age)))
return LDLM_POLICY_CANCEL_LOCK;
int count)
{
if ((added >= count) &&
- time_before(cfs_time_current(),
+ time_before(jiffies,
cfs_time_add(lock->l_last_used, ns->ns_max_age)))
return LDLM_POLICY_KEEP_LOCK;
continue;
last_use = lock->l_last_used;
- if (last_use == cfs_time_current())
+ if (last_use == jiffies)
continue;
/* Somebody is already doing CANCEL. No need for this
CDEBUG(level, "--- Namespace: %s (rc: %d, side: client)\n",
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
- if (time_before(cfs_time_current(), ns->ns_next_dump))
+ if (time_before(jiffies, ns->ns_next_dump))
return;
cfs_hash_for_each_nolock(ns->ns_rs_hash,
down_read(&lli->lli_glimpse_sem);
rc = cl_glimpse_size(inode);
- lli->lli_glimpse_time = cfs_time_current();
+ lli->lli_glimpse_time = jiffies;
up_read(&lli->lli_glimpse_sem);
return rc;
}
cl_agl(inode);
lli->lli_agl_index = 0;
- lli->lli_glimpse_time = cfs_time_current();
+ lli->lli_glimpse_time = jiffies;
up_write(&lli->lli_glimpse_sem);
CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
spin_lock(&obd->obd_osfs_lock);
memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
- obd->obd_osfs_age = cfs_time_current_64();
+ obd->obd_osfs_age = get_jiffies_64();
spin_unlock(&obd->obd_osfs_lock);
return 0;
}
spin_lock(&tgtobd->obd_osfs_lock);
memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
- tgtobd->obd_osfs_age = cfs_time_current_64();
+ tgtobd->obd_osfs_age = get_jiffies_64();
spin_unlock(&tgtobd->obd_osfs_lock);
out_update:
result = cl_page_make_ready(env, page, CRT_WRITE);
if (result == 0)
- opg->ops_submit_time = cfs_time_current();
+ opg->ops_submit_time = jiffies;
return result;
}
void osc_object_set_contended(struct osc_object *obj)
{
- obj->oo_contention_time = cfs_time_current();
+ obj->oo_contention_time = jiffies;
/* mb(); */
obj->oo_contended = 1;
}
{
struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
int osc_contention_time = dev->od_contention_time;
- unsigned long cur_time = cfs_time_current();
+ unsigned long cur_time = jiffies;
unsigned long retry_time;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
if (opg->ops_submit_time == 0)
return 0;
- return (cfs_time_current() - opg->ops_submit_time);
+ return (jiffies - opg->ops_submit_time);
}
static int osc_page_print(const struct lu_env *env,
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
- opg->ops_submit_time = cfs_time_current();
+ opg->ops_submit_time = jiffies;
osc_page_transfer_get(opg, "transfer\0imm");
osc_page_transfer_add(env, opg, crt);
}
static int osc_should_shrink_grant(struct client_obd *client)
{
- unsigned long time = cfs_time_current();
+ unsigned long time = jiffies;
unsigned long next_shrink = client->cl_next_shrink_grant;
if ((client->cl_import->imp_connect_data.ocd_connect_flags &
list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
atomic_inc(&set->set_remaining);
- req->rq_queued_time = cfs_time_current();
+ req->rq_queued_time = jiffies;
if (req->rq_reqmsg)
lustre_msg_set_jobid(req->rq_reqmsg, NULL);
spin_lock(&set->set_new_req_lock);
/* The set takes over the caller's request reference. */
req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
+ req->rq_queued_time = jiffies;
list_add_tail(&req->rq_set_chain, &set->set_new_requests);
count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
imp->imp_obd->obd_name, at_get(at));
}
- imp_conn->oic_last_attempt = cfs_time_current_64();
+ imp_conn->oic_last_attempt = get_jiffies_64();
/* switch connection, don't mind if it's same as the current one */
ptlrpc_connection_put(imp->imp_connection);
mutex_unlock(&pinger_mutex);
return cfs_time_sub(cfs_time_add(time, timeout * HZ),
- cfs_time_current());
+ jiffies);
}
static bool ir_up;
static void ptlrpc_pinger_main(struct work_struct *ws)
{
- unsigned long this_ping = cfs_time_current();
+ unsigned long this_ping = jiffies;
long time_to_next_wake;
struct timeout_item *item;
struct obd_import *imp;
svcpt = from_timer(svcpt, t, scp_at_timer);
svcpt->scp_at_check = 1;
- svcpt->scp_at_checktime = cfs_time_current();
+ svcpt->scp_at_checktime = jiffies;
wake_up(&svcpt->scp_waitq);
}
spin_unlock(&svcpt->scp_at_lock);
return;
}
- delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
+ delay = cfs_time_sub(jiffies, svcpt->scp_at_checktime);
svcpt->scp_at_check = 0;
if (array->paa_count == 0) {