提交 4671dc9f 编写于 作者: NeilBrown 提交者: Greg Kroah-Hartman

staging: lustre: cfs_time_current() -> jiffies.

Discard cfs_time_current() and cfs_time_current64()
and use jiffies and get_jiffies_64() like the rest of the kernel.
Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
上级 3d2ec9dc
...@@ -62,7 +62,7 @@ static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2) ...@@ -62,7 +62,7 @@ static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
static inline unsigned long cfs_time_shift(int seconds) static inline unsigned long cfs_time_shift(int seconds)
{ {
return cfs_time_add(cfs_time_current(), seconds * HZ); return cfs_time_add(jiffies, seconds * HZ);
} }
/* /*
......
...@@ -60,18 +60,11 @@ ...@@ -60,18 +60,11 @@
* Generic kernel stuff * Generic kernel stuff
*/ */
static inline unsigned long cfs_time_current(void)
{
return jiffies;
}
static inline long cfs_duration_sec(long d) static inline long cfs_duration_sec(long d)
{ {
return d / msecs_to_jiffies(MSEC_PER_SEC); return d / msecs_to_jiffies(MSEC_PER_SEC);
} }
#define cfs_time_current_64 get_jiffies_64
static inline u64 cfs_time_add_64(u64 t, u64 d) static inline u64 cfs_time_add_64(u64 t, u64 d)
{ {
return t + d; return t + d;
...@@ -79,7 +72,7 @@ static inline u64 cfs_time_add_64(u64 t, u64 d) ...@@ -79,7 +72,7 @@ static inline u64 cfs_time_add_64(u64 t, u64 d)
static inline u64 cfs_time_shift_64(int seconds) static inline u64 cfs_time_shift_64(int seconds)
{ {
return cfs_time_add_64(cfs_time_current_64(), return cfs_time_add_64(get_jiffies_64(),
seconds * HZ); seconds * HZ);
} }
......
...@@ -1043,7 +1043,7 @@ static void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, ...@@ -1043,7 +1043,7 @@ static void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid,
unsigned long *when) unsigned long *when)
{ {
unsigned long last_alive = 0; unsigned long last_alive = 0;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
rwlock_t *glock = &kiblnd_data.kib_global_lock; rwlock_t *glock = &kiblnd_data.kib_global_lock;
struct kib_peer *peer; struct kib_peer *peer;
unsigned long flags; unsigned long flags;
...@@ -1552,7 +1552,7 @@ void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status) ...@@ -1552,7 +1552,7 @@ void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
LIST_HEAD(zombies); LIST_HEAD(zombies);
struct kib_fmr_pool *fpo = fmr->fmr_pool; struct kib_fmr_pool *fpo = fmr->fmr_pool;
struct kib_fmr_poolset *fps; struct kib_fmr_poolset *fps;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
struct kib_fmr_pool *tmp; struct kib_fmr_pool *tmp;
int rc; int rc;
...@@ -1726,7 +1726,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, ...@@ -1726,7 +1726,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
goto again; goto again;
} }
if (time_before(cfs_time_current(), fps->fps_next_retry)) { if (time_before(jiffies, fps->fps_next_retry)) {
/* someone failed recently */ /* someone failed recently */
spin_unlock(&fps->fps_lock); spin_unlock(&fps->fps_lock);
return -EAGAIN; return -EAGAIN;
...@@ -1858,7 +1858,7 @@ void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node) ...@@ -1858,7 +1858,7 @@ void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
LIST_HEAD(zombies); LIST_HEAD(zombies);
struct kib_poolset *ps = pool->po_owner; struct kib_poolset *ps = pool->po_owner;
struct kib_pool *tmp; struct kib_pool *tmp;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
spin_lock(&ps->ps_lock); spin_lock(&ps->ps_lock);
...@@ -1927,7 +1927,7 @@ struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps) ...@@ -1927,7 +1927,7 @@ struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
goto again; goto again;
} }
if (time_before(cfs_time_current(), ps->ps_next_retry)) { if (time_before(jiffies, ps->ps_next_retry)) {
/* someone failed recently */ /* someone failed recently */
spin_unlock(&ps->ps_lock); spin_unlock(&ps->ps_lock);
return NULL; return NULL;
...@@ -1937,10 +1937,10 @@ struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps) ...@@ -1937,10 +1937,10 @@ struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
spin_unlock(&ps->ps_lock); spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
time_before = cfs_time_current(); time_before = jiffies;
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete", CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
cfs_time_current() - time_before); jiffies - time_before);
spin_lock(&ps->ps_lock); spin_lock(&ps->ps_lock);
ps->ps_increasing = 0; ps->ps_increasing = 0;
......
...@@ -1853,8 +1853,8 @@ kiblnd_thread_fini(void) ...@@ -1853,8 +1853,8 @@ kiblnd_thread_fini(void)
static void static void
kiblnd_peer_alive(struct kib_peer *peer) kiblnd_peer_alive(struct kib_peer *peer)
{ {
/* This is racy, but everyone's only writing cfs_time_current() */ /* This is racy, but everyone's only writing jiffies */
peer->ibp_last_alive = cfs_time_current(); peer->ibp_last_alive = jiffies;
mb(); mb();
} }
...@@ -3206,7 +3206,7 @@ kiblnd_check_conns(int idx) ...@@ -3206,7 +3206,7 @@ kiblnd_check_conns(int idx)
if (timedout) { if (timedout) {
CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n", CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n",
libcfs_nid2str(peer->ibp_nid), libcfs_nid2str(peer->ibp_nid),
cfs_duration_sec(cfs_time_current() - cfs_duration_sec(jiffies -
peer->ibp_last_alive), peer->ibp_last_alive),
conn->ibc_credits, conn->ibc_credits,
conn->ibc_outstanding_credits, conn->ibc_outstanding_credits,
...@@ -3681,7 +3681,7 @@ kiblnd_failover_thread(void *arg) ...@@ -3681,7 +3681,7 @@ kiblnd_failover_thread(void *arg)
list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
ibd_fail_list) { ibd_fail_list) {
if (time_before(cfs_time_current(), if (time_before(jiffies,
dev->ibd_next_failover)) dev->ibd_next_failover))
continue; continue;
do_failover = 1; do_failover = 1;
......
...@@ -1276,7 +1276,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route, ...@@ -1276,7 +1276,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
} }
conn->ksnc_peer = peer; /* conn takes my ref on peer */ conn->ksnc_peer = peer; /* conn takes my ref on peer */
peer->ksnp_last_alive = cfs_time_current(); peer->ksnp_last_alive = jiffies;
peer->ksnp_send_keepalive = 0; peer->ksnp_send_keepalive = 0;
peer->ksnp_error = 0; peer->ksnp_error = 0;
...@@ -1284,7 +1284,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route, ...@@ -1284,7 +1284,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
sched->kss_nconns++; sched->kss_nconns++;
conn->ksnc_scheduler = sched; conn->ksnc_scheduler = sched;
conn->ksnc_tx_last_post = cfs_time_current(); conn->ksnc_tx_last_post = jiffies;
/* Set the deadline for the outgoing HELLO to drain */ /* Set the deadline for the outgoing HELLO to drain */
conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued; conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
...@@ -1682,8 +1682,7 @@ ksocknal_destroy_conn(struct ksock_conn *conn) ...@@ -1682,8 +1682,7 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type, libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port, &conn->ksnc_ipaddr, conn->ksnc_port,
iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left, iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left,
cfs_duration_sec(cfs_time_sub(cfs_time_current(), cfs_duration_sec(cfs_time_sub(jiffies, last_rcv)));
last_rcv)));
lnet_finalize(conn->ksnc_peer->ksnp_ni, lnet_finalize(conn->ksnc_peer->ksnp_ni,
conn->ksnc_cookie, -EIO); conn->ksnc_cookie, -EIO);
break; break;
...@@ -1832,7 +1831,7 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when) ...@@ -1832,7 +1831,7 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when)
{ {
int connect = 1; int connect = 1;
unsigned long last_alive = 0; unsigned long last_alive = 0;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
struct ksock_peer *peer = NULL; struct ksock_peer *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock; rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
struct lnet_process_id id = { struct lnet_process_id id = {
......
...@@ -222,7 +222,7 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx) ...@@ -222,7 +222,7 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
*/ */
conn->ksnc_tx_deadline = conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout); cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = bufnob; conn->ksnc_tx_bufnob = bufnob;
mb(); mb();
} }
...@@ -267,7 +267,7 @@ ksocknal_recv_iter(struct ksock_conn *conn) ...@@ -267,7 +267,7 @@ ksocknal_recv_iter(struct ksock_conn *conn)
/* received something... */ /* received something... */
nob = rc; nob = rc;
conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_rx_deadline = conn->ksnc_rx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout); cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
mb(); /* order with setting rx_started */ mb(); /* order with setting rx_started */
...@@ -481,7 +481,7 @@ ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx) ...@@ -481,7 +481,7 @@ ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
LASSERT(conn->ksnc_tx_scheduled); LASSERT(conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list, list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns); &ksocknal_data.ksnd_enomem_conns);
if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(), if (!cfs_time_aftereq(cfs_time_add(jiffies,
SOCKNAL_ENOMEM_RETRY), SOCKNAL_ENOMEM_RETRY),
ksocknal_data.ksnd_reaper_waketime)) ksocknal_data.ksnd_reaper_waketime))
wake_up(&ksocknal_data.ksnd_reaper_waitq); wake_up(&ksocknal_data.ksnd_reaper_waitq);
...@@ -612,7 +612,7 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, ...@@ -612,7 +612,7 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
conn = (typed) ? typed : fallback; conn = (typed) ? typed : fallback;
if (conn) if (conn)
conn->ksnc_tx_last_post = cfs_time_current(); conn->ksnc_tx_last_post = jiffies;
return conn; return conn;
} }
...@@ -680,7 +680,7 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) ...@@ -680,7 +680,7 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
conn->ksnc_tx_deadline = conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout); cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */ if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = 0; conn->ksnc_tx_bufnob = 0;
mb(); /* order with adding to tx_queue */ mb(); /* order with adding to tx_queue */
} }
...@@ -728,7 +728,7 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn) ...@@ -728,7 +728,7 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
struct ksock_route * struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer *peer) ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
{ {
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
struct list_head *tmp; struct list_head *tmp;
struct ksock_route *route; struct ksock_route *route;
...@@ -1777,7 +1777,7 @@ ksocknal_connect(struct ksock_route *route) ...@@ -1777,7 +1777,7 @@ ksocknal_connect(struct ksock_route *route)
int retry_later = 0; int retry_later = 0;
int rc = 0; int rc = 0;
deadline = cfs_time_add(cfs_time_current(), deadline = cfs_time_add(jiffies,
*ksocknal_tunables.ksnd_timeout * HZ); *ksocknal_tunables.ksnd_timeout * HZ);
write_lock_bh(&ksocknal_data.ksnd_global_lock); write_lock_bh(&ksocknal_data.ksnd_global_lock);
...@@ -1825,7 +1825,7 @@ ksocknal_connect(struct ksock_route *route) ...@@ -1825,7 +1825,7 @@ ksocknal_connect(struct ksock_route *route)
write_unlock_bh(&ksocknal_data.ksnd_global_lock); write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (cfs_time_aftereq(cfs_time_current(), deadline)) { if (cfs_time_aftereq(jiffies, deadline)) {
rc = -ETIMEDOUT; rc = -ETIMEDOUT;
lnet_connect_console_error(rc, peer->ksnp_id.nid, lnet_connect_console_error(rc, peer->ksnp_id.nid,
route->ksnr_ipaddr, route->ksnr_ipaddr,
...@@ -1877,7 +1877,7 @@ ksocknal_connect(struct ksock_route *route) ...@@ -1877,7 +1877,7 @@ ksocknal_connect(struct ksock_route *route)
*/ */
route->ksnr_retry_interval = route->ksnr_retry_interval =
*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000; *ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000;
route->ksnr_timeout = cfs_time_add(cfs_time_current(), route->ksnr_timeout = cfs_time_add(jiffies,
route->ksnr_retry_interval); route->ksnr_retry_interval);
} }
...@@ -1903,7 +1903,7 @@ ksocknal_connect(struct ksock_route *route) ...@@ -1903,7 +1903,7 @@ ksocknal_connect(struct ksock_route *route)
(long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000); (long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000);
LASSERT(route->ksnr_retry_interval); LASSERT(route->ksnr_retry_interval);
route->ksnr_timeout = cfs_time_add(cfs_time_current(), route->ksnr_timeout = cfs_time_add(jiffies,
route->ksnr_retry_interval); route->ksnr_retry_interval);
if (!list_empty(&peer->ksnp_tx_queue) && if (!list_empty(&peer->ksnp_tx_queue) &&
...@@ -2050,7 +2050,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p) ...@@ -2050,7 +2050,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
struct ksock_route *route; struct ksock_route *route;
unsigned long now; unsigned long now;
now = cfs_time_current(); now = jiffies;
/* connd_routes can contain both pending and ordinary routes */ /* connd_routes can contain both pending and ordinary routes */
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes, list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
...@@ -2228,7 +2228,7 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer) ...@@ -2228,7 +2228,7 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
} }
if (conn->ksnc_rx_started && if (conn->ksnc_rx_started &&
cfs_time_aftereq(cfs_time_current(), cfs_time_aftereq(jiffies,
conn->ksnc_rx_deadline)) { conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */ /* Timed out incomplete incoming message */
ksocknal_conn_addref(conn); ksocknal_conn_addref(conn);
...@@ -2244,7 +2244,7 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer) ...@@ -2244,7 +2244,7 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
if ((!list_empty(&conn->ksnc_tx_queue) || if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued) && conn->ksnc_sock->sk->sk_wmem_queued) &&
cfs_time_aftereq(cfs_time_current(), cfs_time_aftereq(jiffies,
conn->ksnc_tx_deadline)) { conn->ksnc_tx_deadline)) {
/* /*
* Timed out messages queued for sending or * Timed out messages queued for sending or
...@@ -2272,7 +2272,7 @@ ksocknal_flush_stale_txs(struct ksock_peer *peer) ...@@ -2272,7 +2272,7 @@ ksocknal_flush_stale_txs(struct ksock_peer *peer)
write_lock_bh(&ksocknal_data.ksnd_global_lock); write_lock_bh(&ksocknal_data.ksnd_global_lock);
list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) { list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
if (!cfs_time_aftereq(cfs_time_current(), if (!cfs_time_aftereq(jiffies,
tx->tx_deadline)) tx->tx_deadline))
break; break;
...@@ -2301,12 +2301,12 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer) ...@@ -2301,12 +2301,12 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
return 0; return 0;
if (*ksocknal_tunables.ksnd_keepalive <= 0 || if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
time_before(cfs_time_current(), time_before(jiffies,
cfs_time_add(peer->ksnp_last_alive, cfs_time_add(peer->ksnp_last_alive,
*ksocknal_tunables.ksnd_keepalive * HZ))) *ksocknal_tunables.ksnd_keepalive * HZ)))
return 0; return 0;
if (time_before(cfs_time_current(), peer->ksnp_send_keepalive)) if (time_before(jiffies, peer->ksnp_send_keepalive))
return 0; return 0;
/* /*
...@@ -2400,7 +2400,7 @@ ksocknal_check_peer_timeouts(int idx) ...@@ -2400,7 +2400,7 @@ ksocknal_check_peer_timeouts(int idx)
tx = list_entry(peer->ksnp_tx_queue.next, tx = list_entry(peer->ksnp_tx_queue.next,
struct ksock_tx, tx_list); struct ksock_tx, tx_list);
if (cfs_time_aftereq(cfs_time_current(), if (cfs_time_aftereq(jiffies,
tx->tx_deadline)) { tx->tx_deadline)) {
ksocknal_peer_addref(peer); ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock); read_unlock(&ksocknal_data.ksnd_global_lock);
...@@ -2418,7 +2418,7 @@ ksocknal_check_peer_timeouts(int idx) ...@@ -2418,7 +2418,7 @@ ksocknal_check_peer_timeouts(int idx)
tx_stale = NULL; tx_stale = NULL;
spin_lock(&peer->ksnp_lock); spin_lock(&peer->ksnp_lock);
list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) { list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
if (!cfs_time_aftereq(cfs_time_current(), if (!cfs_time_aftereq(jiffies,
tx->tx_deadline)) tx->tx_deadline))
break; break;
/* ignore the TX if connection is being closed */ /* ignore the TX if connection is being closed */
...@@ -2444,7 +2444,7 @@ ksocknal_check_peer_timeouts(int idx) ...@@ -2444,7 +2444,7 @@ ksocknal_check_peer_timeouts(int idx)
CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n", CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale, n, libcfs_nid2str(peer->ksnp_id.nid), tx_stale,
cfs_duration_sec(cfs_time_current() - deadline), cfs_duration_sec(jiffies - deadline),
resid, conn->ksnc_sock->sk->sk_wmem_queued); resid, conn->ksnc_sock->sk->sk_wmem_queued);
ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
...@@ -2466,7 +2466,7 @@ ksocknal_reaper(void *arg) ...@@ -2466,7 +2466,7 @@ ksocknal_reaper(void *arg)
long timeout; long timeout;
int i; int i;
int peer_index = 0; int peer_index = 0;
unsigned long deadline = cfs_time_current(); unsigned long deadline = jiffies;
INIT_LIST_HEAD(&enomem_conns); INIT_LIST_HEAD(&enomem_conns);
init_waitqueue_entry(&wait, current); init_waitqueue_entry(&wait, current);
...@@ -2532,7 +2532,7 @@ ksocknal_reaper(void *arg) ...@@ -2532,7 +2532,7 @@ ksocknal_reaper(void *arg)
/* careful with the jiffy wrap... */ /* careful with the jiffy wrap... */
while ((timeout = cfs_time_sub(deadline, while ((timeout = cfs_time_sub(deadline,
cfs_time_current())) <= 0) { jiffies)) <= 0) {
const int n = 4; const int n = 4;
const int p = 1; const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size; int chunk = ksocknal_data.ksnd_peer_hash_size;
...@@ -2569,7 +2569,7 @@ ksocknal_reaper(void *arg) ...@@ -2569,7 +2569,7 @@ ksocknal_reaper(void *arg)
timeout = SOCKNAL_ENOMEM_RETRY; timeout = SOCKNAL_ENOMEM_RETRY;
} }
ksocknal_data.ksnd_reaper_waketime = ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout); cfs_time_add(jiffies, timeout);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait); add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
......
...@@ -431,7 +431,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, ...@@ -431,7 +431,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
if (cdls) { if (cdls) {
if (libcfs_console_ratelimit && if (libcfs_console_ratelimit &&
cdls->cdls_next && /* not first time ever */ cdls->cdls_next && /* not first time ever */
!cfs_time_after(cfs_time_current(), cdls->cdls_next)) { !cfs_time_after(jiffies, cdls->cdls_next)) {
/* skipping a console message */ /* skipping a console message */
cdls->cdls_count++; cdls->cdls_count++;
if (tcd) if (tcd)
...@@ -439,7 +439,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, ...@@ -439,7 +439,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
return 1; return 1;
} }
if (cfs_time_after(cfs_time_current(), if (cfs_time_after(jiffies,
cdls->cdls_next + libcfs_console_max_delay + cdls->cdls_next + libcfs_console_max_delay +
10 * HZ)) { 10 * HZ)) {
/* last timeout was a long time ago */ /* last timeout was a long time ago */
...@@ -454,7 +454,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, ...@@ -454,7 +454,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
cdls->cdls_delay = libcfs_console_max_delay; cdls->cdls_delay = libcfs_console_max_delay;
/* ensure cdls_next is never zero after it's been seen */ /* ensure cdls_next is never zero after it's been seen */
cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
} }
if (tcd) { if (tcd) {
......
...@@ -501,7 +501,7 @@ lnet_ni_query_locked(struct lnet_ni *ni, struct lnet_peer *lp) ...@@ -501,7 +501,7 @@ lnet_ni_query_locked(struct lnet_ni *ni, struct lnet_peer *lp)
ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive); ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
lnet_net_lock(lp->lp_cpt); lnet_net_lock(lp->lp_cpt);
lp->lp_last_query = cfs_time_current(); lp->lp_last_query = jiffies;
if (last_alive) /* NI has updated timestamp */ if (last_alive) /* NI has updated timestamp */
lp->lp_last_alive = last_alive; lp->lp_last_alive = last_alive;
...@@ -545,7 +545,7 @@ lnet_peer_is_alive(struct lnet_peer *lp, unsigned long now) ...@@ -545,7 +545,7 @@ lnet_peer_is_alive(struct lnet_peer *lp, unsigned long now)
static int static int
lnet_peer_alive_locked(struct lnet_peer *lp) lnet_peer_alive_locked(struct lnet_peer *lp)
{ {
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
if (!lnet_peer_aliveness_enabled(lp)) if (!lnet_peer_aliveness_enabled(lp))
return -ENODEV; return -ENODEV;
......
...@@ -306,7 +306,7 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src, ...@@ -306,7 +306,7 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
/* match this rule, check drop rate now */ /* match this rule, check drop rate now */
spin_lock(&rule->dr_lock); spin_lock(&rule->dr_lock);
if (rule->dr_drop_time) { /* time based drop */ if (rule->dr_drop_time) { /* time based drop */
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
rule->dr_stat.fs_count++; rule->dr_stat.fs_count++;
drop = cfs_time_aftereq(now, rule->dr_drop_time); drop = cfs_time_aftereq(now, rule->dr_drop_time);
...@@ -472,7 +472,7 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src, ...@@ -472,7 +472,7 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
/* match this rule, check delay rate now */ /* match this rule, check delay rate now */
spin_lock(&rule->dl_lock); spin_lock(&rule->dl_lock);
if (rule->dl_delay_time) { /* time based delay */ if (rule->dl_delay_time) { /* time based delay */
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
rule->dl_stat.fs_count++; rule->dl_stat.fs_count++;
delay = cfs_time_aftereq(now, rule->dl_delay_time); delay = cfs_time_aftereq(now, rule->dl_delay_time);
...@@ -562,7 +562,7 @@ delayed_msg_check(struct lnet_delay_rule *rule, bool all, ...@@ -562,7 +562,7 @@ delayed_msg_check(struct lnet_delay_rule *rule, bool all,
{ {
struct lnet_msg *msg; struct lnet_msg *msg;
struct lnet_msg *tmp; struct lnet_msg *tmp;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
if (!all && rule->dl_msg_send > now) if (!all && rule->dl_msg_send > now)
return; return;
......
...@@ -315,7 +315,7 @@ lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt) ...@@ -315,7 +315,7 @@ lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt)
lp->lp_alive_count = 0; lp->lp_alive_count = 0;
lp->lp_timestamp = 0; lp->lp_timestamp = 0;
lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */ lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */
lp->lp_last_alive = cfs_time_current(); /* assumes alive */ lp->lp_last_alive = jiffies; /* assumes alive */
lp->lp_last_query = 0; /* haven't asked NI yet */ lp->lp_last_query = 0; /* haven't asked NI yet */
lp->lp_ping_timestamp = 0; lp->lp_ping_timestamp = 0;
lp->lp_ping_feats = LNET_PING_FEAT_INVAL; lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
......
...@@ -765,7 +765,7 @@ lnet_router_checker_event(struct lnet_event *event) ...@@ -765,7 +765,7 @@ lnet_router_checker_event(struct lnet_event *event)
* we ping alive routers to try to detect router death before * we ping alive routers to try to detect router death before
* apps get burned). * apps get burned).
*/ */
lnet_notify_locked(lp, 1, !event->status, cfs_time_current()); lnet_notify_locked(lp, 1, !event->status, jiffies);
/* /*
* The router checker will wake up very shortly and do the * The router checker will wake up very shortly and do the
...@@ -976,7 +976,7 @@ static void ...@@ -976,7 +976,7 @@ static void
lnet_ping_router_locked(struct lnet_peer *rtr) lnet_ping_router_locked(struct lnet_peer *rtr)
{ {
struct lnet_rc_data *rcd = NULL; struct lnet_rc_data *rcd = NULL;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
int secs; int secs;
lnet_peer_addref_locked(rtr); lnet_peer_addref_locked(rtr);
...@@ -1730,7 +1730,7 @@ int ...@@ -1730,7 +1730,7 @@ int
lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, unsigned long when) lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, unsigned long when)
{ {
struct lnet_peer *lp = NULL; struct lnet_peer *lp = NULL;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
int cpt = lnet_cpt_of_nid(nid); int cpt = lnet_cpt_of_nid(nid);
LASSERT(!in_interrupt()); LASSERT(!in_interrupt());
......
...@@ -324,7 +324,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write, ...@@ -324,7 +324,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
if (peer) { if (peer) {
lnet_nid_t nid = peer->lp_nid; lnet_nid_t nid = peer->lp_nid;
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
unsigned long deadline = peer->lp_ping_deadline; unsigned long deadline = peer->lp_ping_deadline;
int nrefs = peer->lp_refcount; int nrefs = peer->lp_refcount;
int nrtrrefs = peer->lp_rtr_refcount; int nrtrrefs = peer->lp_rtr_refcount;
...@@ -509,7 +509,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, ...@@ -509,7 +509,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
aliveness = peer->lp_alive ? "up" : "down"; aliveness = peer->lp_alive ? "up" : "down";
if (lnet_peer_aliveness_enabled(peer)) { if (lnet_peer_aliveness_enabled(peer)) {
unsigned long now = cfs_time_current(); unsigned long now = jiffies;
long delta; long delta;
delta = cfs_time_sub(now, peer->lp_last_alive); delta = cfs_time_sub(now, peer->lp_last_alive);
......
...@@ -75,7 +75,7 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc) ...@@ -75,7 +75,7 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc)
/* not aborted */ /* not aborted */
LASSERT(!crpc->crp_status); LASSERT(!crpc->crp_status);
crpc->crp_stamp = cfs_time_current(); crpc->crp_stamp = jiffies;
crpc->crp_status = rpc->crpc_status; crpc->crp_status = rpc->crpc_status;
} }
...@@ -297,14 +297,14 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error) ...@@ -297,14 +297,14 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
if (!crpc->crp_posted || /* not posted */ if (!crpc->crp_posted || /* not posted */
crpc->crp_stamp) { /* rpc done or aborted already */ crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) { if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current(); crpc->crp_stamp = jiffies;
crpc->crp_status = -EINTR; crpc->crp_status = -EINTR;
} }
spin_unlock(&rpc->crpc_lock); spin_unlock(&rpc->crpc_lock);
continue; continue;
} }
crpc->crp_stamp = cfs_time_current(); crpc->crp_stamp = jiffies;
crpc->crp_status = error; crpc->crp_status = error;
spin_unlock(&rpc->crpc_lock); spin_unlock(&rpc->crpc_lock);
......
...@@ -98,7 +98,7 @@ lstcon_node_find(struct lnet_process_id id, struct lstcon_node **ndpp, ...@@ -98,7 +98,7 @@ lstcon_node_find(struct lnet_process_id id, struct lstcon_node **ndpp,
ndl->ndl_node->nd_ref = 1; ndl->ndl_node->nd_ref = 1;
ndl->ndl_node->nd_id = id; ndl->ndl_node->nd_id = id;
ndl->ndl_node->nd_stamp = cfs_time_current(); ndl->ndl_node->nd_stamp = jiffies;
ndl->ndl_node->nd_state = LST_NODE_UNKNOWN; ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
ndl->ndl_node->nd_timeout = 0; ndl->ndl_node->nd_timeout = 0;
memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc)); memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
...@@ -1701,7 +1701,7 @@ lstcon_new_session_id(struct lst_sid *sid) ...@@ -1701,7 +1701,7 @@ lstcon_new_session_id(struct lst_sid *sid)
LNetGetId(1, &id); LNetGetId(1, &id);
sid->ses_nid = id.nid; sid->ses_nid = id.nid;
sid->ses_stamp = cfs_time_current(); sid->ses_stamp = jiffies;
} }
int int
......
...@@ -272,7 +272,7 @@ sfw_init_session(struct sfw_session *sn, struct lst_sid sid, ...@@ -272,7 +272,7 @@ sfw_init_session(struct sfw_session *sn, struct lst_sid sid,
sn->sn_id = sid; sn->sn_id = sid;
sn->sn_features = features; sn->sn_features = features;
sn->sn_timeout = session_timeout; sn->sn_timeout = session_timeout;
sn->sn_started = cfs_time_current(); sn->sn_started = jiffies;
timer->stt_data = sn; timer->stt_data = sn;
timer->stt_func = sfw_session_expired; timer->stt_func = sfw_session_expired;
......
...@@ -2255,7 +2255,7 @@ static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req) ...@@ -2255,7 +2255,7 @@ static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
if (req->rq_delay_limit != 0 && if (req->rq_delay_limit != 0 &&
time_before(cfs_time_add(req->rq_queued_time, time_before(cfs_time_add(req->rq_queued_time,
req->rq_delay_limit * HZ), req->rq_delay_limit * HZ),
cfs_time_current())) { jiffies)) {
return 1; return 1;
} }
return 0; return 0;
......
...@@ -913,7 +913,7 @@ static inline int obd_destroy_export(struct obd_export *exp) ...@@ -913,7 +913,7 @@ static inline int obd_destroy_export(struct obd_export *exp)
/* /*
* @max_age is the oldest time in jiffies that we accept using a cached data. * @max_age is the oldest time in jiffies that we accept using a cached data.
* If the cache is older than @max_age we will get a new value from the * If the cache is older than @max_age we will get a new value from the
* target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. * target. Use a value of "jiffies + HZ" to guarantee freshness.
*/ */
static inline int obd_statfs_async(struct obd_export *exp, static inline int obd_statfs_async(struct obd_export *exp,
struct obd_info *oinfo, struct obd_info *oinfo,
...@@ -975,7 +975,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp, ...@@ -975,7 +975,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
/* /*
* @max_age is the oldest time in jiffies that we accept using a cached data. * @max_age is the oldest time in jiffies that we accept using a cached data.
* If the cache is older than @max_age we will get a new value from the * If the cache is older than @max_age we will get a new value from the
* target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. * target. Use a value of "jiffies + HZ" to guarantee freshness.
*/ */
static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_statfs *osfs, __u64 max_age, struct obd_statfs *osfs, __u64 max_age,
...@@ -997,7 +997,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, ...@@ -997,7 +997,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
if (rc == 0) { if (rc == 0) {
spin_lock(&obd->obd_osfs_lock); spin_lock(&obd->obd_osfs_lock);
memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs)); memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
obd->obd_osfs_age = cfs_time_current_64(); obd->obd_osfs_age = get_jiffies_64();
spin_unlock(&obd->obd_osfs_lock); spin_unlock(&obd->obd_osfs_lock);
} }
} else { } else {
......
...@@ -243,7 +243,7 @@ static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) ...@@ -243,7 +243,7 @@ static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{ {
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
lock->l_last_used = cfs_time_current(); lock->l_last_used = jiffies;
LASSERT(list_empty(&lock->l_lru)); LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
list_add_tail(&lock->l_lru, &ns->ns_unused_list); list_add_tail(&lock->l_lru, &ns->ns_unused_list);
......
...@@ -325,7 +325,7 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req, ...@@ -325,7 +325,7 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
lock_res_and_lock(lock); lock_res_and_lock(lock);
if (lock->l_granted_mode == LCK_PW && if (lock->l_granted_mode == LCK_PW &&
!lock->l_readers && !lock->l_writers && !lock->l_readers && !lock->l_writers &&
cfs_time_after(cfs_time_current(), cfs_time_after(jiffies,
cfs_time_add(lock->l_last_used, cfs_time_add(lock->l_last_used,
10 * HZ))) { 10 * HZ))) {
unlock_res_and_lock(lock); unlock_res_and_lock(lock);
......
...@@ -116,7 +116,7 @@ static void ldlm_expired_completion_wait(struct ldlm_lock *lock, __u32 conn_cnt) ...@@ -116,7 +116,7 @@ static void ldlm_expired_completion_wait(struct ldlm_lock *lock, __u32 conn_cnt)
(s64)lock->l_last_activity, (s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() - (s64)(ktime_get_real_seconds() -
lock->l_last_activity)); lock->l_last_activity));
if (cfs_time_after(cfs_time_current(), next_dump)) { if (cfs_time_after(jiffies, next_dump)) {
last_dump = next_dump; last_dump = next_dump;
next_dump = cfs_time_shift(300); next_dump = cfs_time_shift(300);
ldlm_namespace_dump(D_DLMTRACE, ldlm_namespace_dump(D_DLMTRACE,
...@@ -1161,7 +1161,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, ...@@ -1161,7 +1161,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
int unused, int added, int unused, int added,
int count) int count)
{ {
unsigned long cur = cfs_time_current(); unsigned long cur = jiffies;
struct ldlm_pool *pl = &ns->ns_pool; struct ldlm_pool *pl = &ns->ns_pool;
__u64 slv, lvf, lv; __u64 slv, lvf, lv;
unsigned long la; unsigned long la;
...@@ -1176,7 +1176,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, ...@@ -1176,7 +1176,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
* Despite of the LV, It doesn't make sense to keep the lock which * Despite of the LV, It doesn't make sense to keep the lock which
* is unused for ns_max_age time. * is unused for ns_max_age time.
*/ */
if (cfs_time_after(cfs_time_current(), if (cfs_time_after(jiffies,
cfs_time_add(lock->l_last_used, ns->ns_max_age))) cfs_time_add(lock->l_last_used, ns->ns_max_age)))
return LDLM_POLICY_CANCEL_LOCK; return LDLM_POLICY_CANCEL_LOCK;
...@@ -1233,7 +1233,7 @@ static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns, ...@@ -1233,7 +1233,7 @@ static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
int count) int count)
{ {
if ((added >= count) && if ((added >= count) &&
time_before(cfs_time_current(), time_before(jiffies,
cfs_time_add(lock->l_last_used, ns->ns_max_age))) cfs_time_add(lock->l_last_used, ns->ns_max_age)))
return LDLM_POLICY_KEEP_LOCK; return LDLM_POLICY_KEEP_LOCK;
...@@ -1380,7 +1380,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, ...@@ -1380,7 +1380,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
continue; continue;
last_use = lock->l_last_used; last_use = lock->l_last_used;
if (last_use == cfs_time_current()) if (last_use == jiffies)
continue; continue;
/* Somebody is already doing CANCEL. No need for this /* Somebody is already doing CANCEL. No need for this
......
...@@ -1316,7 +1316,7 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns) ...@@ -1316,7 +1316,7 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
CDEBUG(level, "--- Namespace: %s (rc: %d, side: client)\n", CDEBUG(level, "--- Namespace: %s (rc: %d, side: client)\n",
ldlm_ns_name(ns), atomic_read(&ns->ns_bref)); ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
if (time_before(cfs_time_current(), ns->ns_next_dump)) if (time_before(jiffies, ns->ns_next_dump))
return; return;
cfs_hash_for_each_nolock(ns->ns_rs_hash, cfs_hash_for_each_nolock(ns->ns_rs_hash,
......
...@@ -1110,7 +1110,7 @@ static inline int ll_glimpse_size(struct inode *inode) ...@@ -1110,7 +1110,7 @@ static inline int ll_glimpse_size(struct inode *inode)
down_read(&lli->lli_glimpse_sem); down_read(&lli->lli_glimpse_sem);
rc = cl_glimpse_size(inode); rc = cl_glimpse_size(inode);
lli->lli_glimpse_time = cfs_time_current(); lli->lli_glimpse_time = jiffies;
up_read(&lli->lli_glimpse_sem); up_read(&lli->lli_glimpse_sem);
return rc; return rc;
} }
......
...@@ -535,7 +535,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai) ...@@ -535,7 +535,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
cl_agl(inode); cl_agl(inode);
lli->lli_agl_index = 0; lli->lli_agl_index = 0;
lli->lli_glimpse_time = cfs_time_current(); lli->lli_glimpse_time = jiffies;
up_write(&lli->lli_glimpse_sem); up_write(&lli->lli_glimpse_sem);
CDEBUG(D_READA, "Handled (init) async glimpse: inode= " CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
......
...@@ -160,7 +160,7 @@ static int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, ...@@ -160,7 +160,7 @@ static int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
spin_lock(&obd->obd_osfs_lock); spin_lock(&obd->obd_osfs_lock);
memcpy(&obd->obd_osfs, osfs, sizeof(*osfs)); memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
obd->obd_osfs_age = cfs_time_current_64(); obd->obd_osfs_age = get_jiffies_64();
spin_unlock(&obd->obd_osfs_lock); spin_unlock(&obd->obd_osfs_lock);
return 0; return 0;
} }
...@@ -277,7 +277,7 @@ static int cb_statfs_update(void *cookie, int rc) ...@@ -277,7 +277,7 @@ static int cb_statfs_update(void *cookie, int rc)
spin_lock(&tgtobd->obd_osfs_lock); spin_lock(&tgtobd->obd_osfs_lock);
memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs)); memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0) if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
tgtobd->obd_osfs_age = cfs_time_current_64(); tgtobd->obd_osfs_age = get_jiffies_64();
spin_unlock(&tgtobd->obd_osfs_lock); spin_unlock(&tgtobd->obd_osfs_lock);
out_update: out_update:
......
...@@ -1290,7 +1290,7 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap, ...@@ -1290,7 +1290,7 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
result = cl_page_make_ready(env, page, CRT_WRITE); result = cl_page_make_ready(env, page, CRT_WRITE);
if (result == 0) if (result == 0)
opg->ops_submit_time = cfs_time_current(); opg->ops_submit_time = jiffies;
return result; return result;
} }
......
...@@ -300,7 +300,7 @@ static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj, ...@@ -300,7 +300,7 @@ static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
void osc_object_set_contended(struct osc_object *obj) void osc_object_set_contended(struct osc_object *obj)
{ {
obj->oo_contention_time = cfs_time_current(); obj->oo_contention_time = jiffies;
/* mb(); */ /* mb(); */
obj->oo_contended = 1; obj->oo_contended = 1;
} }
...@@ -314,7 +314,7 @@ int osc_object_is_contended(struct osc_object *obj) ...@@ -314,7 +314,7 @@ int osc_object_is_contended(struct osc_object *obj)
{ {
struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev); struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
int osc_contention_time = dev->od_contention_time; int osc_contention_time = dev->od_contention_time;
unsigned long cur_time = cfs_time_current(); unsigned long cur_time = jiffies;
unsigned long retry_time; unsigned long retry_time;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION)) if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
......
...@@ -125,7 +125,7 @@ static inline unsigned long osc_submit_duration(struct osc_page *opg) ...@@ -125,7 +125,7 @@ static inline unsigned long osc_submit_duration(struct osc_page *opg)
if (opg->ops_submit_time == 0) if (opg->ops_submit_time == 0)
return 0; return 0;
return (cfs_time_current() - opg->ops_submit_time); return (jiffies - opg->ops_submit_time);
} }
static int osc_page_print(const struct lu_env *env, static int osc_page_print(const struct lu_env *env,
...@@ -312,7 +312,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, ...@@ -312,7 +312,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_cmd |= OBD_BRW_NOQUOTA; oap->oap_cmd |= OBD_BRW_NOQUOTA;
} }
opg->ops_submit_time = cfs_time_current(); opg->ops_submit_time = jiffies;
osc_page_transfer_get(opg, "transfer\0imm"); osc_page_transfer_get(opg, "transfer\0imm");
osc_page_transfer_add(env, opg, crt); osc_page_transfer_add(env, opg, crt);
} }
......
...@@ -741,7 +741,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) ...@@ -741,7 +741,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
static int osc_should_shrink_grant(struct client_obd *client) static int osc_should_shrink_grant(struct client_obd *client)
{ {
unsigned long time = cfs_time_current(); unsigned long time = jiffies;
unsigned long next_shrink = client->cl_next_shrink_grant; unsigned long next_shrink = client->cl_next_shrink_grant;
if ((client->cl_import->imp_connect_data.ocd_connect_flags & if ((client->cl_import->imp_connect_data.ocd_connect_flags &
......
...@@ -1050,7 +1050,7 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set, ...@@ -1050,7 +1050,7 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
list_add_tail(&req->rq_set_chain, &set->set_requests); list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set; req->rq_set = set;
atomic_inc(&set->set_remaining); atomic_inc(&set->set_remaining);
req->rq_queued_time = cfs_time_current(); req->rq_queued_time = jiffies;
if (req->rq_reqmsg) if (req->rq_reqmsg)
lustre_msg_set_jobid(req->rq_reqmsg, NULL); lustre_msg_set_jobid(req->rq_reqmsg, NULL);
...@@ -1081,7 +1081,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, ...@@ -1081,7 +1081,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
spin_lock(&set->set_new_req_lock); spin_lock(&set->set_new_req_lock);
/* The set takes over the caller's request reference. */ /* The set takes over the caller's request reference. */
req->rq_set = set; req->rq_set = set;
req->rq_queued_time = cfs_time_current(); req->rq_queued_time = jiffies;
list_add_tail(&req->rq_set_chain, &set->set_new_requests); list_add_tail(&req->rq_set_chain, &set->set_new_requests);
count = atomic_inc_return(&set->set_new_count); count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock); spin_unlock(&set->set_new_req_lock);
......
...@@ -508,7 +508,7 @@ static int import_select_connection(struct obd_import *imp) ...@@ -508,7 +508,7 @@ static int import_select_connection(struct obd_import *imp)
imp->imp_obd->obd_name, at_get(at)); imp->imp_obd->obd_name, at_get(at));
} }
imp_conn->oic_last_attempt = cfs_time_current_64(); imp_conn->oic_last_attempt = get_jiffies_64();
/* switch connection, don't mind if it's same as the current one */ /* switch connection, don't mind if it's same as the current one */
ptlrpc_connection_put(imp->imp_connection); ptlrpc_connection_put(imp->imp_connection);
......
...@@ -142,7 +142,7 @@ static long pinger_check_timeout(unsigned long time) ...@@ -142,7 +142,7 @@ static long pinger_check_timeout(unsigned long time)
mutex_unlock(&pinger_mutex); mutex_unlock(&pinger_mutex);
return cfs_time_sub(cfs_time_add(time, timeout * HZ), return cfs_time_sub(cfs_time_add(time, timeout * HZ),
cfs_time_current()); jiffies);
} }
static bool ir_up; static bool ir_up;
...@@ -223,7 +223,7 @@ static DECLARE_DELAYED_WORK(ping_work, ptlrpc_pinger_main); ...@@ -223,7 +223,7 @@ static DECLARE_DELAYED_WORK(ping_work, ptlrpc_pinger_main);
static void ptlrpc_pinger_main(struct work_struct *ws) static void ptlrpc_pinger_main(struct work_struct *ws)
{ {
unsigned long this_ping = cfs_time_current(); unsigned long this_ping = jiffies;
long time_to_next_wake; long time_to_next_wake;
struct timeout_item *item; struct timeout_item *item;
struct obd_import *imp; struct obd_import *imp;
......
...@@ -336,7 +336,7 @@ static void ptlrpc_at_timer(struct timer_list *t) ...@@ -336,7 +336,7 @@ static void ptlrpc_at_timer(struct timer_list *t)
svcpt = from_timer(svcpt, t, scp_at_timer); svcpt = from_timer(svcpt, t, scp_at_timer);
svcpt->scp_at_check = 1; svcpt->scp_at_check = 1;
svcpt->scp_at_checktime = cfs_time_current(); svcpt->scp_at_checktime = jiffies;
wake_up(&svcpt->scp_waitq); wake_up(&svcpt->scp_waitq);
} }
...@@ -1153,7 +1153,7 @@ static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) ...@@ -1153,7 +1153,7 @@ static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
spin_unlock(&svcpt->scp_at_lock); spin_unlock(&svcpt->scp_at_lock);
return; return;
} }
delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime); delay = cfs_time_sub(jiffies, svcpt->scp_at_checktime);
svcpt->scp_at_check = 0; svcpt->scp_at_check = 0;
if (array->paa_count == 0) { if (array->paa_count == 0) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册