Commit 219e6de6 authored by Arnd Bergmann, committed by Greg Kroah-Hartman
Browse files

staging/lustre: use 64-bit times for request times



All request timestamps and deadlines in lustre are recorded in time_t
and timeval units, which overflow in 2038 on 32-bit systems.

In this patch, I'm converting them to time64_t and timespec64,
respectively. Unfortunately, this makes a relatively large patch,
but I could not find an obvious way to split it up some more without
breaking atomicity of the change.

Also unfortunately, this introduces two instances of div_u64_rem()
in the request path, which can be slow on 32-bit architectures. This
can probably be avoided by a larger restructuring of the code, but
it is unlikely that lustre is used in performance critical setups
on 32-bit architectures, so it seems better to optimize for correctness
rather than speed here.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8cc98071
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -74,7 +74,7 @@ struct ptlrpc_at_array {
	struct list_head       *paa_reqs_array; /** array to hold requests */
	__u32	     paa_size;       /** the size of array */
	__u32	     paa_count;      /** the total count of reqs */
	time_t	    paa_deadline;   /** the earliest deadline of reqs */
	time64_t     paa_deadline;   /** the earliest deadline of reqs */
	__u32	    *paa_reqs_count; /** the count of reqs in each entry */
};

+11 −11
Original line number Diff line number Diff line
@@ -1440,7 +1440,7 @@ struct ptlrpc_request {

	/* server-side... */
	/** request arrival time */
	struct timeval       rq_arrival_time;
	struct timespec64	rq_arrival_time;
	/** separated reply state */
	struct ptlrpc_reply_state *rq_reply_state;
	/** incoming request buffer */
@@ -1477,18 +1477,18 @@ struct ptlrpc_request {
	/**
	 * when request/reply sent (secs), or time when request should be sent
	 */
	time_t rq_sent;
	time64_t rq_sent;
	/** time for request really sent out */
	time_t rq_real_sent;
	time64_t rq_real_sent;

	/** when request must finish. volatile
	 * so that servers' early reply updates to the deadline aren't
	 * kept in per-cpu cache */
	volatile time_t rq_deadline;
	volatile time64_t rq_deadline;
	/** when req reply unlink must finish. */
	time_t rq_reply_deadline;
	time64_t rq_reply_deadline;
	/** when req bulk unlink must finish. */
	time_t rq_bulk_deadline;
	time64_t rq_bulk_deadline;
	/**
	 * service time estimate (secs)
	 * If the request is not served by this time, it is marked as timed out.
@@ -2323,7 +2323,7 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
	desc = req->rq_bulk;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
	    req->rq_bulk_deadline > get_seconds())
	    req->rq_bulk_deadline > ktime_get_real_seconds())
		return 1;

	if (!desc)
@@ -2727,7 +2727,7 @@ static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
{
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
	    req->rq_reply_deadline > get_seconds())
	    req->rq_reply_deadline > ktime_get_real_seconds())
		return 0;
	return req->rq_early;
}
@@ -2739,7 +2739,7 @@ static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
	    req->rq_reply_deadline > get_seconds())
	    req->rq_reply_deadline > ktime_get_real_seconds())
		return 0;
	return req->rq_replied;
}
@@ -2749,7 +2749,7 @@ static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
	    req->rq_reply_deadline > get_seconds())
	    req->rq_reply_deadline > ktime_get_real_seconds())
		return 1;
	return req->rq_receiving_reply;
}
@@ -2761,7 +2761,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)

	spin_lock(&req->rq_lock);
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
	    req->rq_reply_deadline > get_seconds()) {
	    req->rq_reply_deadline > ktime_get_real_seconds()) {
		spin_unlock(&req->rq_lock);
		return 1;
	}
+1 −1
Original line number Diff line number Diff line
@@ -864,7 +864,7 @@ resend:
	if (resends) {
		req->rq_generation_set = 1;
		req->rq_import_generation = generation;
		req->rq_sent = get_seconds() + resends;
		req->rq_sent = ktime_get_real_seconds() + resends;
	}

	/* It is important to obtain rpc_lock first (if applicable), so that
+1 −1
Original line number Diff line number Diff line
@@ -271,7 +271,7 @@ rebuild:
	if (resends) {
		req->rq_generation_set = 1;
		req->rq_import_generation = generation;
		req->rq_sent = get_seconds() + resends;
		req->rq_sent = ktime_get_real_seconds() + resends;
	}
	level = LUSTRE_IMP_FULL;
 resend:
+2 −2
Original line number Diff line number Diff line
@@ -1690,9 +1690,9 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
	/* cap resend delay to the current request timeout, this is similar to
	 * what ptlrpc does (see after_reply()) */
	if (aa->aa_resends > new_req->rq_timeout)
		new_req->rq_sent = get_seconds() + new_req->rq_timeout;
		new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
	else
		new_req->rq_sent = get_seconds() + aa->aa_resends;
		new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
	new_req->rq_generation_set = 1;
	new_req->rq_import_generation = request->rq_import_generation;

Loading