Commit 8d73b2ae authored by Arnd Bergmann's avatar Arnd Bergmann Committed by Corey Minyard
Browse files

ipmi: kill off 'timespec' usage again



'struct timespec' is getting removed from the kernel. The usage in ipmi
was fixed before in commit 48862ea2 ("ipmi: Update timespec usage
to timespec64"), but unfortunately it crept back in.

The busy looping code can better use ktime_t anyway, so use that
there to simplify the implementation.

Fixes: cbb19cb1 ("ipmi_si: Convert timespec64 to timespec")
Signed-off-by: default avatarArnd Bergmann <arnd@arndb.de>
Message-Id: <20191108203435.112759-5-arnd@arndb.de>
Signed-off-by: default avatarCorey Minyard <cminyard@mvista.com>
parent 0d8633bf
Loading
Loading
Loading
Loading
+13 −27
Original line number Diff line number Diff line
@@ -265,10 +265,10 @@ static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
-	struct timespec t;
+	struct timespec64 t;

-	ktime_get_ts(&t);
-	pr_debug("**%s: %ld.%9.9ld\n", msg, (long) t.tv_sec, t.tv_nsec);
+	ktime_get_ts64(&t);
+	pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
@@ -935,38 +935,25 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
}

/*
- * Use -1 in the nsec value of the busy waiting timespec to tell that
- * we are spinning in kipmid looking for something and not delaying
- * between checks
+ * Use -1 as a special constant to tell that we are spinning in kipmid
+ * looking for something and not delaying between checks
  */
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
-{
-	ts->tv_nsec = -1;
-}
-static inline int ipmi_si_is_busy(struct timespec *ts)
-{
-	return ts->tv_nsec != -1;
-}
-
+#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
 static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
 					 const struct smi_info *smi_info,
-					 struct timespec *busy_until)
+					 ktime_t *busy_until)
 {
 	unsigned int max_busy_us = 0;

 	if (smi_info->si_num < num_max_busy_us)
 		max_busy_us = kipmid_max_busy_us[smi_info->si_num];
 	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
-		ipmi_si_set_not_busy(busy_until);
-	else if (!ipmi_si_is_busy(busy_until)) {
-		ktime_get_ts(busy_until);
-		timespec_add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
+		*busy_until = IPMI_TIME_NOT_BUSY;
+	else if (*busy_until == IPMI_TIME_NOT_BUSY) {
+		*busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
 	} else {
-		struct timespec now;
-
-		ktime_get_ts(&now);
-		if (unlikely(timespec_compare(&now, busy_until) > 0)) {
-			ipmi_si_set_not_busy(busy_until);
+		if (unlikely(ktime_get() > *busy_until)) {
+			*busy_until = IPMI_TIME_NOT_BUSY;
 			return false;
 		}
 	}
@@ -988,9 +975,8 @@ static int ipmi_thread(void *data)
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
-	struct timespec busy_until = { 0, 0 };
+	ktime_t busy_until = IPMI_TIME_NOT_BUSY;

-	ipmi_si_set_not_busy(&busy_until);
	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;