Commit 2352923c authored by Linus Torvalds
Pull percpu updates from Dennis Zhou:
 "This has a change to fix percpu-refcount for RT kernels because
  rcu-sched disables preemption and the refcount release callback might
  acquire a spinlock"
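
In concrete terms: on PREEMPT_RT, spinlock_t is a sleeping lock and must not be taken with preemption disabled, yet rcu_read_lock_sched() disables preemption and percpu_ref_put() invokes the release callback from inside that read-side critical section. Below is a minimal sketch of the affected pattern, assuming a hypothetical my_obj structure with its own lock; the percpu_ref_* calls and the release-callback signature are the real API, everything named my_* is illustrative:

#include <linux/percpu-refcount.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;
	spinlock_t lock;	/* a sleeping lock on PREEMPT_RT */
};

/* Runs when the last reference is dropped, straight from percpu_ref_put(). */
static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	/*
	 * Under the old rcu_read_lock_sched()-based fast path this ran
	 * with preemption disabled, so taking a spinlock_t here was
	 * invalid on RT; with normal RCU, preemption stays enabled.
	 */
	spin_lock(&obj->lock);
	/* ... tear down state guarded by the lock ... */
	spin_unlock(&obj->lock);
	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	spin_lock_init(&obj->lock);
	if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

Because the release callback is called directly from within the reader section, the RCU flavor chosen for the fast paths determines what the callback is allowed to do; switching to normal RCU lifts the no-preemption restriction.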

* 'for-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
  Revert "percpu: add __percpu to SHIFT_PERCPU_PTR"
  percpu-refcount: Use normal instead of RCU-sched"
  percpu: add __percpu to SHIFT_PERCPU_PTR
parents 76bb8b05 ba30e274
include/linux/percpu-refcount.h +8 −8
@@ -186,14 +186,14 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_add(*percpu_count, nr);
 	else
 		atomic_long_add(nr, &ref->count);
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }
 
 /**
@@ -223,7 +223,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	unsigned long __percpu *percpu_count;
 	bool ret;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
@@ -232,7 +232,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 
 	return ret;
 }
@@ -257,7 +257,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	unsigned long __percpu *percpu_count;
 	bool ret = false;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
@@ -266,7 +266,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 
 	return ret;
 }
@@ -285,14 +285,14 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_sub(*percpu_count, nr);
 	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
 		ref->release(ref);
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }
 
 /**
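
The swap is safe on the writer side because, since the RCU flavors were unified, a normal RCU grace period also waits for preemption-disabled regions, so the call_rcu()-based mode switching in lib/percpu-refcount.c still pairs correctly with plain rcu_read_lock() readers. For context, a hedged caller-side sketch of the fast paths patched above; my_obj is the hypothetical object from the earlier sketch, while the percpu_ref_* calls are the real API:

static int my_obj_do_work(struct my_obj *obj)
{
	/* Fails once the ref has been killed via percpu_ref_kill(). */
	if (!percpu_ref_tryget_live(&obj->ref))
		return -ENODEV;

	/* ... safe to use obj: it cannot be released underneath us ... */

	/* May invoke my_obj_release() if this was the last reference. */
	percpu_ref_put(&obj->ref);
	return 0;
}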