Commit 8ac88f71 authored by Joel Fernandes (Google), committed by Paul E. McKenney

rcu/tree: Keep kfree_rcu() awake during lock contention



On PREEMPT_RT kernels, the krcp spinlock gets converted to an rt-mutex
and causes kfree_rcu() callers to sleep. This makes it unusable for
callers in purely atomic sections such as non-threaded IRQ handlers and
raw spinlock sections. Fix it by converting the spinlock to a raw
spinlock.

After vetting all code paths, there is no reason to believe that the raw
spinlock will hurt RT latencies, as it is not held for long.
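
For illustration only (hypothetical names, not part of this patch): on
PREEMPT_RT, a spinlock_t is backed by a sleeping rt-mutex, while a
raw_spinlock_t keeps truly spinning, so only the latter may be taken
from hard-IRQ context. A minimal sketch of the pattern this change
relies on:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	/* Hypothetical lock and handler, for illustration. */
	static DEFINE_RAW_SPINLOCK(demo_lock);

	static irqreturn_t demo_irq_handler(int irq, void *data)
	{
		unsigned long flags;

		/* Legal even on PREEMPT_RT: a raw_spinlock_t never sleeps. */
		raw_spin_lock_irqsave(&demo_lock, flags);
		/* Keep the critical section short so RT latency is unaffected. */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
		return IRQ_HANDLED;
	}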

Cc: bigeasy@linutronix.de
Cc: Uladzislau Rezki <urezki@gmail.com>
Reviewed-by: Uladzislau Rezki <urezki@gmail.com>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 8e11690d
kernel/rcu/tree.c: +15 −15
@@ -3016,7 +3016,7 @@ struct kfree_rcu_cpu {
 	struct kfree_rcu_bulk_data *bhead;
 	struct kfree_rcu_bulk_data *bcached;
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
 	bool monitor_todo;
 	bool initialized;
@@ -3049,12 +3049,12 @@ static void kfree_rcu_work(struct work_struct *work)
 	krwp = container_of(to_rcu_work(work),
 			    struct kfree_rcu_cpu_work, rcu_work);
 	krcp = krwp->krcp;
-	spin_lock_irqsave(&krcp->lock, flags);
+	raw_spin_lock_irqsave(&krcp->lock, flags);
 	head = krwp->head_free;
 	krwp->head_free = NULL;
 	bhead = krwp->bhead_free;
 	krwp->bhead_free = NULL;
-	spin_unlock_irqrestore(&krcp->lock, flags);
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 
 	/* "bhead" is now private, so traverse locklessly. */
 	for (; bhead; bhead = bnext) {
@@ -3157,14 +3157,14 @@ static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
 	krcp->monitor_todo = false;
 	if (queue_kfree_rcu_work(krcp)) {
 		// Success! Our job is done here.
-		spin_unlock_irqrestore(&krcp->lock, flags);
+		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 		return;
 	}
 
 	// Previous RCU batch still in progress, try again later.
 	krcp->monitor_todo = true;
 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
-	spin_unlock_irqrestore(&krcp->lock, flags);
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 }
 
 /*
@@ -3177,11 +3177,11 @@ static void kfree_rcu_monitor(struct work_struct *work)
 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
 						 monitor_work.work);
 
-	spin_lock_irqsave(&krcp->lock, flags);
+	raw_spin_lock_irqsave(&krcp->lock, flags);
 	if (krcp->monitor_todo)
 		kfree_rcu_drain_unlock(krcp, flags);
 	else
-		spin_unlock_irqrestore(&krcp->lock, flags);
+		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 }
 
 static inline bool
@@ -3252,7 +3252,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	local_irq_save(flags);	// For safely calling this_cpu_ptr().
 	krcp = this_cpu_ptr(&krc);
 	if (krcp->initialized)
-		spin_lock(&krcp->lock);
+		raw_spin_lock(&krcp->lock);
 
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(head)) {
@@ -3283,7 +3283,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 
 unlock_return:
 	if (krcp->initialized)
-		spin_unlock(&krcp->lock);
+		raw_spin_unlock(&krcp->lock);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
@@ -3315,11 +3315,11 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		count = krcp->count;
-		spin_lock_irqsave(&krcp->lock, flags);
+		raw_spin_lock_irqsave(&krcp->lock, flags);
 		if (krcp->monitor_todo)
 			kfree_rcu_drain_unlock(krcp, flags);
 		else
-			spin_unlock_irqrestore(&krcp->lock, flags);
+			raw_spin_unlock_irqrestore(&krcp->lock, flags);
 
 		sc->nr_to_scan -= count;
 		freed += count;
@@ -3346,15 +3346,15 @@ void __init kfree_rcu_scheduler_running(void)
 	for_each_online_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
-		spin_lock_irqsave(&krcp->lock, flags);
+		raw_spin_lock_irqsave(&krcp->lock, flags);
 		if (!krcp->head || krcp->monitor_todo) {
-			spin_unlock_irqrestore(&krcp->lock, flags);
+			raw_spin_unlock_irqrestore(&krcp->lock, flags);
 			continue;
 		}
 		krcp->monitor_todo = true;
 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
 					 KFREE_DRAIN_JIFFIES);
-		spin_unlock_irqrestore(&krcp->lock, flags);
+		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 }

@@ -4250,7 +4250,7 @@ static void __init kfree_rcu_batch_init(void)
 	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
-		spin_lock_init(&krcp->lock);
+		raw_spin_lock_init(&krcp->lock);
 		for (i = 0; i < KFREE_N_BATCHES; i++) {
 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
 			krcp->krw_arr[i].krcp = krcp;
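
As a usage note (hypothetical structure and helper, for illustration
only): with krcp->lock now raw, kfree_rcu() may be sketched as callable
from purely atomic contexts on PREEMPT_RT, for example:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_node {
		int data;
		struct rcu_head rcu;	/* embedded for kfree_rcu() */
	};

	/*
	 * Hypothetical caller: since kfree_call_rcu() now takes a raw
	 * spinlock, this is safe even from a non-threaded IRQ handler
	 * or under a raw spinlock.
	 */
	static void demo_release(struct demo_node *node)
	{
		kfree_rcu(node, rcu);	/* queues the free; never sleeps */
	}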