Commit 189a6883 authored by Joel Fernandes (Google), committed by Paul E. McKenney

rcu: Remove kfree_call_rcu_nobatch()



Now that the kfree_rcu() special-casing has been removed from tree RCU,
this commit removes kfree_call_rcu_nobatch() since it is no longer needed.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 77a40f97
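
[Editorial context, not part of the commit: the batched kfree_rcu() path queues each rcu_head on a per-CPU list and lets a single grace period cover the whole batch, instead of paying for one grace period per object as the removed kfree_call_rcu_nobatch() did. Below is a minimal userspace sketch of that batching idea; it is illustrative only, and gp_wait(), sketch_kfree_rcu(), and sketch_drain() are made-up stand-ins, not kernel API.]

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	/* Stand-in for struct rcu_head: an intrusive singly linked node. */
	struct head {
		struct head *next;
	};

	struct obj {
		int payload;
		struct head rh;		/* embedded, like rcu_head in kernel objects */
	};

	static struct head *batch;	/* per-CPU list in the kernel; one global here */

	static void gp_wait(void)	/* stand-in for waiting out one RCU grace period */
	{
		puts("one grace period elapses for the whole batch");
	}

	/* Queue an object; the kernel would also arm a monitor timer here. */
	static void sketch_kfree_rcu(struct obj *p)
	{
		p->rh.next = batch;
		batch = &p->rh;
	}

	/* Drain: one grace period, then free every queued object. */
	static void sketch_drain(void)
	{
		gp_wait();
		while (batch) {
			struct head *h = batch;

			batch = h->next;
			/* Recover the enclosing object from the embedded node. */
			free((char *)h - offsetof(struct obj, rh));
		}
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			struct obj *p = malloc(sizeof(*p));

			p->payload = i;
			sketch_kfree_rcu(p);
		}
		sketch_drain();
		return 0;
	}
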
Documentation/admin-guide/kernel-parameters.txt  +0 −4
@@ -3991,10 +3991,6 @@
 			Number of loops doing rcuperf.kfree_alloc_num number
 			of allocations and frees.
 
-	rcuperf.kfree_no_batch= [KNL]
-			Use the non-batching (less efficient) version of kfree_rcu().
-			This is useful for comparing with the batched version.
-
 	rcuperf.nreaders= [KNL]
 			Set number of RCU readers.  The value -1 selects
 			N, where N is the number of CPUs.  A value
include/linux/rcutiny.h  +0 −5
@@ -39,11 +39,6 @@ static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	call_rcu(head, func);
 }
 
-static inline void kfree_call_rcu_nobatch(struct rcu_head *head, rcu_callback_t func)
-{
-	call_rcu(head, func);
-}
-
 void rcu_qs(void);
 
 static inline void rcu_softirq_qs(void)
include/linux/rcutree.h  +0 −1
@@ -34,7 +34,6 @@ static inline void rcu_virt_note_context_switch(int cpu)
 
 void synchronize_rcu_expedited(void);
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
-void kfree_call_rcu_nobatch(struct rcu_head *head, rcu_callback_t func);
 
 void rcu_barrier(void);
 bool rcu_eqs_special_set(int cpu);
kernel/rcu/rcuperf.c  +1 −9
@@ -593,7 +593,6 @@ rcu_perf_shutdown(void *arg)
 torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
 torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
 torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
-torture_param(int, kfree_no_batch, 0, "Use the non-batching (slower) version of kfree_rcu().");
 
 static struct task_struct **kfree_reader_tasks;
 static int kfree_nrealthreads;
@@ -632,14 +631,7 @@ kfree_perf_thread(void *arg)
 			if (!alloc_ptr)
 				return -ENOMEM;
 
-			if (!kfree_no_batch) {
-				kfree_rcu(alloc_ptr, rh);
-			} else {
-				rcu_callback_t cb;
-
-				cb = (rcu_callback_t)(unsigned long)offsetof(struct kfree_obj, rh);
-				kfree_call_rcu_nobatch(&(alloc_ptr->rh), cb);
-			}
+			kfree_rcu(alloc_ptr, rh);
 		}
 
 		cond_resched();
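
[Editorial context, not part of the commit: the removed else-branch above hand-builds what kfree_rcu() does internally. The byte offset of the rcu_head inside the enclosing object is cast to rcu_callback_t, and the callback-invocation path treats any such small "pointer" (below 4096, per the kernel's __is_kfree_rcu_offset() test visible in the tree.c hunk below) as an offset, doing kfree((void *)head - offset) rather than calling through it. A hedged userspace demonstration of that round trip follows; only the offset-encoding trick and the 4096 threshold mirror the kernel, and everything else is invented for illustration.]

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	struct head { struct head *next; };
	typedef void (*callback_t)(struct head *);

	struct kfree_obj {
		char payload[64];
		struct head rh;
	};

	/* Mirrors __is_kfree_rcu_offset(): values this small cannot be code. */
	static int is_kfree_offset(uintptr_t v)
	{
		return v < 4096;
	}

	static void invoke(struct head *h, callback_t cb)
	{
		uintptr_t v = (uintptr_t)cb;

		if (is_kfree_offset(v))
			free((char *)h - v);	/* decode: offset back to base pointer */
		else
			cb(h);			/* a real callback function */
	}

	int main(void)
	{
		struct kfree_obj *p = malloc(sizeof(*p));

		/* Encode the member offset as a fake callback pointer,
		 * as the removed rcuperf branch did with rcu_callback_t. */
		callback_t cb = (callback_t)(uintptr_t)offsetof(struct kfree_obj, rh);

		printf("encoded offset: %zu\n", offsetof(struct kfree_obj, rh));
		invoke(&p->rh, cb);	/* frees the whole kfree_obj */
		return 0;
	}
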
kernel/rcu/tree.c  +4 −14
@@ -2763,8 +2763,10 @@ static void kfree_rcu_work(struct work_struct *work)
 		rcu_lock_acquire(&rcu_callback_map);
 		trace_rcu_invoke_kfree_callback(rcu_state.name, head, offset);
 
-		/* Could be possible to optimize with kfree_bulk in future */
-		kfree((void *)head - offset);
+		if (!WARN_ON_ONCE(!__is_kfree_rcu_offset(offset))) {
+			/* Could be optimized with kfree_bulk() in future. */
+			kfree((void *)head - offset);
+		}
 
 		rcu_lock_release(&rcu_callback_map);
 		cond_resched_tasks_rcu_qs();
@@ -2835,16 +2837,6 @@ static void kfree_rcu_monitor(struct work_struct *work)
 		spin_unlock_irqrestore(&krcp->lock, flags);
 }
 
-/*
- * This version of kfree_call_rcu does not do batching of kfree_rcu() requests.
- * Used only by rcuperf torture test for comparison with kfree_rcu_batch().
- */
-void kfree_call_rcu_nobatch(struct rcu_head *head, rcu_callback_t func)
-{
-	__call_rcu(head, func);
-}
-EXPORT_SYMBOL_GPL(kfree_call_rcu_nobatch);
-
 /*
  * Queue a request for lazy invocation of kfree() after a grace period.
  *
@@ -2864,8 +2856,6 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	unsigned long flags;
 	struct kfree_rcu_cpu *krcp;
 
-	head->func = func;
-
 	local_irq_save(flags);	// For safely calling this_cpu_ptr().
 	krcp = this_cpu_ptr(&krc);
 	if (krcp->initialized)
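
[Editorial context, not part of the commit: the "For safely calling this_cpu_ptr()" comment in the remaining context is doing real work, since a preemption or migration between looking up the per-CPU structure and using it would leave krcp pointing at another CPU's state. The surviving fast path that every kfree_rcu() call now takes can be paraphrased as below. This is a simplified sketch, not the verbatim kernel source: the debug-objects double-free check and the monitor-timer arming are omitted, and the field names (krcp->head, krcp->lock, krcp->initialized) are taken from the diff context above. It also suggests why the deleted early head->func store was redundant, as the batched path assigns head->func under the per-CPU lock.]

	/* Sketch only: queue one rcu_head on this CPU's kfree_rcu() batch. */
	static void sketch_kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
	{
		unsigned long flags;
		struct kfree_rcu_cpu *krcp;

		local_irq_save(flags);		/* no migration: this_cpu_ptr() stays valid */
		krcp = this_cpu_ptr(&krc);
		if (krcp->initialized)
			spin_lock(&krcp->lock);	/* serialize with monitor/worker */

		head->func = func;		/* set under the lock */
		head->next = krcp->head;	/* push onto the per-CPU batch list */
		krcp->head = head;

		if (krcp->initialized)
			spin_unlock(&krcp->lock);
		local_irq_restore(flags);
	}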