Commit 34e8c483 authored by Paul E. McKenney
Browse files

scftorture: Add smp_call_function() memory-ordering checks



This commit adds checks for memory misordering across calls to and
returns from smp_call_function() in the case where the caller waits.
Misordering results in a splat.

Note that in contrast to smp_call_function_single(), this code does not
test memory ordering into the handler in the no-wait case because none
of the handlers would be able to free the scf_check structure without
introducing heavy synchronization to work out which was last.

[ paulmck: s/GFP_KERNEL/GFP_ATOMIC/ per kernel test robot feedback. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 980205ee
Loading
Loading
Loading
Loading
+17 −8
Original line number Diff line number Diff line
@@ -297,11 +297,13 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
		cpus_read_lock();
	else
		preempt_disable();
	switch (scfsp->scfs_prim) {
	case SCF_PRIM_SINGLE:
	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
		if (WARN_ON_ONCE(!scfcp))
			atomic_inc(&n_alloc_errs);
	}
	switch (scfsp->scfs_prim) {
	case SCF_PRIM_SINGLE:
		cpu = torture_random(trsp) % nr_cpu_ids;
		if (scfsp->scfs_wait)
			scfp->n_single_wait++;
@@ -328,11 +330,6 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
		}
		break;
	case SCF_PRIM_MANY:
		if (scfsp->scfs_wait) {
			scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
			if (WARN_ON_ONCE(!scfcp))
				atomic_inc(&n_alloc_errs);
		}
		if (scfsp->scfs_wait)
			scfp->n_many_wait++;
		else
@@ -356,7 +353,19 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
			scfp->n_all_wait++;
		else
			scfp->n_all++;
		smp_call_function(scf_handler, NULL, scfsp->scfs_wait);
		if (scfcp) {
			scfcp->scfc_cpu = -1;
			scfcp->scfc_wait = true;
			scfcp->scfc_out = false;
			scfcp->scfc_in = true;
		}
		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
		if (scfcp) {
			if (WARN_ON_ONCE(!scfcp->scfc_out))
				atomic_inc(&n_mb_out_errs);  // Leak rather than trash!
			else
				kfree(scfcp);
		}
		break;
	}
	if (use_cpus_read_lock)