Commit 3527d3e9 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - another round of rq-clock handling debugging, robustization and
     fixes

   - PELT accounting improvements

   - CPU hotplug related ->cpus_allowed affinity handling fixes all
     around the tree

   - ... plus misc fixes, cleanups and updates"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  sched/x86: Update reschedule warning text
  crypto: N2 - Replace racy task affinity logic
  cpufreq/sparc-us2e: Replace racy task affinity logic
  cpufreq/sparc-us3: Replace racy task affinity logic
  cpufreq/sh: Replace racy task affinity logic
  cpufreq/ia64: Replace racy task affinity logic
  ACPI/processor: Replace racy task affinity logic
  ACPI/processor: Fix error handling in __acpi_processor_start()
  sparc/sysfs: Replace racy task affinity logic
  powerpc/smp: Replace open coded task affinity logic
  ia64/sn/hwperf: Replace racy task affinity logic
  ia64/salinfo: Replace racy task affinity logic
  workqueue: Provide work_on_cpu_safe()
  ia64/topology: Remove cpus_allowed manipulation
  sched/fair: Move the PELT constants into a generated header
  sched/fair: Increase PELT accuracy for small tasks
  sched/fair: Fix comments
  sched/Documentation: Add 'sched-pelt' tool
  sched/fair: Fix corner case in __accumulate_sum()
  sched/core: Remove 'task' parameter and rename tsk_restore_flags() to current_restore_flags()
  ...
parents 3711c94f 21173d0b
Loading
Loading
Loading
Loading
+108 −0
Original line number Diff line number Diff line
/*
 * The following program is used to generate the constants for
 * computing sched averages.
 *
 * ==============================================================
 *		C program (compile with -lm)
 * ==============================================================
 */

#include <math.h>
#include <stdio.h>

/* Decay half-life in periods, and the fixed-point shift of the tables. */
#define HALFLIFE 32
#define SHIFT 32

/* y = 0.5^(1/HALFLIFE): per-period decay factor, initialized in main(). */
double y;

/*
 * Emit the runnable_avg_yN_inv[] table: y^i scaled to 32-bit fixed point
 * (y^i * (2^32 - 1)) for i = 0 .. HALFLIFE-1.
 */
void calc_runnable_avg_yN_inv(void)
{
	int i;
	unsigned int x;

	printf("static const u32 runnable_avg_yN_inv[] = {");
	for (i = 0; i < HALFLIFE; i++) {
		/*
		 * Use 0xffffffffUL rather than (1UL<<32)-1: shifting a
		 * 32-bit unsigned long by 32 is undefined behaviour on
		 * ILP32/LLP64 hosts, so the original expression only
		 * worked by accident on LP64. The value is identical.
		 */
		x = 0xffffffffUL*pow(y, i);

		if (i % 6 == 0) printf("\n\t");
		printf("0x%8x, ", x);
	}
	printf("\n};\n\n");
}

/*
 * Running sum of the geometric series, in units of 1024 (the full
 * contribution of one period). Global because calc_accumulated_sum_32()
 * reads the final value after this function has run.
 */
int sum = 1024;

/*
 * Emit the runnable_avg_yN_sum[] table: the partial sums
 * 1024*(y + y^2 + ... + y^i) for i = 1 .. HALFLIFE, with a leading 0.
 *
 * NOTE: sum is an int on purpose — each assignment truncates the double
 * result back to integer, mirroring the fixed-point accumulation the
 * scheduler itself performs. Do not "fix" this to float arithmetic.
 */
void calc_runnable_avg_yN_sum(void)
{
	int i;

	printf("static const u32 runnable_avg_yN_sum[] = {\n\t    0,");
	for (i = 1; i <= HALFLIFE; i++) {
		if (i == 1)
			sum *= y;	/* first entry: 1024*y */
		else
			sum = sum*y + 1024*y;	/* decay prior sum, add new period */

		/* 11 entries per output line */
		if (i % 11 == 0)
			printf("\n\t");

		printf("%5d,", sum);
	}
	printf("\n};\n\n");
}

/* Number of periods needed for the sum to converge; -1 = not yet counted. */
int n = -1;
/* first period */
long max = 1024;

/*
 * Iterate max = max*y + 1024 in fixed point until the value stops
 * changing, yielding LOAD_AVG_MAX (the maximum possible load average
 * sum) and, in n, the period count at which it converges.
 */
void calc_converged_max(void)
{
	/*
	 * y as a 32.32 fixed-point multiplier. 0xffffffffUL avoids the
	 * undefined (1UL<<32) shift on hosts with 32-bit unsigned long;
	 * the computed value is unchanged.
	 */
	long last = 0, y_inv = 0xffffffffUL*y;

	for (; ; n++) {
		if (n > -1)
			max = ((max*y_inv)>>SHIFT) + 1024;
			/*
			 * This is the same as:
			 * max = max*y + 1024;
			 */

		if (last == max)
			break;

		last = max;
	}
	/* Loop overshoots by one period before detecting convergence. */
	n--;
	printf("#define LOAD_AVG_PERIOD %d\n", HALFLIFE);
	printf("#define LOAD_AVG_MAX %ld\n", max);
//	printf("#define LOAD_AVG_MAX_N %d\n\n", n);
}

/*
 * Emit the __accumulated_sum_N32[] table: the converged sum halved once
 * per additional 32-period block (each HALFLIFE of decay halves the
 * contribution), with sum added back in for the newest block.
 * Depends on sum and n computed by the earlier calc_* functions.
 */
void calc_accumulated_sum_32(void)
{
	int acc = sum;
	int entries = n/HALFLIFE + 1;	/* number of full HALFLIFE blocks */
	int idx;

	printf("static const u32 __accumulated_sum_N32[] = {\n\t     0,");
	for (idx = 1; idx <= entries; idx++) {
		if (idx > 1)
			acc = acc/2 + sum;

		/* six entries per output line */
		if (idx % 6 == 0)
			printf("\n\t");

		printf("%6d,", acc);
	}
	printf("\n};\n\n");
}

/*
 * Generate the sched-pelt constants header on stdout: the y^i inverse
 * table and the LOAD_AVG_PERIOD/LOAD_AVG_MAX defines. The two
 * commented-out tables are kept for reference but not emitted.
 *
 * C11 5.1.2.2.1 requires main to return int; "void main" is
 * non-standard, so declare it properly and return 0.
 */
int main(void)
{
	printf("/* Generated by Documentation/scheduler/sched-pelt; do not modify. */\n\n");

	/* Decay factor such that y^HALFLIFE == 0.5 */
	y = pow(0.5, 1/(double)HALFLIFE);

	calc_runnable_avg_yN_inv();
//	calc_runnable_avg_yN_sum();
	calc_converged_max();
//	calc_accumulated_sum_32();

	return 0;
}
+12 −19
Original line number Diff line number Diff line
@@ -179,14 +179,14 @@ struct salinfo_platform_oemdata_parms {
	const u8 *efi_guid;
	u8 **oemdata;
	u64 *oemdata_size;
	int ret;
};

static void
static long
salinfo_platform_oemdata_cpu(void *context)
{
	struct salinfo_platform_oemdata_parms *parms = context;
	parms->ret = salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);

	return salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
}

static void
@@ -380,16 +380,7 @@ salinfo_log_release(struct inode *inode, struct file *file)
	return 0;
}

static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpumask_t save_cpus_allowed = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
	(*fn)(arg);
	set_cpus_allowed_ptr(current, &save_cpus_allowed);
}

static void
static long
salinfo_log_read_cpu(void *context)
{
	struct salinfo_data *data = context;
@@ -399,6 +390,7 @@ salinfo_log_read_cpu(void *context)
	/* Clear corrected errors as they are read from SAL */
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(data->type);
	return 0;
}

static void
@@ -430,7 +422,7 @@ retry:
	spin_unlock_irqrestore(&data_saved_lock, flags);

	if (!data->saved_num)
		call_on_cpu(cpu, salinfo_log_read_cpu, data);
		work_on_cpu_safe(cpu, salinfo_log_read_cpu, data);
	if (!data->log_size) {
		data->state = STATE_NO_DATA;
		cpumask_clear_cpu(cpu, &data->cpu_event);
@@ -459,11 +451,13 @@ salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *p
	return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
}

static void
static long
salinfo_log_clear_cpu(void *context)
{
	struct salinfo_data *data = context;

	ia64_sal_clear_state_info(data->type);
	return 0;
}

static int
@@ -486,7 +480,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
	rh = (sal_log_record_header_t *)(data->log_buffer);
	/* Corrected errors have already been cleared from SAL */
	if (rh->severity != sal_log_severity_corrected)
		call_on_cpu(cpu, salinfo_log_clear_cpu, data);
		work_on_cpu_safe(cpu, salinfo_log_clear_cpu, data);
	/* clearing a record may make a new record visible */
	salinfo_log_new_read(cpu, data);
	if (data->state == STATE_LOG_RECORD) {
@@ -531,9 +525,8 @@ salinfo_log_write(struct file *file, const char __user *buffer, size_t count, lo
				.oemdata = &data->oemdata,
				.oemdata_size = &data->oemdata_size
			};
			call_on_cpu(cpu, salinfo_platform_oemdata_cpu, &parms);
			if (parms.ret)
				count = parms.ret;
			count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu,
						 &parms);
		} else
			data->oemdata_size = 0;
	} else
+0 −6
Original line number Diff line number Diff line
@@ -355,18 +355,12 @@ static int cache_add_dev(unsigned int cpu)
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;
	cpumask_t oldmask;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
	if (unlikely(retval))
		return retval;

	retval = cpu_cache_sysfs_init(cpu);
	set_cpus_allowed_ptr(current, &oldmask);
	if (unlikely(retval < 0))
		return retval;

+9 −8
Original line number Diff line number Diff line
@@ -598,12 +598,17 @@ static void sn_hwperf_call_sal(void *info)
	op_info->ret = r;
}

static long sn_hwperf_call_sal_work(void *info)
{
	sn_hwperf_call_sal(info);
	return 0;
}

static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
{
	u32 cpu;
	u32 use_ipi;
	int r = 0;
	cpumask_t save_allowed;
	
	cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
	use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
@@ -629,13 +634,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
			/* use an interprocessor interrupt to call SAL */
			smp_call_function_single(cpu, sn_hwperf_call_sal,
				op_info, 1);
		}
		else {
			/* migrate the task before calling SAL */ 
			save_allowed = current->cpus_allowed;
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
			sn_hwperf_call_sal(op_info);
			set_cpus_allowed_ptr(current, &save_allowed);
		} else {
			/* Call on the target CPU */
			work_on_cpu_safe(cpu, sn_hwperf_call_sal_work, op_info);
		}
	}
	r = op_info->ret;
+11 −15
Original line number Diff line number Diff line
@@ -787,24 +787,21 @@ static struct sched_domain_topology_level powerpc_topology[] = {
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
	cpumask_var_t old_mask;
	smp_ops->setup_cpu(boot_cpuid);
	return 0;
}

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * se we pin us down to CPU 0 for a short while
void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We want the setup_cpu() here to be called on the boot CPU, but
	 * init might run on any CPU, so make sure it's invoked on the boot
	 * CPU.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, &current->cpus_allowed);
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
	
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);
		work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();
@@ -812,7 +809,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);

}

#ifdef CONFIG_HOTPLUG_CPU
Loading