Commit 8dd90265 authored by Linus Torvalds

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched, doc: Update sched-design-CFS.txt
  sched: Remove unused 'rq' variable and cpu_rq() call from alloc_fair_sched_group()
  sched.h: Fix a typo ("its")
  sched: Fix yield_to kernel-doc
parents 2a20b02c 1232d613
+1 −6
@@ -164,7 +164,7 @@ This is the (partial) list of the hooks:
   It puts the scheduling entity (task) into the red-black tree and
   increments the nr_running variable.

- - dequeue_tree(...)
+ - dequeue_task(...)

   When a task is no longer runnable, this function is called to keep the
   corresponding scheduling entity out of the red-black tree.  It decrements
@@ -195,11 +195,6 @@ This is the (partial) list of the hooks:
   This function is mostly called from time tick functions; it might lead to
   process switch.  This drives the running preemption.

- - task_new(...)
-
-   The core scheduler gives the scheduling module an opportunity to manage new
-   task startup.  The CFS scheduling module uses it for group scheduling, while
-   the scheduling module for a real-time task does not use it.
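
For orientation: the hooks this document describes are function pointers in
struct sched_class, and each scheduling class (fair/CFS, real-time, idle,
stop) supplies its own table.  A trimmed, illustrative sketch of that
structure as it looks in kernels of this era follows; members and signatures
are abridged from include/linux/sched.h, so treat it as a sketch rather than
the full definition:

	struct rq;			/* per-CPU runqueue */
	struct task_struct;

	struct sched_class {
		const struct sched_class *next;

		/* Make a task runnable / remove it from the runnable set.
		 * CFS backs these with red-black tree insertion/removal
		 * plus the nr_running update described above. */
		void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
		void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);

		void (*yield_task)(struct rq *rq);

		/* Called from the periodic timer tick; may end up forcing
		 * a process switch, which drives running preemption. */
		void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);

		/* ... many further members elided ... */
	};

Note there is no .task_new member: it was superseded by .task_fork during
earlier CFS reworks, which is also why the stale "/* no .task_new */"
comments are dropped from the idle and stop classes further down in this
merge.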
+1 −1
@@ -517,7 +517,7 @@ struct thread_group_cputimer {
struct autogroup;

/*
 * NOTE! "signal_struct" does not have it's own
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
+2 −3
@@ -5473,6 +5473,8 @@ EXPORT_SYMBOL(yield);
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
@@ -8449,7 +8451,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
-	struct rq *rq;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8462,8 +8463,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
	tg->shares = NICE_0_LOAD;

	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
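
As a usage note for the yield_to() kernel-doc completed above: a minimal,
hypothetical caller sketch (boost_sibling is an invented name for
illustration; in kernels of this era yield_to() is declared in
include/linux/sched.h and returns a bool saying whether the yield actually
happened):

	#include <linux/sched.h>

	/* Hand our CPU to a specific thread in our thread group, letting
	 * it preempt whatever currently runs on its CPU.  Per the
	 * kernel-doc, the caller must ensure 'target' cannot go away
	 * underneath us, e.g. by holding a task reference. */
	static bool boost_sibling(struct task_struct *target)
	{
		return yield_to(target, true);	/* preempt allowed */
	}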
+0 −2
@@ -94,6 +94,4 @@ static const struct sched_class idle_sched_class = {

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
-
-	/* no .task_new for idle tasks */
};
+0 −2
@@ -102,6 +102,4 @@ static const struct sched_class stop_sched_class = {

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
-
-	/* no .task_new for stop tasks */
};