Commit 0cc4bd8f authored by Ingo Molnar

Merge branch 'core/kprobes' into perf/core, to pick up fixes



Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 56ee04aa 31537cf8
kernel/kprobes.c +43 −24
@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+	struct optimized_kprobe *_op;
+
+	list_for_each_entry(_op, &unoptimizing_list, list) {
+		if (op == _op)
+			return true;
+	}
+
+	return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -633,18 +645,22 @@ static void optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* Check if it is already optimized. */
-	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+		if (optprobe_queued_unopt(op)) {
+			/* This is under unoptimizing. Just dequeue the probe */
+			list_del_init(&op->list);
+		}
 		return;
+	}
 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-	if (!list_empty(&op->list))
-		/* This is under unoptimizing. Just dequeue the probe */
-		list_del_init(&op->list);
-	else {
-		list_add(&op->list, &optimizing_list);
-		kick_kprobe_optimizer();
-	}
+	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+	if (WARN_ON_ONCE(!list_empty(&op->list)))
+		return;
+
+	list_add(&op->list, &optimizing_list);
+	kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 		return; /* This is not an optprobe nor optimized */
 
 	op = container_of(p, struct optimized_kprobe, kp);
-	if (!kprobe_optimized(p)) {
-		/* Unoptimized or unoptimizing case */
-		if (force && !list_empty(&op->list)) {
-			/*
-			 * Only if this is unoptimizing kprobe and forced,
-			 * forcibly unoptimize it. (No need to unoptimize
-			 * unoptimized kprobe again :)
-			 */
-			list_del_init(&op->list);
-			force_unoptimize_kprobe(op);
-		}
+	if (!kprobe_optimized(p))
 		return;
-	}
 
 	if (!list_empty(&op->list)) {
-		/* Dequeue from the optimization queue */
-		list_del_init(&op->list);
+		if (optprobe_queued_unopt(op)) {
+			/* Queued in unoptimizing queue */
+			if (force) {
+				/*
+				 * Forcibly unoptimize the kprobe here, and queue it
+				 * in the freeing list for release afterwards.
+				 */
+				force_unoptimize_kprobe(op);
+				list_move(&op->list, &freeing_list);
+			}
+		} else {
+			/* Dequeue from the optimizing queue */
+			list_del_init(&op->list);
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		}
 		return;
 	}
 
 	/* Optimized kprobe case */
-	if (force)
+	if (force) {
 		/* Forcibly update the code: this is a special case */
 		force_unoptimize_kprobe(op);
-	else {
+	} else {
 		list_add(&op->list, &unoptimizing_list);
 		kick_kprobe_optimizer();
 	}
kernel/trace/trace_syscalls.c +2 −1
@@ -274,7 +274,8 @@ static int __init syscall_enter_define_fields(struct trace_event_call *call)
 	struct syscall_trace_enter trace;
 	struct syscall_metadata *meta = call->data;
 	int offset = offsetof(typeof(trace), args);
-	int ret, i;
+	int ret = 0;
+	int i;
 
 	for (i = 0; i < meta->nb_args; i++) {
 		ret = trace_define_field(call, meta->types[i],