Commit f9ad4a5f authored by Peter Zijlstra

lockdep: Remove lockdep_hardirq{s_enabled,_context}() argument

Now that the macros use per-cpu data, we no longer need the argument.
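
For illustration only (not part of the patch), a minimal sketch of the per-cpu
form and of a converted call site: the macro definitions are taken from the
irqflags.h hunk below, while example_irq_state() is a hypothetical helper
modelled on idtentry_enter_nmi():

	#include <linux/percpu.h>

	/* The lockdep IRQ state now lives in per-CPU variables ... */
	DECLARE_PER_CPU(int, hardirqs_enabled);
	DECLARE_PER_CPU(int, hardirq_context);

	/* ... so the query macros read this_cpu state and take no task argument. */
	# define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
	# define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))

	/*
	 * Converted caller (hypothetical):
	 *   was:  bool irq_state = lockdep_hardirqs_enabled(current);
	 *   now:  bool irq_state = lockdep_hardirqs_enabled();
	 */
	static inline bool example_irq_state(void)
	{
		return lockdep_hardirqs_enabled();
	}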

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200623083721.571835311@infradead.org
parent a21ee605
+1 −1
@@ -758,7 +758,7 @@ noinstr void idtentry_exit_user(struct pt_regs *regs)

noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
{
-	bool irq_state = lockdep_hardirqs_enabled(current);
+	bool irq_state = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
+4 −4
@@ -40,9 +40,9 @@ DECLARE_PER_CPU(int, hardirq_context);
  extern void trace_hardirqs_off_finish(void);
  extern void trace_hardirqs_on(void);
  extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context(p)	(this_cpu_read(hardirq_context))
+# define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))
# define lockdep_softirq_context(p)	((p)->softirq_context)
-# define lockdep_hardirqs_enabled(p)	(this_cpu_read(hardirqs_enabled))
+# define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
# define lockdep_hardirq_enter()			\
do {							\
@@ -109,9 +109,9 @@ do { \
# define trace_hardirqs_off_finish()		do { } while (0)
# define trace_hardirqs_on()		do { } while (0)
# define trace_hardirqs_off()		do { } while (0)
-# define lockdep_hardirq_context(p)	0
+# define lockdep_hardirq_context()	0
# define lockdep_softirq_context(p)	0
-# define lockdep_hardirqs_enabled(p)	0
+# define lockdep_hardirqs_enabled()	0
# define lockdep_softirqs_enabled(p)	0
# define lockdep_hardirq_enter()	do { } while (0)
# define lockdep_hardirq_threaded()	do { } while (0)
+1 −1
@@ -562,7 +562,7 @@ do { \

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
-			  lockdep_hardirq_context(current) &&		\
+			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)
+15 −15
@@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr,
	pr_warn("-----------------------------------------------------\n");
	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
-		lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
+		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
-		lockdep_hardirqs_enabled(curr),
+		lockdep_hardirqs_enabled(),
		curr->softirqs_enabled);
	print_lock(next);

@@ -3331,9 +3331,9 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,

	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, task_pid_nr(curr),
-		lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
+		lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
		lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
-		lockdep_hardirqs_enabled(curr),
+		lockdep_hardirqs_enabled(),
		lockdep_softirqs_enabled(curr));
	print_lock(this);

@@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
		return;

-	if (unlikely(lockdep_hardirqs_enabled(current))) {
+	if (unlikely(lockdep_hardirqs_enabled())) {
		/*
		 * Neither irq nor preemption are disabled here
		 * so this is racy by nature but losing one hit
@@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
	 * Can't allow enabling interrupts while in an interrupt handler,
	 * that's general bad form and such. Recursion, limited stack etc..
	 */
-	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current)))
+	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
		return;

	current->hardirq_chain_key = current->curr_chain_key;
@@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
		return;

-	if (lockdep_hardirqs_enabled(curr)) {
+	if (lockdep_hardirqs_enabled()) {
		/*
		 * Neither irq nor preemption are disabled here
		 * so this is racy by nature but losing one hit
@@ -3783,7 +3783,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

-	if (lockdep_hardirqs_enabled(curr)) {
+	if (lockdep_hardirqs_enabled()) {
		/*
		 * We have done an ON -> OFF transition:
		 */
@@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip)
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
-	if (lockdep_hardirqs_enabled(curr))
+	if (lockdep_hardirqs_enabled())
		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
	lockdep_recursion_finish();
}
@@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
-			if (lockdep_hardirq_context(curr))
+			if (lockdep_hardirq_context())
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
@@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
-			if (lockdep_hardirq_context(curr))
+			if (lockdep_hardirq_context())
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
@@ -3928,7 +3928,7 @@ lock_used:

static inline unsigned int task_irq_context(struct task_struct *task)
{
-	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) +
+	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
}

@@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr)
	 * Set appropriate wait type for the context; for IRQs we have to take
	 * into account force_irqthread as that is implied by PREEMPT_RT.
	 */
-	if (lockdep_hardirq_context(curr)) {
+	if (lockdep_hardirq_context()) {
		/*
		 * Check if force_irqthreads will run us threaded.
		 */
@@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags)
		return;

	if (irqs_disabled_flags(flags)) {
-		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) {
+		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
-		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) {
+		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}
+1 −1
@@ -230,7 +230,7 @@ static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

-	if (lockdep_hardirq_context(current)) {
+	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}