Commit 57ab5f74 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'hwlock-v5.3' of git://github.com/andersson/remoteproc

Pull hwspinlock updates from Bjorn Andersson:
 "This contains support for the hardware spinlock on the TI K3 AM65x and
  J721E family of SoCs, support for using hwspinlocks from atomic
  context, and better error reporting when dealing with hardware that is
  disabled in DeviceTree"

* tag 'hwlock-v5.3' of git://github.com/andersson/remoteproc:
  hwspinlock: add the 'in_atomic' API
  hwspinlock: document the hwspinlock 'raw' API
  hwspinlock: stm32: implement the relax() ops
  hwspinlock: ignore disabled device
  hwspinlock/omap: Add a trace during probe
  hwspinlock/omap: Add support for TI K3 SoCs
  dt-bindings: hwlock: Update OMAP binding for TI K3 SoCs
parents fdcec004 360aa640
Loading
Loading
Loading
Loading
+20 −5
Original line number Diff line number Diff line
OMAP4+ HwSpinlock Driver
========================
TI HwSpinlock for OMAP and K3 based SoCs
=========================================

Required properties:
- compatible:		Should be "ti,omap4-hwspinlock" for
- compatible:		Should be one of the following,
			  "ti,omap4-hwspinlock" for
				OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs
			  "ti,am654-hwspinlock" for
				K3 AM65x and J721E SoCs
- reg:			Contains the hwspinlock module register address space
			(base address and length)
- ti,hwmods:		Name of the hwmod associated with the hwspinlock device
			(for OMAP architecture based SoCs only)
- #hwlock-cells:	Should be 1. The OMAP hwspinlock users will use a
			0-indexed relative hwlock number as the argument
			specifier value for requesting a specific hwspinlock
@@ -17,10 +21,21 @@ Please look at the generic hwlock binding for usage information for consumers,

Example:

/* OMAP4 */
1. OMAP4 SoCs
hwspinlock: spinlock@4a0f6000 {
	compatible = "ti,omap4-hwspinlock";
	reg = <0x4a0f6000 0x1000>;
	ti,hwmods = "spinlock";
	#hwlock-cells = <1>;
};

2. AM65x SoCs and J721E SoCs
&cbass_main {
	cbass_main_navss: interconnect0 {
		hwspinlock: spinlock@30e00000 {
			compatible = "ti,am654-hwspinlock";
			reg = <0x00 0x30e00000 0x00 0x1000>;
			#hwlock-cells = <1>;
		};
	};
};
+81 −0
Original line number Diff line number Diff line
@@ -134,6 +134,39 @@ notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).

The function will never sleep.

::

  int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int timeout);

Lock a previously-assigned hwspinlock with a timeout limit (specified in
msecs). If the hwspinlock is already taken, the function will busy loop
waiting for it to be released, but give up when the timeout elapses.

Caution: the user must protect the routine that takes the hardware lock with a
mutex or spinlock to avoid deadlock; doing so also allows the user to perform
time-consuming or sleepable operations while holding the hardware lock.

Returns 0 when successful and an appropriate error code otherwise (most
notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).

The function will never sleep.

::

  int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to);

Lock a previously-assigned hwspinlock with a timeout limit (specified in
msecs). If the hwspinlock is already taken, the function will busy loop
waiting for it to be released, but give up when the timeout elapses.

This function shall be called only from an atomic context and the timeout
value shall not exceed a few msecs.

Returns 0 when successful and an appropriate error code otherwise (most
notably -ETIMEDOUT if the hwspinlock is still busy after timeout msecs).

The function will never sleep.

::

  int hwspin_trylock(struct hwspinlock *hwlock);
@@ -184,6 +217,34 @@ Returns 0 on success and an appropriate error code otherwise (most
notably -EBUSY if the hwspinlock was already taken).
The function will never sleep.

::

  int hwspin_trylock_raw(struct hwspinlock *hwlock);

Attempt to lock a previously-assigned hwspinlock, but immediately fail if
it is already taken.

Caution: the user must protect the routine that takes the hardware lock with a
mutex or spinlock to avoid deadlock; doing so also allows the user to perform
time-consuming or sleepable operations while holding the hardware lock.

Returns 0 on success and an appropriate error code otherwise (most
notably -EBUSY if the hwspinlock was already taken).
The function will never sleep.

::

  int hwspin_trylock_in_atomic(struct hwspinlock *hwlock);

Attempt to lock a previously-assigned hwspinlock, but immediately fail if
it is already taken.

This function shall be called only from an atomic context.

Returns 0 on success and an appropriate error code otherwise (most
notably -EBUSY if the hwspinlock was already taken).
The function will never sleep.

::

  void hwspin_unlock(struct hwspinlock *hwlock);
@@ -220,6 +281,26 @@ Upon a successful return from this function, preemption is reenabled,
and the state of the local interrupts is restored to the state saved at
the given flags. This function will never sleep.

::

  void hwspin_unlock_raw(struct hwspinlock *hwlock);

Unlock a previously-locked hwspinlock.

The caller should **never** unlock an hwspinlock which is already unlocked.
Doing so is considered a bug (there is no protection against this).
This function will never sleep.

::

  void hwspin_unlock_in_atomic(struct hwspinlock *hwlock);

Unlock a previously-locked hwspinlock.

The caller should **never** unlock an hwspinlock which is already unlocked.
Doing so is considered a bug (there is no protection against this).
This function will never sleep.

::

  int hwspin_lock_get_id(struct hwspinlock *hwlock);
+1 −1
Original line number Diff line number Diff line
@@ -9,7 +9,7 @@ menuconfig HWSPINLOCK
config HWSPINLOCK_OMAP
	tristate "OMAP Hardware Spinlock device"
	depends on HWSPINLOCK
	depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX
	depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3
	help
	  Say y here to support the OMAP Hardware Spinlock device (firstly
	  introduced in OMAP4).
+35 −13
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -23,6 +24,9 @@

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

@@ -68,11 +72,11 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
 * users need to perform time-consuming or sleepable operations under the
 * hardware lock, they need a sleepable lock (like a mutex) to protect them.
 *
 * If the mode is not HWLOCK_RAW, upon a successful return from this function,
 * preemption (and possibly interrupts) is disabled, so the caller must not
 * sleep, and is advised to release the hwspinlock as soon as possible. This is
 * required in order to minimize remote cores polling on the hardware
 * interconnect.
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
@@ -112,6 +116,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
@@ -136,6 +141,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
@@ -179,11 +185,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
 * users need to perform time-consuming or sleepable operations under the
 * hardware lock, they need a sleepable lock (like a mutex) to protect them.
 *
 * If the mode is not HWLOCK_RAW, upon a successful return from this function,
 * preemption is disabled (and possibly local interrupts, too), so the caller
 * must not sleep, and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, hence shall not exceed a few msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
@@ -198,7 +207,7 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

@@ -212,8 +221,15 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
@@ -276,6 +292,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
@@ -333,6 +350,11 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
+4 −0
Original line number Diff line number Diff line
@@ -140,6 +140,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
	if (ret)
		goto reg_fail;

	dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
		num_locks);

	return 0;

reg_fail:
@@ -171,6 +174,7 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)

static const struct of_device_id omap_hwspinlock_of_match[] = {
	{ .compatible = "ti,omap4-hwspinlock", },
	{ .compatible = "ti,am654-hwspinlock", },
	{ /* end */ },
};
MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
Loading