Commit fc564e09 authored by David S. Miller's avatar David S. Miller
Browse files

Merge tag 'linux-can-fixes-for-5.4-20191105' of...

Merge tag 'linux-can-fixes-for-5.4-20191105' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can



Marc Kleine-Budde says:

====================
pull-request: can 2019-11-05

this is a pull request of 33 patches for net/master.

In the first patch, Wen Yang adds a missing of_node_put() to the CAN device
infrastructure.

Navid Emamdoost's patch for the gs_usb driver fixes a memory leak in the
gs_can_open() error path.

Johan Hovold provides two patches, one for the mcba_usb, the other for the
usb_8dev driver. Both fix a use-after-free after USB-disconnect.

Joakim Zhang's patch improves the flexcan driver, the ECC mechanism is now
completely disabled instead of masking the interrupts.

The next three patches all target the peak_usb driver. Stephane Grosjean's
patch fixes a potential out-of-sync while decoding packets, Johan Hovold's
patch fixes a slab info leak, Jeroen Hofstee's patch adds missing reporting of
bus off recovery events.

Followed by three patches for the c_can driver. Kurt Van Dijck's patch fixes
detection of potential missing status IRQs, Jeroen Hofstee's patches add a chip
reset on open and add missing reporting of bus off recovery events.

Appana Durga Kedareswara rao's patch for the xilinx driver fixes the flags
field initialization for axi CAN.

The next seven patches target the rx-offload helper, they are by me and Jeroen
Hofstee. The error handling in case of a queue overflow is fixed, removing a
memory leak. Further, the error handling in case of queue overflow and skb OOM
is cleaned up.

The next two patches are by me and target the flexcan and ti_hecc driver. In
case of an error during can_rx_offload_queue_sorted() the error counters in the
drivers are incremented.

Jeroen Hofstee provides six patches for the ti_hecc driver, which properly stop
the device in ifdown, improve the rx-offload support (which hit mainline in
v5.4-rc1), and add missing FIFO overflow and state change reporting.

The following four patches target the j1939 protocol. Colin Ian King's patch
fixes a memory leak in the j1939_sk_errqueue() handling. Three patches by
Oleksij Rempel fix a memory leak on socket release and fix the EOMA packet in
the transport protocol.

Timo Schlüßler's patch fixes a potential race condition in the mcp251x driver
after suspend.

The last patch is by Yegor Yefremov and updates the SPDX-License-Identifier to
v3.0.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 3d1e5039 3926a3a0
Loading
Loading
Loading
Loading
+64 −7
Original line number Diff line number Diff line
@@ -52,6 +52,7 @@
#define CONTROL_EX_PDR		BIT(8)

/* control register */
#define CONTROL_SWR		BIT(15)
#define CONTROL_TEST		BIT(7)
#define CONTROL_CCE		BIT(6)
#define CONTROL_DISABLE_AR	BIT(5)
@@ -97,6 +98,9 @@
#define BTR_TSEG2_SHIFT		12
#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)

/* interrupt register */
#define INT_STS_PENDING		0x8000

/* brp extension register */
#define BRP_EXT_BRPE_MASK	0x0f
#define BRP_EXT_BRPE_SHIFT	0
@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
				   IF_MCONT_RCV_EOB);
}

static int c_can_software_reset(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int retry = 0;

	if (priv->type != BOSCH_D_CAN)
		return 0;

	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
	while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
		msleep(20);
		if (retry++ > 100) {
			netdev_err(dev, "CCTRL: software reset failed\n");
			return -EIO;
		}
	}

	return 0;
}

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
static int c_can_chip_config(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	err = c_can_software_reset(dev);
	if (err)
		return err;

	/* enable automatic retransmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
@@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev,
	struct can_berr_counter bec;

	switch (error_type) {
	case C_CAN_NO_ERROR:
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		break;
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
@@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev,
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_NO_ERROR:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_ACTIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
@@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
	u16 curr, last = priv->last_status;
	int work_done = 0;

	/* Only read the status register if a status interrupt was pending */
	if (atomic_xchg(&priv->sie_pending, 0)) {
		priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
		/* Ack status on C_CAN. D_CAN is self clearing */
		if (priv->type != BOSCH_D_CAN)
			priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
	} else {
		/* no change detected ... */
		curr = last;
	}

	/* handle state changes */
	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota)
	/* handle bus recovery events */
	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
		netdev_dbg(dev, "left bus off state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
	}

	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
		netdev_dbg(dev, "left error passive state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
	}

	if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
		netdev_dbg(dev, "left error warning state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR);
	}

	/* handle lec errors on the bus */
@@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct c_can_priv *priv = netdev_priv(dev);
	int reg_int;

	if (!priv->read_reg(priv, C_CAN_INT_REG))
	reg_int = priv->read_reg(priv, C_CAN_INT_REG);
	if (!reg_int)
		return IRQ_NONE;

	/* save for later use */
	if (reg_int & INT_STS_PENDING)
		atomic_set(&priv->sie_pending, 1);

	/* disable all interrupts and schedule the NAPI */
	c_can_irq_control(priv, false);
	napi_schedule(&priv->napi);
+1 −0
Original line number Diff line number Diff line
@@ -198,6 +198,7 @@ struct c_can_priv {
	struct net_device *dev;
	struct device *device;
	atomic_t tx_active;
	atomic_t sie_pending;
	unsigned long tx_dir;
	int last_status;
	u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
+1 −0
Original line number Diff line number Diff line
@@ -848,6 +848,7 @@ void of_can_transceiver(struct net_device *dev)
		return;

	ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
	of_node_put(dn);
	if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
		netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
}
+9 −2
Original line number Diff line number Diff line
@@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
	struct can_frame *cf;
	bool rx_errors = false, tx_errors = false;
	u32 timestamp;
	int err;

	timestamp = priv->read(&regs->timer) << 16;

@@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
	if (tx_errors)
		dev->stats.tx_errors++;

	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		dev->stats.rx_fifo_errors++;
}

static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
@@ -738,6 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
	int flt;
	struct can_berr_counter bec;
	u32 timestamp;
	int err;

	timestamp = priv->read(&regs->timer) << 16;

@@ -769,7 +773,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
	if (unlikely(new_state == CAN_STATE_BUS_OFF))
		can_bus_off(dev);

	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		dev->stats.rx_fifo_errors++;
}

static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -1188,6 +1194,7 @@ static int flexcan_chip_start(struct net_device *dev)
		reg_mecr = priv->read(&regs->mecr);
		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
		priv->write(reg_mecr, &regs->mecr);
		reg_mecr |= FLEXCAN_MECR_ECCDIS;
		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
			      FLEXCAN_MECR_FANCEI_MSK);
		priv->write(reg_mecr, &regs->mecr);
+85 −17
Original line number Diff line number Diff line
@@ -107,37 +107,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
	return cb_b->timestamp - cb_a->timestamp;
}

static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents is discarded by reading it into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL;
	struct sk_buff *skb = NULL, *skb_error = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	/* If queue is full or skb not available, read to discard mailbox */
	if (likely(skb_queue_len(&offload->skb_queue) <=
		   offload->skb_queue_len_max))
	if (likely(skb_queue_len(&offload->skb_queue) <
		   offload->skb_queue_len_max)) {
		skb = alloc_can_skb(offload->dev, &cf);
		if (unlikely(!skb))
			skb_error = ERR_PTR(-ENOMEM);	/* skb alloc failed */
	} else {
		skb_error = ERR_PTR(-ENOBUFS);		/* skb_queue is full */
	}

	if (!skb) {
	/* If queue is full or skb not available, drop by reading into
	 * overflow buffer.
	 */
	if (unlikely(skb_error)) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);
		if (ret)
			offload->dev->stats.rx_dropped++;

		/* Mailbox was empty. */
		if (unlikely(!ret))
			return NULL;

		/* Mailbox has been read and we're dropping it or
		 * there was a problem reading the mailbox.
		 *
		 * Increment error counters in any case.
		 */
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		/* There was a problem reading the mailbox, propagate
		 * error value.
		 */
		if (unlikely(ret < 0))
			return ERR_PTR(ret);

		return skb_error;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
	if (!ret) {

	/* Mailbox was empty. */
	if (unlikely(!ret)) {
		kfree_skb(skb);
		return NULL;
	}

	/* There was a problem reading the mailbox, propagate error value. */
	if (unlikely(ret < 0)) {
		kfree_skb(skb);

		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return ERR_PTR(ret);
	}

	/* Mailbox was read. */
	return skb;
}

@@ -157,8 +215,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (!skb)
			break;
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}
@@ -188,7 +246,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
	struct sk_buff *skb;
	int received = 0;

	while ((skb = can_rx_offload_offload_one(offload, 0))) {
	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}
@@ -207,8 +271,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOMEM;
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;
@@ -250,8 +316,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOMEM;
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);
Loading