Commit c0286f56 authored by Greg Kroah-Hartman
Browse files

Merge tag 'thunderbolt-for-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into char-misc-next

Merge tag 'thunderbolt-for-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into char-misc-next

Mika writes:

thunderbolt: Changes for v5.2 merge window

This improves software connection manager on older Apple systems with
Thunderbolt 1 and 2 controller to support full PCIe daisy chains,
Display Port tunneling and P2P networking. There are also fixes for
potential NULL pointer dereferences at various places in the driver.

* tag 'thunderbolt-for-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (44 commits)
  thunderbolt: Make priority unsigned in struct tb_path
  thunderbolt: Start firmware on Titan Ridge Apple systems
  thunderbolt: Reword output of tb_dump_hop()
  thunderbolt: Make rest of the logging to happen at debug level
  thunderbolt: Make __TB_[SW|PORT]_PRINT take const parameters
  thunderbolt: Add support for XDomain connections
  thunderbolt: Make tb_switch_alloc() return ERR_PTR()
  thunderbolt: Add support for DMA tunnels
  thunderbolt: Add XDomain UUID exchange support
  thunderbolt: Run tb_xdp_handle_request() in system workqueue
  thunderbolt: Do not tear down tunnels when driver is unloaded
  thunderbolt: Add support for Display Port tunnels
  thunderbolt: Rework NFC credits handling
  thunderbolt: Generalize port finding routines to support all port types
  thunderbolt: Scan only valid NULL adapter ports in hotplug
  thunderbolt: Add support for full PCIe daisy chains
  thunderbolt: Discover preboot PCIe paths the boot firmware established
  thunderbolt: Deactivate all paths before restarting them
  thunderbolt: Extend tunnel creation to more than 2 adjacent switches
  thunderbolt: Add helper function to iterate from one port to another
  ...
parents 62909da8 37209783
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -1282,6 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
		tbnet_tear_down(net, true);
	}

	tb_unregister_protocol_handler(&net->handler);
	return 0;
}

@@ -1290,6 +1291,8 @@ static int __maybe_unused tbnet_resume(struct device *dev)
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	tb_register_protocol_handler(&net->handler);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
+2 −2
Original line number Diff line number Diff line
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o
thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
+68 −17
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@

#define CAP_OFFSET_MAX		0xff
#define VSE_CAP_OFFSET_MAX	0xffff
#define TMU_ACCESS_EN		BIT(20)

struct tb_cap_any {
	union {
@@ -22,28 +23,53 @@ struct tb_cap_any {
	};
} __packed;

/**
 * tb_port_find_cap() - Find port capability
 * @port: Port to find the capability for
 * @cap: Capability to look
 *
 * Returns offset to start of capability or %-ENOENT if no such
 * capability was found. Negative errno is returned if there was an
 * error.
 */
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
static int tb_port_enable_tmu(struct tb_port *port, bool enable)
{
	u32 offset;
	struct tb_switch *sw = port->sw;
	u32 value, offset;
	int ret;

	/*
	 * DP out adapters claim to implement TMU capability but in
	 * reality they do not so we hard code the adapter specific
	 * capability offset here.
	 * Legacy devices need to have TMU access enabled before port
	 * space can be fully accessed.
	 */
	if (port->config.type == TB_TYPE_DP_HDMI_OUT)
		offset = 0x39;
	if (tb_switch_is_lr(sw))
		offset = 0x26;
	else if (tb_switch_is_er(sw))
		offset = 0x2a;
	else
		return 0;

	ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (enable)
		value |= TMU_ACCESS_EN;
	else
		offset = 0x1;
		value &= ~TMU_ACCESS_EN;

	return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
}

static void tb_port_dummy_read(struct tb_port *port)
{
	/*
	 * When reading from next capability pointer location in port
	 * config space the read data is not cleared on LR. To avoid
	 * reading stale data on next read perform one dummy read after
	 * port capabilities are walked.
	 */
	if (tb_switch_is_lr(port->sw)) {
		u32 dummy;

		tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
	}
}

static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
	u32 offset = 1;

	do {
		struct tb_cap_any header;
@@ -62,6 +88,31 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
	return -ENOENT;
}

/**
 * tb_port_find_cap() - Find port capability
 * @port: Port to find the capability for
 * @cap: Capability to look
 *
 * Returns offset to start of capability or %-ENOENT if no such
 * capability was found. Negative errno is returned if there was an
 * error.
 */
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
	int ret;

	ret = tb_port_enable_tmu(port, true);
	if (ret)
		return ret;

	ret = __tb_port_find_cap(port, cap);

	tb_port_dummy_read(port);
	tb_port_enable_tmu(port, false);

	return ret;
}

static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
	int offset = sw->config.first_cap_offset;
+1 −1
Original line number Diff line number Diff line
@@ -720,7 +720,7 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
		.port = port,
		.error = error,
	};
	tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
	tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

+31 −34
Original line number Diff line number Diff line
@@ -42,7 +42,6 @@
#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6

/**
 * struct icm - Internal connection manager private data
@@ -469,10 +468,15 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
	pm_runtime_get_sync(&parent_sw->dev);

	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
	if (!sw)
	if (IS_ERR(sw))
		goto out;

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
	if (!sw->uuid) {
		tb_sw_warn(sw, "cannot allocate memory for switch\n");
		tb_switch_put(sw);
		goto out;
	}
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
@@ -709,7 +713,7 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}
@@ -739,7 +743,7 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}
@@ -793,9 +797,11 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
@@ -1138,9 +1144,11 @@ icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
@@ -1191,6 +1199,8 @@ static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		return parent;
	}

@@ -1560,7 +1570,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
	dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
@@ -1753,15 +1763,9 @@ static void icm_unplug_children(struct tb_switch *sw)
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain) {
		if (port->xdomain)
			port->xdomain->is_unplugged = true;
			continue;
		}
		if (!port->remote)
			continue;

		else if (tb_port_has_remote(port))
			icm_unplug_children(port->remote->sw);
	}
}
@@ -1773,18 +1777,10 @@ static void icm_free_unplugged_children(struct tb_switch *sw)
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			continue;
		}

		if (!port->remote)
			continue;

		} else if (tb_port_has_remote(port)) {
			if (port->remote->sw->is_unplugged) {
				tb_switch_remove(port->remote->sw);
				port->remote = NULL;
@@ -1793,6 +1789,7 @@ static void icm_free_unplugged_children(struct tb_switch *sw)
			}
		}
	}
}

static void icm_rescan_work(struct work_struct *work)
{
@@ -1853,8 +1850,8 @@ static int icm_start(struct tb *tb)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * NVM upgrade has not been tested on Apple systems and they
Loading