Commit a781f393 authored by Rafael J. Wysocki

Merge branches 'acpi-apei', 'acpi-processor', 'acpi-tables', 'acpi-pci' and 'acpi-soc'

* acpi-apei:
  ACPI / APEI: Release resources if gen_pool_add() fails
  ACPI / APEI: Get rid of NULL_UUID_LE constant

* acpi-processor:
  ACPI / processor: don't print errors for processorIDs == 0xff

* acpi-tables:
  ACPI: custom_method: fix memory leaks
  HMAT: Skip publishing target info for nodes with no online memory
  HMAT: Register attributes for memory hot add
  HMAT: Register memory-side cache after parsing

* acpi-pci:
  ACPI / PCI: fix acpi_pci_irq_enable() memory leak
  ACPI/PCI: Remove surplus parentheses from a return statement

* acpi-soc:
  ACPI / LPSS: Save/restore LPSS private registers also on Lynxpoint
drivers/acpi/acpi_lpss.c: +5 −3
@@ -219,12 +219,13 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata)
 }
 
 static const struct lpss_device_desc lpt_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
+			| LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 };
 
 static const struct lpss_device_desc lpt_i2c_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
 	.prv_offset = 0x800,
 };
 
@@ -236,7 +237,8 @@ static struct property_entry uart_properties[] = {
 };
 
 static const struct lpss_device_desc lpt_uart_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
+			| LPSS_SAVE_CTX,
 	.clk_con_id = "baudclk",
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
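
The LPSS_SAVE_CTX flag makes the driver snapshot the device's private register window (the region at prv_offset) before the device loses power and write it back on resume; this branch simply extends that behavior to the Lynxpoint descriptors above. A minimal sketch of the save/restore idea, not the driver's actual code; my_lpss_dev, MY_PRV_REG_COUNT and the helper names are hypothetical:

#define MY_PRV_REG_COUNT	9	/* hypothetical size of the private window */

struct my_lpss_dev {
	void __iomem *prv_base;			/* MMIO base + prv_offset */
	u32 prv_ctx[MY_PRV_REG_COUNT];		/* saved register values */
};

/* called before the device loses context (suspend / D3) */
static void my_lpss_save_ctx(struct my_lpss_dev *d)
{
	int i;

	for (i = 0; i < MY_PRV_REG_COUNT; i++)
		d->prv_ctx[i] = readl(d->prv_base + i * sizeof(u32));
}

/* called on resume, before the device is used again */
static void my_lpss_restore_ctx(struct my_lpss_dev *d)
{
	int i;

	for (i = 0; i < MY_PRV_REG_COUNT; i++)
		writel(d->prv_ctx[i], d->prv_base + i * sizeof(u32));
}
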
drivers/acpi/acpi_processor.c: +7 −3
@@ -279,6 +279,10 @@ static int acpi_processor_get_info(struct acpi_device *device)
 	}
 
 	if (acpi_duplicate_processor_id(pr->acpi_id)) {
-		dev_err(&device->dev,
-			"Failed to get unique processor _UID (0x%x)\n",
-			pr->acpi_id);
+		if (pr->acpi_id == 0xff)
+			dev_info_once(&device->dev,
+				"Entry not well-defined, consider updating BIOS\n");
+		else
+			dev_err(&device->dev,
+				"Failed to get unique processor _UID (0x%x)\n",
+				pr->acpi_id);
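
The point of the switch to dev_info_once() above: an acpi_id of 0xff is how some BIOSes mark processor entries that are not actually present, so a single informational hint is enough, while genuinely duplicate _UIDs keep the per-device dev_err(). A short sketch of the _once semantics (my_probe() is hypothetical): the helper keeps a static "already printed" flag at the call site, so the message appears once per boot no matter how many devices take this path.

static int my_probe(struct device *dev)
{
	/* printed on the first call only, even across many devices */
	dev_info_once(dev, "quirky firmware entry, continuing\n");
	return 0;
}
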
drivers/acpi/apei/ghes.c: +16 −3
@@ -153,6 +153,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
 int ghes_estatus_pool_init(int num_ghes)
 {
 	unsigned long addr, len;
+	int rc;
 
 	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
 	if (!ghes_estatus_pool)
@@ -164,7 +165,7 @@ int ghes_estatus_pool_init(int num_ghes)
 	ghes_estatus_pool_size_request = PAGE_ALIGN(len);
 	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
 	if (!addr)
-		return -ENOMEM;
+		goto err_pool_alloc;
 
 	/*
 	 * New allocation must be visible in all pgd before it can be found by
@@ -172,7 +173,19 @@ int ghes_estatus_pool_init(int num_ghes)
 	 */
 	vmalloc_sync_all();
 
-	return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
+	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
+	if (rc)
+		goto err_pool_add;
+
+	return 0;
+
+err_pool_add:
+	vfree((void *)addr);
+
+err_pool_alloc:
+	gen_pool_destroy(ghes_estatus_pool);
+
+	return -ENOMEM;
 }
 
 static int map_gen_v2(struct ghes *ghes)
@@ -483,7 +496,7 @@ static void ghes_do_proc(struct ghes *ghes,
 	int sev, sec_sev;
 	struct acpi_hest_generic_data *gdata;
 	guid_t *sec_type;
-	guid_t *fru_id = &NULL_UUID_LE;
+	const guid_t *fru_id = &guid_null;
 	char *fru_text = "";
 
 	sev = ghes_severity(estatus->error_severity);
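
Besides switching to the shared guid_null from <linux/uuid.h>, the ghes_estatus_pool_init() change above converts a leaky early return into the usual kernel goto-unwind: each failure releases exactly what was acquired before it, in reverse order. A condensed standalone sketch of that pattern, with hypothetical names (my_init(), my_register()):

extern int my_register(void *pool, void *backing);	/* may fail, like gen_pool_add() */

static int my_init(void)
{
	void *pool, *backing;
	int rc = -ENOMEM;

	pool = kzalloc(128, GFP_KERNEL);	/* stands in for gen_pool_create() */
	if (!pool)
		return rc;

	backing = vmalloc(PAGE_SIZE);		/* stands in for the vmalloc()ed area */
	if (!backing)
		goto err_free_pool;

	rc = my_register(pool, backing);
	if (rc)
		goto err_free_backing;

	return 0;

err_free_backing:
	vfree(backing);
err_free_pool:
	kfree(pool);
	return rc;
}
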
drivers/acpi/custom_method.c: +4 −1
@@ -49,8 +49,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 	if ((*ppos > max_size) ||
 	    (*ppos + count > max_size) ||
 	    (*ppos + count < count) ||
-	    (count > uncopied_bytes))
+	    (count > uncopied_bytes)) {
+		kfree(buf);
 		return -EINVAL;
+	}
 
 	if (copy_from_user(buf + (*ppos), user_buf, count)) {
 		kfree(buf);
@@ -70,6 +72,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
 	}
 
+	kfree(buf);
 	return count;
 }
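
The rule the two added kfree() calls enforce: once cm_write() has allocated its staging buffer, every return path must either free it or hand it off; the early -EINVAL return and the success path were leaking it. A condensed sketch of the resulting shape (my_write() and consume() are hypothetical):

extern void consume(const u8 *buf, size_t count);	/* hypothetical consumer */

static ssize_t my_write(struct file *file, const char __user *ubuf,
			size_t count, loff_t *ppos)
{
	ssize_t ret = count;
	u8 *buf = kmalloc(count, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, count)) {
		ret = -EFAULT;
		goto out_free;		/* returning directly here would leak buf */
	}

	consume(buf, count);
out_free:
	kfree(buf);			/* reached on success and on error */
	return ret;
}
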

drivers/acpi/hmat/hmat.c: +114 −29
@@ -14,14 +14,18 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/list_sort.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
 #include <linux/node.h>
 #include <linux/sysfs.h>
 
-static __initdata u8 hmat_revision;
+static u8 hmat_revision;
 
-static __initdata LIST_HEAD(targets);
-static __initdata LIST_HEAD(initiators);
-static __initdata LIST_HEAD(localities);
+static LIST_HEAD(targets);
+static LIST_HEAD(initiators);
+static LIST_HEAD(localities);
 
+static DEFINE_MUTEX(target_lock);
+
 /*
  * The defined enum order is used to prioritize attributes to break ties when
@@ -36,11 +40,19 @@ enum locality_types {
 
 static struct memory_locality *localities_types[4];
 
+struct target_cache {
+	struct list_head node;
+	struct node_cache_attrs cache_attrs;
+};
+
 struct memory_target {
 	struct list_head node;
 	unsigned int memory_pxm;
 	unsigned int processor_pxm;
 	struct node_hmem_attrs hmem_attrs;
+	struct list_head caches;
+	struct node_cache_attrs cache_attrs;
+	bool registered;
 };
 
 struct memory_initiator {
@@ -53,7 +65,7 @@ struct memory_locality {
 	struct acpi_hmat_locality *hmat_loc;
 };
 
-static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
+static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
 {
 	struct memory_initiator *initiator;
 
@@ -63,7 +75,7 @@ static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
 	return NULL;
 }
 
-static __init struct memory_target *find_mem_target(unsigned int mem_pxm)
+static struct memory_target *find_mem_target(unsigned int mem_pxm)
 {
 	struct memory_target *target;
 
@@ -96,9 +108,6 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
 {
 	struct memory_target *target;
 
-	if (pxm_to_node(mem_pxm) == NUMA_NO_NODE)
-		return;
-
 	target = find_mem_target(mem_pxm);
 	if (target)
 		return;
@@ -110,6 +119,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
 	target->memory_pxm = mem_pxm;
 	target->processor_pxm = PXM_INVAL;
 	list_add_tail(&target->node, &targets);
+	INIT_LIST_HEAD(&target->caches);
 }
 
 static __init const char *hmat_data_type(u8 type)
@@ -148,7 +158,7 @@ static __init const char *hmat_data_type_suffix(u8 type)
 	}
 }
 
-static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
+static u32 hmat_normalize(u16 entry, u64 base, u8 type)
 {
 	u32 value;
 
@@ -183,7 +193,7 @@ static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
 	return value;
 }
 
-static __init void hmat_update_target_access(struct memory_target *target,
+static void hmat_update_target_access(struct memory_target *target,
 					     u8 type, u32 value)
 {
 	switch (type) {
@@ -314,7 +324,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 				   const unsigned long end)
 {
 	struct acpi_hmat_cache *cache = (void *)header;
-	struct node_cache_attrs cache_attrs;
+	struct memory_target *target;
+	struct target_cache *tcache;
 	u32 attrs;
 
 	if (cache->header.length < sizeof(*cache)) {
@@ -328,37 +339,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
 		cache->memory_PD, cache->cache_size, attrs,
 		cache->number_of_SMBIOShandles);
 
-	cache_attrs.size = cache->cache_size;
-	cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
-	cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+	target = find_mem_target(cache->memory_PD);
+	if (!target)
+		return 0;
+
+	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
+	if (!tcache) {
+		pr_notice_once("Failed to allocate HMAT cache info\n");
+		return 0;
+	}
+
+	tcache->cache_attrs.size = cache->cache_size;
+	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
 
 	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
 	case ACPI_HMAT_CA_DIRECT_MAPPED:
-		cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
 		break;
 	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
-		cache_attrs.indexing = NODE_CACHE_INDEXED;
+		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
 		break;
 	case ACPI_HMAT_CA_NONE:
 	default:
-		cache_attrs.indexing = NODE_CACHE_OTHER;
+		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
 		break;
 	}
 
 	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
 	case ACPI_HMAT_CP_WB:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
 		break;
 	case ACPI_HMAT_CP_WT:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
 		break;
 	case ACPI_HMAT_CP_NONE:
 	default:
-		cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
 		break;
 	}
+	list_add_tail(&tcache->node, &target->caches);
 
-	node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
 	return 0;
 }
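
A side note on the mask-and-shift extractions kept above: since the ACPICA masks are compile-time constants, the same fields could be pulled out with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask. A sketch, assuming the same ACPI_HMAT_* mask macros; the my_* wrappers are hypothetical:

#include <linux/bitfield.h>

/* equivalent to (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4 */
static u32 my_cache_level(u32 attrs)
{
	return FIELD_GET(ACPI_HMAT_CACHE_LEVEL, attrs);
}

/* equivalent to (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16 */
static u32 my_cache_line_size(u32 attrs)
{
	return FIELD_GET(ACPI_HMAT_CACHE_LINE_SIZE, attrs);
}
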

@@ -435,7 +456,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
 	return 0;
 }
 
-static __init u32 hmat_initiator_perf(struct memory_target *target,
+static u32 hmat_initiator_perf(struct memory_target *target,
 			       struct memory_initiator *initiator,
 			       struct acpi_hmat_locality *hmat_loc)
 {
@@ -473,7 +494,7 @@ static __init u32 hmat_initiator_perf(struct memory_target *target,
 			      hmat_loc->data_type);
 }
 
-static __init bool hmat_update_best(u8 type, u32 value, u32 *best)
+static bool hmat_update_best(u8 type, u32 value, u32 *best)
 {
 	bool updated = false;
 
@@ -517,7 +538,7 @@ static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return ia->processor_pxm - ib->processor_pxm;
 }
 
-static __init void hmat_register_target_initiators(struct memory_target *target)
+static void hmat_register_target_initiators(struct memory_target *target)
 {
 	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
 	struct memory_initiator *initiator;
@@ -577,29 +598,89 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
 	}
 }
 
-static __init void hmat_register_target_perf(struct memory_target *target)
+static void hmat_register_target_cache(struct memory_target *target)
+{
+	unsigned mem_nid = pxm_to_node(target->memory_pxm);
+	struct target_cache *tcache;
+
+	list_for_each_entry(tcache, &target->caches, node)
+		node_add_cache(mem_nid, &tcache->cache_attrs);
+}
+
+static void hmat_register_target_perf(struct memory_target *target)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
 	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
 }
 
-static __init void hmat_register_targets(void)
+static void hmat_register_target(struct memory_target *target)
 {
-	struct memory_target *target;
+	int nid = pxm_to_node(target->memory_pxm);
 
-	list_for_each_entry(target, &targets, node) {
+	/*
+	 * Skip offline nodes. This can happen when memory
+	 * marked EFI_MEMORY_SP, "specific purpose", is applied
+	 * to all the memory in a proximity domain leading to
+	 * the node being marked offline / unplugged, or if
+	 * memory-only "hotplug" node is offline.
+	 */
+	if (nid == NUMA_NO_NODE || !node_online(nid))
+		return;
+
+	mutex_lock(&target_lock);
+	if (!target->registered) {
 		hmat_register_target_initiators(target);
+		hmat_register_target_cache(target);
 		hmat_register_target_perf(target);
+		target->registered = true;
 	}
+	mutex_unlock(&target_lock);
+}
+
+static void hmat_register_targets(void)
+{
+	struct memory_target *target;
+
+	list_for_each_entry(target, &targets, node)
+		hmat_register_target(target);
+}
+
+static int hmat_callback(struct notifier_block *self,
+			 unsigned long action, void *arg)
+{
+	struct memory_target *target;
+	struct memory_notify *mnb = arg;
+	int pxm, nid = mnb->status_change_nid;
+
+	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+		return NOTIFY_OK;
+
+	pxm = node_to_pxm(nid);
+	target = find_mem_target(pxm);
+	if (!target)
+		return NOTIFY_OK;
+
+	hmat_register_target(target);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block hmat_callback_nb = {
+	.notifier_call = hmat_callback,
+	.priority = 2,
+};
 
 static __init void hmat_free_structures(void)
 {
 	struct memory_target *target, *tnext;
 	struct memory_locality *loc, *lnext;
 	struct memory_initiator *initiator, *inext;
+	struct target_cache *tcache, *cnext;
 
 	list_for_each_entry_safe(target, tnext, &targets, node) {
+		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
+			list_del(&tcache->node);
+			kfree(tcache);
+		}
 		list_del(&target->node);
 		kfree(target);
 	}
@@ -658,6 +739,10 @@ static __init int hmat_init(void)
 		}
 	}
 	hmat_register_targets();
+
+	/* Keep the table and structures if the notifier may use them */
+	if (!register_hotmemory_notifier(&hmat_callback_nb))
+		return 0;
out_put:
 	hmat_free_structures();
 	acpi_put_table(tbl);
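
The last hunk is why the __initdata annotations had to go: when register_hotmemory_notifier() succeeds, hmat_init() now returns without freeing the parsed structures, so hmat_callback() can register a target later, once its memory-only node comes online. A stripped-down sketch of that deferred-registration flow, with hypothetical my_* names:

extern void my_register_node(int nid);		/* hypothetical */
extern void my_free_structures(void);		/* hypothetical */

static int my_mem_callback(struct notifier_block *nb,
			   unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;

	/* only newly-onlined nodes with a known node id are interesting */
	if (action == MEM_ONLINE && mnb->status_change_nid != NUMA_NO_NODE)
		my_register_node(mnb->status_change_nid);

	return NOTIFY_OK;
}

static struct notifier_block my_mem_nb = {
	.notifier_call = my_mem_callback,
};

static int __init my_init(void)
{
	/* ...parse firmware tables into long-lived lists first... */
	if (register_hotmemory_notifier(&my_mem_nb))
		my_free_structures();	/* notifier refused, nothing will use them */
	return 0;
}
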