Commit 1cd7cccb authored by Daniel Leung's avatar Daniel Leung Committed by Anas Nashif
Browse files

kernel: mem_domain: arch_mem_domain functions to return errors



This changes the arch_mem_domain_*() functions to return errors.
This allows the callers a chance to recover if needed.

Note that:
() For assertions where it can bail out early without side
   effects, these are converted to CHECKIF(). (Usually means
   that updating of page tables or translation tables has not
   been started yet.)
() Other assertions are retained to signal fatal errors during
   development.
() The additional CHECKIF() checks are structured to bail out
   early when possible. If an error is encountered inside a loop,
   the loop still continues, so behavior with assertions disabled
   matches the behavior before this change.

Signed-off-by: default avatarDaniel Leung <daniel.leung@intel.com>
parent bb595a85
Loading
Loading
Loading
Loading
+94 −38
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@
#include <arch/arm64/mm.h>
#include <linker/linker-defs.h>
#include <logging/log.h>
#include <sys/check.h>

LOG_MODULE_REGISTER(mpu, CONFIG_MPU_LOG_LEVEL);

@@ -247,8 +248,13 @@ static int dynamic_areas_init(uintptr_t start, size_t size)
static int dup_dynamic_regions(struct dynamic_region_info *dst, int len)
{
	size_t i;
	int ret = sys_dyn_regions_num;

	__ASSERT(sys_dyn_regions_num < len, "system dynamic region nums too large.");
	CHECKIF(!(sys_dyn_regions_num < len)) {
		LOG_ERR("system dynamic region nums too large.");
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < sys_dyn_regions_num; i++) {
		dst[i] = sys_dyn_regions[i];
@@ -257,7 +263,8 @@ static int dup_dynamic_regions(struct dynamic_region_info *dst, int len)
		dst[i].index = -1;
	}

	return sys_dyn_regions_num;
out:
	return ret;
}

static void set_region(struct arm_mpu_region *region,
@@ -283,7 +290,7 @@ static int get_underlying_region_idx(struct dynamic_region_info *dyn_regions,
	return -1;
}

static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
static int insert_region(struct dynamic_region_info *dyn_regions,
			 uint8_t region_idx, uint8_t region_num,
			 uintptr_t start, size_t size,
			 struct arm_mpu_region_attr *attr)
@@ -297,15 +304,23 @@ static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
	uint64_t u_base;
	uint64_t u_limit;
	struct arm_mpu_region_attr *u_attr;
	int ret = 0;


	__ASSERT(region_idx < region_num,
		 "Out-of-bounds error for dynamic region map. region idx: %d, region num: %d",
	CHECKIF(!(region_idx < region_num)) {
		LOG_ERR("Out-of-bounds error for dynamic region map. "
			"region idx: %d, region num: %d",
			region_idx, region_num);
		ret = -EINVAL;
		goto out;
	}

	u_idx = get_underlying_region_idx(dyn_regions, region_idx, base, limit);

	__ASSERT(u_idx >= 0, "Invalid underlying region index");
	CHECKIF(!(u_idx >= 0)) {
		LOG_ERR("Invalid underlying region index");
		ret = -ENOENT;
		goto out;
	}

	/* Get underlying region range and attr */
	u_region = &(dyn_regions[u_idx].region_conf);
@@ -339,13 +354,18 @@ static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
		region_idx++;
	}

	return region_idx;
	ret = region_idx;

out:
	return ret;
}

static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
					uint8_t region_num)
{
	int reg_avail_idx = static_regions_num;
	int ret = 0;

	/*
	 * Clean the dynamic regions
	 */
@@ -371,16 +391,20 @@ static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
		if (region_idx < 0) {
			region_idx = reg_avail_idx++;
		}
		__ASSERT(region_idx < get_num_regions(),
			 "Out-of-bounds error for mpu regions. region idx: %d, total mpu regions: %d",
		CHECKIF(!(region_idx < get_num_regions())) {
			LOG_ERR("Out-of-bounds error for mpu regions. "
				"region idx: %d, total mpu regions: %d",
				region_idx, get_num_regions());
			ret = -ENOENT;
		}

		region_init(region_idx, &(dyn_regions[i].region_conf));
	}

	return 0;
	return ret;
}

static void configure_dynamic_mpu_regions(struct k_thread *thread)
static int configure_dynamic_mpu_regions(struct k_thread *thread)
{
	/*
	 * Allocate double space for dyn_regions. Because when split
@@ -390,8 +414,15 @@ static void configure_dynamic_mpu_regions(struct k_thread *thread)
	struct dynamic_region_info dyn_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM * 2];
	const uint8_t max_region_num = ARRAY_SIZE(dyn_regions);
	uint8_t region_num;
	int ret = 0, ret2;

	ret2 = dup_dynamic_regions(dyn_regions, max_region_num);
	CHECKIF(ret2 < 0) {
		ret = ret2;
		goto out;
	}

	region_num = dup_dynamic_regions(dyn_regions, max_region_num);
	region_num = (uint8_t)ret2;

	struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;

@@ -409,29 +440,42 @@ static void configure_dynamic_mpu_regions(struct k_thread *thread)
			}
			LOG_DBG("set region 0x%lx 0x%lx",
				partition->start, partition->size);
			region_num = insert_region(dyn_regions,
			ret2 = insert_region(dyn_regions,
					     region_num,
					     max_region_num,
					     partition->start,
					     partition->size,
					     &partition->attr);
			CHECKIF(ret2 != 0) {
				ret = ret2;
			}

			region_num = (uint8_t)ret2;
		}
	}

	LOG_DBG("configure user thread %p's context", thread);
	if ((thread->base.user_options & K_USER) != 0) {
		/* K_USER thread stack needs a region */
		region_num = insert_region(dyn_regions,
		ret2 = insert_region(dyn_regions,
				     region_num,
				     max_region_num,
				     thread->stack_info.start,
				     thread->stack_info.size,
				     &K_MEM_PARTITION_P_RW_U_RW);
		CHECKIF(ret2 != 0) {
			ret = ret2;
		}

		region_num = (uint8_t)ret2;
	}

	arm_core_mpu_disable();
	flush_dynamic_regions_to_mpu(dyn_regions, region_num);
	ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
	arm_core_mpu_enable();

out:
	return ret;
}

int arch_mem_domain_max_partitions_get(void)
@@ -445,22 +489,28 @@ int arch_mem_domain_max_partitions_get(void)
	return max_parts;
}

void arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id)
int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id)
{
	ARG_UNUSED(domain);
	ARG_UNUSED(partition_id);

	return 0;
}

void arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id)
int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id)
{
	ARG_UNUSED(domain);
	ARG_UNUSED(partition_id);

	return 0;
}

void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
	int ret = 0;

	if (thread == _current) {
		configure_dynamic_mpu_regions(thread);
		ret = configure_dynamic_mpu_regions(thread);
	}
#ifdef CONFIG_SMP
	else {
@@ -468,12 +518,16 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
		z_arm64_mem_cfg_ipi();
	}
#endif

	return ret;
}

void arch_mem_domain_thread_remove(struct k_thread *thread)
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	int ret = 0;

	if (thread == _current) {
		configure_dynamic_mpu_regions(thread);
		ret = configure_dynamic_mpu_regions(thread);
	}
#ifdef CONFIG_SMP
	else {
@@ -481,6 +535,8 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
		z_arm64_mem_cfg_ipi();
	}
#endif

	return ret;
}

void z_arm64_thread_mem_domains_init(struct k_thread *thread)
+32 −24
Original line number Diff line number Diff line
@@ -1006,7 +1006,7 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
	return 0;
}

static void private_map(struct arm_mmu_ptables *ptables, const char *name,
static int private_map(struct arm_mmu_ptables *ptables, const char *name,
		       uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
{
	int ret;
@@ -1018,9 +1018,11 @@ static void private_map(struct arm_mmu_ptables *ptables, const char *name,
	if (is_ptable_active(ptables)) {
		invalidate_tlb_all();
	}

	return ret;
}

static void reset_map(struct arm_mmu_ptables *ptables, const char *name,
static int reset_map(struct arm_mmu_ptables *ptables, const char *name,
		     uintptr_t addr, size_t size)
{
	int ret;
@@ -1030,40 +1032,44 @@ static void reset_map(struct arm_mmu_ptables *ptables, const char *name,
	if (is_ptable_active(ptables)) {
		invalidate_tlb_all();
	}

	return ret;
}

void arch_mem_domain_partition_add(struct k_mem_domain *domain,
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id)
{
	struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables;
	struct k_mem_partition *ptn = &domain->partitions[partition_id];

	private_map(domain_ptables, "partition", ptn->start, ptn->start,
	return private_map(domain_ptables, "partition", ptn->start, ptn->start,
			   ptn->size, ptn->attr.attrs | MT_NORMAL);
}

void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id)
{
	struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables;
	struct k_mem_partition *ptn = &domain->partitions[partition_id];

	reset_map(domain_ptables, "partition removal", ptn->start, ptn->size);
	return reset_map(domain_ptables, "partition removal",
			 ptn->start, ptn->size);
}

static void map_thread_stack(struct k_thread *thread,
static int map_thread_stack(struct k_thread *thread,
			    struct arm_mmu_ptables *ptables)
{
	private_map(ptables, "thread_stack", thread->stack_info.start,
	return private_map(ptables, "thread_stack", thread->stack_info.start,
			    thread->stack_info.start, thread->stack_info.size,
			    MT_P_RW_U_RW | MT_NORMAL);
}

void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
	struct arm_mmu_ptables *old_ptables, *domain_ptables;
	struct k_mem_domain *domain;
	bool is_user, is_migration;
	int ret = 0;

	domain = thread->mem_domain_info.mem_domain;
	domain_ptables = &domain->arch.ptables;
@@ -1073,7 +1079,7 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
	is_migration = (old_ptables != NULL) && is_user;

	if (is_migration) {
		map_thread_stack(thread, domain_ptables);
		ret = map_thread_stack(thread, domain_ptables);
	}

	thread->arch.ptables = domain_ptables;
@@ -1089,12 +1095,14 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
	}

	if (is_migration) {
		reset_map(old_ptables, __func__, thread->stack_info.start,
		ret = reset_map(old_ptables, __func__, thread->stack_info.start,
				thread->stack_info.size);
	}

	return ret;
}

void arch_mem_domain_thread_remove(struct k_thread *thread)
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	struct arm_mmu_ptables *domain_ptables;
	struct k_mem_domain *domain;
@@ -1103,14 +1111,14 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
	domain_ptables = &domain->arch.ptables;

	if ((thread->base.user_options & K_USER) == 0) {
		return;
		return 0;
	}

	if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
		return;
		return 0;
	}

	reset_map(domain_ptables, __func__, thread->stack_info.start,
	return reset_map(domain_ptables, __func__, thread->stack_info.start,
			 thread->stack_info.size);
}

+60 −13
Original line number Diff line number Diff line
@@ -7,10 +7,14 @@
#include <kernel.h>
#include <kernel_internal.h>
#include <sys/__assert.h>
#include <sys/check.h>
#include "core_pmp.h"
#include <arch/riscv/csr.h>
#include <stdio.h>

#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#define PMP_SLOT_NUMBER	CONFIG_PMP_SLOT

#ifdef CONFIG_USERSPACE
@@ -284,17 +288,21 @@ void z_riscv_configure_user_allowed_stack(struct k_thread *thread)
		csr_write_enum(CSR_PMPCFG0 + i, thread->arch.u_pmpcfg[i]);
}

void z_riscv_pmp_add_dynamic(struct k_thread *thread,
int z_riscv_pmp_add_dynamic(struct k_thread *thread,
			ulong_t addr,
			ulong_t size,
			unsigned char flags)
{
	unsigned char index = 0U;
	unsigned char *uchar_pmpcfg;
	int ret = 0;

	/* Check 4 bytes alignment */
	__ASSERT(((addr & 0x3) == 0) && ((size & 0x3) == 0) && size,
		 "address/size are not 4 bytes aligned\n");
	CHECKIF(!(((addr & 0x3) == 0) && ((size & 0x3) == 0) && size)) {
		LOG_ERR("address/size are not 4 bytes aligned\n");
		ret = -EINVAL;
		goto out;
	}

	/* Get next free entry */
	uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
@@ -306,6 +314,10 @@ void z_riscv_pmp_add_dynamic(struct k_thread *thread,
	}

	__ASSERT((index < CONFIG_PMP_SLOT), "no free PMP entry\n");
	CHECKIF(!(index < CONFIG_PMP_SLOT)) {
		ret = -ENOSPC;
		goto out;
	}

	/* Select the best type */
	if (size == 4) {
@@ -316,6 +328,11 @@ void z_riscv_pmp_add_dynamic(struct k_thread *thread,
	else if ((addr & (size - 1)) || (size & (size - 1))) {
		__ASSERT(((index + 1) < CONFIG_PMP_SLOT),
			"not enough free PMP entries\n");
		CHECKIF(!((index + 1) < CONFIG_PMP_SLOT)) {
			ret = -ENOSPC;
			goto out;
		}

		thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr);
		uchar_pmpcfg[index++] = flags | PMP_NA4;
		thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr + size);
@@ -326,6 +343,9 @@ void z_riscv_pmp_add_dynamic(struct k_thread *thread,
		thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(addr, size);
		uchar_pmpcfg[index] = flags | PMP_NAPOT;
	}

out:
	return ret;
}

int arch_buffer_validate(void *addr, size_t size, int write)
@@ -413,7 +433,7 @@ int arch_mem_domain_max_partitions_get(void)
	return PMP_MAX_DYNAMIC_REGION;
}

void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t  partition_id)
{
	sys_dnode_t *node, *next_node;
@@ -423,6 +443,7 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
	struct k_thread *thread;
	ulong_t size = (ulong_t) domain->partitions[partition_id].size;
	ulong_t start = (ulong_t) domain->partitions[partition_id].start;
	int ret = 0;

	if (size == 4) {
		pmp_type = PMP_NA4;
@@ -444,7 +465,8 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,

	node = sys_dlist_peek_head(&domain->mem_domain_q);
	if (!node) {
		return;
		ret = -ENOENT;
		goto out;
	}

	thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);
@@ -459,7 +481,11 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
		}
	}

	__ASSERT((index < CONFIG_PMP_SLOT), "partition not found\n");
	CHECKIF(!(index < CONFIG_PMP_SLOT)) {
		LOG_DBG("%s: partition not found\n", __func__);
		ret = -ENOENT;
		goto out;
	}

#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
	if (pmp_type == PMP_TOR) {
@@ -483,11 +509,15 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
			uchar_pmpcfg[CONFIG_PMP_SLOT - 2] = 0U;
		}
	}

out:
	return ret;
}

void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
	struct k_mem_partition *partition;
	int ret = 0, ret2;

	for (int i = 0, pcount = 0;
		pcount < thread->mem_domain_info.mem_domain->num_partitions;
@@ -498,29 +528,44 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
		}
		pcount++;

		z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
		ret2 = z_riscv_pmp_add_dynamic(thread,
			(ulong_t) partition->start,
			(ulong_t) partition->size, partition->attr.pmp_attr);
		ARG_UNUSED(ret2);
		CHECKIF(ret2 != 0) {
			ret = ret2;
		}
	}

	return ret;
}

void arch_mem_domain_partition_add(struct k_mem_domain *domain,
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id)
{
	sys_dnode_t *node, *next_node;
	struct k_thread *thread;
	struct k_mem_partition *partition;
	int ret = 0, ret2;

	partition = &domain->partitions[partition_id];

	SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
		thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);

		z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
		ret2 = z_riscv_pmp_add_dynamic(thread,
			(ulong_t) partition->start,
			(ulong_t) partition->size, partition->attr.pmp_attr);
		ARG_UNUSED(ret2);
		CHECKIF(ret2 != 0) {
			ret = ret2;
		}
	}

void arch_mem_domain_thread_remove(struct k_thread *thread)
	return ret;
}

int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	uint32_t i;
	unsigned char *uchar_pmpcfg;
@@ -530,6 +575,8 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
	for (i = PMP_REGION_NUM_FOR_U_THREAD; i < CONFIG_PMP_SLOT; i++) {
		uchar_pmpcfg[i] = 0U;
	}

	return 0;
}

#endif /* CONFIG_USERSPACE */
+5 −1
Original line number Diff line number Diff line
@@ -94,8 +94,12 @@ void z_riscv_configure_user_allowed_stack(struct k_thread *thread);
 * @param addr   Start address of the memory area.
 * @param size   Size of the memory area.
 * @param flags  Permissions: PMP_R, PMP_W, PMP_X, PMP_L
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOSPC if no free PMP entry
 */
void z_riscv_pmp_add_dynamic(struct k_thread *thread,
int z_riscv_pmp_add_dynamic(struct k_thread *thread,
			ulong_t addr,
			ulong_t size,
			unsigned char flags);
+217 −78

File changed.

Preview size limit exceeded, changes collapsed.

Loading