Commit 03d5f641 authored by Tomer Tayar, committed by Oded Gabbay
Browse files

habanalabs: Use single pool for CPU accessible host memory



The device's CPU accessible memory on host is managed in a dedicated
pool, except for 2 regions - Primary Queue (PQ) and Event Queue (EQ) -
which are allocated from generic DMA pools.
Due to address length limitations of the CPU, the addresses of all these
memory regions must have the same MSBs starting at bit 40.
This patch modifies the allocation of the PQ and EQ to be also from the
dedicated pool, to ensure compliance with the limitation.

Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent a38693d7
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -321,6 +321,18 @@ struct hl_cs_job;
#define HL_EQ_LENGTH			64
#define HL_EQ_SIZE_IN_BYTES		(HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)

#define HL_CPU_PKT_SHIFT		5
#define HL_CPU_PKT_SIZE			(1 << HL_CPU_PKT_SHIFT)
#define HL_CPU_PKT_MASK			(~((1 << HL_CPU_PKT_SHIFT) - 1))
#define HL_CPU_MAX_PKTS_IN_CB		32
#define HL_CPU_CB_SIZE			(HL_CPU_PKT_SIZE * \
					 HL_CPU_MAX_PKTS_IN_CB)
#define HL_CPU_CB_QUEUE_SIZE		(HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)

/* KMD <-> ArmCP shared memory size (EQ + PQ + CPU CB queue) */
#define HL_CPU_ACCESSIBLE_MEM_SIZE	(HL_EQ_SIZE_IN_BYTES + \
					 HL_QUEUE_SIZE_IN_BYTES + \
					 HL_CPU_CB_QUEUE_SIZE)

/**
 * struct hl_hw_queue - describes a H/W transport queue.
+30 −10
Original line number Diff line number Diff line
@@ -415,14 +415,20 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
}

static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
					struct hl_hw_queue *q)
				struct hl_hw_queue *q, bool is_cpu_queue)
{
	void *p;
	int rc;

	if (is_cpu_queue)
		p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_QUEUE_SIZE_IN_BYTES,
							&q->bus_address);
	else
		p = hdev->asic_funcs->dma_alloc_coherent(hdev,
						HL_QUEUE_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
						&q->bus_address,
						GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

@@ -446,8 +452,15 @@ static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
	return 0;

free_queue:
	hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
			(void *) (uintptr_t) q->kernel_address, q->bus_address);
	if (is_cpu_queue)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
	else
		hdev->asic_funcs->dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address,
					q->bus_address);

	return rc;
}
@@ -474,12 +487,12 @@ static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)

static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_hw_queue_init(hdev, q);
	return ext_and_cpu_hw_queue_init(hdev, q, true);
}

static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_hw_queue_init(hdev, q);
	return ext_and_cpu_hw_queue_init(hdev, q, false);
}

/*
@@ -569,8 +582,15 @@ static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)

	kfree(q->shadow_queue);

	hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
			(void *) (uintptr_t) q->kernel_address, q->bus_address);
	if (q->queue_type == QUEUE_TYPE_CPU)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
	else
		hdev->asic_funcs->dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address,
					q->bus_address);
}

int hl_hw_queues_create(struct hl_device *hdev)
+0 −8
Original line number Diff line number Diff line
@@ -300,14 +300,6 @@ enum armcp_pwm_attributes {
	armcp_pwm_enable
};

#define HL_CPU_PKT_SHIFT		5
#define HL_CPU_PKT_SIZE			(1 << HL_CPU_PKT_SHIFT)
#define HL_CPU_PKT_MASK			(~((1 << HL_CPU_PKT_SHIFT) - 1))
#define HL_CPU_MAX_PKTS_IN_CB		32
#define HL_CPU_CB_SIZE			(HL_CPU_PKT_SIZE * \
					 HL_CPU_MAX_PKTS_IN_CB)
#define HL_CPU_ACCESSIBLE_MEM_SIZE	(HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)

/* Event Queue Packets */

struct eq_generic_event {
+6 −4
Original line number Diff line number Diff line
@@ -284,8 +284,9 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)

	BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);

	p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

@@ -308,8 +309,9 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->dma_free_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
			(void *) (uintptr_t) q->kernel_address, q->bus_address);
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_EQ_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)