Commit d1dfe5b8 authored by Dave Jiang's avatar Dave Jiang Committed by Vinod Koul
Browse files

dmaengine: idxd: add descriptor manipulation routines



This commit adds helper functions for DSA descriptor allocation,
submission, and free operations.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/157965025757.73301.12692876585357550065.stgit@djiang5-desk3.ch.intel.com


Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent b131ad59
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o
idxd-y := init.o irq.o device.o sysfs.o submit.o
+10 −0
Original line number Diff line number Diff line
@@ -68,6 +68,11 @@ enum idxd_wq_type {
#define WQ_NAME_SIZE   1024
#define WQ_TYPE_SIZE   10

/* Descriptor-allocation behavior when the workqueue is exhausted. */
enum idxd_op_type {
	IDXD_OP_BLOCK = 0,	/* sleep until a descriptor becomes free */
	IDXD_OP_NONBLOCK = 1,	/* fail immediately (-EBUSY/-EAGAIN) instead of sleeping */
};

struct idxd_wq {
	void __iomem *dportal;
	struct device conf_dev;
@@ -246,4 +251,9 @@ int idxd_wq_disable(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);

#endif
+91 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

/*
 * idxd_alloc_desc - allocate a descriptor from a workqueue's pool
 * @wq: workqueue to allocate from
 * @optype: IDXD_OP_BLOCK to sleep until a descriptor is available,
 *	    IDXD_OP_NONBLOCK to fail fast
 *
 * Reserves a queue-depth slot (wq->dq_count, bounded by wq->size), grabs a
 * free descriptor index from wq->sbmap, and returns the descriptor with its
 * hardware descriptor and completion record zeroed.
 *
 * Return: descriptor pointer on success, or ERR_PTR():
 *	-EIO	device not in IDXD_DEV_ENABLED state (checked before and
 *		after any blocking wait)
 *	-EBUSY	nonblocking and the submit_lock read side is unavailable
 *	-EAGAIN	nonblocking and the queue is full, or (rare) the bitmap
 *		had no free bit despite a reserved slot
 *	-EINTR	blocking wait was interrupted by a signal
 */
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
	struct idxd_desc *desc;
	int idx;
	struct idxd_device *idxd = wq->idxd;

	if (idxd->state != IDXD_DEV_ENABLED)
		return ERR_PTR(-EIO);

	/* Take the read side; nonblocking callers must not sleep here. */
	if (optype == IDXD_OP_BLOCK)
		percpu_down_read(&wq->submit_lock);
	else if (!percpu_down_read_trylock(&wq->submit_lock))
		return ERR_PTR(-EBUSY);

	/* Try to reserve a queue-depth slot without exceeding wq->size. */
	if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
		int rc;

		if (optype == IDXD_OP_NONBLOCK) {
			percpu_up_read(&wq->submit_lock);
			return ERR_PTR(-EAGAIN);
		}

		/*
		 * Queue full: drop the read lock and take the write side so
		 * this blocking waiter excludes new submitters while it waits
		 * for a slot to free up (idxd_free_desc() wakes the waitq).
		 */
		percpu_up_read(&wq->submit_lock);
		percpu_down_write(&wq->submit_lock);
		rc = wait_event_interruptible(wq->submit_waitq,
					      atomic_add_unless(&wq->dq_count,
								1, wq->size) ||
					       idxd->state != IDXD_DEV_ENABLED);
		percpu_up_write(&wq->submit_lock);
		if (rc < 0)
			return ERR_PTR(-EINTR);
		/* Device may have been disabled while we slept. */
		if (idxd->state != IDXD_DEV_ENABLED)
			return ERR_PTR(-EIO);
	} else {
		percpu_up_read(&wq->submit_lock);
	}

	/* Slot reserved; claim a free descriptor index from the bitmap. */
	idx = sbitmap_get(&wq->sbmap, 0, false);
	if (idx < 0) {
		/* Give the slot back; caller may retry. */
		atomic_dec(&wq->dq_count);
		return ERR_PTR(-EAGAIN);
	}

	/* Hand back a clean descriptor: zero hw desc and completion record. */
	desc = wq->descs[idx];
	memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
	memset(desc->completion, 0, sizeof(struct dsa_completion_record));
	return desc;
}

/*
 * idxd_free_desc - return a descriptor to the workqueue's free pool
 * @wq: workqueue the descriptor belongs to
 * @desc: descriptor previously obtained from idxd_alloc_desc()
 *
 * Releases the queue-depth slot, clears the descriptor's bit in the
 * allocation bitmap, then wakes any blocking allocators sleeping in
 * idxd_alloc_desc(). The slot must be released before the wakeup so a
 * woken waiter's atomic_add_unless() can succeed.
 */
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	atomic_dec(&wq->dq_count);

	sbitmap_clear_bit(&wq->sbmap, desc->id);
	wake_up(&wq->submit_waitq);
}

/*
 * idxd_submit_desc - issue a prepared descriptor to the device
 * @wq: workqueue whose portal the descriptor is written to
 * @desc: descriptor with hw descriptor and completion record set up
 *
 * Return: 0 on success, -EIO if the device is not enabled.
 */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	struct idxd_device *idxd = wq->idxd;
	int vec = desc->hw->int_handle;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -EIO;

	/*
	 * Add the descriptor to the lockless pending list of its designated
	 * irq_entry *before* issuing it to the device. If this were done
	 * after submission, the completion interrupt could fire and the irq
	 * handler scan the list before the descriptor is on it, and the
	 * completion would be missed.
	 */
	llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);

	/*
	 * The wmb() flushes writes to coherent DMA data before possibly
	 * triggering a DMA read. The wmb() is necessary even on UP because
	 * the recipient is a device.
	 */
	wmb();
	iosubmit_cmds512(wq->dportal, desc->hw, 1);

	return 0;
}