Commit ad4a7b50 authored by Dave Jiang, committed by Vinod Koul

dmaengine: ioatdma: adding shutdown support



The ioatdma needs to be quiesced and all additional op submission blocked
during reboots. When NET_DMA was in use, this caused issues because ops were
still being sent to the ioatdma during reboots even though PCI BME had
already been turned off. Even though NET_DMA has been deprecated, we need to
prevent similar situations. The shutdown handler should address that.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 6ff33f39
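
From a dmaengine client's point of view, the IOAT_CHAN_DOWN gate added below
surfaces as a NULL return from the prep callbacks, the same value a full
descriptor ring already produces. Here is a minimal sketch of a client that
copes with that, using the generic dmaengine helpers; the submit_copy()
wrapper and its choice of error code are illustrative only and not part of
this patch:

#include <linux/dmaengine.h>

/* Hypothetical client-side wrapper (not part of this commit). Once
 * IOAT_CHAN_DOWN is set, the ioat prep callbacks return NULL, so a
 * client that already checks the prep result is fenced off for free.
 */
static int submit_copy(struct dma_chan *chan, dma_addr_t dst,
		       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)		/* ring full, or the channel is shut down */
		return -ENXIO;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -ENXIO;

	dma_async_issue_pending(chan);
	return 0;
}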
drivers/dma/ioat/dma.c +2 −1
@@ -197,6 +197,7 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
 	spin_lock_bh(&ioat_chan->prep_lock);
-	__ioat_start_null_desc(ioat_chan);
+	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		__ioat_start_null_desc(ioat_chan);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 }
drivers/dma/ioat/dma.h +4 −2
@@ -82,8 +82,9 @@ struct ioatdma_device {
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
 	struct dma_device dma_dev;
 	u8 version;
-	struct msix_entry msix_entries[4];
-	struct ioatdma_chan *idx[4];
+#define IOAT_MAX_CHANS 4
+	struct msix_entry msix_entries[IOAT_MAX_CHANS];
+	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
@@ -95,6 +96,7 @@ struct ioatdma_chan {
 	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
+	#define IOAT_CHAN_DOWN 0
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
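
The flag values above are bit indices into the unsigned long
ioat_chan->state, driven with the kernel's atomic bitops
(set_bit()/test_bit()). A sketch of a hypothetical helper naming the check
that the prep routines below open-code:

/* Hypothetical helper, not in this patch: the prep routines open-code
 * the test_bit() call, and ioat_shutdown() sets the bit with set_bit().
 */
static inline bool ioat_chan_is_down(struct ioatdma_chan *ioat_chan)
{
	return test_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
}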
drivers/dma/ioat/init.c +26 −0
@@ -1186,6 +1186,31 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	return 0;
 }
 
+static void ioat_shutdown(struct pci_dev *pdev)
+{
+	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
+	struct ioatdma_chan *ioat_chan;
+	int i;
+
+	if (!ioat_dma)
+		return;
+
+	for (i = 0; i < IOAT_MAX_CHANS; i++) {
+		ioat_chan = ioat_dma->idx[i];
+		if (!ioat_chan)
+			continue;
+
+		spin_lock_bh(&ioat_chan->prep_lock);
+		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		del_timer_sync(&ioat_chan->timer);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		/* this should quiesce then reset */
+		ioat_reset_hw(ioat_chan);
+	}
+
+	ioat_disable_interrupts(ioat_dma);
+}
+
 #define DRV_NAME "ioatdma"
 
 static struct pci_driver ioat_pci_driver = {
@@ -1193,6 +1218,7 @@ static struct pci_driver ioat_pci_driver = {
 	.id_table	= ioat_pci_tbl,
 	.probe		= ioat_pci_probe,
 	.remove		= ioat_remove,
+	.shutdown	= ioat_shutdown,
 };
 
 static struct ioatdma_device *
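
A note on the ordering in ioat_shutdown() above: IOAT_CHAN_DOWN is set under
prep_lock, the same lock the descriptor-start path in the first hunk takes,
so once shutdown drops the lock no further descriptors can be started; only
then is the hardware reset and are interrupts disabled. For context, the PCI
core invokes a driver's .shutdown callback during reboot, halt, and kexec,
which is what closes the window the commit message describes, before bus
mastering is disabled.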
drivers/dma/ioat/prep.c +34 −0
@@ -121,6 +121,9 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 	size_t total_len = len;
 	int num_descs, idx, i;
 
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
 	if (likely(num_descs) &&
 	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
@@ -254,6 +257,11 @@ struct dma_async_tx_descriptor *
 ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	       unsigned int src_cnt, size_t len, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
 }

@@ -262,6 +270,11 @@ ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len,
 		    enum sum_check_flags *result, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* the cleanup routine only sets bits on validate failure, it
 	 * does not clear bits on validate success... so clear it here
 	 */
@@ -574,6 +587,11 @@ ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		dst[0] = dst[1];
@@ -614,6 +632,11 @@ ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		pq[0] = pq[1];
@@ -638,6 +661,10 @@ ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 {
 	unsigned char scf[MAX_SCF];
 	dma_addr_t pq[2];
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
 
 	if (src_cnt > MAX_SCF)
 		return NULL;
@@ -661,6 +688,10 @@ ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 {
 	unsigned char scf[MAX_SCF];
 	dma_addr_t pq[2];
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
 
 	if (src_cnt > MAX_SCF)
 		return NULL;
@@ -689,6 +720,9 @@ ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
 	struct ioat_ring_ent *desc;
 	struct ioat_dma_descriptor *hw;
 
+	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+		return NULL;
+
 	if (ioat_check_space_lock(ioat_chan, 1) == 0)
 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
 	else