Commit 6fe5efa1 authored by Bhaktipriya Shridhar's avatar Bhaktipriya Shridhar Committed by Greg Kroah-Hartman
Browse files

staging: octeon: Convert create_singlethread_workqueue()



With concurrency-managed workqueues, use of dedicated workqueues can
be replaced by system_wq. Drop cvm_oct_poll_queue by using system_wq.

There are multiple work items per cvm_oct_poll_queue (viz.
cvm_oct_rx_refill_work, port_periodic_work) and different
cvm_oct_poll_queues need not be ordered. Hence, concurrency
can be increased by switching to system_wq.

All work items are sync canceled in cvm_oct_remove() so it
is guaranteed that no work is in flight by the time exit path runs.

Signed-off-by: default avatarBhaktipriya Shridhar <bhaktipriya96@gmail.com>
Acked-by: default avatarTejun Heo <tj@kernel.org>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2d5f6fec
Loading
Loading
Loading
Loading
+4 −17
Original line number Diff line number Diff line
@@ -86,10 +86,6 @@ int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/*
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
@@ -121,8 +117,7 @@ static void cvm_oct_rx_refill_worker(struct work_struct *work)
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
@@ -138,8 +133,7 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
						&priv->port_periodic_work, HZ);
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
@@ -666,11 +660,6 @@ static int cvm_oct_probe(struct platform_device *pdev)
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (!cvm_oct_poll_queue) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

@@ -828,8 +817,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				queue_delayed_work(cvm_oct_poll_queue,
						&priv->port_periodic_work, HZ);
				schedule_delayed_work(&priv->port_periodic_work, HZ);
			}
		}
	}
@@ -842,7 +830,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
@@ -885,7 +873,6 @@ static int cvm_oct_remove(struct platform_device *pdev)
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

+0 −1
Original line number Diff line number Diff line
@@ -76,7 +76,6 @@ extern int pow_send_group;
extern int pow_receive_group;
extern char pow_send_list[];
extern struct net_device *cvm_oct_device[];
extern struct workqueue_struct *cvm_oct_poll_queue;
extern atomic_t cvm_oct_poll_queue_stopping;
extern u64 cvm_oct_tx_poll_interval;