Commit b53966ff authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-linus-5.10c-rc8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "A short series fixing a regression introduced in 5.9 for running as
  Xen dom0 on a system with NVMe backed storage"

* tag 'for-linus-5.10c-rc8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: don't use page->lru for ZONE_DEVICE memory
  xen: add helpers for caching grant mapping pages
parents b01deddb ee32f323
Loading
Loading
Loading
Loading
+17 −72
Original line number Diff line number Diff line
@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

/*
 * Return true if a cached persistent grant has been unused long enough to
 * be eligible for purging.  A pgrant_timeout of 0 disables timing out.
 */
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
	unsigned long idle = jiffies - persistent_gnt->last_used;

	return pgrant_timeout && idle >= HZ * pgrant_timeout;
}

/*
 * Hand out one page for grant mapping, preferring the ring's free page
 * pool and falling back to gnttab_alloc_pages() when the pool is empty.
 * Returns 0 on success or the gnttab_alloc_pages() error code.
 */
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long irq_flags;
	struct page *pg;

	spin_lock_irqsave(&ring->free_pages_lock, irq_flags);
	if (!list_empty(&ring->free_pages)) {
		/* Non-empty list must have a matching non-zero count. */
		BUG_ON(ring->free_pages_num == 0);
		pg = list_first_entry(&ring->free_pages, struct page, lru);
		list_del(&pg->lru);
		ring->free_pages_num--;
		spin_unlock_irqrestore(&ring->free_pages_lock, irq_flags);
		page[0] = pg;
		return 0;
	}

	/* Pool exhausted: the counter must agree before we allocate fresh. */
	BUG_ON(ring->free_pages_num != 0);
	spin_unlock_irqrestore(&ring->free_pages_lock, irq_flags);
	return gnttab_alloc_pages(1, page);
}

/*
 * Return @num pages to the ring's free page pool.  The pool lock is taken
 * irqsave, so this is callable from any context.
 */
static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
                                  int num)
{
	unsigned long irq_flags;
	int idx;

	spin_lock_irqsave(&ring->free_pages_lock, irq_flags);
	for (idx = 0; idx < num; idx++)
		list_add(&page[idx]->lru, &ring->free_pages);
	ring->free_pages_num += num;
	spin_unlock_irqrestore(&ring->free_pages_lock, irq_flags);
}

/*
 * Shrink the ring's free page pool down to at most @num pages, giving the
 * excess back via gnttab_free_pages().
 */
static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
	/* Excess pages are handed back in batches of NUM_BATCH_FREE_PAGES. */
	struct page *batch[NUM_BATCH_FREE_PAGES];
	unsigned int filled = 0;
	unsigned long irq_flags;

	spin_lock_irqsave(&ring->free_pages_lock, irq_flags);
	while (ring->free_pages_num > num) {
		BUG_ON(list_empty(&ring->free_pages));
		batch[filled] = list_first_entry(&ring->free_pages,
		                                 struct page, lru);
		list_del(&batch[filled]->lru);
		ring->free_pages_num--;
		if (++filled == NUM_BATCH_FREE_PAGES) {
			/* Release the lock across the gnttab call. */
			spin_unlock_irqrestore(&ring->free_pages_lock, irq_flags);
			gnttab_free_pages(filled, batch);
			spin_lock_irqsave(&ring->free_pages_lock, irq_flags);
			filled = 0;
		}
	}
	spin_unlock_irqrestore(&ring->free_pages_lock, irq_flags);
	if (filled != 0)
		gnttab_free_pages(filled, batch);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			put_free_pages(ring, pages, segs_to_unmap);
			gnttab_page_cache_put(&ring->free_pages, pages,
					      segs_to_unmap);
			segs_to_unmap = 0;
		}

@@ -371,7 +311,8 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(ring, pages, segs_to_unmap);
			gnttab_page_cache_put(&ring->free_pages, pages,
					      segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
@@ -379,7 +320,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(ring, pages, segs_to_unmap);
		gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
	}
}

@@ -664,9 +605,10 @@ purge_gnt_list:

		/* Shrink the free pages pool if it is too large. */
		if (time_before(jiffies, blkif->buffer_squeeze_end))
			shrink_free_pagepool(ring, 0);
			gnttab_page_cache_shrink(&ring->free_pages, 0);
		else
			shrink_free_pagepool(ring, max_buffer_pages);
			gnttab_page_cache_shrink(&ring->free_pages,
						 max_buffer_pages);

		if (log_stats && time_after(jiffies, ring->st_print))
			print_stats(ring);
@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
	ring->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(ring, 0 /* All */);
	gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
}

static unsigned int xen_blkbk_unmap_prepare(
@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
	   but is this the best way to deal with this? */
	BUG_ON(result);

	put_free_pages(ring, data->pages, data->count);
	gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
	make_response(ring, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(ring, pending_req);
@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			put_free_pages(ring, unmap_pages, invcount);
			gnttab_page_cache_put(&ring->free_pages, unmap_pages,
					      invcount);
		}
		pages += batch;
		num -= batch;
@@ -850,7 +793,8 @@ again:
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(ring, &pages[i]->page))
			if (gnttab_page_cache_get(&ring->free_pages,
						  &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
@@ -883,7 +827,8 @@ again:
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				put_free_pages(ring, &pages[seg_idx]->page, 1);
				gnttab_page_cache_put(&ring->free_pages,
						      &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
@@ -944,7 +889,7 @@ next:

out_of_memory:
	pr_alert("%s: out of memory\n", __func__);
	put_free_pages(ring, pages_to_gnt, segs_to_map);
	gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
	for (i = last_map; i < num; i++)
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
	return -ENOMEM;
+1 −3
Original line number Diff line number Diff line
@@ -288,9 +288,7 @@ struct xen_blkif_ring {
	struct work_struct	persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	spinlock_t		free_pages_lock;
	int			free_pages_num;
	struct list_head	free_pages;
	struct gnttab_page_cache free_pages;

	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
+2 −4
Original line number Diff line number Diff line
@@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		spin_lock_init(&ring->free_pages_lock);
		INIT_LIST_HEAD(&ring->free_pages);
		gnttab_page_cache_init(&ring->free_pages);

		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
@@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(!list_empty(&ring->free_pages));
		BUG_ON(ring->free_pages_num != 0);
		BUG_ON(ring->free_pages.num_pages != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
+123 −0
Original line number Diff line number Diff line
@@ -813,6 +813,129 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/*
 * Unpopulated-alloc pages are ZONE_DEVICE backed and must not use
 * page->lru (see this commit's "don't use page->lru for ZONE_DEVICE
 * memory" rationale), so the cache is chained through
 * page->zone_device_data as a singly linked LIFO stack.
 */
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return cache->pages == NULL;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *top = cache->pages;

	cache->pages = top->zone_device_data;
	return top;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
/* Ordinary pages: keep the cache on a standard list_head via page->lru. */
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *head = list_first_entry(&cache->pages, struct page, lru);

	list_del(&head->lru);
	return head;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

/* Initialise an empty grant mapping page cache. */
void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	cache->num_pages = 0;
	cache_init(cache);
	spin_lock_init(&cache->lock);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

/*
 * Take one page out of the cache, falling back to gnttab_alloc_pages()
 * when the cache is empty.  Returns 0 on success or the
 * gnttab_alloc_pages() error code.
 */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (!cache_empty(cache)) {
		page[0] = cache_deq(cache);
		cache->num_pages--;
		spin_unlock_irqrestore(&cache->lock, flags);
		return 0;
	}

	/* Nothing cached: allocate a fresh page outside the lock. */
	spin_unlock_irqrestore(&cache->lock, flags);
	return gnttab_alloc_pages(1, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

/* Stash @num pages into the cache for later reuse by gnttab_page_cache_get(). */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int idx;

	spin_lock_irqsave(&cache->lock, flags);

	for (idx = 0; idx < num; idx++)
		cache_enq(cache, page[idx]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

/*
 * Shrink the cache down to at most @num pages, releasing the excess via
 * gnttab_free_pages() in small batches.
 */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *batch[10];
	unsigned int filled = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		batch[filled] = cache_deq(cache);
		cache->num_pages--;
		if (++filled == ARRAY_SIZE(batch)) {
			/* Release the lock across the gnttab call. */
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(filled, batch);
			filled = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (filled != 0)
		gnttab_free_pages(filled, batch);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;
+11 −9
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@
#include <xen/xen.h>

static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list);
static struct page *page_list;
static unsigned int list_count;

static int fill_list(unsigned int nr_pages)
@@ -84,7 +84,8 @@ static int fill_list(unsigned int nr_pages)
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		list_add(&pg->lru, &page_list);
		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

@@ -118,12 +119,10 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = list_first_entry_or_null(&page_list,
							   struct page,
							   lru);
		struct page *pg = page_list;

		BUG_ON(!pg);
		list_del(&pg->lru);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

@@ -134,7 +133,8 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
				unsigned int j;

				for (j = 0; j <= i; j++) {
					list_add(&pages[j]->lru, &page_list);
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
@@ -160,7 +160,8 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		list_add(&pages[i]->lru, &page_list);
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
@@ -189,7 +190,8 @@ static int __init init(void)
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			list_add(&pg->lru, &page_list);
			pg->zone_device_data = page_list;
			page_list = pg;
			list_count++;
		}
	}
Loading