Commit 0c59f7ff authored by Maria Matejka
Browse files

Revert "Bound allocated pages to resource pools with page caches to avoid unnecessary syscalls"

This reverts commit 7f0e5982.
parent c20506dc
Loading
Loading
Loading
Loading
+0 −58
Original line number Diff line number Diff line
@@ -31,18 +31,9 @@
/* A resource pool: itself a resource, owning a list of child resources
 * plus (pre-revert) a per-pool cache of free system pages. */
struct pool {
  resource r;			/* Common resource header; class/dump/free dispatch */
  list inside;			/* Child resources owned by this pool */
  struct pool_pages *pages;	/* Lazily allocated page cache; NULL until first alloc_page() */
  const char *name;		/* Human-readable name shown in dumps */
};

/* Header of a pool's page cache; lives in one system page itself,
 * with the remainder of that page used as the ptr[] array of cached free pages. */
struct pool_pages {
  uint free;		/* Number of cached (currently unused) pages in ptr[] */
  uint used;		/* Number of pages handed out via alloc_page() and not yet freed */
  void *ptr[0];		/* Zero-length trailing array (GNU extension; C99 would use ptr[]) */
};

/* Max pages the cache can hold: whatever fits in one page after the header. */
#define POOL_PAGES_MAX	((page_size - sizeof(struct pool_pages)) / sizeof (void *))

static void pool_dump(resource *);
static void pool_free(resource *);
static resource *pool_lookup(resource *, unsigned long);
@@ -59,10 +50,6 @@ static struct resclass pool_class = {

pool root_pool;

void *alloc_sys_page(void);
void free_sys_page(void *);
void resource_sys_init(void);

static int indent;

/**
@@ -95,14 +82,6 @@ pool_free(resource *P)
      xfree(r);
      r = rr;
    }

  if (p->pages)
    {
      ASSERT_DIE(!p->pages->used);
      for (uint i=0; i<p->pages->free; i++)
	free_sys_page(p->pages->ptr[i]);
      free_sys_page(p->pages);
    }
}

static void
@@ -128,9 +107,6 @@ pool_memsize(resource *P)
  WALK_LIST(r, p->inside)
    sum += rmemsize(r);

  if (p->pages)
    sum += page_size * (p->pages->used + p->pages->free + 1);

  return sum;
}

@@ -283,7 +259,6 @@ rlookup(unsigned long a)
void
resource_init(void)
{
  resource_sys_init();
  root_pool.r.class = &pool_class;
  root_pool.name = "Root";
  init_list(&root_pool.inside);
@@ -450,39 +425,6 @@ mb_free(void *m)
  rfree(b);
}

/**
 * alloc_page - allocate one system page, preferring the pool's cache
 * @p: pool to account the page to
 *
 * Lazily creates the pool's page-cache header on first call, bumps the
 * used-page counter, and returns a cached page if one is available
 * (re-zeroed via bzero) or a fresh one from alloc_sys_page() otherwise.
 * NOTE(review): fresh pages appear to come zeroed too (anonymous mmap
 * elsewhere in this diff) — confirm before relying on it.
 */
void *
alloc_page(pool *p)
{
  /* First allocation: the cache header itself occupies one system page. */
  if (!p->pages)
  {
    p->pages = alloc_sys_page();
    p->pages->free = 0;
    p->pages->used = 1;
  }
  else
    p->pages->used++;

  /* Pop a cached page if any; zero it so callers always see clean memory. */
  if (p->pages->free)
  {
    void *ptr = p->pages->ptr[--p->pages->free];
    bzero(ptr, page_size);
    return ptr;
  }
  else
    return alloc_sys_page();
}

/**
 * free_page - return a page to the pool's cache (or the system)
 * @p: pool the page was allocated from; must have called alloc_page() before
 * @ptr: page to release
 *
 * Decrements the used counter, then either stashes the page in the cache
 * for reuse or, when the cache is full (POOL_PAGES_MAX), unmaps it via
 * free_sys_page().
 */
void
free_page(pool *p, void *ptr)
{
  ASSERT_DIE(p->pages);		/* alloc_page() must have initialized the cache */
  p->pages->used--;

  /* Cache full: give the page back to the OS; otherwise keep it around. */
  if (p->pages->free >= POOL_PAGES_MAX)
    return free_sys_page(ptr);
  else
    p->pages->ptr[p->pages->free++] = ptr;
}


#define STEP_UP(x) ((x) + (x)/2 + 4)
+3 −5
Original line number Diff line number Diff line
@@ -94,12 +94,10 @@ void sl_free(slab *, void *);

void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size);

extern long page_size;

/* Allocator of whole pages; for use in slabs and other high-level allocators. */
void *alloc_page(pool *);
void free_page(pool *, void *);
#define PAGE_HEAD(x)	((void *) (((intptr_t) (x)) & ~(page_size-1)))
u64 get_page_size(void);
void *alloc_page(void);
void free_page(void *);

#ifdef HAVE_LIBDMALLOC
/*
+11 −13
Original line number Diff line number Diff line
@@ -152,7 +152,6 @@ slab_memsize(resource *r)

struct slab {
  resource r;
  pool *p;
  uint obj_size, head_size, head_bitfield_len;
  uint objs_per_slab, num_empty_heads, data_size;
  list empty_heads, partial_heads, full_heads;
@@ -192,7 +191,6 @@ slab *
sl_new(pool *p, uint size)
{
  slab *s = ralloc(p, &sl_class);
  s->p = p;
  uint align = sizeof(struct sl_alignment);
  if (align < sizeof(int))
    align = sizeof(int);
@@ -201,6 +199,7 @@ sl_new(pool *p, uint size)
  s->obj_size = size;

  s->head_size = sizeof(struct sl_head);
  u64 page_size = get_page_size();

  do {
    s->objs_per_slab = (page_size - s->head_size) / size;
@@ -269,9 +268,9 @@ no_partial:
      s->num_empty_heads--;
      goto okay;
    }
  h = alloc_page(s->p);
  h = alloc_page();
#ifdef POISON
  memset(h, 0xba, page_size);
  memset(h, 0xba, get_page_size());
#endif
  ASSERT_DIE(SL_GET_HEAD(h) == h);
  memset(h, 0, s->head_size);
@@ -330,9 +329,9 @@ sl_free(slab *s, void *oo)
      if (s->num_empty_heads >= MAX_EMPTY_HEADS)
      {
#ifdef POISON
	memset(h, 0xde, page_size);
	memset(h, 0xde, get_page_size());
#endif
	free_page(s->p, h);
	free_page(h);
      }
      else
	{
@@ -349,11 +348,11 @@ slab_free(resource *r)
  struct sl_head *h, *g;

  WALK_LIST_DELSAFE(h, g, s->empty_heads)
    free_page(s->p, h);
    free_page(h);
  WALK_LIST_DELSAFE(h, g, s->partial_heads)
    free_page(s->p, h);
    free_page(h);
  WALK_LIST_DELSAFE(h, g, s->full_heads)
    free_page(s->p, h);
    free_page(h);
}

static void
@@ -386,8 +385,7 @@ slab_memsize(resource *r)
  WALK_LIST(h, s->full_heads)
    heads++;

//  return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + page_size);
  return ALLOC_OVERHEAD + sizeof(struct slab); /* The page sizes are accounted for in the pool */
  return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + get_page_size());
}

static resource *
@@ -397,10 +395,10 @@ slab_lookup(resource *r, unsigned long a)
  struct sl_head *h;

  WALK_LIST(h, s->partial_heads)
    if ((unsigned long) h < a && (unsigned long) h + page_size < a)
    if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
      return r;
  WALK_LIST(h, s->full_heads)
    if ((unsigned long) h < a && (unsigned long) h + page_size < a)
    if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
      return r;
  return NULL;
}
+21 −16
Original line number Diff line number Diff line
@@ -16,36 +16,41 @@
#include <sys/mman.h>
#endif

long page_size = 0;

#ifdef HAVE_MMAP
static u64 page_size = 0;
static _Bool use_fake = 0;
#else
static _Bool use_fake = 1;
static const u64 page_size = 4096; /* Fake page size */
#endif

void resource_sys_init(void)
u64 get_page_size(void)
{
#ifdef HAVE_MMAP
  if (!(page_size = sysconf(_SC_PAGESIZE)))
    die("System page size must be non-zero");
  if (page_size)
    return page_size;

#ifdef HAVE_MMAP
  if (page_size = sysconf(_SC_PAGESIZE))
  {
    if ((u64_popcount(page_size) > 1) || (page_size > 16384))
    {
#endif
      /* Too big or strange page, use the aligned allocator instead */
      page_size = 4096;
      use_fake = 1;
    }
    return page_size;
  }

  bug("Page size must be non-zero");
#endif
}

void *
alloc_sys_page(void)
alloc_page(void)
{
#ifdef HAVE_MMAP
  if (!use_fake)
  {
    void *ret = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    void *ret = mmap(NULL, get_page_size(), PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ret == MAP_FAILED)
      bug("mmap(%lu) failed: %m", page_size);
    return ret;
@@ -61,12 +66,12 @@ alloc_sys_page(void)
}

void
free_sys_page(void *ptr)
free_page(void *ptr)
{
#ifdef HAVE_MMAP
  if (!use_fake)
  {
    if (munmap(ptr, page_size) < 0)
    if (munmap(ptr, get_page_size()) < 0)
      bug("munmap(%p) failed: %m", ptr);
  }
  else