Commit 0877d5cf authored by Maria Matejka's avatar Maria Matejka
Browse files

Slab allocator variant that allows freeing blocks without knowing the slab pointer

parent c53f547a
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -102,6 +102,8 @@ void *sl_alloc(slab *);
void *sl_allocz(slab *);
void sl_free(slab *, void *);

#define SLF_NULLFREE	0x80000000	/* OR this to slab size to allow sl_free(NULL, block); */

/*
 * Low-level memory allocation functions, please don't use
 * outside resource manager and possibly sysdep code.
+47 −11
Original line number Diff line number Diff line
@@ -155,7 +155,8 @@ slab_memsize(resource *r)

struct slab {
  resource r;
  uint obj_size, head_size, head_bitfield_len;
  uint obj_size, head_size;
  u16 head_bitfield_len, init_phead;
  uint objs_per_slab, num_empty_heads, data_size;
  list empty_heads, partial_heads, full_heads;
};
@@ -175,12 +176,17 @@ struct sl_head {
  u32 used_bits[0];
};

/* Extended page header used for slabs created with SLF_NULLFREE: it prepends
 * a back-pointer to the owning slab, so sl_free(NULL, block) can recover the
 * slab from the page address alone (via SL_GET_HEAD()). */
struct sl_phead {
  struct slab *slab;		/* Back-pointer to the owning slab */
  struct sl_head head;		/* Regular slab head, placed right after the back-pointer */
};

struct sl_alignment {			/* Magic structure for testing of alignment */
  byte data;
  int x[0];				/* sizeof(struct sl_alignment) yields the alignment step used by sl_new() */
};

#define SL_GET_HEAD(x)	((struct sl_head *) (((uintptr_t) (x)) & ~(page_size-1)))
#define SL_GET_HEAD(x)	((void *) (((uintptr_t) (x)) & ~(page_size-1)))

/**
 * sl_new - create a new Slab
@@ -197,6 +203,13 @@ sl_new(pool *p, uint size)
  uint align = sizeof(struct sl_alignment);
  if (align < sizeof(void *))
    align = sizeof(void *);

  if (size & SLF_NULLFREE)
  {
    s->init_phead = sizeof(struct sl_phead) - sizeof(struct sl_head);
    size &= ~SLF_NULLFREE;
  }

  s->data_size = size;
  size = (size + align - 1) / align * align;
  s->obj_size = size;
@@ -204,14 +217,14 @@ sl_new(pool *p, uint size)
  s->head_size = sizeof(struct sl_head);

  do {
    s->objs_per_slab = (page_size - s->head_size) / size;
    s->objs_per_slab = (page_size - s->init_phead - s->head_size) / size;
    s->head_bitfield_len = (s->objs_per_slab + 31) / 32;
    s->head_size = (
	sizeof(struct sl_head)
      + sizeof(u32) * s->head_bitfield_len
      + align - 1)
    / align * align;
  } while (s->objs_per_slab * size + s->head_size > page_size);
  } while (s->objs_per_slab * size + s->head_size + s->init_phead > page_size);

  if (!s->objs_per_slab)
    bug("Slab: object too large");
@@ -234,6 +247,7 @@ void *
sl_alloc(slab *s)
{
  struct sl_head *h;
  struct sl_phead *ph;

redo:
  h = HEAD(s->partial_heads);
@@ -270,11 +284,24 @@ no_partial:
      s->num_empty_heads--;
      goto okay;
    }

  if (s->init_phead)
  {
    ph = alloc_page();
    h = &ph->head;
    ph->slab = s;
    ASSERT_DIE(SL_GET_HEAD(h) == ph);
  }
  else
  {
    h = alloc_page();
    ASSERT_DIE(SL_GET_HEAD(h) == h);
  }

#ifdef POISON
  memset(h, 0xba, page_size);
  memset(h, 0xba, page_size - s->init_phead);
#endif
  ASSERT_DIE(SL_GET_HEAD(h) == h);

  memset(h, 0, s->head_size);
  add_head(&s->partial_heads, &h->n);
  goto okay;
@@ -307,7 +334,16 @@ sl_allocz(slab *s)
void
sl_free(slab *s, void *oo)
{
  struct sl_head *h = SL_GET_HEAD(oo);
  void *head = SL_GET_HEAD(oo);

  struct sl_phead *ph = head;
  struct sl_head *h = head;

  if ((!s) || (s == ph->slab))
  {
    s = ph->slab;
    h = &ph->head;
  }

#ifdef POISON
  memset(oo, 0xdb, s->data_size);
@@ -331,7 +367,7 @@ sl_free(slab *s, void *oo)
      if (s->num_empty_heads >= MAX_EMPTY_HEADS)
      {
#ifdef POISON
	memset(h, 0xde, page_size);
	memset(h, 0xde, page_size - s->init_phead);
#endif
	free_page(h);
      }
@@ -409,10 +445,10 @@ slab_lookup(resource *r, unsigned long a)
  struct sl_head *h;

  WALK_LIST(h, s->partial_heads)
    if ((unsigned long) h < a && (unsigned long) h + page_size < a)
    if ((unsigned long) h < a && (unsigned long) h + page_size - s->init_phead < a)
      return r;
  WALK_LIST(h, s->full_heads)
    if ((unsigned long) h < a && (unsigned long) h + page_size < a)
    if ((unsigned long) h < a && (unsigned long) h + page_size - s->init_phead < a)
      return r;
  return NULL;
}
+82 −83
Original line number Diff line number Diff line
@@ -17,6 +17,26 @@ static const int sizes[] = {
#define TEST_SIZE	1024 * 128
#define ITEMS(sz)	TEST_SIZE / ( (sz) >> u32_log2((sz))/2 )

/* Parameters of one slab test case: object size, deallocation order,
 * and whether to exercise the SLF_NULLFREE (prefixed-head) variant. */
struct test_request {
  int size;			/* Object size handed to sl_new() */
  enum strategy {
    TEST_NONE,			/* Unused sentinel; iteration starts at TEST_FORWARDS */
    TEST_FORWARDS,		/* Free blocks in allocation order */
    TEST_BACKWARDS,		/* Free blocks in reverse allocation order */
    TEST_RANDOM,		/* Free blocks in random order */
    TEST_MIXED,			/* Interleave allocations and frees randomly */
    TEST__MAX,
  } strategy;
  int phead;			/* Nonzero: create slab with SLF_NULLFREE and free some blocks via sl_free(NULL, ...) */
};

/* Human-readable strategy names for test-suite labels, indexed by enum
 * strategy; TEST_NONE stays NULL as the test loop never names it. */
const char * const strategy_name[TEST__MAX] = {
  [TEST_FORWARDS] = "forwards",
  [TEST_BACKWARDS] = "backwards",
  [TEST_RANDOM] = "random",
  [TEST_MIXED] = "mixed",
};

static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
{
  byte *out = sl_alloc(s);
@@ -34,7 +54,7 @@ static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
  return out;
}

static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz)
static inline void test_free(slab *s, byte *block, int nullslab, int sz, struct resmem *sliz)
{
  for (int p=0; p < sz; p++)
  {
@@ -42,7 +62,7 @@ static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz)
    block[p]++;
  }

  sl_free(s, block);
  sl_free(nullslab ? NULL : s, block);

  struct resmem ns = rmemsize((resource *) s);

@@ -60,87 +80,53 @@ static inline struct resmem get_memsize(slab *s)
}

static int
t_slab_forwards(const void *data)
t_slab(const void *data)
{
  int sz = (intptr_t) data;
  slab *s = sl_new(&root_pool, sz);
  const struct test_request *tr = data;
  uint sz = tr->size;

  slab *s = sl_new(&root_pool, sz | (tr->phead ? SLF_NULLFREE : 0));

  struct resmem sliz = get_memsize(s);

  int n = ITEMS(sz);
  byte **block = mb_alloc(&root_pool, n * sizeof(*block));

  switch (tr->strategy) {
    case TEST_FORWARDS:
      for (int i = 0; i < n; i++)
	block[i] = test_alloc(s, sz, &sliz);

      for (int i = 0; i < n; i++)
    test_free(s, block[i], sz, &sliz);

  mb_free(block);

  return 1;
}

static int
t_slab_backwards(const void *data)
{
  int sz = (intptr_t) data;
  slab *s = sl_new(&root_pool, sz);

  struct resmem sliz = get_memsize(s);
	test_free(s, block[i], (tr->phead && (i & 1)), sz, &sliz);

  int n = ITEMS(sz);
  byte **block = mb_alloc(&root_pool, n * sizeof(*block));
      break;

    case TEST_BACKWARDS:
      for (int i = 0; i < n; i++)
	block[i] = test_alloc(s, sz, &sliz);

      for (int i = n - 1; i >= 0; i--)
    test_free(s, block[i], sz, &sliz);

  mb_free(block);

  return 1;
}

static int
t_slab_random(const void *data)
{
  int sz = (intptr_t) data;
  slab *s = sl_new(&root_pool, sz);

  struct resmem sliz = get_memsize(s);
	test_free(s, block[i], (tr->phead && (i & 1)), sz, &sliz);

  int n = ITEMS(sz);
  byte **block = mb_alloc(&root_pool, n * sizeof(*block));
      break;

    case TEST_RANDOM:
      for (int i = 0; i < n; i++)
	block[i] = test_alloc(s, sz, &sliz);

      for (int i = 0; i < n; i++)
      {
	int pos = bt_random() % (n - i);
    test_free(s, block[pos], sz, &sliz);
	test_free(s, block[pos], (tr->phead && (i & 1)), sz, &sliz);
	if (pos != n - i - 1)
	  block[pos] = block[n - i - 1];
      }

  mb_free(block);

  return 1;
}
      break;

static int
t_slab_mixed(const void *data)
    case TEST_MIXED:
      {
  int sz = (intptr_t) data;
  slab *s = sl_new(&root_pool, sz);

  struct resmem sliz = get_memsize(s);

  int n = ITEMS(sz);
  byte **block = mb_alloc(&root_pool, n * sizeof(*block));

	int cur = 0;
	int pending = n;

@@ -148,7 +134,7 @@ t_slab_mixed(const void *data)
	  int action = bt_random() % (cur + pending);

	  if (action < cur) {
      test_free(s, block[action], sz, &sliz);
	    test_free(s, block[action], (tr->phead && (cur & 1)), sz, &sliz);
	    if (action != --cur)
	      block[action] = block[cur];
	  } else {
@@ -157,20 +143,33 @@ t_slab_mixed(const void *data)
	  }
	}

  mb_free(block);
	break;
      }

    default: bug("This shouldn't happen");
  }

  mb_free(block);
  return 1;
}

int main(int argc, char *argv[])
{
  bt_init(argc, argv);

  struct test_request tr;

  for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++)
    for (uint phead = 0; phead < 2; phead++)
      for (uint strategy = TEST_FORWARDS; strategy < TEST__MAX; strategy++)
      {
    bt_test_suite_arg(t_slab_forwards, (void *) (intptr_t) sizes[i], "Slab deallocation from beginning to end, size=%d", sizes[i]);
    bt_test_suite_arg(t_slab_backwards, (void *) (intptr_t) sizes[i], "Slab deallocation from end to beginning, size=%d", sizes[i]);
    bt_test_suite_arg(t_slab_random, (void *) (intptr_t) sizes[i], "Slab deallocation in random order, size=%d", sizes[i]);
    bt_test_suite_arg(t_slab_mixed, (void *) (intptr_t) sizes[i], "Slab deallocation in mixed order, size=%d", sizes[i]);
	tr = (struct test_request) {
	  .size = sizes[i],
	  .phead = phead,
	  .strategy = strategy,
	};
	bt_test_suite_arg(t_slab, &tr, "Slab allocator test, size=%d, phead=%d, strategy=%s",
	    tr.size, phead, strategy_name[strategy]);
      }

  return bt_exit_value();