Commit 377eaaf8 authored by Maria Matejka
Browse files

Worker: debug code fixes and more debug code

parent ccdea82d
Loading
Loading
Loading
Loading
+22 −2
Original line number Diff line number Diff line
/*
 *	BIRD Library -- Atomic calls
 *
 *	(c) 2019 Maria Matejka <mq@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

#ifndef _BIRD_ATOMIC_H_
#define _BIRD_ATOMIC_H_

//#if HAVE_ATOMIC
#if 0
/* If we have stdatomic.h, we simply use C11 atomic calls */

#if HAVE_ATOMIC
#include <stdatomic.h>
#else

/* Otherwise, we try to approximate the atomic calls by GCC __sync calls */

#define _Atomic

#define atomic_load(ptr) __sync_val_compare_and_swap(ptr, 0, 0)
@@ -36,5 +47,14 @@
#define atomic_compare_exchange_strong_explicit(ptr, desired, wanted, success, failure) \
  atomic_compare_exchange_weak(ptr, desired, wanted)

/* Fallback atomic_flag implementation built on GCC __sync builtins.
 * The memory-order argument of the *_explicit macros is ignored; the
 * builtins always provide their fixed barrier semantics (see below). */
#define ATOMIC_FLAG_INIT  0
typedef u8 atomic_flag;		/* u8 is a project typedef; presumably one byte — TODO confirm */

/* Atomically stores 1 into the flag and returns its previous value.
 * __sync_lock_test_and_set is an acquire barrier per the GCC docs. */
#define atomic_flag_test_and_set_explicit(ptr, memory) \
  __sync_lock_test_and_set(ptr, 1)

/* Clears the flag (stores 0); __sync_lock_release is a release barrier. */
#define atomic_flag_clear_explicit(ptr, memory) \
  __sync_lock_release(ptr)

#endif
#endif
+14 −0
Original line number Diff line number Diff line
@@ -68,3 +68,17 @@ u32_log2(u32 v)
  return r;
}

u64
u64_log2(u64 v)
{
  /* Floor of the base-2 logarithm of v, found by binary search over the
   * bit width: test the upper half of the remaining range, and if it is
   * occupied, shift it away and record the shift in the result.
   * Equivalent to the unrolled bit-hack from the Stanford bithacks page;
   * returns 0 for v == 0 (log2 is undefined there). */
  u64 result = 0;

  for (u64 half = 32; half >= 2; half /= 2)
    if (v >> half)
    {
      v >>= half;
      result |= half;
    }

  /* After the loop v < 4; its remaining high bit (0 or 1) finishes the result. */
  return result | (v >> 1);
}
+1 −0
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@ u32 u32_mkmask(uint n);
uint u32_masklen(u32 x);

u32 u32_log2(u32 v);
u64 u64_log2(u64 v);

static inline u32 u32_hash(u32 v) { return v * 2902958171u; }

+17 −1
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#define _BIRD_WORKER_H_

#include "lib/birdlib.h"
#include "lib/atomic.h"

struct config;

@@ -31,16 +32,31 @@ enum task_flags {
  TF_EXCLUSIVE = 0x1,		/* Lock the domain exclusively */
  TF_PUBLIC_MASK = 0xff,	/* Flags are masked by this value on task push */
  /* These flags are private for worker queue */
  TF_PREPENDED = 0x100,		/* Task is the first in domain blocked-queue */
  TF_PREPENDED = 0x100,		/* Task is waiting for the first free worker */
} PACKED;

struct task {
  node n;				/* Init this to zero. */
  enum task_flags flags;		/* Task flags */
  atomic_flag enqueued;			/* Is in queue */
  struct domain *domain;		/* Task's primary domain */
  void (*execute)(struct task *);	/* This will be called to execute the task */
};

/* Always initialize the task by task_init() */
static inline void task_init(struct task *t, enum task_flags tf, struct domain *domain, void (*execute)(struct task *))
{
  ASSERT(execute);
  *t = (struct task) {
    .n = { },
    .flags = tf & TF_PUBLIC_MASK,
    .enqueued =	ATOMIC_FLAG_INIT,
    .domain = domain,
    .execute = execute,
  };
}


/* Initialize the worker queue. Run once and never more. */
void worker_queue_init(void);

+15 −27
Original line number Diff line number Diff line
@@ -50,21 +50,16 @@ pool *rt_table_pool;
static pool *rup_pool;
static struct domain *rup_domain;

#define RUPS_MAX  8
#define RUPS_MAX  128

static slab *rte_slab;
static _Thread_local linpool *rte_update_pool[RUPS_MAX] = {};
static _Thread_local uint rups = 0;
static linpool *rte_update_pool[RUPS_MAX] = {};
static uint rups = 0;

static inline linpool *rup_get(void) {
  /* If we have a local spare linpool,
   * just use it. */
  if (rups > 0)
    return rte_update_pool[--rups];

  /* Allocate a new linpool */
  domain_write_lock(rup_domain);
  struct linpool *pool = lp_new_default(rup_pool);
  struct linpool *pool = (rups > 0) ? rte_update_pool[--rups] : lp_new_default(rup_pool);
  domain_write_unlock(rup_domain);

  /* Return the pool */
@@ -72,15 +67,16 @@ static inline linpool *rup_get(void) {
}

static inline void rup_free(linpool *pool) {
  if (rups == RUPS_MAX) {
  lp_flush(pool);

  domain_write_lock(rup_domain);
  if (rups == RUPS_MAX) {
    rfree(pool);
    domain_write_unlock(rup_domain);
  } else {
    /* Keep the linpool for future use */
    lp_flush(pool);
    rte_update_pool[rups++] = pool;
  }
  domain_write_unlock(rup_domain);
}

struct rte_update_data {
@@ -1746,16 +1742,13 @@ rte_finish_update_hook(struct task *import_task)
  struct rtable *rt = SKIP_BACK(struct rtable, import_task, import_task);
  domain_assert_write_locked(rt->domain);

  struct rte_update_data *rud;
  node *nn;
  WALK_LIST_DELSAFE(rud, nn, rt->pending_imports)
  {
  struct rte_update_data *rud = HEAD(rt->pending_imports);
  if (atomic_load(&(rud->state)) != RUS_PENDING_RECALCULATE)
    return;

  rem_node(&(rud->n));
  rte_finish_update(rud);
  }
  task_push(import_task);
}

/* Independent call to rte_announce(), used from next hop
@@ -2021,12 +2014,7 @@ rt_setup(pool *p, rtable *t, struct rtable_config *cf)
  t->domain = domain_new(p);

  init_list(&t->pending_imports);
  t->import_task = (struct task) {
    .n = {},				/* Not in any list */
    .flags = TF_EXCLUSIVE,		/* Import modifies the table */
    .domain = t->domain,
    .execute = rte_finish_update_hook,
  };
  task_init(&t->import_task, TF_EXCLUSIVE, t->domain, rte_finish_update_hook);

  t->rt_event = ev_new_init(p, rt_event, t);
  t->gc_time = current_time();
Loading