Commit b2bac7ae authored by Maria Matejka

Faster shutdown and cleanups by freeing route attributes strictly from main loop

parent 387b279f
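In outline: dropping the last reference to a cached rta no longer frees it synchronously on whatever thread happened to drop it. The dropper only counts the rta as obsolete; once enough obsolete entries pile up, a cleanup event is sent to the main loop, which reclaims them in one batch. A minimal sketch of that pattern, assuming C11 atomics; obj, obj_unref and cleanup_schedule are illustrative stand-ins, not BIRD API:

#include <stdatomic.h>
#include <stdio.h>

#define CLEANUP_LIMIT 512

struct obj { _Atomic unsigned uc; /* use count */ };

static _Atomic unsigned obsolete_count;

/* Illustrative stand-in for ev_send(&global_work_list, &rta_cleanup_event) */
static void cleanup_schedule(void) { puts("cleanup scheduled"); }

static void obj_unref(struct obj *o)
{
  /* Drop our reference; acq_rel publishes our writes to the eventual freer */
  if (atomic_fetch_sub_explicit(&o->uc, 1, memory_order_acq_rel) > 1)
    return; /* somebody else still holds the object */

  /* The object is now garbage, but it is not freed here; just account
     for it and wake the main-loop cleaner once enough has piled up */
  if (atomic_fetch_add_explicit(&obsolete_count, 1, memory_order_acq_rel) == CLEANUP_LIMIT)
    cleanup_schedule();
}

Batching the frees this way keeps the reference-drop path free of locks and moves all cache and slab manipulation into a single thread, which is what the commit title means by freeing strictly from the main loop.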
+11 −3
@@ -883,14 +883,22 @@ static inline rta *rta_clone(rta *r) {
   return r;
 }
 
-void rta__free(rta *r);
+#define RTA_OBSOLETE_LIMIT 512
+
+extern _Atomic u32 rta_obsolete_count;
+extern event rta_cleanup_event;
 
 static inline void rta_free(rta *r) {
   if (!r)
     return;
 
   u32 uc = atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel);
-  if (uc == 1)
-    rta__free(r);
+  if (uc > 1)
+    return;
+
+  u32 obs = atomic_fetch_add_explicit(&rta_obsolete_count, 1, memory_order_acq_rel);
+  if (obs == RTA_OBSOLETE_LIMIT)
+    ev_send(&global_work_list, &rta_cleanup_event);
 }
 
 rta *rta_do_cow(rta *o, linpool *lp);
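Callers keep the same refcounting API; only what happens on the last drop changes. A hypothetical usage sketch (struct keeper and its functions are made up for illustration; rta, rta_clone and rta_free come from the header above):

/* Each holder takes its own reference and drops it when done */
struct keeper { rta *attrs; };

static void keeper_set(struct keeper *k, rta *a)
{
  k->attrs = rta_clone(a);  /* bump the use count */
}

static void keeper_drop(struct keeper *k)
{
  rta_free(k->attrs);       /* at most marks the rta obsolete; never frees or locks here */
  k->attrs = NULL;
}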
+53 −27
@@ -1384,38 +1384,57 @@ rta_lookup(rta *o)
   return r;
 }
 
-void
-rta__free(rta *a)
+static void
+rta_cleanup(void *data UNUSED)
 {
-  ASSERT(a->cached);
+  u32 count = 0;
+  rta *ax[RTA_OBSOLETE_LIMIT];
+
   RTA_LOCK;
   struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire);
 
-  if (atomic_load_explicit(&a->uc, memory_order_acquire))
-  {
-    /* Acquired inbetween */
-    RTA_UNLOCK;
-    return;
-  }
-
-  /* Relink the forward pointer */
-  rta *next = atomic_load_explicit(&a->next, memory_order_acquire);
-  atomic_store_explicit(a->pprev, next, memory_order_release);
-
-  /* Relink the backwards pointer */
-  if (next)
-    next->pprev = a->pprev;
+  for (u32 h = 0; h < c->size; h++)
+    for (rta *a = atomic_load_explicit(&c->table[h], memory_order_acquire), *next;
+         a;
+         a = next)
+    {
+      next = atomic_load_explicit(&a->next, memory_order_acquire);
+      if (atomic_load_explicit(&a->uc, memory_order_acquire) > 0)
+        continue;
+
+      /* Check if the cleanup fits in the buffer */
+      if (count == RTA_OBSOLETE_LIMIT)
+      {
+        ev_send(&global_work_list, &rta_cleanup_event);
+        goto wait;
+      }
+
+      /* Relink the forward pointer */
+      atomic_store_explicit(a->pprev, next, memory_order_release);
+
+      /* Relink the backwards pointer */
+      if (next)
+        next->pprev = a->pprev;
+
+      /* Store for freeing and go to the next */
+      ax[count++] = a;
+      a = next;
+    }
 
+wait:
   /* Wait until nobody knows about us */
   synchronize_rcu();
 
-  if (atomic_load_explicit(&a->uc, memory_order_acquire))
+  u32 freed = 0;
+
+  for (u32 i = 0; i < count; i++)
   {
+    rta *a = ax[i];
     /* Acquired inbetween, relink back */
-    rta_insert(a, c);
-    RTA_UNLOCK;
-    return;
+    if (atomic_load_explicit(&a->uc, memory_order_acquire))
+    {
+      rta_insert(a, c);
+      continue;
+    }
 
     /* Cleared to free the memory */
@@ -1426,10 +1445,17 @@ rta__free(rta *a)
-  a->cached = 0;
-  c->count--;
-  sl_free(rta_slab(a), a);
+    a->cached = 0;
+    c->count--;
+    sl_free(rta_slab(a), a);
+    freed++;
+  }
 
+  atomic_fetch_sub_explicit(&rta_obsolete_count, freed, memory_order_release);
+
   RTA_UNLOCK;
 }
 
+_Atomic u32 rta_obsolete_count;
+event rta_cleanup_event = { .hook = rta_cleanup, .list = &global_work_list };
+
 rta *
 rta_do_cow(rta *o, linpool *lp)
 {
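The cleanup above is a two-phase, RCU-style removal: under RTA_LOCK it unlinks unreferenced entries from the hash table while keeping their memory alive, waits out a grace period with synchronize_rcu() so that no concurrent reader can still hold a pointer into them, and only then frees them, reinserting any entry whose use count was bumped in the meantime. A self-contained sketch of the same pattern on a plain singly-linked list (all names illustrative; the grace period is stubbed out instead of calling a real RCU implementation):

#include <stdatomic.h>
#include <stdlib.h>

#define LIMIT 512

struct node {
  struct node *next;
  _Atomic unsigned uc;      /* use count */
};

static struct node *head;   /* protected by a lock, omitted here */

/* Illustrative stand-in for a real RCU grace period (synchronize_rcu) */
static void wait_grace_period(void) { }

static void cleanup_pass(void)
{
  struct node *dead[LIMIT];
  unsigned count = 0;

  /* Phase 1 (under the lock): unlink unreferenced nodes but keep their
     memory alive; concurrent readers may still be standing on them and
     following their next pointers */
  for (struct node **pp = &head; *pp && (count < LIMIT); )
  {
    struct node *n = *pp;
    if (atomic_load_explicit(&n->uc, memory_order_acquire) == 0)
    {
      *pp = n->next;        /* unlink from the list */
      dead[count++] = n;
    }
    else
      pp = &n->next;
  }

  /* Wait until no reader can still hold a pointer into dead[] */
  wait_grace_period();

  /* Phase 2: a node may have been re-acquired in the meantime through a
     reference its holder already had; relink those, free the rest */
  for (unsigned i = 0; i < count; i++)
    if (atomic_load_explicit(&dead[i]->uc, memory_order_acquire))
    {
      dead[i]->next = head; /* simplified reinsert; the real code rehashes */
      head = dead[i];
    }
    else
      free(dead[i]);
}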
+2 −1
@@ -201,5 +201,6 @@ struct coroutine *coro_run(pool *p, void (*entry)(void *), void *data)
 void
 coro_yield(void)
 {
-  usleep(100);
+  const struct timespec req = { .tv_nsec = 100 };
+  nanosleep(&req, NULL);
 }
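One detail worth noting about the last hunk: usleep() takes microseconds while tv_nsec counts nanoseconds, so the new code requests a 100 ns pause where the old one requested 100 µs. If the old duration were meant to be kept, a usleep-compatible wrapper over nanosleep() would look like this (hypothetical helper, not part of the commit):

#include <time.h>

static void usleep_compat(unsigned long usec)
{
  /* Split microseconds into the seconds/nanoseconds pair nanosleep expects */
  struct timespec req = {
    .tv_sec = usec / 1000000,
    .tv_nsec = (long) (usec % 1000000) * 1000,
  };
  nanosleep(&req, NULL);
}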