Commit 4a52e828 authored by Maria Matejka
Browse files

Nest: Changed pending_imports list to new spin-locked typed list

parent 444e0b6c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#define _BIRD_BIRDLIB_H_

#include "sysdep/config.h"
#include "sysdep/arch/asm.h"
#include "lib/alloca.h"

/* Ugly structure offset handling macros */
+4 −0
Original line number Diff line number Diff line
@@ -113,6 +113,10 @@ uint list_length(list *);
#define TTAIL(list) list.tail
#define TNODE_IN_LIST(n) (((n)->_tln.next) && ((n)->_tln.prev))

/* Type of the nodes stored in typed list l, deduced from its head pointer. */
#define TLIST_NODE_TYPE(l) typeof(*(l.head))

/* A typed list is empty when its head points directly at the tail sentinel. */
#define TLIST_EMPTY(list) ((list)->head == ((list)->tail_node.node))

/* Walk all real nodes; the tail sentinel's NULL next terminates the loop. */
#define WALK_TLIST(n_, list) for (n_ = (list).head; n_->_tln.next; n_ = n_->_tln.next)
/* Deletion-safe walk: next_ is fetched in the condition, before the body
 * may remove n_ from the list. */
#define WALK_TLIST_DELSAFE(n_, list) for (typeof(n_) next_ = n_ = THEAD(list); next_ = n_->_tln.next; n_ = next_)

lib/locked.h

0 → 100644
+84 −0
Original line number Diff line number Diff line
/*
 *	BIRD Library -- Locked data structures
 *
 *	(c) 2019 Maria Matejka <mq@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

#ifndef _BIRD_LOCKED_H_
#define _BIRD_LOCKED_H_

#include "lib/atomic.h"
#include "lib/worker.h"
#include "lib/lists.h"

/* A simple spinlock: holds the owning worker's ID, or NOWORKER when free. */
typedef _Atomic u64 spinlock;

/* Busy-wait until the lock is atomically swung from NOWORKER to our
 * worker_id. A weak CAS suffices: a spurious failure just costs one more
 * spin. Acquire ordering pairs with the release in SPIN_UNLOCK.
 * Fix: the macro argument is now parenthesized ((_sp)); the original
 * expanded bare `&_sp', which miscompiles for non-trivial lvalue
 * expressions such as *p. */
#define SPIN_LOCK(_sp) do { \
  while (1) { \
    u64 noworker = NOWORKER; \
    if (atomic_compare_exchange_weak_explicit(&(_sp), &noworker, worker_id, memory_order_acquire, memory_order_relaxed)) \
      break; \
    CPU_RELAX(); \
  } \
} while (0)

/* Release the lock by swinging it from our worker_id back to NOWORKER.
 * A failed CAS means this worker does not actually hold the lock --
 * a hard programming error, hence bug(). `expected' then holds the
 * observed owner. Fix: argument parenthesized ((_sp)) for macro hygiene,
 * matching SPIN_LOCK. */
#define SPIN_UNLOCK(_sp) do { \
  u64 expected = worker_id; \
  if (!atomic_compare_exchange_strong_explicit(&(_sp), &expected, NOWORKER, memory_order_release, memory_order_relaxed)) \
    bug("The spinlock is locked by worker %lu but shall be locked by %lu!", expected, worker_id); \
} while (0)


/* A typed list protected by a spinlock. */
#define LOCKED_LIST(_type) struct { \
  TLIST(_type) _llist; \
  spinlock _lsp; \
}

/* A node of a locked list. _lsp points back at the owning list's spinlock
 * (set on insertion, cleared on removal) so REM_NODE_LOCKED can remove the
 * node without a reference to the list itself. */
#define LOCKED_LIST_NODE(_type) struct { \
  TLIST_NODE(_type); \
  spinlock *_lsp; \
}

/* Unlock the list and return the optional value `what' in one statement.
 * NOTE(review): the original expansion referenced `what' (not a macro
 * parameter -- `, ##what' fails token pasting on expansion), a `_lld'
 * member that no structure in this header has, and domain_read_unlock(),
 * which is unrelated to this spinlock scheme; any use of it failed to
 * compile. Rewritten in terms of SPIN_UNLOCK on the list's own lock.
 * With `what' empty this expands to a bare `return;' (void functions). */
#define WLL_RETURN(list_, what...) do { SPIN_UNLOCK((list_)->_lsp); return what; } while (0)

/* Prepare an empty locked list: the spinlock starts released (NOWORKER)
 * and the embedded typed list starts empty. The two stores are
 * independent, so their order does not matter before first use. */
#define INIT_LOCKED_LIST(list_) do { \
  atomic_store_explicit(&((list_)->_lsp), NOWORKER, memory_order_relaxed); \
  INIT_TLIST(&((list_)->_llist)); \
} while (0)

/* Insert node_ at the head of list_ under the list's spinlock. The node's
 * back-pointer to the lock is set up front so REM_NODE_LOCKED can find it.
 * Fix: both arguments are now parenthesized; the original expanded bare
 * `list_->_lsp' / `node_->_lsp', which breaks for non-trivial argument
 * expressions and was inconsistent with ADD_TAIL_LOCKED. */
#define ADD_HEAD_LOCKED(list_, node_) do { \
  (node_)->_lsp = &((list_)->_lsp); \
  SPIN_LOCK((list_)->_lsp); \
  TADD_HEAD(&((list_)->_llist), node_); \
  SPIN_UNLOCK((list_)->_lsp); \
} while (0)

/* Insert node_ at the tail of list_ under the list's spinlock. The lock
 * back-pointer is set before locking; the node is not yet reachable from
 * the list at that point. Fix: `node_' is now parenthesized for macro
 * hygiene (the `list_' argument already was). */
#define ADD_TAIL_LOCKED(list_, node_) do { \
  (node_)->_lsp = &((list_)->_lsp); \
  SPIN_LOCK((list_)->_lsp); \
  TADD_TAIL(&((list_)->_llist), node_); \
  SPIN_UNLOCK((list_)->_lsp); \
} while (0)

/* Pop the head node of list_, or yield NULL when the list is empty.
 * GNU statement expression: evaluates to the removed node. The node's
 * lock back-pointer is cleared only after the lock is dropped -- the node
 * is already unlinked by then, so no other thread can reach it through
 * the list. */
#define REM_HEAD_LOCKED(list_) ({ \
    TLIST_NODE_TYPE((list_)->_llist) *node_ = NULL; \
    SPIN_LOCK((list_)->_lsp); \
    if (!TLIST_EMPTY(&((list_)->_llist))) { \
      node_ = THEAD((list_)->_llist); \
      TREM_NODE(node_); \
    } \
    SPIN_UNLOCK((list_)->_lsp); \
    if (node_) node_->_lsp = NULL; \
    node_; \
    })

/* Remove node_ from whatever locked list it sits in, via the lock
 * back-pointer stored on insertion; the pointer is cleared afterwards to
 * mark the node detached. node_ is evaluated several times -- pass a
 * side-effect-free expression. Fix: `node_' is now parenthesized for
 * macro hygiene, consistent with the other *_LOCKED macros. */
#define REM_NODE_LOCKED(node_) do { \
  SPIN_LOCK(*((node_)->_lsp)); \
  TREM_NODE(node_); \
  SPIN_UNLOCK(*((node_)->_lsp)); \
  (node_)->_lsp = NULL; \
} while (0)

#endif
+4 −0
Original line number Diff line number Diff line
@@ -56,6 +56,10 @@ static inline void task_init(struct task *t, enum task_flags tf, struct domain *
  };
}

/* Sometimes it is needed to know the worker ID.
 * Don't rely on it unless you know what you are doing. */
#define NOWORKER (~((u64) 0))
extern _Thread_local u64 worker_id;

/* Initialize the worker queue. Run once and never more. */
void worker_queue_init(void);
+28 −1
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@
#include "lib/resource.h"
#include "lib/net.h"
#include "lib/worker.h"
#include "lib/locked.h"

struct ea_list;
struct protocol;
@@ -144,6 +145,32 @@ struct rtable_config {
  byte sorted;				/* Routes of network are sorted according to rte_better() */
};

/* Route update data that is passed through the filters */

struct rte_update_data {
  LOCKED_LIST_NODE(struct rte_update_data);	/* Node in the table's spin-locked pending_imports list */
  struct task task;			/* Task that processes this update */
  struct channel *channel;		/* Channel the update belongs to */
  const net_addr *net;			/* Network the update concerns */
  struct rte *rte;			/* The route being imported */
  struct rta *old_rta;			/* Previous attributes -- presumably kept for later release; verify against rte_update2() */
  struct rte_src *src;			/* Route source of the update */
  struct linpool *pool;			/* Linpool tied to this update's temporary data */
  /* Processing state; atomic, so workers can move the update between
   * states concurrently. PACKED keeps the enum byte-sized. */
  _Atomic PACKED enum rte_update_state {
    RUS_PENDING_UPDATE = 0,
    RUS_UPDATING,
    RUS_PENDING_RECALCULATE,
    RUS_RECALCULATING,
  } state;
  /* Outcome of running the update through the filters. */
  PACKED enum rte_update_result {
    RUR_UNKNOWN = 0,
    RUR_WITHDRAW = 1,
    RUR_INVALID = 2,
    RUR_FILTERED = 3,
    RUR_ACCEPTED = 4,
  } result;
};

typedef struct rtable {
  node n;				/* Node in list of all tables */
  struct fib fib;
@@ -168,7 +195,7 @@ typedef struct rtable {
  byte nhu_state;			/* Next Hop Update state */
  struct fib_iterator prune_fit;	/* Rtable prune FIB iterator */
  struct fib_iterator nhu_fit;		/* Next Hop Update FIB iterator */
  list pending_imports;			/* Imports shall be sequenced */
  LOCKED_LIST(struct rte_update_data) pending_imports;	/* Imports shall be sequenced */
  struct task import_task;		/* Route update task */
} rtable;

Loading