Commit 13d859ec authored by miaecle's avatar miaecle
Browse files

Recover breaking changes

parent 83ddf864
Loading
Loading
Loading
Loading
+6 −8
Original line number Diff line number Diff line
@@ -30,23 +30,23 @@ def get_loss_fn(final_loss):
  if final_loss == 'L2':

    def loss_fn(x, t):
      diff = tf.substract(x, t)
      diff = tf.subtract(x, t)
      return tf.reduce_sum(tf.square(diff), 0)
  elif final_loss == 'weighted_L2':

    def loss_fn(x, t, w):
      diff = tf.substract(x, t)
      diff = tf.subtract(x, t)
      weighted_diff = tf.multiply(diff, w)
      return tf.reduce_sum(tf.square(weighted_diff), 0)
  elif final_loss == 'L1':

    def loss_fn(x, t):
      diff = tf.substract(x, t)
      diff = tf.subtract(x, t)
      return tf.reduce_sum(tf.abs(diff), 0)
  elif final_loss == 'huber':

    def loss_fn(x, t):
      diff = tf.substract(x, t)
      diff = tf.subtract(x, t)
      return tf.reduce_sum(
          tf.minimum(0.5 * tf.square(diff),
                     huber_d * (tf.abs(diff) - 0.5 * huber_d)), 0)
@@ -188,10 +188,8 @@ class MultitaskGraphClassifier(Model):
    task_losses = []
    # label_placeholder of shape (batch_size, n_tasks). Split into n_tasks
    # tensors of shape (batch_size,)
    task_labels = tf.split(
        axis=1, num_or_size_splits=self.n_tasks, value=self.label_placeholder)
    task_weights = tf.split(
        axis=1, num_or_size_splits=self.n_tasks, value=self.weight_placeholder)
    task_labels = tf.split(1, self.n_tasks, self.label_placeholder)
    task_weights = tf.split(1, self.n_tasks, self.weight_placeholder)
    for task in range(self.n_tasks):
      task_label_vector = task_labels[task]
      task_weight_vector = task_weights[task]
+2 −4
Original line number Diff line number Diff line
@@ -137,10 +137,8 @@ class MultitaskGraphRegressor(Model):
    task_losses = []
    # label_placeholder of shape (batch_size, n_tasks). Split into n_tasks
    # tensors of shape (batch_size,)
    task_labels = tf.split(
        axis=1, num_or_size_splits=self.n_tasks, value=self.label_placeholder)
    task_weights = tf.split(
        axis=1, num_or_size_splits=self.n_tasks, value=self.weight_placeholder)
    task_labels = tf.split(1, self.n_tasks, self.label_placeholder)
    task_weights = tf.split(1, self.n_tasks, self.weight_placeholder)
    for task in range(self.n_tasks):
      task_label_vector = task_labels[task]
      task_weight_vector = task_weights[task]
+11 −11
Original line number Diff line number Diff line
@@ -85,8 +85,8 @@ def graph_conv(atoms, deg_adj_lists, deg_slice, max_deg, min_deg, W_list,
    rel_atoms = deg_summed[deg - 1]

    # Get self atoms
    begin = tf.stack([deg_slice[deg - min_deg, 0], 0])
    size = tf.stack([deg_slice[deg - min_deg, 1], -1])
    begin = tf.pack([deg_slice[deg - min_deg, 0], 0])
    size = tf.pack([deg_slice[deg - min_deg, 1], -1])
    self_atoms = tf.slice(atoms, begin, size)

    # Apply hidden affine to relevant atoms and append
@@ -110,7 +110,7 @@ def graph_conv(atoms, deg_adj_lists, deg_slice, max_deg, min_deg, W_list,
    new_rel_atoms_collection[deg - min_deg] = out

  # Combine all atoms back into the list
  activated_atoms = tf.concat(axis=0, values=new_rel_atoms_collection)
  activated_atoms = tf.concat(0, new_rel_atoms_collection)

  return activated_atoms

@@ -145,7 +145,7 @@ def graph_gather(atoms, membership_placeholder, batch_size):
  ]

  # Get the final sparse representations
  sparse_reps = tf.concat(axis=0, values=sparse_reps)
  sparse_reps = tf.concat(0, sparse_reps)

  return sparse_reps

@@ -178,8 +178,8 @@ def graph_pool(atoms, deg_adj_lists, deg_slice, max_deg, min_deg):

  for deg in range(1, max_deg + 1):
    # Get self atoms
    begin = tf.stack([deg_slice[deg - min_deg, 0], 0])
    size = tf.stack([deg_slice[deg - min_deg, 1], -1])
    begin = tf.pack([deg_slice[deg - min_deg, 0], 0])
    size = tf.pack([deg_slice[deg - min_deg, 1], -1])
    self_atoms = tf.slice(atoms, begin, size)

    # Expand dims
@@ -187,18 +187,18 @@ def graph_pool(atoms, deg_adj_lists, deg_slice, max_deg, min_deg):

    # always deg-1 for deg_adj_lists
    gathered_atoms = tf.gather(atoms, deg_adj_lists[deg - 1])
    gathered_atoms = tf.concat(axis=1, values=[self_atoms, gathered_atoms])
    gathered_atoms = tf.concat(1, [self_atoms, gathered_atoms])

    maxed_atoms = tf.reduce_max(gathered_atoms, 1)
    deg_maxed[deg - min_deg] = maxed_atoms

  if min_deg == 0:
    begin = tf.stack([deg_slice[0, 0], 0])
    size = tf.stack([deg_slice[0, 1], -1])
    begin = tf.pack([deg_slice[0, 0], 0])
    size = tf.pack([deg_slice[0, 1], -1])
    self_atoms = tf.slice(atoms, begin, size)
    deg_maxed[0] = self_atoms

  return tf.concat(axis=0, values=deg_maxed)
  return tf.concat(0, deg_maxed)


class GraphConv(Layer):
+10 −10
Original line number Diff line number Diff line
@@ -218,7 +218,7 @@ def concatenate(tensors, axis=-1):
  try:
    return tf.concat_v2([x for x in tensors], axis)
  except AttributeError:
    return tf.concat(axis=axis, values=[x for x in tensors])
    return tf.concat(axis, [x for x in tensors])


def _normalize_axis(axis, ndim):
@@ -253,7 +253,7 @@ def mean(x, axis=None, keepdims=False):
  axis = _normalize_axis(axis, get_ndim(x))
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, tf.float32)
  return tf.reduce_mean(x, axis=axis, keep_dims=keepdims)
  return tf.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)


def dot(x, y):
@@ -273,14 +273,14 @@ def dot(x, y):
  """
  if get_ndim(x) is not None and (get_ndim(x) > 2 or get_ndim(y) > 2):
    x_shape = []
    for i, s in zip(int_shape(x), tf.unstack(tf.shape(x))):
    for i, s in zip(int_shape(x), tf.unpack(tf.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), tf.unstack(tf.shape(y))):
    for i, s in zip(int_shape(y), tf.unpack(tf.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
@@ -431,7 +431,7 @@ def max(x, axis=None, keepdims=False):
  A tensor with maximum values of `x`.
  """
  axis = _normalize_axis(axis, get_ndim(x))
  return tf.reduce_max(x, axis=axis, keep_dims=keepdims)
  return tf.reduce_max(x, reduction_indices=axis, keep_dims=keepdims)


def l2_normalize(x, axis):
@@ -463,12 +463,12 @@ def categorical_crossentropy(output, target, from_logits=False):
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    output /= tf.reduce_sum(
        output, axis=len(output.get_shape()) - 1, keep_dims=True)
        output, reduction_indices=len(output.get_shape()) - 1, keep_dims=True)
    # manual computation of crossentropy
    epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
    output = tf.clip_by_value(output, epsilon, 1. - epsilon)
    return -tf.reduce_sum(
        target * tf.log(output), axis=len(output.get_shape()) - 1)
        target * tf.log(output), reduction_indices=len(output.get_shape()) - 1)
  else:
    try:
      return tf.nn.softmax_cross_entropy_with_logits(
@@ -700,10 +700,10 @@ def var(x, axis=None, keepdims=False):
  axis = _normalize_axis(axis, get_ndim(x))
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, tf.float32)
  m = tf.reduce_mean(x, axis=axis, keep_dims=True)
  m = tf.reduce_mean(x, reduction_indices=axis, keep_dims=True)
  devs_squared = tf.square(x - m)
  return tf.reduce_mean(
      devs_squared, axis=axis, keep_dims=keepdims)
      devs_squared, reduction_indices=axis, keep_dims=keepdims)


def euclidean_distance(test, support, max_dist_sq=20):
@@ -959,7 +959,7 @@ def softmax_N(tensor, name=None):
    return tf.div(exp_tensor,
                  tf.reduce_sum(
                      exp_tensor,
                      axis=reduction_indices,
                      reduction_indices=reduction_indices,
                      keep_dims=True))