Commit 2920bc29 authored by Bharath Ramsundar's avatar Bharath Ramsundar Committed by GitHub
Browse files

Merge pull request #511 from lilleswing/atomic-convs

Small updates for atomic convs
parents 71f4abe2 4712adac
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -80,8 +80,8 @@ def AtomicNNLayer(tensor, size, weights, biases, name=None):
  """

  if len(tensor.get_shape()) != 2:
    raise ValueError(
        'Dense layer input must be 2D, not %dD' % len(tensor.get_shape()))
    raise ValueError('Dense layer input must be 2D, not %dD' %
                     len(tensor.get_shape()))
  with tf.name_scope(name, 'fully_connected', [tensor, weights, biases]):
    return tf.nn.xw_plus_b(tensor, weights, biases)

@@ -282,7 +282,7 @@ def AtomicConvolutionLayer(X, Nbrs, Nbrs_Z, atom_types, radial_params, boxsize,
  """Atomic convolution layer

  N = max_num_atoms, M = max_num_neighbors, B = batch_size, d = num_features
  l = num_radial_filters
  l = num_radial_filters * num_atom_types

  Parameters
  ----------
+12 −11
Original line number Diff line number Diff line
@@ -416,8 +416,8 @@ class TensorflowGraphModel(Model):
    feeding and fetching the same tensor.
    """
    weights = []
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    with placeholder_scope:
      for task in range(self.n_tasks):
        weights.append(
@@ -604,8 +604,9 @@ class TensorflowClassifier(TensorflowGraphModel):
      A tensor with shape batch_size containing the weighted cost for each
      example.
    """
    return tf.mul(
        tf.nn.softmax_cross_entropy_with_logits(logits, labels), weights)
    return tf.multiply(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels),
        weights)

  def add_label_placeholders(self, graph, name_scopes):
    """Add Placeholders for labels for each task.
@@ -616,8 +617,8 @@ class TensorflowClassifier(TensorflowGraphModel):
    Placeholders are wrapped in identity ops to avoid the error caused by
    feeding and fetching the same tensor.
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    with graph.as_default():
      batch_size = self.batch_size
      n_classes = self.n_classes
@@ -758,7 +759,7 @@ class TensorflowRegressor(TensorflowGraphModel):
      A tensor with shape batch_size containing the weighted cost for each
      example.
    """
    return tf.mul(0.5 * tf.square(output - labels), weights)
    return tf.multiply(0.5 * tf.square(output - labels), weights)

  def add_label_placeholders(self, graph, name_scopes):
    """Add Placeholders for labels for each task.
@@ -769,8 +770,8 @@ class TensorflowRegressor(TensorflowGraphModel):
    Placeholders are wrapped in identity ops to avoid the error caused by
    feeding and fetching the same tensor.
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    with graph.as_default():
      batch_size = self.batch_size
      labels = []
@@ -858,8 +859,8 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
        batch_size x n_features.
    """
    n_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(