Commit cda32c84 authored by Bharath Ramsundar's avatar Bharath Ramsundar
Browse files

Changes

parent 7e256667
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
"""Place constraints on models."""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals


from __future__ import absolute_import
from deepchem.nn import model_ops
from deepchem.nn.activations import get_from_module

contrib/nn/copy.py

deleted 100644 → 0
+0 −0

Empty file deleted.

+0 −5
Original line number Diff line number Diff line
@@ -2,11 +2,6 @@

Code borrowed from Keras.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import

import warnings
import tensorflow as tf
from deepchem.nn import model_ops
+170 −170
Original line number Diff line number Diff line
@@ -19,181 +19,181 @@ from deepchem.nn import model_ops
# TODO(rbharath): This class does not yet have a
# TensorGraph equivalent, but one may not be required.
# Commented out for now, remove if OK.
#class AlternateWeaveLayer(WeaveLayer):
#  """ Alternate implementation of weave module
#      same variables, different graph structures
#  """
#
#  def call(self, x, mask=None):
#    """Execute this layer on input tensors.
#
#    x = [atom_features, pair_features, pair_split, atom_split, atom_to_pair]
#
#    Parameters
#    ----------
#    x: list
#      list of Tensors of form described above.
#    mask: bool, optional
#      Ignored. Present only to shadow superclass call() method.
#
#    Returns
#    -------
#    A: Tensor
#      Tensor of atom_features
#    P: Tensor
#      Tensor of pair_features
#    """
#    # Add trainable weights
#    self.build()
#
#    atom_features = x[0]
#    pair_features = x[1]
#
#    pair_split = x[2]
#    atom_to_pair = x[4]
#
#    AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
#    AA = self.activation(AA)
#    PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
#    PA = self.activation(PA)
#    PA = tf.segment_sum(PA, pair_split)
#
#    A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
#    A = self.activation(A)
#
#    if self.update_pair:
#      AP_ij = tf.matmul(
#          tf.reshape(
#              tf.gather(atom_features, atom_to_pair),
#              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
#      AP_ij = self.activation(AP_ij)
#      AP_ji = tf.matmul(
#          tf.reshape(
#              tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
#              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
#      AP_ji = self.activation(AP_ji)
#
#      PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
#      PP = self.activation(PP)
#      P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
#      P = self.activation(P)
#    else:
#      P = pair_features
#
#    return A, P
class AlternateWeaveLayer(WeaveLayer):
  """ Alternate implementation of weave module
      same variables, different graph structures

  Reuses the trainable variables defined by WeaveLayer (W_AA/b_AA,
  W_PA/b_PA, W_A/b_A and, when update_pair is set, W_AP/b_AP, W_PP/b_PP,
  W_P/b_P) but wires them into a different TF graph: pair->atom messages
  are aggregated with tf.segment_sum over pair_split rather than the
  superclass's gather/scatter structure.
  """

  def call(self, x, mask=None):
    """Execute this layer on input tensors.

    x = [atom_features, pair_features, pair_split, atom_split, atom_to_pair]

    Parameters
    ----------
    x: list
      list of Tensors of form described above.
      Note: x[3] (atom_split) is accepted for interface compatibility but
      is not used by this layer.
    mask: bool, optional
      Ignored. Present only to shadow superclass call() method.

    Returns
    -------
    A: Tensor
      Tensor of atom_features
    P: Tensor
      Tensor of pair_features (returned unchanged when update_pair is
      False)
    """
    # Add trainable weights
    # NOTE(review): build() is assumed to come from WeaveLayer and to
    # create the W_*/b_* variables referenced below — confirm superclass.
    self.build()

    atom_features = x[0]
    pair_features = x[1]

    pair_split = x[2]
    atom_to_pair = x[4]

    # Atom -> atom transform.
    AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
    AA = self.activation(AA)
    # Pair -> atom messages, summed per receiving atom via pair_split.
    PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
    PA = self.activation(PA)
    PA = tf.segment_sum(PA, pair_split)

    # Combine atom-derived and pair-derived features into new atom features.
    A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
    A = self.activation(A)

    if self.update_pair:
      # Atom -> pair contributions for both orderings (i,j) and (j,i);
      # each pair's two atom feature vectors are concatenated, hence the
      # [-1, 2 * n_atom_input_feat] reshape.
      AP_ij = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, atom_to_pair),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ij = self.activation(AP_ij)
      # tf.reverse on axis 1 swaps (i,j) -> (j,i) so the same weights are
      # applied symmetrically.
      AP_ji = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      AP_ji = self.activation(AP_ji)

      # Pair -> pair transform.
      PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
      PP = self.activation(PP)
      # Symmetrized atom contribution (AP_ij + AP_ji) keeps P invariant to
      # pair ordering.
      P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
      P = self.activation(P)
    else:
      P = pair_features

    return A, P

# TODO(rbharath): This class does not yet have a
# TensorGraph equivalent, but one may not be required.
# Commented out for now, remove if OK.
#class WeaveConcat(Layer):
#  """" Concat a batch of molecules into a batch of atoms
#  """
#
#  def __init__(self,
#               batch_size,
#               n_atom_input_feat=50,
#               n_output=128,
#               init='glorot_uniform',
#               activation='tanh',
#               **kwargs):
#    """
#    Parameters
#    ----------
#    batch_size: int
#      number of molecules in a batch
#    n_atom_input_feat: int, optional
#      Number of features for each atom in input.
#    n_output: int, optional
#      Number of output features for each atom(concatenated)
#    init: str, optional
#      Weight initialization for filters.
#    activation: str, optional
#      Activation function applied
#
#    """
#    self.batch_size = batch_size
#    self.n_atom_input_feat = n_atom_input_feat
#    self.n_output = n_output
#    self.init = initializations.get(init)  # Set weight initialization
#    self.activation = activations.get(activation)  # Get activations
#    super(WeaveConcat, self).__init__(**kwargs)
#
#  def build(self):
#    """"Construct internal trainable weights.
#    """
#
#    self.W = self.init([self.n_atom_input_feat, self.n_output])
#    self.b = model_ops.zeros(shape=[
#        self.n_output,
#    ])
#
#    self.trainable_weights = self.W + self.b
#
#  def call(self, x, mask=None):
#    """Execute this layer on input tensors.
#
#    x = [atom_features, atom_mask]
#
#    Parameters
#    ----------
#    x: list
#      Tensors as listed above
#    mask: bool, optional
#      Ignored. Present only to shadow superclass call() method.
#
#    Returns
#    -------
#    outputs: Tensor
#      Tensor of concatenated atom features
#    """
#    self.build()
#    atom_features = x[0]
#    atom_masks = x[1]
#    A = tf.split(atom_features, self.batch_size, axis=0)
#    A_mask = tf.split(
#        tf.cast(atom_masks, dtype=tf.bool), self.batch_size, axis=0)
#    outputs = tf.concat(
#        [tf.boolean_mask(A[i], A_mask[i]) for i in range(len(A))], axis=0)
#    outputs = tf.matmul(outputs, self.W) + self.b
#    outputs = self.activation(outputs)
#    return outputs
class WeaveConcat(Layer):
  """Concatenate a batch of molecules into a batch of atoms.

  Splits the padded per-molecule atom-feature tensor back into individual
  molecules, drops the padding atoms using the boolean atom mask,
  concatenates the surviving atoms along axis 0, and applies a single
  dense transform with a nonlinearity.
  """

  def __init__(self,
               batch_size,
               n_atom_input_feat=50,
               n_output=128,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_atom_input_feat: int, optional
      Number of features for each atom in input.
    n_output: int, optional
      Number of output features for each atom (concatenated)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
    self.batch_size = batch_size
    self.n_atom_input_feat = n_atom_input_feat
    self.n_output = n_output
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    super(WeaveConcat, self).__init__(**kwargs)

  def build(self):
    """Construct internal trainable weights."""
    # Dense transform applied after the per-molecule concat in call().
    self.W = self.init([self.n_atom_input_feat, self.n_output])
    self.b = model_ops.zeros(shape=[
        self.n_output,
    ])

    # Collect the variables in a list, per the Keras trainable_weights
    # convention. (The previous `self.W + self.b` broadcast-added the two
    # tensors into a single [n_in, n_out] tensor instead of listing them.)
    self.trainable_weights = [self.W, self.b]

  def call(self, x, mask=None):
    """Execute this layer on input tensors.

    x = [atom_features, atom_mask]

    Parameters
    ----------
    x: list
      Tensors as listed above
    mask: bool, optional
      Ignored. Present only to shadow superclass call() method.

    Returns
    -------
    outputs: Tensor
      Tensor of concatenated atom features
    """
    self.build()
    atom_features = x[0]
    atom_masks = x[1]
    # One slice per molecule; atom_features is assumed to be padded to a
    # fixed per-molecule atom count so the even split succeeds.
    A = tf.split(atom_features, self.batch_size, axis=0)
    A_mask = tf.split(
        tf.cast(atom_masks, dtype=tf.bool), self.batch_size, axis=0)
    # Drop padding atoms and stack all real atoms into one batch.
    outputs = tf.concat(
        [tf.boolean_mask(A[i], A_mask[i]) for i in range(len(A))], axis=0)
    outputs = tf.matmul(outputs, self.W) + self.b
    outputs = self.activation(outputs)
    return outputs

# TODO(rbharath): This class does not yet have a
# TensorGraph equivalent, but one may not be required.
# Commented out for now, remove if OK.
#class AlternateWeaveGather(WeaveGather):
#  """Alternate implementation of weave gather layer
#     corresponding to AlternateWeaveLayer
#  """
#
#  def call(self, x, mask=None):
#    """Execute this layer on input tensors.
#
#    x = [atom_features, atom_split]
#
#    Parameters
#    ----------
#    x: list
#      Tensors as listed above
#    mask: bool, optional
#      Ignored. Present only to shadow superclass call() method.
#
#    Returns
#    -------
#    outputs: Tensor
#      Tensor of molecular features
#    """
#    # Add trainable weights
#    self.build()
#    outputs = x[0]
#    atom_split = x[1]
#
#    if self.gaussian_expand:
#      outputs = self.gaussian_histogram(outputs)
#
#    output_molecules = tf.segment_sum(outputs, atom_split)
#
#    if self.gaussian_expand:
#      output_molecules = tf.matmul(output_molecules, self.W) + self.b
#      output_molecules = self.activation(output_molecules)
#    return output_molecules
class AlternateWeaveGather(WeaveGather):
  """Alternate implementation of weave gather layer
     corresponding to AlternateWeaveLayer
  """

  def call(self, x, mask=None):
    """Execute this layer on input tensors.

    x = [atom_features, atom_split]

    Parameters
    ----------
    x: list
      Tensors as listed above
    mask: bool, optional
      Ignored. Present only to shadow superclass call() method.

    Returns
    -------
    outputs: Tensor
      Tensor of molecular features
    """
    # Instantiate trainable weights before wiring up the graph.
    self.build()
    atom_feats, atom_split = x[0], x[1]

    expand = self.gaussian_expand
    if expand:
      # Expand each feature into a Gaussian histogram before pooling.
      atom_feats = self.gaussian_histogram(atom_feats)

    # Pool per-atom features into per-molecule features.
    mol_feats = tf.segment_sum(atom_feats, atom_split)

    if expand:
      # Project the expanded representation back down and apply the
      # layer's nonlinearity.
      mol_feats = self.activation(tf.matmul(mol_feats, self.W) + self.b)
    return mol_feats
+0 −158
Original line number Diff line number Diff line
"""
Convenience classes for assembling graph models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"

import warnings
import tensorflow as tf
from deepchem.nn.layers import GraphGather
from deepchem.models.tf_new_models.graph_topology import GraphTopology, DTNNGraphTopology, DAGGraphTopology, WeaveGraphTopology, AlternateWeaveGraphTopology


class SequentialGraph(object):
  """An analog of Keras Sequential class for Graph data.

  Like the Sequential class from Keras, but automatically passes topology
  placeholders from GraphTopology to each graph layer (from layers) added
  to the network. Non graph layers don't get the extra placeholders.
  """

  def __init__(self, n_feat):
    """
    Parameters
    ----------
    n_feat: int
      Number of features per atom.
    """
    warnings.warn("SequentialGraph is deprecated. "
                  "Will be removed in DeepChem 1.4.", DeprecationWarning)
    self.graph = tf.Graph()
    with self.graph.as_default():
      self.graph_topology = GraphTopology(n_feat)
      self.output = self.graph_topology.get_atom_features_placeholder()
    # Keep track of the layers
    self.layers = []

  def add(self, layer):
    """Adds a new layer to model.

    Graph layers (GraphConv/GraphGather/GraphPool) additionally receive
    the topology placeholders; any other layer is applied to the current
    output tensor alone.
    """
    with self.graph.as_default():
      # For graphical layers, add connectivity placeholders
      if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
        # Once GraphGather has pooled atoms into molecules, further
        # graph layers are invalid. (The previous check looked for a
        # `__name__` attribute on the layer *instance*, which ordinary
        # instances don't have, so the assert could never fire; checking
        # the type matches the dispatch test above.)
        if len(self.layers) > 0:
          assert type(self.layers[-1]).__name__ != "GraphGather", \
                  'Cannot use GraphConv or GraphGather layers after a GraphGather'

        self.output = layer([self.output] +
                            self.graph_topology.get_topology_placeholders())
      else:
        self.output = layer(self.output)

      # Add layer to the layer list
      self.layers.append(layer)

  def get_graph_topology(self):
    return self.graph_topology

  def get_num_output_features(self):
    """Gets the output shape of the featurization layers of the network"""
    return self.layers[-1].output_shape[1]

  def return_outputs(self):
    return self.output

  def return_inputs(self):
    return self.graph_topology.get_input_placeholders()

  def get_layer(self, layer_id):
    return self.layers[layer_id]

class SequentialDTNNGraph(SequentialGraph):
  """An analog of Keras Sequential class for Coulomb Matrix data.

@@ -241,94 +174,3 @@ class AlternateSequentialWeaveGraph(SequentialGraph):
      else:
        self.output = layer(self.output)
      self.layers.append(layer)


class SequentialSupportGraph(object):
  """An analog of Keras Sequential model for test/support models.

  Maintains two parallel towers ("test" and "support") that share layer
  objects: add() applies a layer to both towers, add_test()/add_support()
  to one tower only, and join() to a layer taking both towers at once.
  """

  def __init__(self, n_feat):
    """
    Parameters
    ----------
    n_feat: int
      Number of atomic features.
    """
    # Fixed: the warning previously named the wrong class
    # ("SequentialSupportWeaveGraph").
    warnings.warn("SequentialSupportGraph is deprecated. "
                  "Will be removed in DeepChem 1.4.", DeprecationWarning)
    self.graph = tf.Graph()
    with self.graph.as_default():
      # Create graph topology and x
      self.test_graph_topology = GraphTopology(n_feat, name='test')
      self.support_graph_topology = GraphTopology(n_feat, name='support')
      self.test = self.test_graph_topology.get_atom_features_placeholder()
      self.support = self.support_graph_topology.get_atom_features_placeholder()

    # Keep track of the layers
    self.layers = []
    # Whether or not we have used the GraphGather layer yet
    self.bool_pre_gather = True

  def add(self, layer):
    """Adds a layer to both test/support stacks.

    Note that the layer transformation is performed independently on the
    test/support tensors.
    """
    with self.graph.as_default():
      self.layers.append(layer)

      # Update new value of x
      if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
        assert self.bool_pre_gather, "Cannot apply graphical layers after gather."

        self.test = layer([self.test] + self.test_graph_topology.topology)
        self.support = layer([self.support] +
                             self.support_graph_topology.topology)
      else:
        self.test = layer(self.test)
        self.support = layer(self.support)

      if type(layer).__name__ == 'GraphGather':
        self.bool_pre_gather = False  # Set flag to stop adding topology

  def add_test(self, layer):
    """Adds a layer to test."""
    with self.graph.as_default():
      self.layers.append(layer)

      # Update new value of x
      if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
        self.test = layer([self.test] + self.test_graph_topology.topology)
      else:
        self.test = layer(self.test)

  def add_support(self, layer):
    """Adds a layer to support."""
    with self.graph.as_default():
      self.layers.append(layer)

      # Update new value of x
      if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
        self.support = layer([self.support] +
                             self.support_graph_topology.topology)
      else:
        self.support = layer(self.support)

  def join(self, layer):
    """Joins test and support to a two input two output layer"""
    with self.graph.as_default():
      self.layers.append(layer)
      self.test, self.support = layer([self.test, self.support])

  def get_test_output(self):
    return self.test

  def get_support_output(self):
    return self.support

  def return_outputs(self):
    return [self.test] + [self.support]

  def return_inputs(self):
    return (self.test_graph_topology.get_inputs() +
            self.support_graph_topology.get_inputs())
Loading