Commit 8b2312b8 authored by peastman

Added Conv2DTranspose and Conv3DTranspose layers

parent 836b1b28
+159 −0
@@ -1519,6 +1519,165 @@ class Conv3D(Layer):
    return out_tensor


class Conv2DTranspose(Layer):
  """A transposed 2D convolution on the input.

  This layer is typically used for upsampling in a deconvolutional network.  It
  expects its input to be a four-dimensional tensor of shape (batch size, height, width, # channels).
  If there is only one channel, the fourth dimension may optionally be omitted.  (A usage
  sketch follows the class definition.)
  """

  def __init__(self,
               num_outputs,
               kernel_size=5,
               stride=1,
               padding='SAME',
               activation_fn=tf.nn.relu,
               normalizer_fn=None,
               scope_name=None,
               **kwargs):
    """Create a Conv2DTranspose layer.

    Parameters
    ----------
    num_outputs: int
      the number of outputs produced by the convolutional kernel
    kernel_size: int or tuple
      the width of the convolutional kernel.  This can be either a two-element tuple, giving
      the kernel size along each dimension, or an integer to use the same size along both
      dimensions.
    stride: int or tuple
      the stride between applications of the convolutional kernel.  This can be either a
      two-element tuple, giving the stride along each dimension, or an integer to use the
      same stride along both dimensions.
    padding: str
      the padding method to use, either 'SAME' or 'VALID'
    activation_fn: object
      the TensorFlow activation function to apply to the output
    normalizer_fn: object
      the TensorFlow normalizer function to apply to the output
    """
    self.num_outputs = num_outputs
    self.kernel_size = kernel_size
    self.stride = stride
    self.padding = padding
    self.activation_fn = activation_fn
    self.normalizer_fn = normalizer_fn
    super(Conv2DTranspose, self).__init__(**kwargs)
    if scope_name is None:
      scope_name = self.name
    self.scope_name = scope_name
    try:
      parent_shape = self.in_layers[0].shape
      strides = stride
      if isinstance(stride, int):
        strides = (stride, stride)
      # With the default 'SAME' padding, each spatial dimension of the output
      # is the input dimension times the stride.
      self._shape = (parent_shape[0], parent_shape[1] * strides[0],
                     parent_shape[2] * strides[1], num_outputs)
    except:
      # The input shape is not yet known, so the output shape cannot be inferred.
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    inputs = self._get_input_tensors(in_layers)
    parent_tensor = inputs[0]
    if len(parent_tensor.get_shape()) == 3:
      parent_tensor = tf.expand_dims(parent_tensor, 3)
    out_tensor = tf.contrib.layers.conv2d_transpose(
        parent_tensor,
        num_outputs=self.num_outputs,
        kernel_size=self.kernel_size,
        stride=self.stride,
        padding=self.padding,
        activation_fn=self.activation_fn,
        normalizer_fn=self.normalizer_fn,
        scope=self.scope_name)
    if set_tensors:
      self._record_variable_scope(self.scope_name)
      self.out_tensor = out_tensor
    return out_tensor
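
A short usage sketch for the layer above. The shapes and parameter values here are hypothetical, and the sketch assumes the TensorGraph/Feature API exercised by the pickle tests below; it is not part of this commit.

# Hypothetical sketch: upsample an 8x8 single-channel feature map.
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Feature, Conv2DTranspose

tg = TensorGraph()
feature = Feature(shape=(tg.batch_size, 8, 8, 1))
# With the default padding='SAME' and stride=2, each spatial dimension is
# multiplied by the stride, so the inferred shape is (batch_size, 16, 16, 3).
layer = Conv2DTranspose(num_outputs=3, kernel_size=5, stride=2, in_layers=feature)
tg.add_output(layer)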


class Conv3DTranspose(Layer):
  """A transposed 3D convolution on the input.

  This layer is typically used for upsampling in a deconvolutional network.  It
  expects its input to be a five-dimensional tensor of shape (batch size, height, width, depth, # channels).
  If there is only one channel, the fifth dimension may optionally be omitted.  (A usage
  sketch follows the class definition.)
  """

  def __init__(self,
               num_outputs,
               kernel_size=5,
               stride=1,
               padding='SAME',
               activation_fn=tf.nn.relu,
               normalizer_fn=None,
               scope_name=None,
               **kwargs):
    """Create a Conv3DTranspose layer.

    Parameters
    ----------
    num_outputs: int
      the number of outputs produced by the convolutional kernel
    kernel_size: int or tuple
      the width of the convolutional kernel.  This can be either a three-element tuple, giving
      the kernel size along each dimension, or an integer to use the same size along all three
      dimensions.
    stride: int or tuple
      the stride between applications of the convolutional kernel.  This can be either a
      three-element tuple, giving the stride along each dimension, or an integer to use the
      same stride along all three dimensions.
    padding: str
      the padding method to use, either 'SAME' or 'VALID'
    activation_fn: object
      the TensorFlow activation function to apply to the output
    normalizer_fn: object
      the TensorFlow normalizer function to apply to the output
    """
    self.num_outputs = num_outputs
    self.kernel_size = kernel_size
    self.stride = stride
    self.padding = padding
    self.activation_fn = activation_fn
    self.normalizer_fn = normalizer_fn
    super(Conv3DTranspose, self).__init__(**kwargs)
    if scope_name is None:
      scope_name = self.name
    self.scope_name = scope_name
    try:
      parent_shape = self.in_layers[0].shape
      strides = stride
      if isinstance(stride, int):
        strides = (stride, stride, stride)
      # With the default 'SAME' padding, each spatial dimension of the output
      # is the input dimension times the stride.
      self._shape = (parent_shape[0], parent_shape[1] * strides[0],
                     parent_shape[2] * strides[1], parent_shape[3] * strides[2],
                     num_outputs)
    except:
      # The input shape is not yet known, so the output shape cannot be inferred.
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    inputs = self._get_input_tensors(in_layers)
    parent_tensor = inputs[0]
    if len(parent_tensor.get_shape()) == 4:
      parent_tensor = tf.expand_dims(parent_tensor, 4)
    out_tensor = tf.layers.conv3d_transpose(
        parent_tensor,
        filters=self.num_outputs,
        kernel_size=self.kernel_size,
        strides=self.stride,
        padding=self.padding,
        activation=self.activation_fn,
        name=self.scope_name)
    # tf.layers.conv3d_transpose has no normalizer_fn argument, and an activity
    # regularizer is not a normalizer, so apply the normalizer to the output
    # tensor directly.
    if self.normalizer_fn is not None:
      out_tensor = self.normalizer_fn(out_tensor)
    if set_tensors:
      self._record_variable_scope(self.scope_name)
      self.out_tensor = out_tensor
    return out_tensor
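
As in the 2D case, a sketch of per-axis upsampling with a stride tuple; the values are hypothetical, chosen to mirror test_conv_3D_transpose below, and this is not part of the commit.

# Hypothetical sketch: scale height, width, and depth independently.
from deepchem.models.tensorgraph.layers import Feature, Conv3DTranspose

feature = Feature(shape=(None, 4, 5, 6, 1))
# stride=(2, 3, 1) multiplies (height, width, depth) by (2, 3, 1); with the
# default 'SAME' padding the inferred shape is (None, 8, 15, 6, 2).
layer = Conv3DTranspose(num_outputs=2, kernel_size=1, stride=(2, 3, 1), in_layers=feature)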


class MaxPool2D(Layer):

  def __init__(self,
+39 −1
@@ -5,7 +5,7 @@ from tensorflow.python.framework import test_util

from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
-from deepchem.models.tensorgraph.layers import Add, Conv3D, MaxPool2D, MaxPool3D, GraphCNN, GraphEmbedPoolLayer
+from deepchem.models.tensorgraph.layers import Add, MaxPool2D, MaxPool3D, GraphCNN, GraphEmbedPoolLayer
from deepchem.models.tensorgraph.layers import AlphaShareLayer
from deepchem.models.tensorgraph.layers import AttnLSTMEmbedding
from deepchem.models.tensorgraph.layers import BatchNorm
@@ -15,6 +15,9 @@ from deepchem.models.tensorgraph.layers import Concat
from deepchem.models.tensorgraph.layers import Constant
from deepchem.models.tensorgraph.layers import Conv1D, Squeeze
from deepchem.models.tensorgraph.layers import Conv2D
from deepchem.models.tensorgraph.layers import Conv2DTranspose
from deepchem.models.tensorgraph.layers import Conv3D
from deepchem.models.tensorgraph.layers import Conv3DTranspose
from deepchem.models.tensorgraph.layers import Dense
from deepchem.models.tensorgraph.layers import Exp
from deepchem.models.tensorgraph.layers import Flatten
@@ -395,6 +398,41 @@ class TestLayers(test_util.TensorFlowTestCase):
      assert out_tensor.shape == (batch_size, length, width, depth,
                                  out_channels)

  def test_conv_2D_transpose(self):
    """Test that Conv2DTranspose can be invoked."""
    length = 4
    width = 5
    in_channels = 2
    out_channels = 3
    batch_size = 20
    in_tensor = np.random.rand(batch_size, length, width, in_channels)
    with self.test_session() as sess:
      in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
      out_tensor = Conv2DTranspose(
          out_channels, kernel_size=1, stride=2)(in_tensor)
      sess.run(tf.global_variables_initializer())
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size, 2 * length, 2 * width,
                                  out_channels)

  def test_conv_3D_transpose(self):
    """Test that Conv3DTranspose can be invoked."""
    length = 4
    width = 5
    depth = 6
    in_channels = 2
    out_channels = 3
    batch_size = 20
    in_tensor = np.random.rand(batch_size, length, width, depth, in_channels)
    with self.test_session() as sess:
      in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
      out_tensor = Conv3DTranspose(
          out_channels, kernel_size=1, stride=(2, 3, 1))(in_tensor)
      sess.run(tf.global_variables_initializer())
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size, 2 * length, 3 * width, depth,
                                  out_channels)

  def test_maxpool2D(self):
    """Test that MaxPool2D can be invoked."""
    length = 2
+21 −1
@@ -9,7 +9,7 @@ from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten,
  Constant, Variable, StopGradient, Add, Multiply, Log, Exp, InteratomicL2Distances, \
  SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool2D, ReduceSum, GraphConv, GraphPool, \
  GraphGather, BatchNorm, WeightedError, \
-  Conv3D, MaxPool3D, \
+  Conv3D, MaxPool3D, Conv2DTranspose, Conv3DTranspose, \
  LSTMStep, AttnLSTMEmbedding, IterRefLSTMEmbedding, GraphEmbedPoolLayer, GraphCNN
from deepchem.models.tensorgraph.symmetry_functions import AtomicDifferentiatedDense

@@ -288,6 +288,26 @@ def test_Conv3D_pickle():
  tg.save()


def test_Conv2DTranspose_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10, 10, 1))
  layer = Conv2DTranspose(num_outputs=3, in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()


def test_Conv3DTranspose_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10, 10, 10, 1))
  layer = Conv3DTranspose(num_outputs=3, in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()


def test_MaxPool2D_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10, 10, 10))