Commit b6ced203 authored by marta-sd's avatar marta-sd
Browse files

Tests for Conv3D and MaxPool3D

parent 970f7892
Loading
Loading
Loading
Loading
+34 −0
Original line number Diff line number Diff line
@@ -34,7 +34,9 @@ from deepchem.models.tensorgraph.layers import ToFloat
from deepchem.models.tensorgraph.layers import ReduceSum
from deepchem.models.tensorgraph.layers import ReduceSquareDifference
from deepchem.models.tensorgraph.layers import Conv2D
from deepchem.models.tensorgraph.layers import Conv3D
from deepchem.models.tensorgraph.layers import MaxPool
from deepchem.models.tensorgraph.layers import MaxPool3D
from deepchem.models.tensorgraph.layers import InputFifoQueue
from deepchem.models.tensorgraph.layers import GraphConv
from deepchem.models.tensorgraph.layers import GraphPool
@@ -368,6 +370,23 @@ class TestLayers(test_util.TensorFlowTestCase):
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size, length, width, out_channels)

  def test_conv_3D(self):
    """Test that Conv3D can be invoked."""
    # Input shape: (batch, length, width, depth, channels).
    batch_size = 20
    in_channels = 2
    out_channels = 3
    length, width, depth = 4, 5, 6
    input_shape = (batch_size, length, width, depth, in_channels)
    with self.test_session() as sess:
      data = tf.convert_to_tensor(
          np.random.rand(*input_shape), dtype=tf.float32)
      # A 1x1x1 kernel preserves the spatial dims and maps channels
      # in_channels -> out_channels.
      result = Conv3D(out_channels, kernel_size=1)(data)
      sess.run(tf.global_variables_initializer())
      assert result.eval().shape == (batch_size, length, width, depth,
                                     out_channels)

  def test_max_pool(self):
    """Test that MaxPool can be invoked."""
    length = 2
@@ -382,6 +401,21 @@ class TestLayers(test_util.TensorFlowTestCase):
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size, 1, 1, in_channels)

  def test_max_pool_3D(self):
    """Test that MaxPool3D can be invoked."""
    # Input shape: (batch, length, width, depth, channels).
    batch_size = 20
    in_channels = 2
    length = width = depth = 2
    input_shape = (batch_size, length, width, depth, in_channels)
    with self.test_session() as sess:
      data = tf.convert_to_tensor(
          np.random.rand(*input_shape), dtype=tf.float32)
      # Default pooling window collapses each 2x2x2 spatial volume to 1x1x1.
      pooled = MaxPool3D()(data)
      sess.run(tf.global_variables_initializer())
      assert pooled.eval().shape == (batch_size, 1, 1, 1, in_channels)

  def test_input_fifo_queue(self):
    """Test InputFifoQueue can be invoked."""
    batch_size = 10
+21 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@ from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten,
    CombineMeanStd, Repeat, Gather, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, Log, InteratomicL2Distances, \
    SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool, ReduceSum, GraphConv, GraphPool, \
    GraphGather, BatchNorm, WeightedError, \
    Conv3D, MaxPool3D, \
    LSTMStep, AttnLSTMEmbedding, IterRefLSTMEmbedding
from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, \
    WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, \
@@ -255,6 +256,16 @@ def test_Conv2D_pickle():
  tg.save()


def test_Conv3D_pickle():
  """Check that a graph containing a Conv3D layer builds and saves."""
  graph = TensorGraph()
  # 5-D feature: (batch, length, width, depth, channels).
  inputs = Feature(shape=(graph.batch_size, 10, 10, 10, 1))
  conv = Conv3D(num_outputs=3, in_layers=inputs)
  graph.add_output(conv)
  graph.set_loss(conv)
  graph.build()
  graph.save()


def test_MaxPool_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10, 10, 10))
@@ -265,6 +276,16 @@ def test_MaxPool_pickle():
  tg.save()


def test_MaxPool3D_pickle():
  """Check that a graph containing a MaxPool3D layer builds and saves."""
  graph = TensorGraph()
  # 5-D feature: (batch, length, width, depth, channels).
  inputs = Feature(shape=(graph.batch_size, 10, 10, 10, 10))
  pool = MaxPool3D(in_layers=inputs)
  graph.add_output(pool)
  graph.set_loss(pool)
  graph.build()
  graph.save()


def test_GraphConv_pickle():
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 75))