Commit 6dd2f1b3 authored by Bharath Ramsundar's avatar Bharath Ramsundar Committed by GitHub
Browse files

Merge pull request #655 from peastman/normalizer

Added normalizer_fn option to Conv2D layer
parents 762552e6 a65f514b
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -767,6 +767,7 @@ class Conv2D(Layer):
               stride=1,
               padding='SAME',
               activation_fn=tf.nn.relu,
+               normalizer_fn=None,
               scope_name=None,
               **kwargs):
    """Create a Conv2D layer.
@@ -787,12 +788,15 @@ class Conv2D(Layer):
      the padding method to use, either 'SAME' or 'VALID'
    activation_fn: object
      the Tensorflow activation function to apply to the output
+    normalizer_fn: object
+      the Tensorflow normalizer function to apply to the output
    """
    self.num_outputs = num_outputs
    self.kernel_size = kernel_size
    self.stride = stride
    self.padding = padding
    self.activation_fn = activation_fn
+    self.normalizer_fn = normalizer_fn
    super(Conv2D, self).__init__(**kwargs)
    if scope_name is None:
      scope_name = self.name
@@ -808,7 +812,7 @@ class Conv2D(Layer):
        stride=self.stride,
        padding=self.padding,
        activation_fn=self.activation_fn,
-        normalizer_fn=tf.contrib.layers.batch_norm,
+        normalizer_fn=self.normalizer_fn,
        scope=self.scope_name)
    out_tensor = out_tensor
    if set_tensors:
+8 −2
Original line number Diff line number Diff line
@@ -25,10 +25,16 @@ class TestTensorGraphMNIST(unittest.TestCase):
    feature = Feature(shape=(None, 784), name="Feature")
    make_image = Reshape(shape=(-1, 28, 28, 1), in_layers=[feature])

-    conv2d_1 = Conv2D(num_outputs=32, in_layers=[make_image])
+    conv2d_1 = Conv2D(
+        num_outputs=32,
+        normalizer_fn=tf.contrib.layers.batch_norm,
+        in_layers=[make_image])
    maxpool_1 = MaxPool(in_layers=[conv2d_1])

-    conv2d_2 = Conv2D(num_outputs=64, in_layers=[maxpool_1])
+    conv2d_2 = Conv2D(
+        num_outputs=64,
+        normalizer_fn=tf.contrib.layers.batch_norm,
+        in_layers=[maxpool_1])
    maxpool_2 = MaxPool(in_layers=[conv2d_2])
    flatten = Flatten(in_layers=[maxpool_2])