Commit 583865c2 authored by miaecle's avatar miaecle
Browse files

docstrings and unit tests

parent c5dbb5cd
Loading
Loading
Loading
Loading
+14 −3
Original line number Diff line number Diff line
@@ -79,11 +79,20 @@ class IRVLayer(Layer):


class IRVRegularize(Layer):
  """ Extracts the trainable weights in IRVLayer
  and returns their L2-norm.
  No in_layers is required, but this layer should be built after the target IRVLayer.
  """

  def __init__(self, IRVLayer, penalty=0.0, **kwargs):
    """Store the target layer and penalty strength for later regularization.

    Parameters
    ----------
    IRVLayer: IRVLayer
      Layer whose trainable weights will be extracted and regularized
    penalty: float
      Strength of the L2 penalty applied to the weights
    """
    self.penalty = penalty
    self.IRVLayer = IRVLayer
    super(IRVRegularize, self).__init__(**kwargs)
@@ -100,7 +109,9 @@ class IRVRegularize(Layer):


class Slice(Layer):
  """ Choose a slice of the input on the last axis, given its order.
  Supposing input x has two dimensions,
  output f(x) = x[:, slice_num:slice_num+1]
  """

  def __init__(self, slice_num, axis=1, **kwargs):
+9 −0
Original line number Diff line number Diff line
@@ -1185,6 +1185,9 @@ class SoftMax(Layer):


class Sigmoid(Layer):
  """ Compute the sigmoid of input: f(x) = sigmoid(x)
  Only one input is allowed, output will have the same shape as input
  """

  def __init__(self, in_layers=None, **kwargs):
    """
    Parameters
    ----------
    in_layers: list of Layer or tensors, optional
      The single input layer/tensor the sigmoid is applied to
    """
    super(Sigmoid, self).__init__(in_layers, **kwargs)
@@ -1552,6 +1555,12 @@ class SoftMaxCrossEntropy(Layer):


class SigmoidCrossEntropy(Layer):
  """ Compute the sigmoid cross entropy of inputs: [labels, logits]
  `labels` hold the binary labels(with no axis of n_classes),
  `logits` hold the log probabilities for positive class(label=1),
  `labels` and `logits` should have same shape and type.
  Output will have the same shape as `logits`
  """

  def __init__(self, in_layers=None, **kwargs):
    """
    Parameters
    ----------
    in_layers: list of Layer or tensors, optional
      Expected to hold two inputs: [labels, logits], with matching
      shape and type (see class docstring)
    """
    super(SigmoidCrossEntropy, self).__init__(in_layers, **kwargs)
+59 −0
Original line number Diff line number Diff line
@@ -40,6 +40,8 @@ from deepchem.models.tensorgraph.layers import ReduceSum
from deepchem.models.tensorgraph.layers import Repeat
from deepchem.models.tensorgraph.layers import Reshape
from deepchem.models.tensorgraph.layers import SluiceLoss
from deepchem.models.tensorgraph.layers import Sigmoid
from deepchem.models.tensorgraph.layers import SigmoidCrossEntropy
from deepchem.models.tensorgraph.layers import SoftMax
from deepchem.models.tensorgraph.layers import SoftMaxCrossEntropy
from deepchem.models.tensorgraph.layers import StopGradient
@@ -52,6 +54,10 @@ from deepchem.models.tensorgraph.layers import VinaFreeEnergy
from deepchem.models.tensorgraph.layers import WeightedError
from deepchem.models.tensorgraph.layers import WeightedLinearCombo

from deepchem.models.tensorflow_models.IRV import IRVLayer
from deepchem.models.tensorflow_models.IRV import IRVRegularize
from deepchem.models.tensorflow_models.IRV import Slice


class TestLayers(test_util.TensorFlowTestCase):
  """
@@ -223,6 +229,17 @@ class TestLayers(test_util.TensorFlowTestCase):
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size,)

  def test_sigmoid(self):
    """Test that Sigmoid can be invoked."""
    batch_size, n_features = 10, 5
    features = np.random.rand(batch_size, n_features)
    with self.test_session() as sess:
      input_tensor = tf.convert_to_tensor(features, dtype=tf.float32)
      result = Sigmoid()(input_tensor).eval()
      # Sigmoid is elementwise, so the shape must be preserved.
      assert result.shape == (batch_size, n_features)

  def test_softmax(self):
    """Test that Softmax can be invoked."""
    batch_size = 10
@@ -327,6 +344,19 @@ class TestLayers(test_util.TensorFlowTestCase):
      dists = dist_tensor.eval()
      assert dists.shape == (N_atoms, M_nbrs)

  def test_sigmoid_cross_entropy(self):
    """Test that SigmoidCrossEntropy can be invoked."""
    batch_size, n_features = 10, 5
    logits = np.random.rand(batch_size, n_features)
    labels = np.random.randint(0, 2, (batch_size, n_features))
    with self.test_session() as sess:
      logits = tf.convert_to_tensor(logits, dtype=tf.float32)
      labels = tf.convert_to_tensor(labels, dtype=tf.float32)
      # Loss is computed per element, so output shape matches the logits.
      losses = SigmoidCrossEntropy()(labels, logits).eval()
      assert losses.shape == (batch_size, n_features)

  def test_softmax_cross_entropy(self):
    """Test that SoftMaxCrossEntropy can be invoked."""
    batch_size = 10
@@ -805,3 +835,32 @@ class TestLayers(test_util.TensorFlowTestCase):
      vertex_props, adjs = vertex_props.eval(), adjs.eval()
      assert vertex_props.shape == (10, 6, 50)
      assert adjs.shape == (10, 6, 5, 6)

  def test_slice(self):
    """Test that Slice can be invoked."""
    batch_size, n_features = 10, 5
    raw = np.random.rand(batch_size, n_features)
    with self.test_session() as sess:
      features = tf.convert_to_tensor(raw, dtype=tf.float32)
      sliced = Slice(1)(features).eval()
      # Slice(1) on a 2-D input should return column 1 as shape (batch, 1).
      assert np.allclose(sliced, raw[:, 1:2])

  def test_IRV(self):
    """Test that IRVLayer and IRVRegularize can be invoked."""
    batch_size, n_tasks, K = 10, 5, 10
    # IRV expects 2*K similarity features per task.
    n_features = 2 * K * n_tasks
    raw = np.random.rand(batch_size, n_features)
    with self.test_session() as sess:
      features = tf.convert_to_tensor(raw, dtype=tf.float32)
      layer = IRVLayer(n_tasks, K)
      layer.create_tensor(in_layers=[features])
      sess.run(tf.global_variables_initializer())
      predictions = layer.out_tensor.eval()
      assert predictions.shape == (batch_size, n_tasks)
      # The L2 regularization term over the layer's weights is non-negative.
      regularization = IRVRegularize(layer, 1.)()
      assert regularization.eval() >= 0