Unverified Commit c1d38548 authored by Bharath Ramsundar's avatar Bharath Ramsundar Committed by GitHub
Browse files

Merge pull request #1023 from lilleswing/toint32

 Add Cast Layer
parents a1db2ca4 0f5124a7
Loading
Loading
Loading
Loading
+40 −9
Original line number Diff line number Diff line
@@ -644,6 +644,38 @@ class Reshape(Layer):
    return out_tensor


class Cast(Layer):
  """
  Wrapper around tf.cast.  Changes the dtype of a single layer.
  """

  def __init__(self, in_layers=None, dtype=None, **kwargs):
    """
    Parameters
    ----------
    in_layers: Layer or list of Layer, optional
      the single parent layer whose output tensor will be cast
    dtype: tf.DType
      the dtype to cast the in_layer to, e.g. tf.int32

    Raises
    ------
    ValueError
      if dtype is not provided
    """
    if dtype is None:
      raise ValueError("Must cast to a dtype")
    self.dtype = dtype
    super(Cast, self).__init__(in_layers, **kwargs)
    # Casting does not change shape, so inherit the parent's shape when it
    # is known.  The parent may be absent, the list empty, or the layer may
    # lack a shape attribute -- catch only those specific failures instead
    # of a bare except (which would also swallow KeyboardInterrupt etc.).
    try:
      self._shape = self.in_layers[0].shape
    except (AttributeError, IndexError, TypeError):
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Cast the single input tensor to this layer's dtype and return it."""
    inputs = self._get_input_tensors(in_layers)
    parent_tensor = inputs[0]
    out_tensor = tf.cast(parent_tensor, self.dtype)
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor


class Squeeze(Layer):

  def __init__(self, in_layers=None, squeeze_dims=None, **kwargs):
@@ -655,7 +687,8 @@ class Squeeze(Layer):
        self._shape = [i for i in parent_shape if i != 1]
      else:
        self._shape = [
            parent_shape[i] for i in range(len(parent_shape))
            parent_shape[i]
            for i in range(len(parent_shape))
            if i not in squeeze_dims
        ]
    except:
@@ -2817,15 +2850,13 @@ class VinaFreeEnergy(Layer):

  def hydrophobic(self, d):
    """Computes Autodock Vina's hydrophobic interaction term.

    Piecewise linear in the distance tensor d: 1 where d < 0.5, ramps
    linearly from 1 down to 0 on [0.5, 1.5), and 0 where d >= 1.5.
    """
    # The source here contained merged old+new diff lines (a duplicated,
    # truncated tf.where statement); this is the single clean statement.
    out_tensor = tf.where(d < 0.5, tf.ones_like(d),
                          tf.where(d < 1.5, 1.5 - d, tf.zeros_like(d)))
    return out_tensor

  def hydrogen_bond(self, d):
    """Computes Autodock Vina's hydrogen bond interaction term.

    Piecewise linear in the distance tensor d: 1 where d < -0.7, ramps
    linearly from 1 down to 0 on [-0.7, 0), and 0 where d >= 0.
    """
    # The source here contained merged old+new diff lines (a duplicated,
    # truncated tf.where statement); this is the single clean statement.
    out_tensor = tf.where(d < -0.7, tf.ones_like(d),
                          tf.where(d < 0, (1.0 / 0.7) * (0 - d),
                                   tf.zeros_like(d)))
    return out_tensor
+12 −3
Original line number Diff line number Diff line
@@ -5,7 +5,7 @@ from tensorflow.python.framework import test_util

from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
from deepchem.models.tensorgraph.layers import Add, MaxPool2D, MaxPool3D, GraphCNN, GraphEmbedPoolLayer
from deepchem.models.tensorgraph.layers import Add, MaxPool2D, MaxPool3D, GraphCNN, GraphEmbedPoolLayer, Cast
from deepchem.models.tensorgraph.layers import AlphaShareLayer
from deepchem.models.tensorgraph.layers import AttnLSTMEmbedding
from deepchem.models.tensorgraph.layers import BatchNorm
@@ -276,8 +276,9 @@ class TestLayers(test_util.TensorFlowTestCase):
    value2 = np.random.uniform(size=(2, 3)).astype(np.float32)
    value3 = np.random.uniform(size=(2, 3)).astype(np.float32)
    with self.test_session() as sess:
      out_tensor = Add(weights=[1, 2, 1])(
          tf.constant(value1), tf.constant(value2), tf.constant(value3))
      out_tensor = Add(weights=[1, 2, 1])(tf.constant(value1),
                                          tf.constant(value2),
                                          tf.constant(value3))
      assert np.array_equal(value1 + 2 * value2 + value3, out_tensor.eval())

  def test_multiply(self):
@@ -709,6 +710,14 @@ class TestLayers(test_util.TensorFlowTestCase):
      loss = np.mean(diff**2)
      assert (loss - result) / loss < 1e-6

  def test_cast(self):
    """Test that Cast layers convert their input tensor to the given dtype."""
    # NOTE: the original docstring was copy-pasted from the squeeze test
    # ("automatically reshape inconsistent inputs") and did not describe
    # what this test checks.
    value1 = np.random.uniform(size=(2, 1)).astype(np.float32)
    with self.test_session() as sess:
      out_tensor = Cast(dtype=tf.int32)(tf.constant(value1))
      result = out_tensor.eval()
      assert result.dtype == np.int32

  def test_squeeze_inputs(self):
    """Test that layers can automatically reshape inconsistent inputs."""
    value1 = np.random.uniform(size=(2, 1)).astype(np.float32)
+11 −1
Original line number Diff line number Diff line
@@ -10,7 +10,7 @@ from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten,
  SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool2D, ReduceSum, GraphConv, GraphPool, \
  GraphGather, BatchNorm, WeightedError, \
  Conv3D, MaxPool3D, Conv2DTranspose, Conv3DTranspose, \
  LSTMStep, AttnLSTMEmbedding, IterRefLSTMEmbedding, GraphEmbedPoolLayer, GraphCNN
  LSTMStep, AttnLSTMEmbedding, IterRefLSTMEmbedding, GraphEmbedPoolLayer, GraphCNN, Cast
from deepchem.models.tensorgraph.symmetry_functions import AtomicDifferentiatedDense


@@ -64,6 +64,16 @@ def test_Squeeze_pickle():
  tg.save()


def test_Cast_pickle():
  """Confirm a Cast layer survives a TensorGraph build/save round trip."""
  graph = TensorGraph()
  inp = Feature(shape=(graph.batch_size, 1))
  cast_layer = Cast(in_layers=inp, dtype=tf.int32)
  graph.add_output(cast_layer)
  graph.set_loss(cast_layer)
  graph.build()
  graph.save()


def test_Transpose_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))