Commit 89a3ef4e authored by Bharath Ramsundar's avatar Bharath Ramsundar Committed by GitHub
Browse files

Merge pull request #826 from peastman/operators

Layers support math operators
parents d1e9575a 5afbd715
Loading
Loading
Loading
Loading
+33 −0
Original line number Diff line number Diff line
@@ -162,6 +162,39 @@ class Layer(object):
    else:
      return self.out_tensor

  def __add__(self, other):
    """Build a layer computing self + other.

    A non-Layer operand is first wrapped in a Constant layer.
    """
    rhs = other if isinstance(other, Layer) else Constant(other)
    return Add([self, rhs])

  def __radd__(self, other):
    """Build a layer computing other + self (reflected add).

    A non-Layer operand is first wrapped in a Constant layer.
    """
    lhs = other if isinstance(other, Layer) else Constant(other)
    return Add([lhs, self])

  def __sub__(self, other):
    """Build a layer computing self - other.

    Implemented as a weighted Add with weights (1, -1); a non-Layer
    operand is first wrapped in a Constant layer.
    """
    rhs = other if isinstance(other, Layer) else Constant(other)
    return Add([self, rhs], weights=[1.0, -1.0])

  def __rsub__(self, other):
    """Build a layer computing other - self (reflected subtract).

    Implemented as a weighted Add with weights (1, -1); a non-Layer
    operand is first wrapped in a Constant layer.
    """
    lhs = other if isinstance(other, Layer) else Constant(other)
    return Add([lhs, self], weights=[1.0, -1.0])

  def __mul__(self, other):
    """Build a layer computing the elementwise product self * other.

    A non-Layer operand is first wrapped in a Constant layer.
    """
    rhs = other if isinstance(other, Layer) else Constant(other)
    return Multiply([self, rhs])

  def __rmul__(self, other):
    """Build a layer computing other * self (reflected multiply).

    A non-Layer operand is first wrapped in a Constant layer.
    """
    lhs = other if isinstance(other, Layer) else Constant(other)
    return Multiply([lhs, self])

  def __neg__(self):
    """Build a layer computing -self, via multiplication by a Constant(-1)."""
    minus_one = Constant(-1.0)
    return Multiply([self, minus_one])


def _convert_layer_to_tensor(value, dtype=None, name=None, as_ref=False):
  """Tensor-conversion hook: map a Layer to its underlying out_tensor.

  Signature matches what tf.register_tensor_conversion_function expects;
  as_ref is accepted but ignored.
  """
  tensor = value.out_tensor
  return tf.convert_to_tensor(tensor, dtype=dtype, name=name)
+6 −7
Original line number Diff line number Diff line
@@ -291,16 +291,15 @@ class TensorGraph(Model):
    elif not isinstance(outputs, collections.Sequence):
      outputs = [outputs]
    with self._get_tf("Graph").as_default():
      out_tensors = [x.out_tensor for x in self.outputs]
      # Gather results for each output
      results = [[] for out in out_tensors]
      results = [[] for out in outputs]
      for feed_dict in generator:
        feed_dict = {
            self.layers[k.name].out_tensor: v
            for k, v in six.iteritems(feed_dict)
        }
        feed_dict[self._training_placeholder] = 0.0
        feed_results = self.session.run(out_tensors, feed_dict=feed_dict)
        feed_results = self.session.run(outputs, feed_dict=feed_dict)
        if len(feed_results) > 1:
          if len(transformers):
            raise ValueError("Does not support transformations "
@@ -328,7 +327,7 @@ class TensorGraph(Model):
    """
    return self.predict_on_generator(generator, transformers, outputs)

  def predict_on_batch(self, X, transformers=[]):
  def predict_on_batch(self, X, transformers=[], outputs=None):
    """Generates predictions for input samples, processing samples in a batch.

    Parameters
@@ -344,9 +343,9 @@ class TensorGraph(Model):
    """
    dataset = NumpyDataset(X=X, y=None)
    generator = self.default_generator(dataset, predict=True, pad_batches=False)
    return self.predict_on_generator(generator, transformers)
    return self.predict_on_generator(generator, transformers, outputs)

  def predict_proba_on_batch(self, X, transformers=[]):
  def predict_proba_on_batch(self, X, transformers=[], outputs=None):
    """Generates predictions for input samples, processing samples in a batch.

    Parameters
@@ -360,7 +359,7 @@ class TensorGraph(Model):
    -------
    A Numpy array of predictions.
    """
    return self.predict_on_batch(X, transformers)
    return self.predict_on_batch(X, transformers, outputs)

  def predict(self, dataset, transformers=[], outputs=None):
    """
+34 −1
Original line number Diff line number Diff line
@@ -9,7 +9,7 @@ import tensorflow as tf
import deepchem as dc
from deepchem.data import NumpyDataset
from deepchem.data.datasets import Databag
from deepchem.models.tensorgraph.layers import Dense, SoftMaxCrossEntropy, ReduceMean, SoftMax
from deepchem.models.tensorgraph.layers import Dense, SoftMaxCrossEntropy, ReduceMean, SoftMax, Constant
from deepchem.models.tensorgraph.layers import Feature, Label
from deepchem.models.tensorgraph.layers import ReduceSquareDifference
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
@@ -281,3 +281,36 @@ class TestTensorGraph(unittest.TestCase):
            epochs=1, batch_size=tg.batch_size, pad_batches=True))
    prediction = tg.predict_on_generator(databag.iterbatches())
    assert_true(np.all(np.isclose(prediction[0], prediction[1], atol=0.01)))

  def test_operators(self):
    """Test math operators on Layers."""
    v1 = np.random.uniform(size=(2, 3)).astype(np.float32)
    v2 = np.random.uniform(size=(2, 3)).astype(np.float32)
    c1 = Constant(v1)
    c2 = Constant(v2)
    tg = dc.models.TensorGraph()
    tg.set_loss(c1)
    # Each case pairs an operator-built Layer with the NumPy result it
    # should evaluate to; covers +, -, * (plain, mixed, reflected) and
    # unary negation.
    cases = [
        (c1 + c2, v1 + v2),
        (c1 + v2, v1 + v2),
        (1 + c2, 1 + v2),
        (c1 - c2, v1 - v2),
        (c1 - v2, v1 - v2),
        (1 - c2, 1 - v2),
        (c1 * c2, v1 * v2),
        (c1 * v2, v1 * v2),
        (2 * c2, 2 * v2),
        (-c1, -v1),
    ]
    expected = []
    for layer, result in cases:
      tg.add_output(layer)
      expected.append(result)
    for o, e in zip(tg.outputs, expected):
      value = tg.predict_on_batch(np.array([0]), outputs=o)
      assert np.array_equal(e, value)