Commit f373058b authored by miaecle

finishing tensorgraph progressive

parent 7c13e017
+1 −0
@@ -17,6 +17,7 @@ from deepchem.models.tensorgraph.fcnet import MultiTaskFitTransformRegressor
from deepchem.models.tensorgraph.IRV import TensorflowMultiTaskIRVClassifier
from deepchem.models.tensorgraph.robust_multitask import RobustMultitaskClassifier
from deepchem.models.tensorgraph.robust_multitask import RobustMultitaskRegressor
from deepchem.models.tensorgraph.progressive_multitask import ProgressiveMultitaskRegressor
from deepchem.models.tensorgraph.models.graph_models import WeaveTensorGraph, DTNNTensorGraph, DAGTensorGraph, GraphConvTensorGraph, MPNNTensorGraph
from deepchem.models.tensorgraph.models.symmetry_function_regression import BPSymmetryFunctionRegression, ANIRegression

+23 −0
@@ -1207,6 +1207,29 @@ class Sigmoid(Layer):
    return out_tensor


class ReLU(Layer):
  """ Compute the relu activation of input: f(x) = relu(x)
  Only one input is allowed, output will have the same shape as input
  """

  def __init__(self, in_layers=None, **kwargs):
    super(ReLU, self).__init__(in_layers, **kwargs)
    try:
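      # Infer the output shape from the single parent layer, when available.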
      self._shape = tuple(self.in_layers[0].shape)
    except:
      pass

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    inputs = self._get_input_tensors(in_layers)
    if len(inputs) != 1:
      raise ValueError("ReLU must have a single input layer.")
    parent = inputs[0]
    out_tensor = tf.nn.relu(parent)
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor


class Concat(Layer):

  def __init__(self, in_layers=None, axis=1, **kwargs):
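A minimal usage sketch of the new layer (not part of the commit): it can be invoked eagerly on a plain tensor, mirroring the test added below, or composed into a graph via in_layers. Feature and Dense come from the same module; the exact keyword arguments shown here are assumptions.

import numpy as np
import tensorflow as tf
from deepchem.models.tensorgraph.layers import Dense, Feature, ReLU

# Eager-style invocation on a raw tensor, as in the new test case below.
with tf.Session() as sess:
  x = tf.convert_to_tensor(np.random.randn(10, 5), dtype=tf.float32)
  y = ReLU()(x)
  print(sess.run(y).shape)  # (10, 5), same shape as the input

# Graph-style composition: ReLU takes its single parent via in_layers.
features = Feature(shape=(None, 5))
hidden = Dense(out_channels=8, in_layers=[features])
activated = ReLU(in_layers=[hidden])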
+139 −437

File diff collapsed (preview size limit exceeded).
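The collapsed file is the TensorGraph port of the progressive multitask model that this commit finishes. Because the diff body is hidden, the constructor arguments in this sketch are assumptions modeled on the other multitask TensorGraph models in this package, not a confirmed signature.

import numpy as np
import deepchem as dc
from deepchem.models.tensorgraph.progressive_multitask import ProgressiveMultitaskRegressor

# Toy regression dataset: 100 samples, 50 features, 2 tasks.
X = np.random.rand(100, 50)
y = np.random.rand(100, 2)
dataset = dc.data.NumpyDataset(X, y)

# n_tasks/n_features/layer_sizes are hypothetical keyword names here.
model = ProgressiveMultitaskRegressor(n_tasks=2, n_features=50, layer_sizes=[64])
model.fit(dataset, nb_epoch=1)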

+1 −0
@@ -928,6 +928,7 @@ class Submodel(object):
        optimizer = self.graph.optimizer
      else:
        optimizer = self.optimizer
      # Should we keep a separate global step count for each submodel?
      global_step = self.graph._get_tf('GlobalStep')
      tf_opt = optimizer._create_optimizer(global_step)
      self._train_op = tf_opt.minimize(loss.out_tensor, global_step, variables)
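A standalone TF1 sketch (not from the commit) of the behavior the new comment asks about: when every submodel's train op is built against the same GlobalStep, each minimize() call increments the one shared counter, so it counts total training steps rather than per-submodel steps.

import tensorflow as tf

# One shared step counter, standing in for _get_tf('GlobalStep').
global_step = tf.Variable(0, trainable=False, name="global_step")

v_a = tf.Variable([1.0, 2.0])
v_b = tf.Variable([3.0])
loss_a = tf.reduce_sum(tf.square(v_a))
loss_b = tf.reduce_sum(tf.square(v_b))

opt = tf.train.GradientDescentOptimizer(0.01)
# Each train op bumps the same counter, mirroring the Submodel code above.
train_a = opt.minimize(loss_a, global_step, var_list=[v_a])
train_b = opt.minimize(loss_b, global_step, var_list=[v_b])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_a)
  sess.run(train_b)
  print(sess.run(global_step))  # 2: steps from both submodels share one count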
+12 −0
@@ -37,6 +37,7 @@ from deepchem.models.tensorgraph.layers import Multiply
from deepchem.models.tensorgraph.layers import ReduceMean
from deepchem.models.tensorgraph.layers import ReduceSquareDifference
from deepchem.models.tensorgraph.layers import ReduceSum
from deepchem.models.tensorgraph.layers import ReLU
from deepchem.models.tensorgraph.layers import Repeat
from deepchem.models.tensorgraph.layers import Reshape
from deepchem.models.tensorgraph.layers import SluiceLoss
@@ -228,6 +229,17 @@ class TestLayers(test_util.TensorFlowTestCase):
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size,)

  def test_relu(self):
    """Test that Sigmoid can be invoked."""
    batch_size = 10
    n_features = 5
    in_tensor = np.random.rand(batch_size, n_features)
    with self.test_session() as sess:
      in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
      out_tensor = ReLU()(in_tensor)
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size, n_features)

  def test_sigmoid(self):
    """Test that Sigmoid can be invoked."""
    batch_size = 10