Commit 3576933b authored by Peter Eastman's avatar Peter Eastman
Browse files

Fixed some names and imports

parent f7a0d2b0
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -18,6 +18,9 @@ from deepchem.models.sequential import Sequential
from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskRegressor
from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskClassifier
from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskFitTransformRegressor
from deepchem.models.tensorflow_models.fcnet import TensorGraphMultiTaskRegressor
from deepchem.models.tensorflow_models.fcnet import TensorGraphMultiTaskClassifier
from deepchem.models.tensorflow_models.fcnet import TensorGraphMultiTaskFitTransformRegressor
from deepchem.models.tensorflow_models.robust_multitask import RobustMultitaskRegressor
from deepchem.models.tensorflow_models.robust_multitask import RobustMultitaskClassifier
from deepchem.models.tensorflow_models.lr import TensorflowLogisticRegression
+2 −2
Original line number Diff line number Diff line
@@ -241,7 +241,7 @@ class TensorGraphMultiTaskRegressor(TensorGraph):
        yield feed_dict


class TensorGraphMultiTaskFitRegressor(TensorGraphMultiTaskRegressor):
class TensorGraphMultiTaskFitTransformRegressor(TensorGraphMultiTaskRegressor):
  """Implements a TensorGraphMultiTaskRegressor that performs on-the-fly transformation during fit/predict.

  Example:
@@ -268,7 +268,7 @@ class TensorGraphMultiTaskFitRegressor(TensorGraphMultiTaskRegressor):
               n_evals=1,
               batch_size=50,
               **kwargs):
    """Create a TensorGraphMultiTaskFitRegressor.
    """Create a TensorGraphMultiTaskFitTransformRegressor.

    In addition to the following arguments, this class also accepts all the keyword arguments
    from TensorGraphMultiTaskRegressor.
+0 −1
Original line number Diff line number Diff line
@@ -1307,7 +1307,6 @@ class Dropout(Layer):
class WeightDecay(Layer):
  """Apply a weight decay penalty.


  The input should be the loss value.  This layer adds a weight decay penalty to it
  and outputs the sum.
  """