Commit f7a0d2b0 authored by Peter Eastman

Ran yapf to update formatting
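
For reference, the reformatting in the diffs below is what yapf produces through its documented Python API. A minimal sketch follows; the style options used here (a Google-derived base style with 2-space indents) are inferred from the formatted output, not stated anywhere in the commit itself:

# Sketch: reproduce this commit's reformatting with yapf.
# The style options are inferred from the output (2-space indents,
# 4-space continuation lines); DeepChem's actual .style.yapf is not
# shown in this commit.
from yapf.yapflib.yapf_api import FormatCode

source = ("layer = Dense(in_layers=[prev_layer], out_channels=size, "
          "activation_fn=tf.nn.relu)\n")

# In yapf releases of this period, FormatCode returns the reformatted
# source plus a flag saying whether anything changed.
formatted, changed = FormatCode(
    source, style_config='{based_on_style: google, indent_width: 2}')
print(formatted)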

parent 9b079e51
+59 −27
@@ -18,11 +18,12 @@ from deepchem.models.tensorflow_models import TensorflowClassifier
from deepchem.models.tensorflow_models import TensorflowRegressor
from deepchem.metrics import to_one_hot


from deepchem.models.tensorgraph.tensor_graph import TensorGraph, TFWrapper
from deepchem.models.tensorgraph.layers import Feature, Label, Weights, WeightedError, Dense, Dropout, WeightDecay, Reshape, SoftMaxCrossEntropy, L2Loss


class TensorGraphMultiTaskClassifier(TensorGraph):

  def __init__(self,
               n_tasks,
               n_features,
@@ -73,27 +74,41 @@ class TensorGraphMultiTaskClassifier(TensorGraph):

    # Add the dense layers

-    for size, weight_stddev, bias_const, dropout in zip(layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
-      layer = Dense(in_layers=[prev_layer], out_channels=size, activation_fn=tf.nn.relu,
-                    weights_initializer=TFWrapper(tf.truncated_normal_initializer, stddev=weight_stddev),
-                    biases_initializer=TFWrapper(tf.constant_initializer, value=bias_const))
+    for size, weight_stddev, bias_const, dropout in zip(
+        layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
+      layer = Dense(
+          in_layers=[prev_layer],
+          out_channels=size,
+          activation_fn=tf.nn.relu,
+          weights_initializer=TFWrapper(
+              tf.truncated_normal_initializer, stddev=weight_stddev),
+          biases_initializer=TFWrapper(
+              tf.constant_initializer, value=bias_const))
      if dropout > 0.0:
        layer = Dropout(dropout, in_layers=[layer])
      prev_layer = layer

    # Compute the loss function for each label.

-    output = Reshape(shape=(-1, n_tasks, n_classes), in_layers=[Dense(in_layers=[prev_layer], out_channels=n_tasks*n_classes)])
+    output = Reshape(
+        shape=(-1, n_tasks, n_classes),
+        in_layers=[
+            Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
+        ])
    self.add_output(output)
    labels = Label(shape=(None, n_tasks, n_classes))
    weights = Weights(shape=(None, n_tasks))
-    loss = Reshape(shape=(-1, n_tasks), in_layers=[SoftMaxCrossEntropy(in_layers=[labels, output])])
+    loss = Reshape(
+        shape=(-1, n_tasks),
+        in_layers=[SoftMaxCrossEntropy(in_layers=[labels, output])])
    weighted_loss = WeightedError(in_layers=[loss, weights])
    if weight_decay_penalty != 0.0:
-      weighted_loss = WeightDecay(weight_decay_penalty, weight_decay_penalty_type, in_layers=[weighted_loss])
+      weighted_loss = WeightDecay(
+          weight_decay_penalty,
+          weight_decay_penalty_type,
+          in_layers=[weighted_loss])
    self.set_loss(weighted_loss)


  def default_generator(self,
                        dataset,
                        epochs=1,
@@ -106,7 +121,9 @@ class TensorGraphMultiTaskClassifier(TensorGraph):
          pad_batches=pad_batches):
        feed_dict = dict()
        if y_b is not None and not predict:
-          feed_dict[self.labels[0]] = to_one_hot(y_b.flatten(), self.n_classes).reshape(-1, self.n_tasks, self.n_classes)
+          feed_dict[self.labels[0]] = to_one_hot(
+              y_b.flatten(), self.n_classes).reshape(-1, self.n_tasks,
+                                                     self.n_classes)
        if X_b is not None:
          feed_dict[self.features[0]] = X_b
        if w_b is not None and not predict:
@@ -114,9 +131,8 @@ class TensorGraphMultiTaskClassifier(TensorGraph):
        yield feed_dict




class TensorGraphMultiTaskRegressor(TensorGraph):

  def __init__(self,
               n_tasks,
               n_features,
@@ -164,29 +180,47 @@ class TensorGraphMultiTaskRegressor(TensorGraph):

    # Add the dense layers

-    for size, weight_stddev, bias_const, dropout in zip(layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
-      layer = Dense(in_layers=[prev_layer], out_channels=size, activation_fn=tf.nn.relu,
-                    weights_initializer=TFWrapper(tf.truncated_normal_initializer, stddev=weight_stddev),
-                    biases_initializer=TFWrapper(tf.constant_initializer, value=bias_const))
+    for size, weight_stddev, bias_const, dropout in zip(
+        layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
+      layer = Dense(
+          in_layers=[prev_layer],
+          out_channels=size,
+          activation_fn=tf.nn.relu,
+          weights_initializer=TFWrapper(
+              tf.truncated_normal_initializer, stddev=weight_stddev),
+          biases_initializer=TFWrapper(
+              tf.constant_initializer, value=bias_const))
      if dropout > 0.0:
        layer = Dropout(dropout, in_layers=[layer])
      prev_layer = layer

    # Compute the loss function for each label.

-    output = Reshape(shape=(-1, n_tasks, 1), in_layers=[Dense(in_layers=[prev_layer], out_channels=n_tasks,
-                    weights_initializer=TFWrapper(tf.truncated_normal_initializer, stddev=weight_init_stddevs[-1]),
-                    biases_initializer=TFWrapper(tf.constant_initializer, value=bias_init_consts[-1]))])
+    output = Reshape(
+        shape=(-1, n_tasks, 1),
+        in_layers=[
+            Dense(
+                in_layers=[prev_layer],
+                out_channels=n_tasks,
+                weights_initializer=TFWrapper(
+                    tf.truncated_normal_initializer,
+                    stddev=weight_init_stddevs[-1]),
+                biases_initializer=TFWrapper(
+                    tf.constant_initializer, value=bias_init_consts[-1]))
+        ])
    self.add_output(output)
    labels = Label(shape=(None, n_tasks, 1))
    weights = Weights(shape=(None, n_tasks))
-    loss = Reshape(shape=(-1, n_tasks), in_layers=[L2Loss(in_layers=[labels, output])])
+    loss = Reshape(
+        shape=(-1, n_tasks), in_layers=[L2Loss(in_layers=[labels, output])])
    weighted_loss = WeightedError(in_layers=[loss, weights])
    if weight_decay_penalty != 0.0:
-      weighted_loss = WeightDecay(weight_decay_penalty, weight_decay_penalty_type, in_layers=[weighted_loss])
+      weighted_loss = WeightDecay(
+          weight_decay_penalty,
+          weight_decay_penalty_type,
+          in_layers=[weighted_loss])
    self.set_loss(weighted_loss)


  def default_generator(self,
                        dataset,
                        epochs=1,
@@ -207,8 +241,6 @@ class TensorGraphMultiTaskRegressor(TensorGraph):
        yield feed_dict




class TensorGraphMultiTaskFitRegressor(TensorGraphMultiTaskRegressor):
  """Implements a TensorGraphMultiTaskRegressor that performs on-the-fly transformation during fit/predict.

@@ -269,7 +301,6 @@ class TensorGraphMultiTaskFitRegressor(TensorGraphMultiTaskRegressor):
    print("n_features after fit_transform: %d" % int(n_features))
    super().__init__(n_tasks, n_features, batch_size=batch_size, **kwargs)


  def default_generator(self,
                        dataset,
                        epochs=1,
@@ -292,8 +323,8 @@ class TensorGraphMultiTaskFitRegressor(TensorGraphMultiTaskRegressor):
          feed_dict[self.task_weights[0]] = w_b
        yield feed_dict


  def predict_proba_on_generator(self, generator, transformers=[]):

    def transform_generator():
      for feed_dict in generator:
        X = feed_dict[self.features[0]]
@@ -303,8 +334,9 @@ class TensorGraphMultiTaskFitRegressor(TensorGraphMultiTaskRegressor):
          X_t = transformer.X_transform(X_t)
        feed_dict[self.features[0]] = X_t
        yield feed_dict
-    return super().predict_proba_on_generator(transform_generator(), transformers)

+    return super().predict_proba_on_generator(transform_generator(),
+                                              transformers)


class TensorflowMultiTaskClassifier(TensorflowClassifier):
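
As an aside, here is a hypothetical usage example for the TensorGraphMultiTaskClassifier defined above, assuming the DeepChem APIs of this era (dc.data.NumpyDataset and Model.fit with nb_epoch); the shapes and hyperparameters are illustrative only, not taken from this commit:

import numpy as np
import deepchem as dc

# Toy data: 100 samples, 1024 features, 2 binary classification tasks.
X = np.random.rand(100, 1024)
y = np.random.randint(2, size=(100, 2))
dataset = dc.data.NumpyDataset(X, y)

# A single hidden layer keeps layer_sizes aligned with the default
# weight_init_stddevs / bias_init_consts lists iterated by zip() above.
model = TensorGraphMultiTaskClassifier(
    n_tasks=2,
    n_features=1024,
    layer_sizes=[1000],
    dropouts=[0.25],
    batch_size=50)
model.fit(dataset, nb_epoch=10)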
+4 −2
@@ -1287,6 +1287,7 @@ class NeighborList(Layer):
            tf.transpose(tf.stack(tf.meshgrid(*mesh_args))), (self.n_cells,
                                                              self.ndim)))


class Dropout(Layer):

  def __init__(self, dropout_prob, **kwargs):
@@ -1330,7 +1331,8 @@ class WeightDecay(Layer):
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    parent_tensor = in_layers[0].out_tensor
-    self.out_tensor = parent_tensor+model_ops.weight_decay(self.penalty_type, self.penalty)
+    self.out_tensor = parent_tensor + model_ops.weight_decay(self.penalty_type,
+                                                             self.penalty)
    return self.out_tensor


+4 −2
@@ -60,7 +60,8 @@ class TensorGraph(Model):
    self.loss = None
    self.built = False
    self.queue_installed = False
-    self.optimizer = TFWrapper(tf.train.AdamOptimizer, learning_rate=0.001, beta1=0.9, beta2=0.999)
+    self.optimizer = TFWrapper(
+        tf.train.AdamOptimizer, learning_rate=0.001, beta1=0.9, beta2=0.999)

    # Singular place to hold Tensor objects which don't serialize
    # These have to be reconstructed on restoring from pickle
@@ -448,7 +449,8 @@ class TensorGraph(Model):
    elif obj == "FileWriter":
      self.tensor_objects['FileWriter'] = tf.summary.FileWriter(self.model_dir)
    elif obj == 'train_op':
-      self.tensor_objects['train_op'] = self.optimizer().minimize(self.loss.out_tensor)
+      self.tensor_objects['train_op'] = self.optimizer().minimize(
+          self.loss.out_tensor)
    elif obj == 'summary_op':
      self.tensor_objects['summary_op'] = tf.summary.merge_all(
          key=tf.GraphKeys.SUMMARIES)
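
Finally, a note on TFWrapper, which this commit touches repeatedly: every call site constructs it with a class plus keyword arguments (for example TFWrapper(tf.train.AdamOptimizer, learning_rate=0.001, ...)) and later invokes it with no arguments (self.optimizer().minimize(...)). That usage implies a deferred-construction wrapper along the following lines; this is a sketch inferred from the call sites, not DeepChem's actual implementation:

class TFWrapperSketch(object):
  """Records a constructor and kwargs, building the object on call.

  Inferred from usage in this commit (see self.optimizer above); the
  real TFWrapper lives in deepchem.models.tensorgraph.tensor_graph.
  """

  def __init__(self, tf_class, **kwargs):
    self.tf_class = tf_class  # e.g. tf.train.AdamOptimizer
    self.kwargs = kwargs  # held until the graph is actually built

  def __call__(self):
    # Deferred construction keeps unpicklable TensorFlow objects out of
    # the wrapper until they are needed, e.g. when restoring from pickle.
    return self.tf_class(**self.kwargs)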