Commit e24b5c12 authored by leswing
Browse files

yapf

parent bb6b9cdc
Loading
Loading
Loading
Loading
+86 −45
Original line number Diff line number Diff line
@@ -71,6 +71,7 @@ class Layer(object):


class Conv1DLayer(Layer):

  def __init__(self, width, out_channels, **kwargs):
    self.width = width
    self.out_channels = out_channels
@@ -95,7 +96,11 @@ class Conv1DLayer(Layer):


class Dense(Layer):
  def __init__(self, out_channels, activation_fn=None,

  def __init__(
      self,
      out_channels,
      activation_fn=None,
      biases_initializer=tf.zeros_initializer,
      weights_initializer=tf.contrib.layers.variance_scaling_initializer,
      time_series=False,
@@ -141,12 +146,19 @@ class Dense(Layer):

  def shared(self, in_layers):
    """Create a copy of this Dense layer that reuses this layer's variables.

    Parameters
    ----------
    in_layers: list
      Input layers to connect the shared copy to.

    Returns
    -------
    A new Dense layer with the same configuration as this one, marked with
    reuse=True and the same scope_name so it binds to the same TF variables.
    """
    # Mark this layer's scope as reusable so the copy resolves to the same
    # variables instead of creating fresh ones.
    self.reuse = True
    # NOTE(review): the source contained two consecutive return statements
    # (a diff-merge artifact); the second was unreachable. Only the
    # yapf-formatted return is kept — behavior is unchanged.
    return Dense(
        self.out_channels,
        self.activation_fn,
        self.biases_initializer,
        self.weights_initializer,
        time_series=self.time_series,
        reuse=self.reuse,
        scope_name=self.scope_name,
        in_layers=in_layers)


class Flatten(Layer):

  def __init__(self, **kwargs):
    super(Flatten, self).__init__(**kwargs)

@@ -164,6 +176,7 @@ class Flatten(Layer):


class Reshape(Layer):

  def __init__(self, shape, **kwargs):
    self.shape = shape
    super(Reshape, self).__init__(**kwargs)
@@ -172,7 +185,9 @@ class Reshape(Layer):
    parent_tensor = self.in_layers[0].out_tensor
    self.out_tensor = tf.reshape(parent_tensor, self.shape)


class Transpose(Layer):

  def __init__(self, out_shape, **kwargs):
    super(Transpose, self).__init__(**kwargs)
    self.out_shape = out_shape
@@ -185,6 +200,7 @@ class Transpose(Layer):


class CombineMeanStd(Layer):

  def __init__(self, **kwargs):
    super(CombineMeanStd, self).__init__(**kwargs)

@@ -199,6 +215,7 @@ class CombineMeanStd(Layer):


class Repeat(Layer):

  def __init__(self, n_times, **kwargs):
    self.n_times = n_times
    super(Repeat, self).__init__(**kwargs)
@@ -213,6 +230,7 @@ class Repeat(Layer):


class GRU(Layer):

  def __init__(self, n_hidden, out_channels, batch_size, **kwargs):
    self.n_hidden = n_hidden
    self.out_channels = out_channels
@@ -235,6 +253,7 @@ class GRU(Layer):


class TimeSeriesDense(Layer):

  def __init__(self, out_channels, **kwargs):
    super(TimeSeriesDense, self).__init__(**kwargs)

@@ -248,6 +267,7 @@ class TimeSeriesDense(Layer):


class Input(Layer):

  def __init__(self, shape, dtype=tf.float32, **kwargs):
    self.shape = shape
    self.dtype = dtype
@@ -272,21 +292,25 @@ class Input(Layer):


class Feature(Input):
  """Marker subclass of Input used to declare feature placeholders.

  Adds no behavior of its own. The original __init__ only delegated
  verbatim to Input.__init__ (useless super delegation, pylint W0235);
  it has been removed with no change in behavior — construction still
  runs Input.__init__ with the same arguments.
  """


class Label(Input):
  """Marker subclass of Input used to declare label placeholders.

  Adds no behavior of its own. The original __init__ only delegated
  verbatim to Input.__init__ (useless super delegation, pylint W0235);
  it has been removed with no change in behavior — construction still
  runs Input.__init__ with the same arguments.
  """


class Weights(Input):
  """Marker subclass of Input used to declare sample-weight placeholders.

  Adds no behavior of its own. The original __init__ only delegated
  verbatim to Input.__init__ (useless super delegation, pylint W0235);
  it has been removed with no change in behavior — construction still
  runs Input.__init__ with the same arguments.
  """


class L2LossLayer(Layer):

  def __init__(self, **kwargs):
    super(L2LossLayer, self).__init__(**kwargs)

@@ -298,6 +322,7 @@ class L2LossLayer(Layer):


class SoftMax(Layer):

  def __init__(self, **kwargs):
    super(SoftMax, self).__init__(**kwargs)

@@ -310,6 +335,7 @@ class SoftMax(Layer):


class Concat(Layer):

  def __init__(self, **kwargs):
    super(Concat, self).__init__(**kwargs)

@@ -324,6 +350,7 @@ class Concat(Layer):


class SoftMaxCrossEntropy(Layer):

  def __init__(self, **kwargs):
    super(SoftMaxCrossEntropy, self).__init__(**kwargs)

@@ -338,6 +365,7 @@ class SoftMaxCrossEntropy(Layer):


class ReduceMean(Layer):

  def _create_tensor(self):
    if len(self.in_layers) > 1:
      out_tensors = [x.out_tensor for x in self.in_layers]
@@ -350,6 +378,7 @@ class ReduceMean(Layer):


class ReduceSquareDifference(Layer):

  def __init__(self, **kwargs):
    super(ReduceSquareDifference, self).__init__(**kwargs)

@@ -361,6 +390,7 @@ class ReduceSquareDifference(Layer):


class Conv2d(Layer):

  def __init__(self, num_outputs, kernel_size=5, **kwargs):
    self.num_outputs = num_outputs
    self.kernel_size = kernel_size
@@ -379,6 +409,7 @@ class Conv2d(Layer):


class MaxPool(Layer):

  def __init__(self,
               ksize=[1, 2, 2, 1],
               strides=[1, 2, 2, 1],
@@ -433,6 +464,7 @@ class InputFifoQueue(Layer):


class GraphConvLayer(Layer):

  def __init__(self,
               out_channel,
               min_deg=0,
@@ -543,6 +575,7 @@ class GraphConvLayer(Layer):


class GraphPoolLayer(Layer):

  def __init__(self, min_degree=0, max_degree=10, **kwargs):
    self.min_degree = min_degree
    self.max_degree = max_degree
@@ -588,6 +621,7 @@ class GraphPoolLayer(Layer):


class GraphGather(Layer):

  def __init__(self, batch_size, activation_fn=None, **kwargs):
    self.batch_size = batch_size
    self.activation_fn = activation_fn
@@ -630,6 +664,7 @@ class GraphGather(Layer):


class BatchNormLayer(Layer):
  """Applies tf.layers.batch_normalization to its single parent layer."""

  def _create_tensor(self):
    # Normalize the sole parent's output tensor; result becomes this
    # layer's output.
    self.out_tensor = tf.layers.batch_normalization(
        self.in_layers[0].out_tensor)
@@ -637,6 +672,7 @@ class BatchNormLayer(Layer):


class WeightedError(Layer):
  """Sums the elementwise product of its two parents' output tensors.

  The first parent supplies the error/entropy tensor, the second the
  weights; the output is their weighted sum via tf.reduce_sum.
  """

  def _create_tensor(self):
    entropy_layer = self.in_layers[0]
    weight_layer = self.in_layers[1]
    self.out_tensor = tf.reduce_sum(
        entropy_layer.out_tensor * weight_layer.out_tensor)
@@ -644,7 +680,12 @@ class WeightedError(Layer):


class AtomicConvolution(Layer):
  def __init__(self, atom_types=None, radial_params=list(), boxsize=None, **kwargs):

  def __init__(self,
               atom_types=None,
               radial_params=list(),
               boxsize=None,
               **kwargs):
    """Atomic convoluation layer

    N = max_num_atoms, M = max_num_neighbors, B = batch_size, d = num_features
+26 −25
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@ from deepchem.utils.evaluate import GeneratorEvaluator


class TensorGraph(Model):

  def __init__(self,
               tensorboard=False,
               tensorboard_log_frequency=100,
+12 −11
Original line number Diff line number Diff line
@@ -246,4 +246,5 @@ class TestTensorGraph(unittest.TestCase):
        databag.iterbatches(
            epochs=1, batch_size=tg.batch_size, pad_batches=True))
    prediction = tg.predict_proba_on_generator(databag.iterbatches())
    assert_true(np.all(np.isclose(prediction[:, 0], prediction[:, 1], atol=0.01)))
    assert_true(
        np.all(np.isclose(prediction[:, 0], prediction[:, 1], atol=0.01)))