Commit 9327bc7c authored by miaecle
Browse files

yapfed

parent 8cb3e7a2
Loading
Loading
Loading
Loading
+4 −8
Original line number Diff line number Diff line
@@ -469,8 +469,7 @@ class NumpyDataset(Dataset):
      w = np.concatenate([w, dataset.w], axis=0)
      ids = np.concatenate(
          [ids, dataset.ids],
          axis=0,
      )
          axis=0,)

    return NumpyDataset(X, y, w, ids, n_tasks=y.shape[1])

@@ -648,8 +647,7 @@ class DiskDataset(Dataset):
    if not len(self.metadata_df):
      raise ValueError("No data in dataset.")
    sample_X = load_from_disk(
        os.path.join(self.data_dir,
                     next(self.metadata_df.iterrows())[1]['X']))
        os.path.join(self.data_dir, next(self.metadata_df.iterrows())[1]['X']))
    return np.shape(sample_X)[1:]

  def get_shard_size(self):
@@ -657,8 +655,7 @@ class DiskDataset(Dataset):
    if not len(self.metadata_df):
      raise ValueError("No data in dataset.")
    sample_y = load_from_disk(
        os.path.join(self.data_dir,
                     next(self.metadata_df.iterrows())[1]['y']))
        os.path.join(self.data_dir, next(self.metadata_df.iterrows())[1]['y']))
    return len(sample_y)

  def _get_metadata_filename(self):
@@ -789,7 +786,6 @@ class DiskDataset(Dataset):
        else:
          shard_batch_size = batch_size


        if n_shard_samples == 0:
          cur_shard += 1
          if batch_size is None:
+21 −18
Original line number Diff line number Diff line
@@ -735,8 +735,7 @@ class Squeeze(Layer):
        self._shape = [i for i in parent_shape if i != 1]
      else:
        self._shape = [
            parent_shape[i]
            for i in range(len(parent_shape))
            parent_shape[i] for i in range(len(parent_shape))
            if i not in squeeze_dims
        ]
    except:
@@ -1561,6 +1560,7 @@ class ReduceMean(Layer):
      self.out_tensor = out_tensor
    return out_tensor


class ReduceMax(Layer):

  def __init__(self, in_layers=None, axis=None, **kwargs):
@@ -1591,6 +1591,7 @@ class ReduceMax(Layer):
      self.out_tensor = out_tensor
    return out_tensor


class ToFloat(Layer):

  def __init__(self, in_layers=None, **kwargs):
@@ -2093,8 +2094,8 @@ class MaxPool1D(Layer):
    super(MaxPool1D, self).__init__(**kwargs)
    try:
      parent_shape = self.in_layers[0].shape
      self._shape = tuple(
          None if p is None else p // s for p, s in zip(parent_shape, strides))
      self._shape = tuple(None if p is None else p // s
                          for p, s in zip(parent_shape, strides))
    except:
      pass

@@ -2125,8 +2126,8 @@ class MaxPool2D(Layer):
    super(MaxPool2D, self).__init__(**kwargs)
    try:
      parent_shape = self.in_layers[0].shape
      self._shape = tuple(
          None if p is None else p // s for p, s in zip(parent_shape, strides))
      self._shape = tuple(None if p is None else p // s
                          for p, s in zip(parent_shape, strides))
    except:
      pass

@@ -2172,8 +2173,8 @@ class MaxPool3D(Layer):
    super(MaxPool3D, self).__init__(**kwargs)
    try:
      parent_shape = self.in_layers[0].shape
      self._shape = tuple(
          None if p is None else p // s for p, s in zip(parent_shape, strides))
      self._shape = tuple(None if p is None else p // s
                          for p, s in zip(parent_shape, strides))
    except:
      pass

@@ -2938,13 +2939,15 @@ class VinaFreeEnergy(Layer):

  def hydrophobic(self, d):
    """Computes Autodock Vina's hydrophobic interaction term."""
    out_tensor = tf.where(d < 0.5, tf.ones_like(d),
    out_tensor = tf.where(d < 0.5,
                          tf.ones_like(d),
                          tf.where(d < 1.5, 1.5 - d, tf.zeros_like(d)))
    return out_tensor

  def hydrogen_bond(self, d):
    """Computes Autodock Vina's hydrogen bond interaction term."""
    out_tensor = tf.where(d < -0.7, tf.ones_like(d),
    out_tensor = tf.where(d < -0.7,
                          tf.ones_like(d),
                          tf.where(d < 0, (1.0 / 0.7) * (0 - d),
                                   tf.zeros_like(d)))
    return out_tensor
@@ -3321,8 +3324,8 @@ class NeighborList(Layer):
    mesh_args = [tf.range(start, stop, nbr_cutoff) for _ in range(self.ndim)]
    return tf.to_float(
        tf.reshape(
            tf.transpose(tf.stack(tf.meshgrid(*mesh_args))),
            (self.n_cells, self.ndim)))
            tf.transpose(tf.stack(tf.meshgrid(*mesh_args))), (self.n_cells,
                                                              self.ndim)))


class Dropout(Layer):
@@ -3609,8 +3612,8 @@ class AtomicConvolution(Layer):
    example_tensors = tf.unstack(X, axis=0)
    example_nbrs = tf.unstack(nbr_indices, axis=0)
    all_nbr_coords = []
    for example, (example_tensor, example_nbr) in enumerate(
        zip(example_tensors, example_nbrs)):
    for example, (example_tensor,
                  example_nbr) in enumerate(zip(example_tensors, example_nbrs)):
      nbr_coords = tf.gather(example_tensor, example_nbr)
      all_nbr_coords.append(nbr_coords)
    neighbors = tf.stack(all_nbr_coords)
@@ -4176,13 +4179,13 @@ class GraphCNN(Layer):
    no_features = V.get_shape()[2].value
    W = tf.get_variable(
        '%s_weights' % self.name, [no_features * no_A, self.num_filters],
        initializer=tf.truncated_normal_initializer(
            stddev=math.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
        initializer=tf.truncated_normal_initializer(stddev=math.sqrt(
            1.0 / (no_features * (no_A + 1) * 1.0))),
        dtype=tf.float32)
    W_I = tf.get_variable(
        '%s_weights_I' % self.name, [no_features, self.num_filters],
        initializer=tf.truncated_normal_initializer(
            stddev=math.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
        initializer=tf.truncated_normal_initializer(stddev=math.sqrt(
            1.0 / (no_features * (no_A + 1) * 1.0))),
        dtype=tf.float32)

    b = tf.get_variable(
+27 −30
Original line number Diff line number Diff line
@@ -172,8 +172,8 @@ class WeaveTensorGraph(TensorGraph):
          atom_feat.append(mol.get_atom_features())
          # pair features
          pair_feat.append(
              np.reshape(mol.get_pair_features(),
                         (n_atoms * n_atoms, self.n_pair_feat)))
              np.reshape(mol.get_pair_features(), (n_atoms * n_atoms,
                                                   self.n_pair_feat)))

        feed_dict[self.atom_features] = np.concatenate(atom_feat, axis=0)
        feed_dict[self.pair_features] = np.concatenate(pair_feat, axis=0)
@@ -184,9 +184,7 @@ class WeaveTensorGraph(TensorGraph):

  def predict_on_generator(self, generator, transformers=[], outputs=None):
    out = super(WeaveTensorGraph, self).predict_on_generator(
          generator, 
          transformers=[], 
          outputs=outputs)
        generator, transformers=[], outputs=outputs)
    if outputs is None:
      outputs = self.outputs
    if len(outputs) > 1:
@@ -196,7 +194,6 @@ class WeaveTensorGraph(TensorGraph):
    return out



class DTNNTensorGraph(TensorGraph):

  def __init__(self,
@@ -320,8 +317,8 @@ class DTNNTensorGraph(TensorGraph):
        num_atoms = list(map(sum, X_b.astype(bool)[:, :, 0]))
        atom_number = [
            np.round(
                np.power(2 * np.diag(X_b[i, :num_atoms[i], :num_atoms[i]]),
                         1 / 2.4)).astype(int) for i in range(len(num_atoms))
                np.power(2 * np.diag(X_b[i, :num_atoms[i], :num_atoms[i]]), 1 /
                         2.4)).astype(int) for i in range(len(num_atoms))
        ]
        start = 0
        for im, molecule in enumerate(atom_number):
@@ -360,6 +357,7 @@ class DTNNTensorGraph(TensorGraph):
    retval = np.concatenate(retval, axis=-1)
    return undo_transforms(retval, transformers)


class DAGTensorGraph(TensorGraph):

  def __init__(self,
@@ -511,9 +509,7 @@ class DAGTensorGraph(TensorGraph):

  def predict_on_generator(self, generator, transformers=[], outputs=None):
    out = super(DAGTensorGraph, self).predict_on_generator(
          generator, 
          transformers=[], 
          outputs=outputs)
        generator, transformers=[], outputs=outputs)
    if outputs is None:
      outputs = self.outputs
    if len(outputs) > 1:
@@ -522,6 +518,7 @@ class DAGTensorGraph(TensorGraph):
    out = undo_transforms(out, transformers)
    return out


class PetroskiSuchTensorGraph(TensorGraph):
  """
      Model from Robust Spatial Filtering with Graph Convolutional Neural Networks
@@ -1093,8 +1090,8 @@ class MPNNTensorGraph(TensorGraph):
          atom_feat.append(mol.get_atom_features())
          # pair features
          pair_feat.append(
              np.reshape(mol.get_pair_features(),
                         (n_atoms * n_atoms, self.n_pair_feat)))
              np.reshape(mol.get_pair_features(), (n_atoms * n_atoms,
                                                   self.n_pair_feat)))

        feed_dict[self.atom_features] = np.concatenate(atom_feat, axis=0)
        feed_dict[self.pair_features] = np.concatenate(pair_feat, axis=0)
+11 −14
Original line number Diff line number Diff line
@@ -175,9 +175,8 @@ class TextCNNTensorGraph(TensorGraph):
              padding='valid',
              in_layers=[self.Embedding]))
      # Max-over-time pooling
      self.pooled_outputs.append(ReduceMax(
          axis=1, 
          in_layers=[self.conv_layers[-1]]))
      self.pooled_outputs.append(
          ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
    # Concat features from all filters(one feature per filter)
    concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
    dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
@@ -273,9 +272,7 @@ class TextCNNTensorGraph(TensorGraph):

  def predict_on_generator(self, generator, transformers=[], outputs=None):
    out = super(TextCNNTensorGraph, self).predict_on_generator(
          generator, 
          transformers=[], 
          outputs=outputs)
        generator, transformers=[], outputs=outputs)
    if outputs is None:
      outputs = self.outputs
    if len(outputs) > 1:
+14 −12
Original line number Diff line number Diff line
@@ -262,8 +262,10 @@ def benchmark_classification(train_dataset,
    filter_sizes = hyper_parameters['filter_sizes']
    num_filters = hyper_parameters['num_filters']

    all_data = deepchem.data.DiskDataset.merge([train_dataset, valid_dataset, test_dataset])
    char_dict, length = deepchem.models.TextCNNTensorGraph.build_char_dict(all_data)
    all_data = deepchem.data.DiskDataset.merge(
        [train_dataset, valid_dataset, test_dataset])
    char_dict, length = deepchem.models.TextCNNTensorGraph.build_char_dict(
        all_data)

    model = deepchem.models.TextCNNTensorGraph(
        len(tasks),
@@ -505,7 +507,6 @@ def benchmark_regression(train_dataset,
    n_filters = hyper_parameters['n_filters']
    n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']


    model = deepchem.models.GraphConvTensorGraph(
        len(tasks),
        graph_conv_layers=[n_filters] * 2,
@@ -596,7 +597,8 @@ def benchmark_regression(train_dataset,
    filter_sizes = hyper_parameters['filter_sizes']
    num_filters = hyper_parameters['num_filters']

    char_dict, length = deepchem.models.TextCNNTensorGraph.build_char_dict(train_dataset)
    char_dict, length = deepchem.models.TextCNNTensorGraph.build_char_dict(
        train_dataset)

    model = deepchem.models.TextCNNTensorGraph(
        len(tasks),
Loading