Commit 6f87f962 authored by Bharath Ramsundar's avatar Bharath Ramsundar
Browse files

Changes

parent 9f5cd2b6
Loading
Loading
Loading
Loading
+10 −8
Original line number Diff line number Diff line
@@ -2439,9 +2439,9 @@ class WeaveGather(tf.keras.layers.Layer):
               batch_size: int,
               n_input: int = 128,
               gaussian_expand: bool = True,
               compress_post_gaussian_expansion: bool = False,
               init: str = 'glorot_uniform',
               activation: str = 'tanh',
               compress_post_gaussian_expansion: bool = False,
               **kwargs):
    """
    Parameters
@@ -2452,17 +2452,19 @@ class WeaveGather(tf.keras.layers.Layer):
      number of features for each input molecule
    gaussian_expand: boolean, optional (default True)
      Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional (default 'glorot_uniform')
      Weight initialization for filters.
    activation: str, optional (default 'tanh')
      Activation function applied. Should be recognizable by
      `tf.keras.activations`.
    compress_post_gaussian_expansion: bool, optional (default False)
      If True, compress the results of the Gaussian expansion back to the
      original dimensions of the input by using a linear layer with specified
      activation function. Note that this compression was not in the original
      paper, but was present in the original DeepChem implementation so is
      left present for backwards compatibility.
    init: str, optional (default 'glorot_uniform')
      Weight initialization for filters if `compress_post_gaussian_expansion`
      is True.
    activation: str, optional (default 'tanh')
      Activation function applied for filters if
      `compress_post_gaussian_expansion` is True. Should be recognizable by
      `tf.keras.activations`.
    """
    try:
      import tensorflow_probability as tfp
@@ -2473,10 +2475,10 @@ class WeaveGather(tf.keras.layers.Layer):
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.compress_post_gaussian_expansion = compress_post_gaussian_expansion
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
    self.compress_post_gaussian_expansion = compress_post_gaussian_expansion

  def get_config(self):
    config = super(WeaveGather, self).get_config()
@@ -2490,7 +2492,7 @@ class WeaveGather(tf.keras.layers.Layer):
    return config

  def build(self, input_shape):
    if self.gaussian_expand:
    if self.compress_post_gaussian_expansion:
      init = initializers.get(self.init)
      self.W = init([self.n_input * 11, self.n_input])
      self.b = backend.zeros(shape=[self.n_input])
+1 −1
Original line number Diff line number Diff line
@@ -153,7 +153,7 @@ def test_weave_gather():
      batch_size=2,
      n_input=75,
      gaussian_expand=True,
      compress_post_expansion=True)
      compress_post_gaussian_expansion=True)
  # Outputs should be [mol1_vec, mol2_vec]
  outputs = gather(inputs)
  assert len(outputs) == 2
+501 −472
Original line number Diff line number Diff line
@@ -6,9 +6,7 @@ import tensorflow as tf
from tensorflow.python.eager import context


class TestLayer(unittest.TestCase):

  def test_interatomic_l2_distance(self):
def test_interatomic_l2_distance():
  N_atoms = 10
  M_nbrs = 15
  ndim = 20
@@ -22,7 +20,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.M_nbrs == layer.M_nbrs
  assert layer_copied.ndim == layer.ndim

  def test_graph_conv(self):

def test_graph_conv():
  out_channel = 10
  min_deg = 0,
  max_deg = 10,
@@ -41,7 +40,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.max_degree == layer.max_degree
  assert layer_copied.min_degree == layer.min_degree

  def test_graph_gather(self):

def test_graph_gather():
  batch_size = 10
  activation_fn = 'relu'

@@ -53,7 +53,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.batch_size == layer_copied.batch_size
  assert layer_copied.activation_fn == layer_copied.activation_fn

  def test_graph_pool(self):

def test_graph_pool():
  min_degree = 0
  max_degree = 10

@@ -65,7 +66,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.max_degree == layer_copied.max_degree
  assert layer_copied.min_degree == layer_copied.min_degree

  def test_lstmstep(self):

def test_lstmstep():
  output_dim = 100
  input_dim = 50
  init_fn = 'glorot_uniform'
@@ -86,7 +88,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.activation == layer.activation
  assert layer_copied.inner_activation == layer.inner_activation

  def test_attn_lstm_embedding(self):

def test_attn_lstm_embedding():
  n_test = 10
  n_support = 100
  n_feat = 20
@@ -102,7 +105,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.n_feat == layer.n_feat
  assert layer_copied.max_depth == layer.max_depth

  def test_iterref_lstm_embedding(self):

def test_iterref_lstm_embedding():
  n_test = 10
  n_support = 100
  n_feat = 20
@@ -118,7 +122,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.n_feat == layer.n_feat
  assert layer_copied.max_depth == layer.max_depth

  def test_switched_dropout(self):

def test_switched_dropout():
  rate = 0.1
  layer = dc.models.layers.SwitchedDropout(rate=rate)
  config = layer.get_config()
@@ -126,7 +131,8 @@ class TestLayer(unittest.TestCase):

  assert layer_copied.rate == layer.rate

  def test_weighted_linearcombo(self):

def test_weighted_linearcombo():
  std = 0.1
  layer = dc.models.layers.WeightedLinearCombo(std=std)

@@ -135,7 +141,8 @@ class TestLayer(unittest.TestCase):

  assert layer_copied.std == layer.std

  def test_combine_mean_std(self):

def test_combine_mean_std():
  training_only = True
  noise_epsilon = 0.001

@@ -146,7 +153,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.training_only == layer.training_only
  assert layer_copied.noise_epsilon == layer.noise_epsilon

  def test_stack(self):

def test_stack():
  axis = 2
  layer = dc.models.layers.Stack(axis=axis)
  config = layer.get_config()
@@ -154,7 +162,8 @@ class TestLayer(unittest.TestCase):

  assert layer_copied.axis == layer.axis

  def test_variable(self):

def test_variable():
  initial_value = 10
  layer = dc.models.layers.Variable(initial_value)
  config = layer.get_config()
@@ -162,7 +171,8 @@ class TestLayer(unittest.TestCase):

  assert layer_copied.initial_value == layer.initial_value

  def test_vina_free_energy(self):

def test_vina_free_energy():
  N_atoms = 10
  M_nbrs = 15
  ndim = 20
@@ -186,7 +196,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.stddev == layer.stddev
  assert layer_copied.Nrot == layer_copied.Nrot

  def test_neighbor_list(self):

def test_neighbor_list():
  N_atoms = 10
  M_nbrs = 15
  ndim = 20
@@ -206,13 +217,13 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.start == layer.start
  assert layer_copied.stop == layer.stop

  def test_atomic_convolution(self):

def test_atomic_convolution():
  atom_types = None
  radial_params = list()
  boxsize = None

    layer = dc.models.layers.AtomicConvolution(atom_types, radial_params,
                                               boxsize)
  layer = dc.models.layers.AtomicConvolution(atom_types, radial_params, boxsize)
  config = layer.get_config()
  layer_copied = dc.models.layers.AtomicConvolution.from_config(config)

@@ -220,7 +231,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.radial_params == layer.radial_params
  assert layer_copied.boxsize == layer.boxsize

  def test_ani_feat(self):

def test_ani_feat():
  max_atoms = 23
  radial_cutoff = 4.6
  angular_cutoff = 3.1
@@ -245,7 +257,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.atomic_number_differentiated == layer.atomic_number_differentiated
  assert layer_copied.coordinates_in_bohr == layer.coordinates_in_bohr

  def test_graph_embed_pool(self):

def test_graph_embed_pool():
  num_vertices = 100
  layer = dc.models.layers.GraphEmbedPoolLayer(num_vertices)
  config = layer.get_config()
@@ -253,7 +266,8 @@ class TestLayer(unittest.TestCase):

  assert layer_copied.num_vertices == layer.num_vertices

  def test_graph_cnn(self):

def test_graph_cnn():
  num_filters = 20
  layer = dc.models.layers.GraphCNN(num_filters)
  config = layer.get_config()
@@ -261,7 +275,8 @@ class TestLayer(unittest.TestCase):

  assert layer_copied.num_filters == layer.num_filters

  def test_highway(self):

def test_highway():
  activation_fn = 'relu'
  biases_initializer = 'zeros'
  weights_initializer = None
@@ -275,7 +290,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.biases_initializer == layer.biases_initializer
  assert layer_copied.weights_initializer == layer.weights_initializer

  def test_weave(self):

def test_weave():
  n_atom_input_feat = 75
  n_pair_input_feat = 14
  n_atom_output_feat = 50
@@ -287,11 +303,13 @@ class TestLayer(unittest.TestCase):
  update_pair = True
  init = 'glorot_uniform'
  activation = 'relu'
  batch_normalize = True
  batch_normalize_kwargs = {"renorm": True}

  layer = dc.models.layers.WeaveLayer(
      n_atom_input_feat, n_pair_input_feat, n_atom_output_feat,
      n_pair_output_feat, n_hidden_AA, n_hidden_PA, n_hidden_AP, n_hidden_PP,
        update_pair, init, activation)
      update_pair, init, activation, batch_normalize, batch_normalize_kwargs)
  config = layer.get_config()
  layer_copied = dc.models.layers.WeaveLayer.from_config(config)

@@ -306,30 +324,33 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.update_pair == layer.update_pair
  assert layer_copied.init == layer.init
  assert layer_copied.activation == layer.activation
  assert layer_copied.batch_normalize == layer.batch_normalize
  assert layer_copied.batch_normalize_kwargs == layer.batch_normalize_kwargs

  def test_weave_gather(self):

def test_weave_gather():
  batch_size = 32
  n_input = 128
    gaussian_expand = False
  gaussian_expand = True
  compress_post_gaussian_expansion = False
  init = 'glorot_uniform'
  activation = 'tanh'
    epsilon = 1e-3
    momentum = 0.99

  layer = dc.models.layers.WeaveGather(batch_size, n_input, gaussian_expand,
                                         init, activation, epsilon, momentum)
                                       compress_post_gaussian_expansion, init,
                                       activation)
  config = layer.get_config()
  layer_copied = dc.models.layers.WeaveGather.from_config(config)

  assert layer_copied.batch_size == layer.batch_size
  assert layer_copied.n_input == layer.n_input
  assert layer_copied.gaussian_expand == layer.gaussian_expand
  assert layer_copied.compress_post_gaussian_expansion == layer.compress_post_gaussian_expansion
  assert layer_copied.init == layer.init
  assert layer_copied.activation == layer.activation
    assert layer_copied.epsilon == layer.epsilon
    assert layer_copied.momentum == layer.momentum

  def test_dtnn_embedding(self):

def test_dtnn_embedding():
  n_embedding = 30
  periodic_table_length = 30
  init = 'glorot_uniform'
@@ -343,7 +364,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.periodic_table_length == layer.periodic_table_length
  assert layer_copied.init == layer.init

  def test_dtnn_step(self):

def test_dtnn_step():
  n_embedding = 30
  n_distance = 100
  n_hidden = 60
@@ -361,7 +383,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.init == layer.init
  assert layer_copied.activation == layer.activation

  def test_dtnn_gather(self):

def test_dtnn_gather():
  n_embedding = 30
  n_outputs = 100
  layer_sizes = [100]
@@ -381,7 +404,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.init == layer.init
  assert layer_copied.activation == layer.activation

  def test_dag(self):

def test_dag():
  n_graph_feat = 30
  n_atom_feat = 75
  max_atoms = 50
@@ -406,7 +430,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.dropout == layer.dropout
  assert layer_copied.batch_size == layer.batch_size

  def test_dag_gather(self):

def test_dag_gather():
  n_graph_feat = 30
  n_outputs = 30
  max_atoms = 50
@@ -428,7 +453,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.activation == layer.activation
  assert layer_copied.dropout == layer.dropout

  def test_message_passing(self):

def test_message_passing():
  T = 20
  message_fn = 'enn'
  update_fn = 'gru'
@@ -442,7 +468,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.update_fn == layer.update_fn
  assert layer_copied.n_hidden == layer.n_hidden

  def test_edge_network(self):

def test_edge_network():
  n_pair_features = 8
  n_hidden = 100
  init = 'glorot_uniform'
@@ -454,7 +481,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.n_hidden == layer.n_hidden
  assert layer_copied.init == layer.init

  def test_gru(self):

def test_gru():
  n_hidden = 100
  init = 'glorot_uniform'
  layer = dc.models.layers.GatedRecurrentUnit(n_hidden, init)
@@ -464,7 +492,8 @@ class TestLayer(unittest.TestCase):
  assert layer_copied.n_hidden == layer.n_hidden
  assert layer_copied.init == layer.init

  def test_set_gather(self):

def test_set_gather():
  M = 10
  batch_size = 16
  n_hidden = 100