Commit 38e7a470 authored by Nathan Frey

formatting

parent 1fd3b05a
+137 −139
import sys

import logging
import deepchem as dc
from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
-from tensorflow.keras.layers import Input, Layer, Dense, Flatten, Concatenate
-from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Activation, Lambda
+from tensorflow.keras.layers import Input, Dense, Reshape, Dropout, Activation, Lambda, Flatten, Concatenate

import numpy as np
import tensorflow as tf
@@ -15,105 +12,106 @@ try:
  from collections.abc import Sequence as SequenceCollection
except:
  from collections import Sequence as SequenceCollection
-from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Union
+from typing import Sequence, Union
from deepchem.utils.typing import KerasActivationFn, LossFn, OneOrMany

logger = logging.getLogger(__name__)

-class AtomicConvScore(Layer):
-  """The scoring function used by the atomic convolution models."""
-
-  def __init__(self, atom_types, layer_sizes, **kwargs):
-    super(AtomicConvScore, self).__init__(**kwargs)
-    self.atom_types = atom_types
-    self.layer_sizes = layer_sizes
-
-  def build(self, input_shape):
-    self.type_weights = []
-    self.type_biases = []
-    self.output_weights = []
-    self.output_biases = []
-    n_features = int(input_shape[0][-1])
-    layer_sizes = self.layer_sizes
-    num_layers = len(layer_sizes)
-    weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
-    bias_init_consts = [0.0] * num_layers
-    for ind, atomtype in enumerate(self.atom_types):
-      prev_layer_size = n_features
-      self.type_weights.append([])
-      self.type_biases.append([])
-      self.output_weights.append([])
-      self.output_biases.append([])
-      for i in range(num_layers):
-        weight, bias = initializeWeightsBiases(
-            prev_layer_size=prev_layer_size,
-            size=layer_sizes[i],
-            weights=tf.random.truncated_normal(
-                shape=[prev_layer_size, layer_sizes[i]],
-                stddev=weight_init_stddevs[i]),
-            biases=tf.constant(
-                value=bias_init_consts[i], shape=[layer_sizes[i]]))
-        self.type_weights[ind].append(weight)
-        self.type_biases[ind].append(bias)
-        prev_layer_size = layer_sizes[i]
-      weight, bias = initializeWeightsBiases(prev_layer_size, 1)
-      self.output_weights[ind].append(weight)
-      self.output_biases[ind].append(bias)
-
-  def call(self, inputs):
-    frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs
-    atom_types = self.atom_types
-    num_layers = len(self.layer_sizes)
-
-    def atomnet(current_input, atomtype):
-      prev_layer = current_input
-      for i in range(num_layers):
-        #layer = tf.nn.bias_add(
-        #    tf.matmul(prev_layer, self.type_weights[atomtype][i]),
-        #    self.type_biases[atomtype][i])
-        #layer = tf.nn.relu(layer)
-        layer = Dense(100)(prev_layer)
-        prev_layer = layer
-
-      #output_layer = tf.squeeze(
-      #    tf.nn.bias_add(
-      #        tf.matmul(prev_layer, self.output_weights[atomtype][0]),
-      #        self.output_biases[atomtype][0]))
-      print("self.output_weights[atomtype][0].shape")
-      print(self.output_weights[atomtype][0].shape)
-      output_layer = Dense(self.output_weights[atomtype][0].shape[0])(prev_layer)
-      return output_layer
-
-    frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32)
-    frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)
-    complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32)
-
-    frag1_atomtype_energy = []
-    frag2_atomtype_energy = []
-    complex_atomtype_energy = []
-
-    for ind, atomtype in enumerate(atom_types):
-      frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
-      frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
-      complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)
-
-      cond = tf.equal(frag1_z, atomtype)
-      frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
-      cond = tf.equal(frag2_z, atomtype)
-      frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
-      cond = tf.equal(complex_z, atomtype)
-      complex_atomtype_energy.append(
-          tf.where(cond, complex_outputs, complex_zeros))
-
-    frag1_outputs = tf.add_n(frag1_atomtype_energy)
-    frag2_outputs = tf.add_n(frag2_atomtype_energy)
-    complex_outputs = tf.add_n(complex_atomtype_energy)
-
-    frag1_energy = tf.reduce_sum(frag1_outputs, 1)
-    frag2_energy = tf.reduce_sum(frag2_outputs, 1)
-    complex_energy = tf.reduce_sum(complex_outputs, 1)
-    binding_energy = complex_energy - (frag1_energy + frag2_energy)
-    return tf.expand_dims(binding_energy, axis=1)
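In both the version removed above and the commented-out copy added below, call keeps each per-atom energy only where the atomic number matches the current atom type, sums over atoms, and returns binding_energy = complex_energy - (frag1_energy + frag2_energy). A minimal standalone sketch of that masking step with toy tensors (an editor's illustration, not part of the commit):

import tensorflow as tf

frag_z = tf.constant([[6., 8., 0.]])       # toy atomic numbers, one sample
per_atom = tf.constant([[1.0, 2.0, 5.0]])  # toy per-atom energies
mask = tf.equal(frag_z, 6.)                # True only for the carbon entry
masked = tf.where(mask, per_atom, tf.zeros_like(per_atom))
energy = tf.reduce_sum(masked, 1)          # -> [1.0]; only carbon contributes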
+# class AtomicConvScore(Layer):
+#   """The scoring function used by the atomic convolution models."""
+
+#   def __init__(self, atom_types, layer_sizes, **kwargs):
+#     super(AtomicConvScore, self).__init__(**kwargs)
+#     self.atom_types = atom_types
+#     self.layer_sizes = layer_sizes
+
+#   def build(self, input_shape):
+#     self.type_weights = []
+#     self.type_biases = []
+#     self.output_weights = []
+#     self.output_biases = []
+#     n_features = int(input_shape[0][-1])
+#     layer_sizes = self.layer_sizes
+#     num_layers = len(layer_sizes)
+#     weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
+#     bias_init_consts = [0.0] * num_layers
+#     for ind, atomtype in enumerate(self.atom_types):
+#       prev_layer_size = n_features
+#       self.type_weights.append([])
+#       self.type_biases.append([])
+#       self.output_weights.append([])
+#       self.output_biases.append([])
+#       for i in range(num_layers):
+#         weight, bias = initializeWeightsBiases(
+#             prev_layer_size=prev_layer_size,
+#             size=layer_sizes[i],
+#             weights=tf.random.truncated_normal(
+#                 shape=[prev_layer_size, layer_sizes[i]],
+#                 stddev=weight_init_stddevs[i]),
+#             biases=tf.constant(
+#                 value=bias_init_consts[i], shape=[layer_sizes[i]]))
+#         self.type_weights[ind].append(weight)
+#         self.type_biases[ind].append(bias)
+#         prev_layer_size = layer_sizes[i]
+#       weight, bias = initializeWeightsBiases(prev_layer_size, 1)
+#       self.output_weights[ind].append(weight)
+#       self.output_biases[ind].append(bias)
+
+#   def call(self, inputs):
+#     frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs
+#     atom_types = self.atom_types
+#     num_layers = len(self.layer_sizes)
+
+#     def atomnet(current_input, atomtype):
+#       prev_layer = current_input
+#       for i in range(num_layers):
+#         #layer = tf.nn.bias_add(
+#         #    tf.matmul(prev_layer, self.type_weights[atomtype][i]),
+#         #    self.type_biases[atomtype][i])
+#         #layer = tf.nn.relu(layer)
+#         layer = Dense(100)(prev_layer)
+#         prev_layer = layer
+
+#       #output_layer = tf.squeeze(
+#       #    tf.nn.bias_add(
+#       #        tf.matmul(prev_layer, self.output_weights[atomtype][0]),
+#       #        self.output_biases[atomtype][0]))
+#       print("self.output_weights[atomtype][0].shape")
+#       print(self.output_weights[atomtype][0].shape)
+#       output_layer = Dense(
+#           self.output_weights[atomtype][0].shape[0])(prev_layer)
+#       return output_layer
+
+#     frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32)
+#     frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)
+#     complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32)
+
+#     frag1_atomtype_energy = []
+#     frag2_atomtype_energy = []
+#     complex_atomtype_energy = []
+
+#     for ind, atomtype in enumerate(atom_types):
+#       frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
+#       frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
+#       complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)
+
+#       cond = tf.equal(frag1_z, atomtype)
+#       frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
+#       cond = tf.equal(frag2_z, atomtype)
+#       frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
+#       cond = tf.equal(complex_z, atomtype)
+#       complex_atomtype_energy.append(
+#           tf.where(cond, complex_outputs, complex_zeros))
+
+#     frag1_outputs = tf.add_n(frag1_atomtype_energy)
+#     frag2_outputs = tf.add_n(frag2_atomtype_energy)
+#     complex_outputs = tf.add_n(complex_atomtype_energy)
+
+#     frag1_energy = tf.reduce_sum(frag1_outputs, 1)
+#     frag2_energy = tf.reduce_sum(frag2_outputs, 1)
+#     complex_energy = tf.reduce_sum(complex_outputs, 1)
+#     binding_energy = complex_energy - (frag1_energy + frag2_energy)
+#     return tf.expand_dims(binding_energy, axis=1)
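Both copies of AtomicConvScore call initializeWeightsBiases, which is defined elsewhere in this file and does not appear in this diff. A hypothetical sketch of its assumed behavior, inferred only from the two call sites in build (the real definition may differ):

import numpy as np
import tensorflow as tf

def initializeWeightsBiases(prev_layer_size, size, weights=None, biases=None):
  # Assumed behavior: default-initialize any tensor not supplied, then wrap
  # both in trainable variables so build() can store them per atom type.
  if weights is None:
    weights = tf.random.truncated_normal(
        shape=[prev_layer_size, size], stddev=1.0 / np.sqrt(prev_layer_size))
  if biases is None:
    biases = tf.zeros(shape=[size])
  return tf.Variable(weights), tf.Variable(biases)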


class AtomicConvModel(KerasModel):
@@ -131,7 +129,8 @@ class AtomicConvModel(KerasModel):
  geometry of the model.
  """

-  def __init__(self,
+  def __init__(
+      self,
      n_tasks: int,
      frag1_num_atoms: int = 70,
      frag2_num_atoms: int = 634,
@@ -139,12 +138,11 @@ class AtomicConvModel(KerasModel):
      max_num_neighbors: int = 12,
      batch_size: int = 24,
      atom_types: Sequence[float] = [
-                   6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,
-                   53., -1.
+          6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35., 53., -1.
      ],
      radial: Sequence[Sequence[float]] = [[
-                   1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,
-                   7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
+          1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0,
+          8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
      ], [0.0, 4.0, 8.0], [0.4]],
      # layer_sizes=[32, 32, 16],
      layer_sizes=[100],
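For orientation, a minimal construction call against the signature above, using the defaults visible in this diff (featurization and dataset setup omitted; an editor's illustration, not part of the commit):

import deepchem as dc

# n_tasks is the only required argument; everything else falls back to the
# defaults in the signature above.
model = dc.models.AtomicConvModel(n_tasks=1, batch_size=24, layer_sizes=[100])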
@@ -272,7 +270,7 @@ class AtomicConvModel(KerasModel):
    # dropout_switch = Input(shape=tuple())
    prev_size = concat.shape[0]
    next_activation = None
-    ## Add the dense layers
+    # Add the dense layers

    for size, weight_stddev, bias_const, dropout, activation_fn in zip(
        layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,