Commit 7dec6843 authored by Bharath Ramsundar's avatar Bharath Ramsundar Committed by GitHub
Browse files

Merge pull request #823 from lilleswing/save-ani1

Save Ani1Regression
parents 89a3ef4e 7b056ed6
Loading
Loading
Loading
Loading
+30 −21
Original line number Diff line number Diff line
@@ -403,8 +403,8 @@ class AtomicDifferentiatedDense(Layer):
               init='glorot_uniform',
               activation='relu',
               **kwargs):
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.max_atoms = max_atoms
    self.out_channels = out_channels
    self.atom_number_cases = atom_number_cases
@@ -413,6 +413,8 @@ class AtomicDifferentiatedDense(Layer):

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Radial Symmetry Function """
    init_fn = initializations.get(self.init)  # Set weight initialization
    activation_fn = activations.get(self.activation)
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
@@ -420,13 +422,12 @@ class AtomicDifferentiatedDense(Layer):
    inputs = in_layers[0].out_tensor
    atom_numbers = in_layers[1].out_tensor
    in_channels = inputs.get_shape().as_list()[-1]
    self.W = self.init(
    self.W = init_fn(
        [len(self.atom_number_cases), in_channels, self.out_channels])

    self.b = model_ops.zeros((len(self.atom_number_cases), self.out_channels))
    outputs = []
    for i, atom_case in enumerate(self.atom_number_cases):

      # optimization to allow for tensorcontraction/broadcasted mmul
      # using a reshape trick. Note that the np and tf matmul behavior
      # differs when dealing with broadcasts
@@ -439,7 +440,7 @@ class AtomicDifferentiatedDense(Layer):
      ak = tf.shape(a)[2]
      bl = tf.shape(b)[1]

      output = self.activation(
      output = activation_fn(
          tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b), [ai, aj, bl]) +
          self.b[i, :])

@@ -448,3 +449,11 @@ class AtomicDifferentiatedDense(Layer):
                                                             self.out_channels))
      outputs.append(output)
    self.out_tensor = tf.add_n(outputs)

  def none_tensors(self):
    """Detach this layer's tensors so the object can be serialized.

    Clears ``W``, ``b`` and ``out_tensor`` on the instance and returns the
    previous values so a caller can later restore them via ``set_tensors``.

    Returns:
      Tuple of the prior ``(W, b, out_tensor)`` values.
    """
    saved = (self.W, self.b, self.out_tensor)
    self.W = None
    self.b = None
    self.out_tensor = None
    return saved

  def set_tensors(self, tensor):
    """Restore tensors previously removed by ``none_tensors``.

    Args:
      tensor: Tuple of ``(W, b, out_tensor)`` to reattach to this layer.
    """
    weight, bias, out = tensor
    self.W = weight
    self.b = bias
    self.out_tensor = out
+25 −8
Original line number Diff line number Diff line
import numpy as np
import tensorflow as tf

from deepchem.models import TensorGraph
from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, \
  WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, \
  DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather
from deepchem.models.tensorgraph.layers import Feature, Conv1D, Dense, Flatten, Reshape, Squeeze, Transpose, \
    CombineMeanStd, Repeat, Gather, GRU, L2Loss, Concat, SoftMax, Constant, Variable, Add, Multiply, Log, InteratomicL2Distances, \
  CombineMeanStd, Repeat, Gather, GRU, L2Loss, Concat, SoftMax, \
  Constant, Variable, Add, Multiply, Log, InteratomicL2Distances, \
  SoftMaxCrossEntropy, ReduceMean, ToFloat, ReduceSquareDifference, Conv2D, MaxPool2D, ReduceSum, GraphConv, GraphPool, \
  GraphGather, BatchNorm, WeightedError, \
  Conv3D, MaxPool3D, \
  LSTMStep, AttnLSTMEmbedding, IterRefLSTMEmbedding
from deepchem.models.tensorgraph.graph_layers import Combine_AP, Separate_AP, \
    WeaveLayer, WeaveGather, DTNNEmbedding, DTNNGather, DTNNStep, \
    DTNNExtract, DAGLayer, DAGGather, MessagePassing, SetGather
from deepchem.models.tensorgraph.symmetry_functions import AtomicDifferentiatedDense


def test_Conv1D_pickle():
@@ -541,3 +544,17 @@ def test_SetGather_pickle():
  tg.set_loss(Gather)
  tg.build()
  tg.save()


def test_AtomicDifferentialDense_pickle():
  """Check that a TensorGraph holding an AtomicDifferentiatedDense layer
  can be built and saved (i.e. the layer round-trips through serialization).
  """
  max_atoms = 23
  atom_features = 100
  tg = TensorGraph()
  atom_feature = Feature(shape=(None, max_atoms, atom_features))
  atom_numbers = Feature(shape=(None, max_atoms))
  # Use the max_atoms variable (not a hardcoded 23) so the layer config
  # stays consistent with the Feature shapes above if the constant changes.
  atomic_differential_dense = AtomicDifferentiatedDense(
      max_atoms=max_atoms, out_channels=5,
      in_layers=[atom_feature, atom_numbers])
  tg.add_output(atomic_differential_dense)
  tg.set_loss(atomic_differential_dense)
  tg.build()
  tg.save()