Unverified Commit b631acb3 authored by Bharath Ramsundar, committed by GitHub

Merge pull request #1651 from VIGS25/chemnet-fixes

ChemNet Fixes
parents 673d89d5 5ed8b888
+18 −10
@@ -17,11 +17,11 @@ import logging
 
 from deepchem.data.datasets import pad_batch
 from deepchem.models import KerasModel, layers
-from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy
+from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy, SigmoidCrossEntropy
 from deepchem.metrics import to_one_hot
 from deepchem.models.tensorgraph.layers import KerasLayer
 from deepchem.models.tensorgraph import chemnet_layers
-from tensorflow.keras.layers import Input, Dense, Reshape, Softmax
+from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Activation
 from tensorflow.keras.layers import Dropout, Conv1D, Concatenate, Lambda, GRU, LSTM, Bidirectional
 from tensorflow.keras.layers import Conv2D, ReLU, Add, GlobalAveragePooling2D
@@ -154,12 +154,16 @@ class Smiles2Vec(KerasModel):
     rnn_embeddings = layer(rnn_embeddings)
 
     if self.mode == "classification":
-      logits = Dense(self.n_tasks * 2)(rnn_embeddings)
-      logits = Reshape((self.n_tasks, 2))(logits)
-      output = Softmax()(logits)
+      logits = Dense(self.n_tasks * self.n_classes)(rnn_embeddings)
+      logits = Reshape((self.n_tasks, self.n_classes))(logits)
+      if self.n_classes == 2:
+        output = Activation(activation='sigmoid')(logits)
+        loss = SigmoidCrossEntropy()
+      else:
+        output = Softmax()(logits)
+        loss = SoftmaxCrossEntropy()
       outputs = [output, logits]
       output_types = ['prediction', 'loss']
-      loss = SoftmaxCrossEntropy()
 
     else:
       output = Dense(self.n_tasks * 1, name='Dense')(rnn_embeddings)
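The Smiles2Vec hunk above replaces the hard-coded two-class softmax head with an `n_classes`-aware head: binary tasks get a sigmoid output paired with `SigmoidCrossEntropy`, while everything else keeps softmax plus `SoftmaxCrossEntropy`. A minimal standalone sketch of that Keras pattern, assuming hypothetical `n_tasks`, `n_classes`, and `embedding_dim` values and using plain `tf.keras` without the DeepChem loss wrappers:

```python
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Activation

n_tasks, n_classes, embedding_dim = 12, 2, 224  # hypothetical sizes

embeddings = Input(shape=(embedding_dim,))
# One logit per (task, class) pair, reshaped to (n_tasks, n_classes).
logits = Dense(n_tasks * n_classes)(embeddings)
logits = Reshape((n_tasks, n_classes))(logits)

if n_classes == 2:
    # Binary case: elementwise sigmoid, to be paired with a sigmoid cross entropy loss.
    output = Activation('sigmoid')(logits)
else:
    # Multiclass case: softmax over the class axis, paired with softmax cross entropy.
    output = Softmax()(logits)

model = tf.keras.Model(inputs=embeddings, outputs=[output, logits])
model.summary()
```

Returning both the activated output and the raw logits matches the `['prediction', 'loss']` output types above: predictions come from the activation, while the cross-entropy loss is computed on the logits.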
@@ -276,12 +280,16 @@ class ChemCeption(KerasModel):
     avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)
 
     if self.mode == "classification":
-      logits = Dense(self.n_tasks * 2)(avg_pooling_out)
-      logits = Reshape((self.n_tasks, 2))(logits)
-      output = Softmax()(logits)
+      logits = Dense(self.n_tasks * self.n_classes)(avg_pooling_out)
+      logits = Reshape((self.n_tasks, self.n_classes))(logits)
+      if self.n_classes == 2:
+        output = Activation(activation='sigmoid')(logits)
+        loss = SigmoidCrossEntropy()
+      else:
+        output = Softmax()(logits)
+        loss = SoftmaxCrossEntropy()
       outputs = [output, logits]
       output_types = ['prediction', 'loss']
-      loss = SoftmaxCrossEntropy()
 
     else:
       output = Dense(self.n_tasks * 1)(avg_pooling_out)
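With `n_classes` now threaded through both heads, a binary ChemCeption classifier can be requested directly. A hedged usage sketch: only `n_tasks`, `n_classes`, and `mode` appear in this diff, other constructor arguments are left at their defaults and should be checked against the class docstring, and `train_dataset` stands in for a featurized `dc.data.Dataset`:

```python
from deepchem.models import ChemCeption

# mode, n_tasks, and n_classes are the fields the hunk above reads;
# remaining constructor arguments are assumed to keep their defaults.
model = ChemCeption(n_tasks=1, n_classes=2, mode="classification")
model.fit(train_dataset, nb_epoch=10)  # train_dataset: a featurized dc.data.Dataset
```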
+3 −1
@@ -85,8 +85,10 @@ def load_sampl(featurizer='ECFP',
   splitters = {
       'index': deepchem.splits.IndexSplitter(),
       'random': deepchem.splits.RandomSplitter(),
-      'scaffold': deepchem.splits.ScaffoldSplitter()
+      'scaffold': deepchem.splits.ScaffoldSplitter(),
+      'stratified': deepchem.splits.SingletaskStratifiedSplitter(task_number=0)
   }
 
   splitter = splitters[split]
   logger.info("About to split dataset with {} splitter.".format(split))
   frac_train = kwargs.get("frac_train", 0.8)
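The second file registers a `'stratified'` option in `load_sampl`'s splitter table, mapping it to `SingletaskStratifiedSplitter(task_number=0)`. Assuming the standard MoleculeNet loader signature (the `featurizer` and `split` arguments shown in the hunk), selecting it would look like this sketch:

```python
import deepchem

# 'stratified' is the key added above; it dispatches to
# SingletaskStratifiedSplitter(task_number=0) inside load_sampl.
tasks, datasets, transformers = deepchem.molnet.load_sampl(
    featurizer='ECFP', split='stratified')
train_dataset, valid_dataset, test_dataset = datasets
```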