Commit bd0c2434 authored by Bharath Ramsundar's avatar Bharath Ramsundar
Browse files

Changes

parent 7a0ba007
Loading
Loading
Loading
Loading
+8 −8
Original line number Diff line number Diff line
@@ -4,14 +4,14 @@ import deepchem as dc
import numpy as np
import tensorflow as tf

from typing import List
from deepchem.utils.typing import OneOrMany 
from typing import List, Union
from deepchem.utils.typing import OneOrMany, KerasLossFn
from deepchem.data import NumpyDataset, pad_features
from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
from deepchem.metrics import to_one_hot
from deepchem.models import KerasModel, layers
from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy
from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy, Loss
from deepchem.trans import undo_transforms
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Activation, BatchNormalization

@@ -72,7 +72,7 @@ class WeaveModel(KerasModel):
               n_atom_feat: OneOrMany[int] = 75,
               n_pair_feat: OneOrMany[int] = 14,
               n_hidden: int = 50,
               n_graph_feat: OneOrMany[int] = 128,
               n_graph_feat: int = 128,
               n_weave: int = 2,
               fully_connected_layer_sizes: List[int] = [2000, 1000],
               mode: str = "classification",
@@ -161,7 +161,7 @@ class WeaveModel(KerasModel):
      output = Softmax()(logits)
      outputs = [output, logits]
      output_types = ['prediction', 'loss']
      loss = SoftmaxCrossEntropy()
      loss: Loss = SoftmaxCrossEntropy()
    else:
      output = Dense(n_tasks)(weave_gather)
      outputs = [output]
@@ -774,7 +774,7 @@ class GraphConvModel(KerasModel):
        batch_size=batch_size)
    if mode == "classification":
      output_types = ['prediction', 'loss', 'embedding']
      loss = SoftmaxCrossEntropy()
      loss: Union[Loss, KerasLossFn] = SoftmaxCrossEntropy()
    else:
      if self.uncertainty:
        output_types = ['prediction', 'variance', 'loss', 'loss', 'embedding']
+69 −1
Original line number Diff line number Diff line
@@ -144,9 +144,77 @@ XGBoostModel
.. autoclass:: deepchem.models.XGBoostModel
  :members:


Keras Models
============
DeepChem extensively uses `Keras`_ to build powerful machine learning models.

Losses
------

.. autoclass:: deepchem.models.losses.Loss
  :members:

.. autoclass:: deepchem.models.losses.L1Loss
  :members:

.. autoclass:: deepchem.models.losses.L2Loss
  :members:

.. autoclass:: deepchem.models.losses.HingeLoss
  :members:

.. autoclass:: deepchem.models.losses.BinaryCrossEntropy
  :members:

.. autoclass:: deepchem.models.losses.CategoricalCrossEntropy
  :members:

.. autoclass:: deepchem.models.losses.SigmoidCrossEntropy
  :members:

.. autoclass:: deepchem.models.losses.SoftmaxCrossEntropy
  :members:

.. autoclass:: deepchem.models.losses.SparseSoftmaxCrossEntropy
  :members:

Optimizers
----------

.. autoclass:: deepchem.models.optimizers.Optimizer
  :members:

.. autoclass:: deepchem.models.optimizers.LearningRateSchedule
  :members:

.. autoclass:: deepchem.models.optimizers.Adam
  :members:

.. autoclass:: deepchem.models.optimizers.RMSProp
  :members:

.. autoclass:: deepchem.models.optimizers.GradientDescent
  :members:

.. autoclass:: deepchem.models.optimizers.ExponentialDecay
  :members:

.. autoclass:: deepchem.models.optimizers.PolynomialDecay
  :members:

.. autoclass:: deepchem.models.optimizers.LinearCosineDecay
  :members:


KerasModel
----------
``KerasModel`` is the base class for DeepChem models implemented on top of Keras.

Training loss and validation metrics can be automatically logged to `Weights & Biases`_ with the following commands::