Commit 4e03bc74 authored by peastman

Merged changes from main branch

parents 574be8a1 595d13b2
+1 −1
@@ -6,7 +6,7 @@ from deepchem.feat import OneHotFeaturizer
 from deepchem.feat.molecule_featurizers.one_hot_featurizer import ZINC_CHARSET


-class TestOneHotFeaturizert(unittest.TestCase):
+class TestOneHotFeaturizer(unittest.TestCase):
   """
   Test OneHotFeaturizer.
   """
+1 −3
@@ -7,9 +7,6 @@ from deepchem.models.keras_model import KerasModel
 from deepchem.models.multitask import SingletaskToMultitask
 from deepchem.models.callbacks import ValidationCallback

-from deepchem.models.fcnet import MultitaskRegressor
-from deepchem.models.fcnet import MultitaskClassifier
-from deepchem.models.fcnet import MultitaskFitTransformRegressor
 from deepchem.models.IRV import MultitaskIRVClassifier
 from deepchem.models.robust_multitask import RobustMultitaskClassifier
 from deepchem.models.robust_multitask import RobustMultitaskRegressor
@@ -38,6 +35,7 @@ try:
   from deepchem.models.torch_models import GCN, GCNModel
   from deepchem.models.torch_models import LCNN, LCNNModel
   from deepchem.models.torch_models import Pagtn, PagtnModel
+  from deepchem.models.fcnet import MultitaskRegressor, MultitaskClassifier, MultitaskFitTransformRegressor
 except ModuleNotFoundError:
   pass
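
Note: moving the fcnet imports inside the existing try/except means the module still imports cleanly when the optional backend is missing. A minimal sketch of the same guarded-import pattern (names here are illustrative, not DeepChem's):

# Guarded optional imports: the module loads even when an optional
# backend (e.g. torch) is not installed.
try:
    import torch  # optional dependency
    HAS_TORCH = True
except ModuleNotFoundError:
    HAS_TORCH = False

def make_model():
    """Fail with a clear message only when the optional model is actually used."""
    if not HAS_TORCH:
        raise ImportError("This model requires torch; install it first.")
    return torch.nn.Linear(10, 1)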

+2 −2
@@ -13,7 +13,7 @@ try:
 except:
   from collections import Sequence as SequenceCollection
 from typing import Sequence, Union
-from deepchem.utils.typing import KerasActivationFn, LossFn, OneOrMany
+from deepchem.utils.typing import ActivationFn, LossFn, OneOrMany
 from deepchem.utils.data_utils import load_from_disk, save_to_disk

 logger = logging.getLogger(__name__)
@@ -56,7 +56,7 @@ class AtomicConvModel(KerasModel):
       weight_decay_penalty: float = 0.0,
       weight_decay_penalty_type: str = "l2",
       dropouts: OneOrMany[float] = 0.5,
-      activation_fns: OneOrMany[KerasActivationFn] = tf.nn.relu,
+      activation_fns: OneOrMany[ActivationFn] = tf.nn.relu,
       residual: bool = False,
       learning_rate=0.001,
       **kwargs) -> None:
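
Note: the KerasActivationFn → ActivationFn rename points to a framework-agnostic alias in deepchem.utils.typing. A plausible sketch of such an alias, assuming it accepts either a callable or a string name (the exact definition may differ):

from typing import Callable, Union

# Assumed shape of the framework-agnostic alias: either an activation
# callable (tf.nn.relu, torch.relu) or a string name such as "relu"
# that the model resolves itself.
ActivationFn = Union[Callable, str]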
+29 −30
@@ -8,7 +8,7 @@ import numpy as np
 import tensorflow as tf

 from typing import List, Union, Tuple, Iterable, Dict, Optional
-from deepchem.utils.typing import OneOrMany, LossFn, KerasActivationFn
+from deepchem.utils.typing import OneOrMany, LossFn, ActivationFn
 from deepchem.data import Dataset, NumpyDataset, pad_features
 from deepchem.feat.graph_features import ConvMolFeaturizer
 from deepchem.feat.mol_graphs import ConvMol
@@ -81,8 +81,7 @@ class WeaveModel(KerasModel):

  """

  def __init__(
      self,
  def __init__(self,
               n_tasks: int,
               n_atom_feat: OneOrMany[int] = 75,
               n_pair_feat: OneOrMany[int] = 14,
@@ -96,8 +95,8 @@ class WeaveModel(KerasModel):
                weight_decay_penalty: float = 0.0,
                weight_decay_penalty_type: str = "l2",
                dropouts: OneOrMany[float] = 0.25,
-      final_conv_activation_fn: Optional[KerasActivationFn] = tf.nn.tanh,
-      activation_fns: OneOrMany[KerasActivationFn] = tf.nn.relu,
+               final_conv_activation_fn: Optional[ActivationFn] = tf.nn.tanh,
+               activation_fns: OneOrMany[ActivationFn] = tf.nn.relu,
                batch_normalize: bool = True,
                batch_normalize_kwargs: Dict = {
                    "renorm": True,
@@ -151,7 +150,7 @@ class WeaveModel(KerasModel):
       The dropout probablity to use for each fully connected layer.  The length of this list
       should equal len(layer_sizes).  Alternatively this may be a single value
       instead of a list, in which case the same value is used for every layer.
-    final_conv_activation_fn: Optional[KerasActivationFn] (default `tf.nn.tanh`)
+    final_conv_activation_fn: Optional[ActivationFn] (default `tf.nn.tanh`)
       The Tensorflow activation funcntion to apply to the final
       convolution at the end of the weave convolutions. If `None`, then no
       activate is applied (hence linear).
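
Note: after the rename, WeaveModel's activation arguments keep their callable defaults. A usage sketch, with the defaults passed explicitly and hyperparameter values chosen only for illustration:

import tensorflow as tf
from deepchem.models import WeaveModel

# Callables still work as before; the broader ActivationFn alias is
# what the signature now advertises.
model = WeaveModel(
    n_tasks=1,
    mode="regression",
    final_conv_activation_fn=tf.nn.tanh,
    activation_fns=tf.nn.relu)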
+20 −4
@@ -38,7 +38,12 @@ class L1Loss(Loss):

   def _create_pytorch_loss(self):
     import torch
-    return torch.nn.L1Loss(reduction='none')
+
+    def loss(output, labels):
+      output, labels = _make_pytorch_shapes_consistent(output, labels)
+      return torch.nn.functional.l1_loss(output, labels, reduction='none')
+
+    return loss


 class HuberLoss(Loss):
@@ -55,7 +60,13 @@ class HuberLoss(Loss):

   def _create_pytorch_loss(self):
     import torch
-    return torch.nn.SmoothL1Loss(reduction='none')
+
+    def loss(output, labels):
+      output, labels = _make_pytorch_shapes_consistent(output, labels)
+      return torch.nn.functional.smooth_l1_loss(
+          output, labels, reduction='none')
+
+    return loss


 class L2Loss(Loss):
@@ -69,7 +80,12 @@ class L2Loss(Loss):

   def _create_pytorch_loss(self):
     import torch
-    return torch.nn.MSELoss(reduction='none')
+
+    def loss(output, labels):
+      output, labels = _make_pytorch_shapes_consistent(output, labels)
+      return torch.nn.functional.mse_loss(output, labels, reduction='none')
+
+    return loss


 class HingeLoss(Loss):
@@ -229,7 +245,7 @@ class SoftmaxCrossEntropy(Loss):

   def _create_pytorch_loss(self):
     import torch
-    ls = torch.nn.LogSoftmax(dim=1)
+    ls = torch.nn.LogSoftmax(dim=-1)

     def loss(output, labels):
       output, labels = _make_pytorch_shapes_consistent(output, labels)
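
Note: replacing the loss-module instances with closures lets each PyTorch loss reconcile output/label shapes before computing per-element values, and LogSoftmax(dim=-1) normalizes over the last axis regardless of tensor rank. A minimal sketch of the same pattern; _reconcile_shapes below is an illustrative stand-in for DeepChem's _make_pytorch_shapes_consistent:

import torch

def _reconcile_shapes(output, labels):
    # Illustrative stand-in: make the two tensors broadcast-compatible,
    # e.g. add a trailing dimension to rank-deficient labels.
    if labels.dim() == output.dim() - 1:
        labels = labels.unsqueeze(-1)
    return output, labels

def make_l1_loss():
    # Mirrors the new closure style of _create_pytorch_loss: fix shapes,
    # then apply the functional loss with reduction='none' so per-sample
    # values survive for downstream weighting.
    def loss(output, labels):
        output, labels = _reconcile_shapes(output, labels)
        return torch.nn.functional.l1_loss(output, labels, reduction='none')
    return loss

out = torch.randn(4, 1)
lab = torch.randn(4)
print(make_l1_loss()(out, lab).shape)  # torch.Size([4, 1])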