Commit ddfe97f0 authored by Bharath Ramsundar's avatar Bharath Ramsundar
Browse files

Fixes

parent 3596f6b3
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -108,7 +108,7 @@ def _featurize_smiles_df(df, featurizer, field, log_every_n=1000):
      mol = rdmolops.RenumberAtoms(mol, new_order)
    if ind % log_every_n == 0:
      logger.info("Featurizing sample %d" % ind)
    features.append(featurizer.featurize([mol]))
    features.append(featurizer._featurize([mol]))
  valid_inds = np.array(
      [1 if elt.size > 0 else 0 for elt in features], dtype=bool)
  features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
@@ -171,7 +171,7 @@ def _featurize_mol_df(df, featurizer, field, log_every_n=1000):
  for ind, mol in enumerate(sample_elems):
    if ind % log_every_n == 0:
      logger.info("Featurizing sample %d" % ind)
    features.append(featurizer.featurize([mol]))
    features.append(featurizer._featurize([mol]))
  valid_inds = np.array(
      [1 if elt.size > 0 else 0 for elt in features], dtype=bool)
  features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
+127 −25
Original line number Diff line number Diff line
@@ -4,8 +4,8 @@ import deepchem as dc
import numpy as np
import tensorflow as tf

from typing import List, Union, Tuple, Iterable
from deepchem.utils.typing import OneOrMany, KerasLossFn
from typing import List, Union, Tuple, Iterable, Dict
from deepchem.utils.typing import OneOrMany, KerasLossFn, KerasActivationFn
from deepchem.data import Dataset, NumpyDataset, pad_features
from deepchem.feat.graph_features import ConvMolFeaturizer
from deepchem.feat.mol_graphs import ConvMol
@@ -45,6 +45,10 @@ class WeaveModel(KerasModel):
  scaling issues, but may possibly allow for better modeling
  of subtle bond effects.

  Note that [1]_ introduces a whole variety of different architectures for
  Weave models. The default settings in this class correspond to the W2N2
  variant from [1]_ which is the most commonly used variant.

  Examples
  --------

@@ -59,6 +63,13 @@ class WeaveModel(KerasModel):
  >>> model = dc.models.WeaveModel(n_tasks=1, n_weave=2, fully_connected_layer_sizes=[2000, 1000], mode="classification")
  >>> loss = model.fit(dataset)

  Note
  ----
  In general, the use of batch normalization can cause issues with NaNs. If
  you're having trouble with NaNs while using this model, consider setting
  `batch_normalize_kwargs={"trainable": False}` or turning off batch
  normalization entirely with `batch_normalize=False`.

  References
  ----------
  .. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond
@@ -74,7 +85,20 @@ class WeaveModel(KerasModel):
               n_hidden: int = 50,
               n_graph_feat: int = 128,
               n_weave: int = 2,
               fully_connected_layer_sizes: List[int] = [2000, 1000],
               fully_connected_layer_sizes: List[int] = [2000, 100],
               weight_init_stddevs: OneOrMany[float] = [0.01, 0.04],
               bias_init_consts: OneOrMany[float] = [0.5, 3.0],
               weight_decay_penalty: float = 0.0,
               weight_decay_penalty_type: str = "l2",
               dropouts: OneOrMany[float] = 0.25,
               activation_fns: OneOrMany[KerasActivationFn] = tf.nn.relu,
               batch_normalize: bool = True,
               batch_normalize_kwargs: Dict = {
                   "renorm": True,
                   "fused": False
               },
               gaussian_expand: bool = True,
               compress_post_gaussian_expansion: bool = False,
               mode: str = "classification",
               n_classes: int = 2,
               batch_size: int = 100,
@@ -94,6 +118,47 @@ class WeaveModel(KerasModel):
      Number of output features for each molecule(graph)
    n_weave: int, optional
      The number of weave layers in this model.
    fully_connected_layer_sizes: list
      The size of each dense layer in the network.  The length of
      this list determines the number of layers.
    weight_init_stddevs: list or float
      The standard deviation of the distribution to use for weight
      initialization of each layer.  The length of this list should
      equal len(layer_sizes).  Alternatively this may be a single
      value instead of a list, in which case the same value is used
      for every layer.
    bias_init_consts: list or float
      The value to initialize the biases in each layer to.  The
      length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in
      which case the same value is used for every layer.
    weight_decay_penalty: float
      The magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      The type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      The dropout probablity to use for each layer.  The length of this list
      should equal len(layer_sizes).  Alternatively this may be a single value
      instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      The Tensorflow activation function to apply to each layer.  The length
      of this list should equal len(layer_sizes).  Alternatively this may be a
      single value instead of a list, in which case the same value is used for
      every layer.
    batch_normalize: bool, optional (default True)
      If this is turned on, apply batch normalization before applying
      activation functions on convolutional and fully connected layers.
    batch_normalize_kwargs: Dict, optional (default `{"renorm"=True, "fused": False}`)
      Batch normalization is a complex layer which has many potential
      arguments which change behavior. This layer accepts user-defined
      parameters which are passed to all `BatchNormalization` layers in
      `WeaveModel`, `WeaveLayer`, and `WeaveGather`.
    gaussian_expand: boolean, optional (default True)
      Whether to expand each dimension of atomic features by gaussian
      histogram
    compress_post_gaussian_expansion: bool, optional (default False)
      If True, compress the results of the Gaussian expansion back to the
      original dimensions of the input.
    mode: str
      Either "classification" or "regression" for type of model.
    n_classes: int
@@ -106,6 +171,22 @@ class WeaveModel(KerasModel):
      n_atom_feat = [n_atom_feat] * n_weave
    if not isinstance(n_pair_feat, collections.Sequence):
      n_pair_feat = [n_pair_feat] * n_weave
    n_layers = len(fully_connected_layer_sizes)
    if not isinstance(weight_init_stddevs, collections.Sequence):
      weight_init_stddevs = [weight_init_stddevs] * n_layers
    if not isinstance(bias_init_consts, collections.Sequence):
      bias_init_consts = [bias_init_consts] * n_layers
    if not isinstance(dropouts, collections.Sequence):
      dropouts = [dropouts] * n_layers
    if not isinstance(activation_fns, collections.Sequence):
      activation_fns = [activation_fns] * n_layers
    if weight_decay_penalty != 0.0:
      if weight_decay_penalty_type == 'l1':
        regularizer = tf.keras.regularizers.l1(weight_decay_penalty)
      else:
        regularizer = tf.keras.regularizers.l2(weight_decay_penalty)
    else:
      regularizer = None

    self.n_tasks = n_tasks
    self.n_atom_feat = n_atom_feat
@@ -136,29 +217,49 @@ class WeaveModel(KerasModel):
          n_atom_input_feat=n_atom,
          n_pair_input_feat=n_pair,
          n_atom_output_feat=n_atom_next,
          n_pair_output_feat=n_pair_next)(inputs)
          n_pair_output_feat=n_pair_next,
          batch_normalize=batch_normalize)(inputs)
      inputs = [weave_layer_ind_A, weave_layer_ind_P, pair_split, atom_to_pair]
    #weave_layer2A, weave_layer2P = layers.WeaveLayer(
    #    n_atom_input_feat=self.n_hidden,
    #    n_pair_input_feat=self.n_hidden,
    #    n_atom_output_feat=self.n_hidden,
    #    n_pair_output_feat=self.n_hidden,
    #    update_pair=False)(
    #        [weave_layer1A, weave_layer1P, pair_split, atom_to_pair])
    #dense1 = Dense(self.n_graph_feat, activation=tf.nn.tanh)(weave_layer2A)
    # Final atom-layer convolution. Note this differs slightly from the paper
    # since we use a tanh activation. This seems necessary for numerical
    # stability.
    dense1 = Dense(self.n_graph_feat, activation=tf.nn.tanh)(weave_layer_ind_A)
    # Batch normalization causes issues, spitting out NaNs if
    # allowed to train
    batch_norm1 = BatchNormalization(epsilon=1e-5, trainable=False)(dense1)
    if batch_normalize:
      dense1 = BatchNormalization(**batch_normalize_kwargs)(dense1)
    weave_gather = layers.WeaveGather(
        batch_size, n_input=self.n_graph_feat,
        gaussian_expand=True)([batch_norm1, atom_split])
        batch_size,
        n_input=self.n_graph_feat,
        gaussian_expand=gaussian_expand,
        compress_post_gaussian_expansion=compress_post_gaussian_expansion)(
            [dense1, atom_split])

    if n_layers > 0:
      # Now fully connected layers
      input_layer = weave_gather
      for layer_size, weight_stddev, bias_const, dropout, activation_fn in zip(
          fully_connected_layer_sizes, weight_init_stddevs, bias_init_consts,
          dropouts, activation_fns):
        layer = Dense(
            layer_size,
            kernel_initializer=tf.keras.initializers.TruncatedNormal(
                stddev=weight_stddev),
            bias_initializer=tf.constant_initializer(value=bias_const),
            kernel_regularizer=regularizer)(weave_gather)
        if dropout > 0.0:
          layer = Dropout(rate=dropout)(layer)
        if batch_normalize:
          # Should this allow for training?
          layer = BatchNormalization(**batch_normalize_kwargs)(layer)
        layer = Activation(activation_fn)(layer)
        input_layer = layer
      output = input_layer
    else:
      output = weave_gather

    n_tasks = self.n_tasks
    if self.mode == 'classification':
      n_classes = self.n_classes
      logits = Reshape((n_tasks,
                        n_classes))(Dense(n_tasks * n_classes)(weave_gather))
      logits = Reshape((n_tasks, n_classes))(Dense(n_tasks * n_classes)(output))
      output = Softmax()(logits)
      outputs = [output, logits]
      output_types = ['prediction', 'loss']
@@ -176,12 +277,13 @@ class WeaveModel(KerasModel):
    super(WeaveModel, self).__init__(
        model, loss, output_types=output_types, batch_size=batch_size, **kwargs)

  def default_generator(self,
  def default_generator(
      self,
      dataset: Dataset,
      epochs: int = 1,
                        mode: float = 'fit',
                        deterministic=True,
                        pad_batches=True) -> Iterable[Tuple[List, List, List]]:
      mode: str = 'fit',
      deterministic: bool = True,
      pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
    """Convert a dataset into the tensors needed for learning.

    Parameters
+157 −31
Original line number Diff line number Diff line
@@ -4,7 +4,7 @@ import numpy as np
import collections
from typing import Callable, Dict, List
from tensorflow.keras import activations, initializers, backend
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dropout, BatchNormalization


class InteratomicL2Distances(tf.keras.layers.Layer):
@@ -44,7 +44,8 @@ class InteratomicL2Distances(tf.keras.layers.Layer):
    self.M_nbrs = M_nbrs
    self.ndim = ndim

  def get_config(self):
  def get_config(self) -> Dict:
    """Returns config dictionary for this layer."""
    config = super(InteratomicL2Distances, self).get_config()
    config['N_atoms'] = self.N_atoms
    config['M_nbrs'] = self.M_nbrs
@@ -2121,7 +2122,7 @@ class WeaveLayer(tf.keras.layers.Layer):
  n_pair_feat)` of pairwise features for each pair of atoms in the molecule.
  Let's construct this conceptually for our example.

  >>> pair_feat = [np.random.rand(1*1, n_pair_feat), np.random.rand(3*3, n_pair_feat)]
  >>> pair_feat = [np.random.rand(3*3, n_pair_feat), np.random.rand(1*1, n_pair_feat)]
  >>> pair_feat = np.concatenate(pair_feat, axis=0)
  >>> pair_feat.shape
  (10, 14)
@@ -2186,27 +2187,43 @@ class WeaveLayer(tf.keras.layers.Layer):
               update_pair: bool = True,
               init: str = 'glorot_uniform',
               activation: str = 'relu',
               batch_normalize: bool = True,
               batch_normalize_kwargs: Dict = {"renorm": True},
               **kwargs):
    """
    Parameters
    ----------
    n_atom_input_feat: int, optional
    n_atom_input_feat: int, optional (default 75)
      Number of features for each atom in input.
    n_pair_input_feat: int, optional
    n_pair_input_feat: int, optional (default 14)
      Number of features for each pair of atoms in input.
    n_atom_output_feat: int, optional
    n_atom_output_feat: int, optional (default 50)
      Number of features for each atom in output.
    n_pair_output_feat: int, optional
    n_pair_output_feat: int, optional (default 50)
      Number of features for each pair of atoms in output.
    n_hidden_XX: int, optional
    n_hidden_AA: int, optional (default 50)
      Number of units(convolution depths) in corresponding hidden layer
    n_hidden_PA: int, optional (default 50)
      Number of units(convolution depths) in corresponding hidden layer
    update_pair: bool, optional
    n_hidden_AP: int, optional (default 50)
      Number of units(convolution depths) in corresponding hidden layer
    n_hidden_PP: int, optional (default 50)
      Number of units(convolution depths) in corresponding hidden layer
    update_pair: bool, optional (default True)
      Whether to calculate for pair features,
      could be turned off for last layer
    init: str, optional
    init: str, optional (default 'glorot_uniform')
      Weight initialization for filters.
    activation: str, optional
    activation: str, optional (default 'relu')
      Activation function applied
    batch_normalize: bool, optional (default True)
      If this is turned on, apply batch normalization before applying
      activation functions on convolutional layers.
    batch_normalize_kwargs: Dict, optional (default `{renorm=True}`)
      Batch normalization is a complex layer which has many potential
      arguments which change behavior. This layer accepts user-defined
      parameters which are passed to all `BatchNormalization` layers in
      `WeaveModel`, `WeaveLayer`, and `WeaveGather`.
    """
    super(WeaveLayer, self).__init__(**kwargs)
    self.init = init  # Set weight initialization
@@ -2219,6 +2236,8 @@ class WeaveLayer(tf.keras.layers.Layer):
    self.n_hidden_PP = n_hidden_PP
    self.n_hidden_A = n_hidden_AA + n_hidden_PA
    self.n_hidden_P = n_hidden_AP + n_hidden_PP
    self.batch_normalize = batch_normalize
    self.batch_normalize_kwargs = batch_normalize_kwargs

    self.n_atom_input_feat = n_atom_input_feat
    self.n_pair_input_feat = n_pair_input_feat
@@ -2237,6 +2256,8 @@ class WeaveLayer(tf.keras.layers.Layer):
    config['n_hidden_PA'] = self.n_hidden_PA
    config['n_hidden_AP'] = self.n_hidden_AP
    config['n_hidden_PP'] = self.n_hidden_PP
    config['batch_normalize'] = self.batch_normalize
    config['batch_normalize_kwargs'] = self.batch_normalize_kwargs
    config['update_pair'] = self.update_pair
    config['init'] = self.init
    config['activation'] = self.activation
@@ -2256,32 +2277,38 @@ class WeaveLayer(tf.keras.layers.Layer):
    self.b_AA = backend.zeros(shape=[
        self.n_hidden_AA,
    ])
    self.AA_bn = BatchNormalization(**self.batch_normalize_kwargs)

    self.W_PA = init([self.n_pair_input_feat, self.n_hidden_PA])
    self.b_PA = backend.zeros(shape=[
        self.n_hidden_PA,
    ])
    self.PA_bn = BatchNormalization(**self.batch_normalize_kwargs)

    self.W_A = init([self.n_hidden_A, self.n_atom_output_feat])
    self.b_A = backend.zeros(shape=[
        self.n_atom_output_feat,
    ])
    self.A_bn = BatchNormalization(**self.batch_normalize_kwargs)

    if self.update_pair:
      self.W_AP = init([self.n_atom_input_feat * 2, self.n_hidden_AP])
      self.b_AP = backend.zeros(shape=[
          self.n_hidden_AP,
      ])
      self.AP_bn = BatchNormalization(**self.batch_normalize_kwargs)

      self.W_PP = init([self.n_pair_input_feat, self.n_hidden_PP])
      self.b_PP = backend.zeros(shape=[
          self.n_hidden_PP,
      ])
      self.PP_bn = BatchNormalization(**self.batch_normalize_kwargs)

      self.W_P = init([self.n_hidden_P, self.n_pair_output_feat])
      self.b_P = backend.zeros(shape=[
          self.n_pair_output_feat,
      ])
      self.P_bn = BatchNormalization(**self.batch_normalize_kwargs)
    self.built = True

  def call(self, inputs: List) -> List:
@@ -2302,29 +2329,45 @@ class WeaveLayer(tf.keras.layers.Layer):
    activation = self.activation_fn

    AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
    if self.batch_normalize:
      AA = self.AA_bn(AA)
    AA = activation(AA)
    PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
    if self.batch_normalize:
      PA = self.PA_bn(PA)
    PA = activation(PA)
    PA = tf.math.segment_sum(PA, pair_split)

    A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
    if self.batch_normalize:
      A = self.A_bn(A)
    A = activation(A)

    if self.update_pair:
      # Note that AP_ij and AP_ji share the same self.AP_bn batch
      # normalization
      AP_ij = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, atom_to_pair),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      if self.batch_normalize:
        AP_ij = self.AP_bn(AP_ij)
      AP_ij = activation(AP_ij)
      AP_ji = tf.matmul(
          tf.reshape(
              tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
              [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
      if self.batch_normalize:
        AP_ji = self.AP_bn(AP_ji)
      AP_ji = activation(AP_ji)

      PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
      if self.batch_normalize:
        PP = self.PP_bn(PP)
      PP = activation(PP)
      P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
      if self.batch_normalize:
        P = self.P_bn(P)
      P = activation(P)
    else:
      P = pair_features
@@ -2335,41 +2378,91 @@ class WeaveLayer(tf.keras.layers.Layer):
class WeaveGather(tf.keras.layers.Layer):
  """Implements the weave-gathering section of weave convolutions.

  Implements the gathering layer from [1]_.
  Implements the gathering layer from [1]_. The weave gathering layer gathers
  per-atom features to create a molecule-level fingerprint in a weave
  convolutional network. This layer can also perform Gaussian histogram
  expansion as detailed in [1]_. Note that the gathering function here is
  simply addition as in [1]_.

  Examples
  --------
  This layer expects 2 inputs in a list of the form `[atom_features,
  pair_features]`. We'll walk through the structure
  of these inputs. Let's start with some basic definitions.

  >>> import deepchem as dc
  >>> import numpy as np

  Suppose you have a batch of molecules

  >>> smiles = ["CCC", "C"]

  Note that there are 4 atoms in total in this system. This layer expects its
  input molecules to be batched together.

  >>> total_n_atoms = 4

  The weave gathering layer gathers per-atom features to create a
  molecule-level fingerprint in a weave convolutional network. This layer can
  also perform Gaussian histogram expansion as detailed in the original paper.
  Let's suppose that we have `n_atom_feat` features per atom. 

  >>> n_atom_feat = 75

  Then conceptually, `atom_feat` is the array of shape `(total_n_atoms,
  n_atom_feat)` of atomic features. For simplicity, let's just go with a
  random such matrix.

  >>> atom_feat = np.random.rand(total_n_atoms, n_atom_feat)

  We then need to provide a mapping of indices to the atoms they belong to. In
  our case this would be

  >>> atom_split = np.array([0, 0, 0, 1])

  Let's now define the actual layer

  >>> gather = WeaveGather(batch_size=2, n_input=n_atom_feat)
  >>> output_molecules = gather([atom_feat, atom_split])
  >>> len(output_molecules)
  2

  References
  ----------
  .. [1] Kearnes, Steven, et al. "Molecular graph convolutions: moving beyond
  fingerprints." Journal of computer-aided molecular design 30.8 (2016):
  595-608.

  Note
  ----
  This class requires `tensorflow_probability` to be installed.
  """

  def __init__(self,
               batch_size: int,
               n_input: int = 128,
               gaussian_expand: bool = False,
               gaussian_expand: bool = True,
               init: str = 'glorot_uniform',
               activation: str = 'tanh',
               epsilon: float = 1e-3,
               momentum: float = 0.99,
               compress_post_gaussian_expansion: bool = False,
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_input: int, optional
    n_input: int, optional (default 128)
      number of features for each input molecule
    gaussian_expand: boolean, optional
    gaussian_expand: boolean, optional (default True)
      Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional
    init: str, optional (default 'glorot_uniform')
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    activation: str, optional (default 'tanh')
      Activation function applied. Should be recognizable by
      `tf.keras.activations`.
    compress_post_gaussian_expansion: bool, optional (default False)
      If True, compress the results of the Gaussian expansion back to the
      original dimensions of the input by using a linear layer with specified
      activation function. Note that this compression was not in the original
      paper, but was present in the original DeepChem implementation so is
      left present for backwards compatibility.
    """
    try:
      import tensorflow_probability as tfp
@@ -2383,8 +2476,7 @@ class WeaveGather(tf.keras.layers.Layer):
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
    self.epsilon = epsilon
    self.momentum = momentum
    self.compress_post_gaussian_expansion = compress_post_gaussian_expansion

  def get_config(self):
    config = super(WeaveGather, self).get_config()
@@ -2393,8 +2485,8 @@ class WeaveGather(tf.keras.layers.Layer):
    config['gaussian_expand'] = self.gaussian_expand
    config['init'] = self.init
    config['activation'] = self.activation
    config['epsilon'] = self.epsilon
    config['momentum'] = self.momentum
    config[
        'compress_post_gaussian_expansion'] = self.compress_post_gaussian_expansion
    return config

  def build(self, input_shape):
@@ -2404,14 +2496,19 @@ class WeaveGather(tf.keras.layers.Layer):
      self.b = backend.zeros(shape=[self.n_input])
    self.built = True

  def call(self, inputs):
  def call(self, inputs: List) -> List:
    """Creates weave tensors.

    Parameters
    ----------
    inputs: List
      Should contain 4 tensors [atom_features, pair_features, pair_split,
      atom_to_pair]
      Should contain 2 tensors [atom_features, atom_split]

    Returns
    -------
    output_molecules: List 
      Each entry in this list is of shape `(self.n_inputs,)`
    
    """
    outputs = inputs[0]
    atom_split = inputs[1]
@@ -2421,13 +2518,42 @@ class WeaveGather(tf.keras.layers.Layer):

    output_molecules = tf.math.segment_sum(outputs, atom_split)

    if self.gaussian_expand:
    if self.compress_post_gaussian_expansion:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation_fn(output_molecules)

    return output_molecules

  def gaussian_histogram(self, x):
    """Expands input into a set of gaussian histogram bins.

    Parameters
    ----------
    x: tf.Tensor
      Of shape `(N, n_feat)`

    Examples
    --------
    This method uses 11 bins spanning portions of a Gaussian with zero mean
    and unit standard deviation.

    >>> gaussian_memberships = [(-1.645, 0.283), (-1.080, 0.170),
    ...                         (-0.739, 0.134), (-0.468, 0.118),
    ...                         (-0.228, 0.114), (0., 0.114),
    ...                         (0.228, 0.114), (0.468, 0.118),
    ...                         (0.739, 0.134), (1.080, 0.170),
    ...                         (1.645, 0.283)]

    We construct a Gaussian at `gaussian_memberships[i][0]` with standard
    deviation `gaussian_memberships[i][1]`. Each feature in `x` is assigned
    the probability of falling in each Gaussian, and probabilities are
    normalized across the 11 different Gaussians.
    
    Returns
    -------
    outputs: tf.Tensor
      Of shape `(N, 11*n_feat)`
    """
    import tensorflow_probability as tfp
    gaussian_memberships = [(-1.645, 0.283), (-1.080, 0.170), (-0.739, 0.134),
                            (-0.468, 0.118), (-0.228, 0.114), (0., 0.114),
+45 −0

File changed.

Preview size limit exceeded, changes collapsed.

+30 −7

File changed.

Preview size limit exceeded, changes collapsed.

Loading