Commit 5abb9a5a authored by leswing

Tensorflow 1.0.1

parent 5639ccb5
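
This commit tracks the TensorFlow 1.0 API migration: tf.mul became tf.multiply, the cross-entropy ops now take keyword-only logits=/labels= arguments, and tf.concat takes axis=/values=; most of the remaining churn looks like automated reformatting. A minimal sketch of the migrated call forms used in the hunks below (illustrative, not part of the commit; assumes tensorflow==1.0.x):

# Illustrative sketch, not from the commit: the TF 1.0 call forms
# this diff migrates to.
import tensorflow as tf

logits = tf.constant([[2., 1.], [.5, 1.5]])
labels = tf.constant([[1., 0.], [0., 1.]])
weights = tf.constant([1., 2.])

# logits/labels are keyword-only in TF 1.0; tf.mul is now tf.multiply.
loss = tf.multiply(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels),
    weights)
# tf.concat's argument order changed in 1.0; keywords avoid the ambiguity.
merged = tf.concat(axis=1, values=[logits, labels])

with tf.Session() as sess:
  print(sess.run([loss, merged]))
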
+1 −4
@@ -5,8 +5,6 @@ from __future__ import print_function
 from __future__ import division
 from __future__ import unicode_literals
 
-from pkg_resources import get_distribution
-
 import deepchem.data
 import deepchem.feat
 import deepchem.hyper
@@ -18,4 +16,3 @@ import deepchem.trans
 import deepchem.utils
 import deepchem.dock
 import deepchem.molnet
+2 −1
@@ -609,7 +609,8 @@ class TensorflowClassifier(TensorflowGraphModel):
       example.
     """
     return tf.multiply(
-        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels), weights)
+        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels),
+        weights)
 
   def add_label_placeholders(self, graph, name_scopes):
     """Add Placeholders for labels for each task.
+2 −1
@@ -125,7 +125,8 @@ class TensorflowLogisticRegression(TensorflowGraphModel):

   def cost(self, logits, labels, weights):
     return tf.multiply(
-        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels), weights)
+        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels),
+        weights)
 
   def add_output_ops(self, graph, output):
     # adding output nodes of sigmoid function
+116 −94
@@ -10,23 +10,29 @@ from deepchem.models.tensorflow_models import TensorflowGraph
 from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskClassifier
 from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskRegressor
 
 
 class RobustMultitaskClassifier(TensorflowMultiTaskClassifier):
   """Implements a neural network for robust multitasking.
 
   Key idea is to have bypass layers that feed directly from features to task
   output. Hopefully will allow tasks to route around bad multitasking.
   """
-  def __init__(self, n_tasks, n_features, logdir=None,
+
+  def __init__(self,
+               n_tasks,
+               n_features,
+               logdir=None,
                bypass_layer_sizes=[100],
                bypass_weight_init_stddevs=[.02],
                bypass_bias_init_consts=[1.],
-               bypass_dropouts=[.5], **kwargs):
+               bypass_dropouts=[.5],
+               **kwargs):
     self.bypass_layer_sizes = bypass_layer_sizes
     self.bypass_weight_init_stddevs = bypass_weight_init_stddevs
     self.bypass_bias_init_consts = bypass_bias_init_consts
     self.bypass_dropouts = bypass_dropouts
-    super(RobustMultitaskClassifier, self).__init__(
-        n_tasks, n_features, logdir, **kwargs)
+    super(RobustMultitaskClassifier, self).__init__(n_tasks, n_features, logdir,
+                                                    **kwargs)
 
   def build(self, graph, name_scopes, training):
     """Constructs the graph architecture as specified in its config.
@@ -36,14 +42,12 @@ class RobustMultitaskClassifier(TensorflowMultiTaskClassifier):
         batch_size x num_features.
     """
     num_features = self.n_features
-    placeholder_scope = TensorflowGraph.get_placeholder_scope(
-        graph, name_scopes)
+    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
+                                                              name_scopes)
     with graph.as_default():
       with placeholder_scope:
         self.mol_features = tf.placeholder(
-            tf.float32,
-            shape=[None, num_features],
-            name='mol_features')
+            tf.float32, shape=[None, num_features], name='mol_features')
 
       layer_sizes = self.layer_sizes
       weight_init_stddevs = self.weight_init_stddevs
@@ -71,23 +75,25 @@ class RobustMultitaskClassifier(TensorflowMultiTaskClassifier):
           len(bypass_bias_init_consts),
           len(bypass_dropouts),
       }
-      assert len(bypass_lengths_set) == 1, ("All bypass_layer params"+
-                                            " must have same length.")
+      assert len(bypass_lengths_set) == 1, (
+          "All bypass_layer params" + " must have same length.")
       num_bypass_layers = bypass_lengths_set.pop()
 
       prev_layer = self.mol_features
       prev_layer_size = num_features
       for i in range(num_layers):
         # layer has shape [None, layer_sizes[i]]
-        print("Adding weights of shape %s" % str([prev_layer_size, layer_sizes[i]]))
-        layer = tf.nn.relu(model_ops.fully_connected_layer(
+        print("Adding weights of shape %s" % str(
+            [prev_layer_size, layer_sizes[i]]))
+        layer = tf.nn.relu(
+            model_ops.fully_connected_layer(
                 tensor=prev_layer,
                 size=layer_sizes[i],
                 weight_init=tf.truncated_normal(
                     shape=[prev_layer_size, layer_sizes[i]],
                     stddev=weight_init_stddevs[i]),
-            bias_init=tf.constant(value=bias_init_consts[i],
-                                  shape=[layer_sizes[i]])))
+                bias_init=tf.constant(
+                    value=bias_init_consts[i], shape=[layer_sizes[i]])))
         layer = model_ops.dropout(layer, dropouts[i], training)
         prev_layer = layer
         prev_layer_size = layer_sizes[i]
@@ -103,57 +109,69 @@ class RobustMultitaskClassifier(TensorflowMultiTaskClassifier):
         prev_bypass_layer_size = num_features
         for i in range(num_bypass_layers):
           # bypass_layer has shape [None, bypass_layer_sizes[i]]
-          print("Adding bypass weights of shape %s"
-                % str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
-          bypass_layer = tf.nn.relu(model_ops.fully_connected_layer(
+          print("Adding bypass weights of shape %s" %
+                str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
+          bypass_layer = tf.nn.relu(
+              model_ops.fully_connected_layer(
                   tensor=prev_bypass_layer,
                   size=bypass_layer_sizes[i],
                   weight_init=tf.truncated_normal(
                       shape=[prev_bypass_layer_size, bypass_layer_sizes[i]],
                       stddev=bypass_weight_init_stddevs[i]),
-            bias_init=tf.constant(value=bypass_bias_init_consts[i],
-                                  shape=[bypass_layer_sizes[i]])))
+                  bias_init=tf.constant(
+                      value=bypass_bias_init_consts[i],
+                      shape=[bypass_layer_sizes[i]])))
 
-          bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i], training)
+          bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i],
+                                           training)
           prev_bypass_layer = bypass_layer
           prev_bypass_layer_size = bypass_layer_sizes[i]
         top_bypass_layer = prev_bypass_layer
 
         if num_bypass_layers > 0:
           # task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
-          task_layer = tf.concat(axis=1, values=[top_multitask_layer, top_bypass_layer])
+          task_layer = tf.concat(
+              axis=1, values=[top_multitask_layer, top_bypass_layer])
           task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
         else:
           task_layer = top_multitask_layer
           task_layer_size = layer_sizes[-1]
-        print("Adding output weights of shape %s"
-              % str([task_layer_size, 1]))
-        output.append(tf.squeeze(
-            model_ops.logits(task_layer, num_classes=2,
+        print("Adding output weights of shape %s" % str([task_layer_size, 1]))
+        output.append(
+            tf.squeeze(
+                model_ops.logits(
+                    task_layer,
+                    num_classes=2,
                     weight_init=tf.truncated_normal(
                         shape=[task_layer_size, 2],
                         stddev=weight_init_stddevs[-1]),
-                bias_init=tf.constant(value=bias_init_consts[-1],
-                                      shape=[2]))))
+                    bias_init=tf.constant(
+                        value=bias_init_consts[-1], shape=[2]))))
       return output
 
 
 class RobustMultitaskRegressor(TensorflowMultiTaskRegressor):
   """Implements a neural network for robust multitasking.
 
   Key idea is to have bypass layers that feed directly from features to task
   output. Hopefully will allow tasks to route around bad multitasking.
   """
-  def __init__(self, n_tasks, n_features, logdir=None,
+
+  def __init__(self,
+               n_tasks,
+               n_features,
+               logdir=None,
                bypass_layer_sizes=[100],
                bypass_weight_init_stddevs=[.02],
                bypass_bias_init_consts=[1.],
-               bypass_dropouts=[.5], **kwargs):
+               bypass_dropouts=[.5],
+               **kwargs):
     self.bypass_layer_sizes = bypass_layer_sizes
     self.bypass_weight_init_stddevs = bypass_weight_init_stddevs
     self.bypass_bias_init_consts = bypass_bias_init_consts
     self.bypass_dropouts = bypass_dropouts
-    super(RobustMultitaskRegressor, self).__init__(
-        n_tasks, n_features, logdir, **kwargs)
+    super(RobustMultitaskRegressor, self).__init__(n_tasks, n_features, logdir,
+                                                   **kwargs)
 
   def build(self, graph, name_scopes, training):
     """Constructs the graph architecture as specified in its config.
@@ -163,14 +181,12 @@ class RobustMultitaskRegressor(TensorflowMultiTaskRegressor):
         batch_size x num_features.
     """
     num_features = self.n_features
-    placeholder_scope = TensorflowGraph.get_placeholder_scope(
-        graph, name_scopes)
+    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
+                                                              name_scopes)
    with graph.as_default():
       with placeholder_scope:
         self.mol_features = tf.placeholder(
-            tf.float32,
-            shape=[None, num_features],
-            name='mol_features')
+            tf.float32, shape=[None, num_features], name='mol_features')
 
       layer_sizes = self.layer_sizes
       weight_init_stddevs = self.weight_init_stddevs
@@ -198,23 +214,25 @@ class RobustMultitaskRegressor(TensorflowMultiTaskRegressor):
           len(bypass_bias_init_consts),
           len(bypass_dropouts),
       }
-      assert len(bypass_lengths_set) == 1, ("All bypass_layer params"+
-                                            " must have same length.")
+      assert len(bypass_lengths_set) == 1, (
+          "All bypass_layer params" + " must have same length.")
       num_bypass_layers = bypass_lengths_set.pop()
 
       prev_layer = self.mol_features
       prev_layer_size = num_features
       for i in range(num_layers):
         # layer has shape [None, layer_sizes[i]]
-        print("Adding weights of shape %s" % str([prev_layer_size, layer_sizes[i]]))
-        layer = tf.nn.relu(model_ops.fully_connected_layer(
+        print("Adding weights of shape %s" % str(
+            [prev_layer_size, layer_sizes[i]]))
+        layer = tf.nn.relu(
+            model_ops.fully_connected_layer(
                 tensor=prev_layer,
                 size=layer_sizes[i],
                 weight_init=tf.truncated_normal(
                     shape=[prev_layer_size, layer_sizes[i]],
                     stddev=weight_init_stddevs[i]),
-            bias_init=tf.constant(value=bias_init_consts[i],
-                                  shape=[layer_sizes[i]])))
+                bias_init=tf.constant(
+                    value=bias_init_consts[i], shape=[layer_sizes[i]])))
         layer = model_ops.dropout(layer, dropouts[i], training)
         prev_layer = layer
         prev_layer_size = layer_sizes[i]
@@ -230,38 +248,42 @@ class RobustMultitaskRegressor(TensorflowMultiTaskRegressor):
         prev_bypass_layer_size = num_features
         for i in range(num_bypass_layers):
           # bypass_layer has shape [None, bypass_layer_sizes[i]]
-          print("Adding bypass weights of shape %s"
-                % str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
-          bypass_layer = tf.nn.relu(model_ops.fully_connected_layer(
+          print("Adding bypass weights of shape %s" %
+                str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
+          bypass_layer = tf.nn.relu(
+              model_ops.fully_connected_layer(
                   tensor=prev_bypass_layer,
                   size=bypass_layer_sizes[i],
                   weight_init=tf.truncated_normal(
                       shape=[prev_bypass_layer_size, bypass_layer_sizes[i]],
                       stddev=bypass_weight_init_stddevs[i]),
-            bias_init=tf.constant(value=bypass_bias_init_consts[i],
-                                  shape=[bypass_layer_sizes[i]])))
+                  bias_init=tf.constant(
+                      value=bypass_bias_init_consts[i],
+                      shape=[bypass_layer_sizes[i]])))
 
-          bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i], training)
+          bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i],
+                                           training)
           prev_bypass_layer = bypass_layer
           prev_bypass_layer_size = bypass_layer_sizes[i]
         top_bypass_layer = prev_bypass_layer
 
         if num_bypass_layers > 0:
           # task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
-          task_layer = tf.concat(axis=1, values=[top_multitask_layer, top_bypass_layer])
+          task_layer = tf.concat(
+              axis=1, values=[top_multitask_layer, top_bypass_layer])
           task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
         else:
           task_layer = top_multitask_layer
           task_layer_size = layer_sizes[-1]
-        print("Adding output weights of shape %s"
-              % str([task_layer_size, 1]))
-        output.append(tf.squeeze(
-            model_ops.fully_connected_layer(
-                tensor=task_layer, size=1,
+        print("Adding output weights of shape %s" % str([task_layer_size, 1]))
+        output.append(
+            tf.squeeze(
+                model_ops.fully_connected_layer(
+                    tensor=task_layer,
+                    size=1,
                     weight_init=tf.truncated_normal(
                         shape=[task_layer_size, 1],
                         stddev=weight_init_stddevs[-1]),
-                bias_init=tf.constant(value=bias_init_consts[-1],
-                                      shape=[1]))))
+                    bias_init=tf.constant(
+                        value=bias_init_consts[-1], shape=[1]))))
       return output
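
For orientation on the two classes above, here is a minimal NumPy sketch (not from this commit; all names are illustrative) of the bypass idea the docstrings describe: each task head reads the shared multitask representation concatenated with a small per-task transform of the raw features, so a task whose signal is hurt by sharing can route around the shared trunk. The .02 stddev and 1.0 bias echo the constructor defaults.

# Minimal sketch of the bypass-layer architecture (illustrative only).
import numpy as np

def relu(x):
  return np.maximum(x, 0.0)

rng = np.random.RandomState(0)
n_tasks, n_features, n_hidden, n_bypass = 3, 8, 16, 4
x = rng.randn(5, n_features)  # batch of 5 examples

# Shared multitask trunk: features -> hidden representation.
W_shared = rng.randn(n_features, n_hidden) * .02
shared = relu(x.dot(W_shared) + 1.0)

outputs = []
for t in range(n_tasks):
  # Per-task bypass: features -> small hidden layer, independent of the trunk.
  W_bypass = rng.randn(n_features, n_bypass) * .02
  bypass = relu(x.dot(W_bypass) + 1.0)
  # Task head sees [shared, bypass], shape [batch, n_hidden + n_bypass].
  task_in = np.concatenate([shared, bypass], axis=1)
  W_out = rng.randn(n_hidden + n_bypass, 1) * .02
  outputs.append(task_in.dot(W_out))  # one [batch, 1] output per task
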
+22 −22
@@ -15,7 +15,6 @@
 # limitations under the License.
 """Utils for graph convolution models."""
 
-
 import numpy as np
 import tensorflow as tf
 from tensorflow.python.ops import math_ops
@@ -81,8 +80,12 @@ def Mean(tensor, reduction_indices=None, mask=None):
   Returns:
     A tensor with the same type as the input tensor.
   """
-  return Moment(1, tensor, standardize=False,
-                reduction_indices=reduction_indices, mask=mask)[0]
+  return Moment(
+      1,
+      tensor,
+      standardize=False,
+      reduction_indices=reduction_indices,
+      mask=mask)[0]
 
 
 def Variance(tensor, reduction_indices=None, mask=None):
@@ -96,8 +99,12 @@ def Variance(tensor, reduction_indices=None, mask=None):
   Returns:
     A tensor with the same type as the input tensor.
   """
-  return Moment(2, tensor, standardize=False,
-                reduction_indices=reduction_indices, mask=mask)[1]
+  return Moment(
+      2,
+      tensor,
+      standardize=False,
+      reduction_indices=reduction_indices,
+      mask=mask)[1]
 
 
 def Skewness(tensor, reduction_indices=None):
@@ -110,8 +117,8 @@ def Skewness(tensor, reduction_indices=None):
   Returns:
     A tensor with the same type as the input tensor.
   """
-  return Moment(3, tensor, standardize=True,
-                reduction_indices=reduction_indices)[1]
+  return Moment(
+      3, tensor, standardize=True, reduction_indices=reduction_indices)[1]
 
 
 def Kurtosis(tensor, reduction_indices=None):
@@ -124,8 +131,8 @@ def Kurtosis(tensor, reduction_indices=None):
   Returns:
     A tensor with the same type as the input tensor.
   """
-  return Moment(4, tensor, standardize=True,
-                reduction_indices=reduction_indices)[1] - 3
+  return Moment(
+      4, tensor, standardize=True, reduction_indices=reduction_indices)[1] - 3
 
 
 def Moment(k, tensor, standardize=False, reduction_indices=None, mask=None):
@@ -149,9 +156,8 @@ def Moment(k, tensor, standardize=False, reduction_indices=None, mask=None):
   if mask is not None:
     tensor = Mask(tensor, mask)
     ones = tf.constant(1, dtype=tf.float32, shape=tensor.get_shape())
-    divisor = tf.reduce_sum(Mask(ones, mask),
-                            axis=reduction_indices,
-                            keep_dims=True)
+    divisor = tf.reduce_sum(
+        Mask(ones, mask), axis=reduction_indices, keep_dims=True)
   elif reduction_indices is None:
     divisor = tf.constant(np.prod(tensor.get_shape().as_list()), tensor.dtype)
   else:
@@ -164,26 +170,20 @@ def Moment(k, tensor, standardize=False, reduction_indices=None, mask=None):
   # compute the requested central moment
   # note that mean is a raw moment, not a central moment
   mean = tf.div(
-      tf.reduce_sum(tensor,
-                    axis=reduction_indices,
-                    keep_dims=True),
-      divisor)
+      tf.reduce_sum(tensor, axis=reduction_indices, keep_dims=True), divisor)
   delta = tensor - mean
   if mask is not None:
     delta = Mask(delta, mask)
   moment = tf.div(
-      tf.reduce_sum(math_ops.pow(delta, k),
-                    axis=reduction_indices,
-                    keep_dims=True),
+      tf.reduce_sum(
+          math_ops.pow(delta, k), axis=reduction_indices, keep_dims=True),
       divisor)
   moment = tf.squeeze(moment, reduction_indices)
   if standardize:
     moment = tf.multiply(
         moment,
         math_ops.pow(
-            tf.rsqrt(Moment(2,
-                            tensor,
-                            reduction_indices=reduction_indices)[1]),
+            tf.rsqrt(Moment(2, tensor, reduction_indices=reduction_indices)[1]),
             k))
 
   return tf.squeeze(mean, reduction_indices), moment
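
For reference, the standard definitions behind Mean, Variance, Skewness, Kurtosis, and Moment (not stated in the diff; note that Kurtosis returns excess kurtosis, hence the trailing - 3). For sample values x_i with divisor n (the unmasked count when a mask is given):

\[
  \bar{x} = \frac{1}{n}\sum_i x_i, \qquad
  m_k = \frac{1}{n}\sum_i (x_i - \bar{x})^k, \qquad
  \tilde{m}_k = \frac{m_k}{m_2^{k/2}}
\]
\[
  \mathrm{Mean} = \bar{x}, \quad
  \mathrm{Variance} = m_2, \quad
  \mathrm{Skewness} = \tilde{m}_3 = \frac{m_3}{m_2^{3/2}}, \quad
  \mathrm{Kurtosis} = \tilde{m}_4 - 3 = \frac{m_4}{m_2^2} - 3
\]

The standardize branch multiplies the k-th central moment by rsqrt(m_2)^k, which is exactly the division by m_2^{k/2} above.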