Commit 5639ccb5 authored by leswing
Browse files

TF 1.0.1

parent 367bea14
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -44,7 +44,7 @@ You can install deepchem in a new conda environment using the conda commands in

```bash
bash scripts/install_deepchem_conda.sh deepchem
-pip install tensorflow-gpu==0.12.1                      # If you want GPU support
+pip install tensorflow-gpu==1.0.1                      # If you want GPU support
git clone https://github.com/deepchem/deepchem.git      # Clone deepchem source code from GitHub
cd deepchem
python setup.py install                                 # Manual install
@@ -95,7 +95,7 @@ via this installation procedure.
    contact your local sysadmin to work out a custom installation. If your
    version of Linux is recent, then the following command will work:
    ```
-    pip install tensorflow-gpu==0.12.1
+    pip install tensorflow-gpu==1.0.1
    ```

9. `deepchem`: Clone the `deepchem` github repo:
+4 −1
Original line number Diff line number Diff line
@@ -5,6 +5,8 @@ from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

+from pkg_resources import get_distribution
+
import deepchem.data
import deepchem.feat
import deepchem.hyper
@@ -16,3 +18,4 @@ import deepchem.trans
import deepchem.utils
import deepchem.dock
import deepchem.molnet
+3 −3
Original line number Diff line number Diff line
@@ -608,8 +608,8 @@ class TensorflowClassifier(TensorflowGraphModel):
      A tensor with shape batch_size containing the weighted cost for each
      example.
    """
-    return tf.mul(
-        tf.nn.softmax_cross_entropy_with_logits(logits, labels), weights)
+    return tf.multiply(
+        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels), weights)

  def add_label_placeholders(self, graph, name_scopes):
    """Add Placeholders for labels for each task.
@@ -762,7 +762,7 @@ class TensorflowRegressor(TensorflowGraphModel):
      A tensor with shape batch_size containing the weighted cost for each
      example.
    """
-    return tf.mul(0.5 * tf.square(output - labels), weights)
+    return tf.multiply(0.5 * tf.square(output - labels), weights)

  def add_label_placeholders(self, graph, name_scopes):
    """Add Placeholders for labels for each task.
+3 −3
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@ def weight_decay(penalty_type, penalty):
    else:
      raise NotImplementedError('Unsupported penalty_type %s' % penalty_type)
    cost *= penalty
-    tf.scalar_summary('Weight Decay Cost', cost)
+    tf.summary.scalar('Weight Decay Cost', cost)
  return cost


@@ -124,8 +124,8 @@ class TensorflowLogisticRegression(TensorflowGraphModel):
      return loss

  def cost(self, logits, labels, weights):
-    return tf.mul(
-        tf.nn.sigmoid_cross_entropy_with_logits(logits, labels), weights)
+    return tf.multiply(
+        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels), weights)

  def add_output_ops(self, graph, output):
    # adding output nodes of sigmoid function
+2 −2
Original line number Diff line number Diff line
@@ -173,11 +173,11 @@ class ProgressiveJointRegressor(TensorflowMultiTaskRegressor):
      prev_layers.append(all_layers[(i - 1, prev_task)])
    # prev_layers is a list with elements of size
    # (batch_size, layer_sizes[i-1])
-    prev_layer = tf.concat(1, prev_layers)
+    prev_layer = tf.concat(axis=1, values=prev_layers)
    alpha = tf.Variable(tf.truncated_normal([
        1,
    ], stddev=alpha_init_stddev))
-    prev_layer = tf.mul(alpha, prev_layer)
+    prev_layer = tf.multiply(alpha, prev_layer)
    prev_layer_size = task * layer_sizes[i - 1]
    print("Creating V_layer_%d_task%d of shape %s" %
          (i, task, str([prev_layer_size, layer_sizes[i - 1]])))
Loading