Commit c3833ed1 authored by Bharath Ramsundar

Basic tensorflow test passes now.

parent 466e5097
+0 −4
@@ -335,10 +335,6 @@ class Dataset(object):
        shard_batch_size = n_samples
      else:
        shard_batch_size = batch_size 
-      ############################################################### DEBUG
-      print("n_samples, shard_batch_size")
-      print(n_samples, shard_batch_size)
-      ############################################################### DEBUG
      interval_points = np.linspace(
          0, n_samples, np.ceil(float(n_samples)/shard_batch_size)+1, dtype=int)
      for j in range(len(interval_points)-1):
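For reference, a minimal sketch of the sharding arithmetic above, assuming n_samples = 10 and shard_batch_size = 4 (note that newer NumPy releases require the num argument of np.linspace to be an integer, hence the explicit int(...) cast below):

import numpy as np

n_samples, shard_batch_size = 10, 4
# One more interval point than the number of shards.
interval_points = np.linspace(
    0, n_samples, int(np.ceil(float(n_samples) / shard_batch_size)) + 1,
    dtype=int)
print(interval_points)  # [ 0  3  6 10]
# Consecutive points delimit shards; the last shard absorbs the remainder.
for j in range(len(interval_points) - 1):
  print(interval_points[j], interval_points[j + 1])  # 0 3, then 3 6, then 6 10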
+14 −14
@@ -22,7 +22,7 @@ class MultiTaskDNN(Graph):
  TODO(rbharath): Port this code over to use Keras's new functional-API
  instead of using legacy Graph object.
  """
-  def __init__(self, n_tasks, n_inputs, task_type, nb_layers=1, nb_hidden=1000,
+  def __init__(self, n_tasks, n_features, task_type, n_layers=1, n_hidden=1000,
               init="glorot_uniform", batchnorm=False, dropout=0.5,
               activation="relu", learning_rate=.001, decay=1e-6,
               momentum=0.9, nesterov=False):
@@ -30,10 +30,10 @@ class MultiTaskDNN(Graph):
    # Store hyperparameters
    assert task_type in ["classification", "regression"]
    self.task_type = task_type
-    self.n_inputs = n_inputs
+    self.n_features = n_features
    self.n_tasks = n_tasks
-    self.nb_layers = nb_layers
-    self.nb_hidden = nb_hidden
+    self.n_layers = n_layers
+    self.n_hidden = n_hidden
    self.init = init
    self.batchnorm = batchnorm
    self.dropout = dropout
@@ -43,15 +43,15 @@ class MultiTaskDNN(Graph):
    self.momentum = momentum
    self.nesterov = nesterov

-    self.add_input(name="input", input_shape=(self.n_inputs,))
+    self.add_input(name="input", input_shape=(self.n_features,))
    prev_layer = "input"
-    for ind, layer in enumerate(range(self.nb_layers)):
+    for ind, layer in enumerate(range(self.n_layers)):
      dense_layer_name = "dense%d" % ind
      activation_layer_name = "activation%d" % ind
      batchnorm_layer_name = "batchnorm%d" % ind
      dropout_layer_name = "dropout%d" % ind
      self.add_node(
-          Dense(self.nb_hidden, init=self.init),
+          Dense(self.n_hidden, init=self.init),
          name=dense_layer_name, input=prev_layer)
      prev_layer = dense_layer_name 
      if self.batchnorm:
@@ -91,11 +91,11 @@ class MultiTaskDNN(Graph):
    self.compile(optimizer=sgd, loss=loss_dict)

  def get_config(self):
-    return {"n_inputs": self.n_inputs,
+    return {"n_features": self.n_features,
            "n_tasks": self.n_tasks,
            "task_type": self.task_type,
-            "nb_layers": self.nb_layers,
-            "nb_hidden": self.nb_hidden,
+            "n_layers": self.n_layers,
+            "n_hidden": self.n_hidden,
            "init": self.init,
            "batchnorm": self.batchnorm,
            "dropout": self.dropout,
@@ -148,8 +148,8 @@ class MultiTaskDNN(Graph):
    """
    data = self.get_data_dict(X)
    y_pred_dict = super(MultiTaskDNN, self).predict_on_batch(data)
-    nb_samples = np.shape(X)[0]
-    y_pred = np.zeros((nb_samples, self.n_tasks))
+    n_samples = np.shape(X)[0]
+    y_pred = np.zeros((n_samples, self.n_tasks))
    for task in range(self.n_tasks):
      taskname = "task%d" % task 
      if self.task_type == "classification":
@@ -181,7 +181,7 @@ class SingleTaskDNN(MultiTaskDNN):
  """
  Abstract base class for different ML models.
  """
-  def __init__(self, n_inputs, task_type, **kwargs):
+  def __init__(self, n_features, task_type, **kwargs):
    n_tasks = 1
    super(SingleTaskDNN, self).__init__(
-        n_tasks, n_inputs, task_type, **kwargs)
+        n_tasks, n_features, task_type, **kwargs)
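A hypothetical call site after this rename (argument values are illustrative, not taken from the commit):

model = MultiTaskDNN(n_tasks=5, n_features=1024, task_type="classification",
                     n_layers=2, n_hidden=500)
config = model.get_config()  # keys now use the n_* spelling, e.g. "n_features"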
+51 −38
@@ -38,7 +38,7 @@ class TensorflowGraph(object):
    dropouts

  Classifier:
-    num_classes
+    n_classes

  Has the following attributes:

@@ -64,7 +64,7 @@ class TensorflowGraph(object):
    graph: TensorFlow graph object.
    logdir: Path to the file output directory to store checkpoints etc.
    master: TensorFlow session master specification string.
-    num_tasks: Integer number of tasks this model trains/evals on.
+    n_tasks: Integer number of tasks this model trains/evals on.
    placeholder_scope: name scope where tf.placeholders are defined.
    valid: Placeholder for a boolean tensor with shape batch_size to use as a
      mask when calculating gradient costs.
@@ -74,10 +74,11 @@ class TensorflowGraph(object):
    logdir: Directory for output files.
  """

-  def __init__(self, n_tasks, n_inputs, layer_sizes=[1000],
-               weight_init_stddevs=[.02], bias_init_consts=[1], penalty=0.0,
-               learning_rate=.001, momentum=".9", optimizer="adam",
-               batch_size=50, num_classes=2, logdir, train=True, verbosity=None):
+  def __init__(self, n_tasks, n_features, logdir, layer_sizes=[1000],
+               weight_init_stddevs=[.02], bias_init_consts=[1.], penalty=0.0,
+               dropouts=[0.5], learning_rate=.001, momentum=".9",
+               optimizer="adam", batch_size=50, n_classes=2,
+               train=True, verbosity=None):
    """Constructs the computational graph.

    Args:
@@ -87,13 +88,27 @@ class TensorflowGraph(object):
    This function constructs the computational graph for the model. It relies
    on subclassed methods (build/cost) to construct specific graphs.
    """
-    self.graph = tf.Graph()
+    # Save hyperparameters
+    self.n_tasks = n_tasks
+    self.n_features = n_features
+    self.logdir = logdir
-    self.tasks = tasks
-    self.task_types = task_types
-    self.num_tasks = len(task_types)
+    self.layer_sizes = layer_sizes
+    self.weight_init_stddevs = weight_init_stddevs
+    self.bias_init_consts = bias_init_consts
+    self.penalty = penalty
+    self.dropouts = dropouts
+    self.learning_rate = learning_rate
+    self.momentum = momentum
+    self.optimizer = optimizer
+    self.batch_size = batch_size
+    self.n_classes = n_classes
+    self.train = train
+    self.verbosity = verbosity

+    self.graph = tf.Graph()
+    self.logdir = logdir

    # Lazily created by _get_shared_session().
    self._shared_session = None

@@ -154,8 +169,8 @@ class TensorflowGraph(object):
      gradient_costs = []  # costs used for gradient calculation

      with self._shared_name_scope('costs'):
-        for task in xrange(self.num_tasks):
-          task_str = str(task).zfill(len(str(self.num_tasks)))
+        for task in xrange(self.n_tasks):
+          task_str = str(task).zfill(len(str(self.n_tasks)))
          with self._shared_name_scope('cost_{}'.format(task_str)):
            with tf.name_scope('weighted'):
              weighted_cost = self.cost(self.output[task], self.labels[task],
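The zfill pattern above pads task indices to a fixed width so that the per-task name scopes sort lexicographically. A quick worked example, assuming n_tasks = 12:

n_tasks = 12
task_str = str(3).zfill(len(str(n_tasks)))  # len("12") == 2, so "3" -> "03"
# "cost_03" sorts before "cost_10"; a bare "cost_3" would sort after it.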
@@ -210,9 +225,9 @@ class TensorflowGraph(object):
    ############################################################## TIMING
    time1 = time.time()
    ############################################################## TIMING
-    num_datapoints = len(dataset)
+    n_datapoints = len(dataset)
    batch_size = self.batch_size
-    step_per_epoch = np.ceil(float(num_datapoints)/batch_size)
+    step_per_epoch = np.ceil(float(n_datapoints)/batch_size)
    log("Training for %d epochs" % nb_epoch, self.verbosity)
    with self.graph.as_default():
      self.require_attributes(['loss', 'updates'])
@@ -223,7 +238,7 @@ class TensorflowGraph(object):
        # Save an initial checkpoint.
        saver.save(sess, self._save_path, global_step=0)
        for epoch in range(nb_epoch):
-          avg_loss, num_batches = 0., 0
+          avg_loss, n_batches = 0., 0
          if shuffle:
            log("About to shuffle dataset before epoch start.", self.verbosity)
            dataset.shuffle()
@@ -243,9 +258,9 @@ class TensorflowGraph(object):
            avg_loss += loss
            y_pred = np.squeeze(np.array(output))
            y_b = y_b.flatten()
-            num_batches += 1
+            n_batches += 1
          saver.save(sess, self._save_path, global_step=epoch)
-          avg_loss = float(avg_loss)/num_batches
+          avg_loss = float(avg_loss)/n_batches
          log('Ending epoch %d: Average loss %g' % (epoch, avg_loss), self.verbosity)
        # Always save a final checkpoint when complete.
        saver.save(sess, self._save_path, global_step=epoch+1)
@@ -267,7 +282,7 @@ class TensorflowGraph(object):
      dataset: deepchem.datasets.dataset object.

    Returns:
-      Tuple of three numpy arrays with shape num_examples x num_tasks (x ...):
+      Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
        output: Model outputs.
        labels: True labels.
        weights: Example weights.
@@ -286,15 +301,15 @@ class TensorflowGraph(object):
      self.require_attributes(['output'])

      # run eval data through the model
-      num_tasks = self.num_tasks
+      n_tasks = self.n_tasks
      output = []
      start = time.time()
      with self._get_shared_session().as_default():
        feed_dict = self.construct_feed_dict(X)
        data = self._get_shared_session().run(
            self.output, feed_dict=feed_dict)
-        batch_output = np.asarray(data[:num_tasks], dtype=float)
-        # reshape to batch_size x num_tasks x ...
+        batch_output = np.asarray(data[:n_tasks], dtype=float)
+        # reshape to batch_size x n_tasks x ...
        if batch_output.ndim == 3:
          batch_output = batch_output.transpose((1, 0, 2))
        elif batch_output.ndim == 2:
@@ -343,7 +358,7 @@ class TensorflowGraph(object):

    This method creates the following Placeholders for each task:
      labels_%d: Float label tensor. For classification tasks, this tensor will
-        have shape batch_size x num_classes. For regression tasks, this tensor
+        have shape batch_size x n_classes. For regression tasks, this tensor
        will have shape batch_size.

    Raises:
@@ -361,7 +376,7 @@ class TensorflowGraph(object):
    feeding and fetching the same tensor.
    """
    weights = []
-    for task in xrange(self.num_tasks):
+    for task in xrange(self.n_tasks):
      with tf.name_scope(self.placeholder_scope):
        weights.append(tf.identity(
            tf.placeholder(tf.float32, shape=[None],
@@ -399,9 +414,6 @@ class TensorflowGraph(object):
      # allow_soft_placement=True allows ops without a GPU implementation
      # to run on the CPU instead.
      config = tf.ConfigProto(allow_soft_placement=True)
-      ################################################################## DEBUG
-      #config.gpu_options.allow_growth = True
-      ################################################################## DEBUG
      self._shared_session = tf.Session(config=config)
    return self._shared_session
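For context, a minimal sketch of the lazy shared-session pattern used here, written against the pre-1.0 TensorFlow API this file targets (tf.ConfigProto and tf.Session moved under tf.compat.v1 in TF 2.x):

import tensorflow as tf

class SessionOwner(object):
  """Toy stand-in for TensorflowGraph's session handling."""

  def __init__(self):
    # Lazily created by _get_shared_session().
    self._shared_session = None

  def _get_shared_session(self):
    if self._shared_session is None:
      # allow_soft_placement lets ops without a GPU kernel fall back to CPU.
      config = tf.ConfigProto(allow_soft_placement=True)
      self._shared_session = tf.Session(config=config)
    return self._shared_session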

@@ -478,8 +490,8 @@ class TensorflowClassifier(TensorflowGraph):
    """Calculate single-task training cost for a batch of examples.

    Args:
-      logits: Tensor with shape batch_size x num_classes containing logits.
-      labels: Tensor with shape batch_size x num_classes containing true labels
+      logits: Tensor with shape batch_size x n_classes containing logits.
+      labels: Tensor with shape batch_size x n_classes containing true labels
        in a one-hot encoding.
      weights: Tensor with shape batch_size containing example weights.

@@ -494,19 +506,19 @@ class TensorflowClassifier(TensorflowGraph):
    """Add Placeholders for labels for each task.

    This method creates the following Placeholders for each task:
-      labels_%d: Label tensor with shape batch_size x num_classes.
+      labels_%d: Label tensor with shape batch_size x n_classes.

    Placeholders are wrapped in identity ops to avoid the error caused by
    feeding and fetching the same tensor.
    """
    with self.graph.as_default():
      batch_size = self.batch_size 
-      num_classes = self.num_classes
+      n_classes = self.n_classes
      labels = []
-      for task in xrange(self.num_tasks):
+      for task in xrange(self.n_tasks):
        with tf.name_scope(self.placeholder_scope):
          labels.append(tf.identity(
-              tf.placeholder(tf.float32, shape=[None, num_classes],
+              tf.placeholder(tf.float32, shape=[None, n_classes],
                             name='labels_%d' % task)))
      self.labels = labels
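The identity wrapping above sidesteps a TensorFlow error raised when the same tensor is both fed and fetched in a single Session.run call; a stripped-down sketch:

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  # Feed ph, fetch labels_0: two distinct tensors, so Session.run accepts both.
  ph = tf.placeholder(tf.float32, shape=[None, 2], name='labels_0')
  labels_0 = tf.identity(ph)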

@@ -557,7 +569,7 @@ class TensorflowRegressor(TensorflowGraph):
    with self.graph.as_default():
      batch_size = self.batch_size
      labels = []
-      for task in xrange(self.num_tasks):
+      for task in xrange(self.n_tasks):
        with tf.name_scope(self.placeholder_scope):
          labels.append(tf.identity(
              tf.placeholder(tf.float32, shape=[None],
@@ -578,10 +590,8 @@ class TensorflowModel(Model):
    self.verbosity = verbosity
    if tf_class is None:
      tf_class = TensorflowGraph
-    self.train_model = tf_class(logdir, tasks, task_types,
-                                train=True, verbosity=verbosity)
-    self.eval_model = tf_class(logdir, tasks, task_types,
-                                train=False, verbosity=verbosity)
+    self.train_model = tf_class(logdir, train=True)
+    self.eval_model = tf_class(logdir, train=False)
    self.fit_transformers = None

  def fit(self, dataset, shuffle=False):
@@ -619,3 +629,6 @@ class TensorflowModel(Model):
    Loads model from disk. Thin wrapper around restore() for consistency.
    """
    self.eval_model.restore()

+  def get_num_tasks(self):
+    return self.train_model.n_tasks
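A hypothetical call site reflecting the reordered TensorflowGraph signature, where logdir is now the third positional argument (values illustrative, not from the commit):

graph_model = TensorflowGraph(n_tasks=10, n_features=1024, logdir="/tmp/tf_model",
                              layer_sizes=[1000], dropouts=[0.5],
                              batch_size=50, n_classes=2)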
+0 −489

File deleted.


+40 −96
"""TensorFlow implementation of fully connected networks. 

-# Hyperparams used in Arxiv paper "Massively Multitask Networks for Drug Discovery"
-# TODO(rbharath): Should these be moved elsewhere?
-hyperparam_dict = {
-    "single": Hyperparams(num_layers=1,
-                          num_hidden=1200,
-                          node_depth=1,
-                          nonlinearity=ACTIVATION_RECTIFIED_LINEAR,
-                          weight_init=GaussianWeightInit(0.01),
-                          bias_init=ConstantBiasInit(0.5),
-                          dropout=1.),
-    "deep": Hyperparams(num_layers=4,
-                        num_hidden=1000,
-                        node_depth=1,
-                        nonlinearity=ACTIVATION_RECTIFIED_LINEAR,
-                        weight_init=GaussianWeightInit(0.01),
-                        bias_init=ConstantBiasInit(0.5),
-                        dropout=1.),
-    "deepaux": Hyperparams(num_layers=4,
-                        num_hidden=1000,
-                        auxiliary_softmax_layers=[0, 1, 2],
-                        auxiliary_softmax_weight=0.3,
-                        node_depth=1,
-                        nonlinearity=ACTIVATION_RECTIFIED_LINEAR,
-                        weight_init=GaussianWeightInit(0.01),
-                        bias_init=ConstantBiasInit(0.5),
-                        dropout=1.),
-    "py": Hyperparams(num_layers=2,
-                      num_hidden=[2000, 100],
-                      node_depth=1,
-                      nonlinearity=ACTIVATION_RECTIFIED_LINEAR,
-                      weight_init=[GaussianWeightInit(0.01),
-                                   GaussianWeightInit(0.04)],
-                      bias_init=[ConstantBiasInit(0.5),
-                                 ConstantBiasInit(3.0)],
-                      dropout=1.),
-    "pydrop1": Hyperparams(num_layers=2,
-                           num_hidden=[2000, 100],
-                           node_depth=1,
-                           nonlinearity=ACTIVATION_RECTIFIED_LINEAR,
-                           weight_init=[GaussianWeightInit(0.01),
-                                        GaussianWeightInit(0.04)],
-                           bias_init=[ConstantBiasInit(0.5),
-                                      ConstantBiasInit(3.0)],
-                           dropout=[0.75, 1.]),
-    "pydrop2": Hyperparams(num_layers=2,
-                           num_hidden=[2000, 100],
-                           node_depth=1,
-                           nonlinearity=ACTIVATION_RECTIFIED_LINEAR,
-                           weight_init=[GaussianWeightInit(0.01),
-                                        GaussianWeightInit(0.04)],
-                           bias_init=[ConstantBiasInit(0.5),
-                                      ConstantBiasInit(3.0)],
-                           dropout=[0.75, 0.75])}
"""
from __future__ import print_function
from __future__ import division
@@ -96,14 +42,14 @@ class TensorflowMultiTaskClassifier(TensorflowClassifier):

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
-        batch_size x num_features.
+        batch_size x n_features.
    """
-    num_features = self.n_inputs
+    n_features = self.n_features
    with self.graph.as_default():
      with tf.name_scope(self.placeholder_scope):
        self.mol_features = tf.placeholder(
            tf.float32,
-            shape=[None, num_features],
+            shape=[None, n_features],
            name='mol_features')

      layer_sizes = self.layer_sizes
@@ -117,12 +63,12 @@ class TensorflowMultiTaskClassifier(TensorflowClassifier):
          len(dropouts),
          }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
-      num_layers = lengths_set.pop()
-      assert num_layers > 0, 'Must have some layers defined.'
+      n_layers = lengths_set.pop()
+      assert n_layers > 0, 'Must have some layers defined.'

      prev_layer = self.mol_features
-      prev_layer_size = num_features
-      for i in xrange(num_layers):
+      prev_layer_size = n_features
+      for i in xrange(n_layers):
        layer = tf.nn.relu(model_ops.FullyConnectedLayer(
            tensor=prev_layer,
            size=layer_sizes[i],
@@ -136,7 +82,7 @@ class TensorflowMultiTaskClassifier(TensorflowClassifier):
        prev_layer_size = layer_sizes[i]

      self.output = model_ops.MultitaskLogits(
-          layer, self.num_tasks)
+          layer, self.n_tasks)

  def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
    """Construct a feed dictionary from minibatch data.
@@ -144,14 +90,14 @@ class TensorflowMultiTaskClassifier(TensorflowClassifier):
    TODO(rbharath): ids_b is not used here. Can we remove it?

    Args:
-      X_b: np.ndarray of shape (batch_size, num_features)
-      y_b: np.ndarray of shape (batch_size, num_tasks)
-      w_b: np.ndarray of shape (batch_size, num_tasks)
+      X_b: np.ndarray of shape (batch_size, n_features)
+      y_b: np.ndarray of shape (batch_size, n_tasks)
+      w_b: np.ndarray of shape (batch_size, n_tasks)
      ids_b: List of length (batch_size) with datapoint identifiers.
    """ 
    orig_dict = {}
    orig_dict["mol_features"] = X_b
-    for task in xrange(self.num_tasks):
+    for task in xrange(self.n_tasks):
      if y_b is not None:
        orig_dict["labels_%d" % task] = to_one_hot(y_b[:, task])
      else:
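For reference, a hypothetical sketch of what a to_one_hot helper does in the binary case implied by n_classes=2 elsewhere in this file (this is not the library's implementation):

import numpy as np

def to_one_hot(y, n_classes=2):
  """Map integer labels of shape (batch_size,) to one-hot (batch_size, n_classes)."""
  y = np.asarray(y, dtype=int)
  out = np.zeros((len(y), n_classes))
  out[np.arange(len(y)), y] = 1.
  return out

to_one_hot(np.array([0, 1, 1]))  # [[1., 0.], [0., 1.], [0., 1.]]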
@@ -175,7 +121,7 @@ class TensorflowMultiTaskClassifier(TensorflowClassifier):
      dataset: deepchem.datasets.dataset object.

    Returns:
-      Tuple of three numpy arrays with shape num_examples x num_tasks (x ...):
+      Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
        output: Model outputs.
      Note that the output arrays may be more than 2D, e.g. for
      classifier models that return class probabilities.
@@ -191,14 +137,14 @@ class TensorflowMultiTaskClassifier(TensorflowClassifier):
      self.require_attributes(['output'])

      # run eval data through the model
-      num_tasks = self.num_tasks
+      n_tasks = self.n_tasks
      outputs = []
      with self._get_shared_session().as_default():
        feed_dict = self.construct_feed_dict(X)
        data = self._get_shared_session().run(
            self.output, feed_dict=feed_dict)
-        batch_outputs = np.asarray(data[:num_tasks], dtype=float)
-        # reshape to batch_size x num_tasks x ...
+        batch_outputs = np.asarray(data[:n_tasks], dtype=float)
+        # reshape to batch_size x n_tasks x ...
        if batch_outputs.ndim == 3:
          batch_outputs = batch_outputs.transpose((1, 0, 2))
        elif batch_outputs.ndim == 2:
@@ -222,21 +168,20 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
-        batch_size x num_features.
+        batch_size x n_features.
    """
-    assert len(self.model_params["data_shape"]) == 1
-    num_features = self.model_params["data_shape"][0]
+    n_features = self.n_inputs
    with self.graph.as_default():
      with tf.name_scope(self.placeholder_scope):
        self.mol_features = tf.placeholder(
            tf.float32,
-            shape=[None, num_features],
+            shape=[None, n_features],
            name='mol_features')

-      layer_sizes = self.model_params["layer_sizes"]
-      weight_init_stddevs = self.model_params["weight_init_stddevs"]
-      bias_init_consts = self.model_params["bias_init_consts"]
-      dropouts = self.model_params["dropouts"]
+      layer_sizes = self.layer_sizes
+      weight_init_stddevs = self.weight_init_stddevs
+      bias_init_consts = self.bias_init_consts
+      dropouts = self.dropouts
      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
@@ -244,12 +189,12 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
          len(dropouts),
          }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
-      num_layers = lengths_set.pop()
-      assert num_layers > 0, 'Must have some layers defined.'
+      n_layers = lengths_set.pop()
+      assert n_layers > 0, 'Must have some layers defined.'

      prev_layer = self.mol_features
-      prev_layer_size = num_features
-      for i in xrange(num_layers):
+      prev_layer_size = n_features
+      for i in xrange(n_layers):
        layer = tf.nn.relu(model_ops.FullyConnectedLayer(
            tensor=prev_layer,
            size=layer_sizes[i],
@@ -259,12 +204,11 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
            bias_init=tf.constant(value=bias_init_consts[i],
                                  shape=[layer_sizes[i]])))
        layer = model_ops.Dropout(layer, dropouts[i])
-        #layer = tf.nn.dropout(layer, keep_prob)
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      self.output = []
-      for task in range(self.num_tasks):
+      for task in range(self.n_tasks):
        self.output.append(tf.squeeze(
            model_ops.FullyConnectedLayer(
                tensor=prev_layer,
@@ -281,26 +225,26 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
    TODO(rbharath): ids_b is not used here. Can we remove it?

    Args:
-      X_b: np.ndarray of shape (batch_size, num_features)
-      y_b: np.ndarray of shape (batch_size, num_tasks)
-      w_b: np.ndarray of shape (batch_size, num_tasks)
+      X_b: np.ndarray of shape (batch_size, n_features)
+      y_b: np.ndarray of shape (batch_size, n_tasks)
+      w_b: np.ndarray of shape (batch_size, n_tasks)
      ids_b: List of length (batch_size) with datapoint identifiers.
    """ 
    orig_dict = {}
    orig_dict["mol_features"] = X_b
-    for task in xrange(self.num_tasks):
+    for task in xrange(self.n_tasks):
      if y_b is not None:
        orig_dict["labels_%d" % task] = y_b[:, task]
      else:
        # Dummy placeholders
        orig_dict["labels_%d" % task] = np.squeeze(
-            np.zeros((self.model_params["batch_size"],)))
+            np.zeros((self.batch_size,)))
      if w_b is not None:
        orig_dict["weights_%d" % task] = w_b[:, task]
      else:
        # Dummy placeholders
        orig_dict["weights_%d" % task] = np.ones(
-            (self.model_params["batch_size"],))
+            (self.batch_size,))
    return self._get_feed_dict(orig_dict)

  def predict_on_batch(self, X):
@@ -312,7 +256,7 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
      dataset: deepchem.datasets.dataset object.

    Returns:
-      Tuple of three numpy arrays with shape num_examples x num_tasks (x ...):
+      Tuple of three numpy arrays with shape n_examples x n_tasks (x ...):
        output: Model outputs.
        labels: True labels.
        weights: Example weights.
@@ -330,19 +274,19 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
      self.require_attributes(['output'])

      # run eval data through the model
-      num_tasks = self.num_tasks
+      n_tasks = self.n_tasks
      outputs = []
      with self._get_shared_session().as_default():
        n_samples = len(X)
        # Some tensorflow models can't handle variadic batches,
        # especially models using tf.pack, tf.split. Pad batch-size
        # to handle these cases.
-        X = pad_features(self.model_params["batch_size"], X)
+        X = pad_features(self.batch_size, X)
        feed_dict = self.construct_feed_dict(X)
        data = self._get_shared_session().run(
            self.output, feed_dict=feed_dict)
-        batch_outputs = np.asarray(data[:num_tasks], dtype=float)
-        # reshape to batch_size x num_tasks x ...
+        batch_outputs = np.asarray(data[:n_tasks], dtype=float)
+        # reshape to batch_size x n_tasks x ...
        if batch_outputs.ndim == 3:
          batch_outputs = batch_outputs.transpose((1, 0, 2))
        elif batch_outputs.ndim == 2:
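pad_features itself is not shown in this diff; a hypothetical sketch of the padding it performs (models built with tf.pack/tf.split need a fixed batch size, so a short final batch is zero-padded up to batch_size):

import numpy as np

def pad_features(batch_size, X):
  """Zero-pad a (n_samples, n_features) batch up to batch_size rows."""
  n_samples, n_features = X.shape
  if n_samples == batch_size:
    return X
  X_padded = np.zeros((batch_size, n_features), dtype=X.dtype)
  X_padded[:n_samples] = X
  return X_padded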
@@ -352,7 +296,7 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
          #print("X.shape, batch_outputs.shape")
          #print(X.shape, batch_outputs.shape)
          n_samples = len(X)
-          batch_outputs = batch_outputs.reshape((n_samples, num_tasks))
+          batch_outputs = batch_outputs.reshape((n_samples, n_tasks))
        else:
          raise ValueError(
              'Unrecognized rank combination for output: %s' %
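A small worked example of the rank handling above: Session.run returns one array per task, so stacking yields tasks-first axes that must be moved to samples-first. Shapes below assume batch_size 50, 4 tasks, 2 classes:

import numpy as np

data = [np.zeros((50, 2)) for _ in range(4)]        # one (batch, n_classes) array per task
batch_outputs = np.asarray(data, dtype=float)       # (n_tasks, batch, n_classes)
batch_outputs = batch_outputs.transpose((1, 0, 2))  # (batch, n_tasks, n_classes)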