Commit f01ed589 authored by Bharath Ramsundar

Merge pull request #7 from rbharath/descriptor

[WIP] Descriptor Support
parents cab666f3 bf1810e4
+19 −113
@@ -11,8 +11,7 @@ from keras.optimizers import SGD
from deep_chem.utils.load import load_datasets
from deep_chem.utils.load import ensure_balanced
from deep_chem.utils.preprocess import multitask_to_singletask
from deep_chem.utils.preprocess import train_test_random_split
from deep_chem.utils.preprocess import train_test_scaffold_split
from deep_chem.utils.preprocess import split_dataset
from deep_chem.utils.preprocess import dataset_to_numpy
from deep_chem.utils.preprocess import to_one_hot
from deep_chem.utils.evaluate import eval_model
@@ -21,113 +20,22 @@ from deep_chem.utils.evaluate import compute_rms_scores
from deep_chem.utils.evaluate import compute_roc_auc_scores
from deep_chem.utils.load import load_and_transform_dataset

def process_multitask(paths, task_transforms, prediction_endpoint,
    splittype="random", seed=None, weight_positives=False):
  """Extracts multitask datasets and splits into train/test.

  Returns a tuple of test/train datasets, fingerprints, and labels.

  TODO(rbharath): This function is ugly. Returns way too many arguments. Clean
  it up.

  Parameters
  ----------
  paths: list 
    List of paths to datasets. 
  task_transforms: dict
    dict mapping target names to label transforms. Each transform must be
    either None, "log", "normalize", or "log-normalize". Only for regression
    outputs.
  prediction_endpoint: string
    Name of measured endpoint to predict.
  splittype: string
    Must be "random" or "scaffold"
  seed: int
    Seed used for random splits.
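
  Example
  -------
  A hypothetical call; the paths, transform, and endpoint name below are
  illustrative placeholders, not values from this repo:

  >>> paths = ["muv.pkl.gz"]
  >>> task_transforms = {"MUV-466": "log"}
  >>> (train, X_train, y_train, W_train,
  ...  test, X_test, y_test, W_test) = process_multitask(
  ...     paths, task_transforms, "pIC50", splittype="scaffold")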
  """
  dataset = load_and_transform_dataset(paths, task_transforms,
      prediction_endpoint,
      weight_positives=weight_positives)
  sorted_targets = sorted(dataset.keys())
  if splittype == "random":
    train, test = train_test_random_split(dataset, seed=seed)
  elif splittype == "scaffold":
    train, test = train_test_scaffold_split(dataset)
  else:
    raise ValueError("Improper splittype. Must be random/scaffold.")
  X_train, y_train, W_train = dataset_to_numpy(train)
  ## TODO(rbharath): Still need to fix the failures for PCBA. Temporarily
  ## commenting out to experiment.
  #if weight_positives:
  #  print "Train set balance"
  #  ensure_balanced(y_train, W_train)
  X_test, y_test, W_test = dataset_to_numpy(test)
  #if weight_positives:
  #  print "Test set balance"
  #  ensure_balanced(y_test, W_test)
  return (train, X_train, y_train, W_train, test, X_test, y_test, W_test)

def process_singletask(paths, task_transforms,
    prediction_endpoint,
    splittype="random", seed=None,
    weight_positives=True):
  """Extracts singletask datasets and splits into train/test.

  Returns a dict that maps target names to tuples.

  Parameters
  ----------
  paths: list 
    List of paths to Google vs datasets. 
  task_transforms: dict
    dict mapping target names to label transforms. Each transform must be
    either None or "log". Only for regression outputs.
  prediction_endpoint: string
    Name of measured endpoint to predict.
  splittype: string
    Must be "random" or "scaffold"
  seed: int
    Seed used for random splits.
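
  Example
  -------
  A hypothetical call; the path, target, and endpoint names are illustrative:

  >>> arrays = process_singletask(["muv.pkl.gz"], {"MUV-466": None},
  ...                             "pIC50", splittype="scaffold")
  >>> (train, X_train, y_train, W_train), (test, X_test, y_test, W_test) = (
  ...     arrays["MUV-466"])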
  """
  dataset = load_and_transform_dataset(paths, task_transforms,
      prediction_endpoint,
      weight_positives=weight_positives)
  singletask = multitask_to_singletask(dataset)
  arrays = {}
  for target in singletask:
    data = singletask[target]
    if len(data) == 0:
      continue
    if splittype == "random":
      train, test = train_test_random_split(data, seed=seed)
    elif splittype == "scaffold":
      train, test = train_test_scaffold_split(data)
    else:
      raise ValueError("Improper splittype. Must be random/scaffold.")
    X_train, y_train, W_train = dataset_to_numpy(train)
    X_test, y_test, W_test = dataset_to_numpy(test)
    arrays[target] = (train, X_train, y_train, W_train), (test, X_test, y_test, W_test)
  return arrays


def fit_multitask_mlp(paths, task_types, task_transforms, prediction_endpoint,
                      splittype="random", weight_positives=False, **training_params):
def fit_multitask_mlp(train_data, test_data, task_types, **training_params):
  """
  Perform stochastic gradient descent optimization for a keras multitask MLP.
  Returns AUCs, R^2 scores, and RMS values.

  Parameters
  ----------
  paths: list 
    List of paths to Google vs datasets. 
  task_types: dict 
    dict mapping target names to output type. Each output type must be either
    "classification" or "regression".
  task_transforms: dict 
    dict mapping target names to label transforms. Each transform must be
    either None, "log", "normalize", or "log-normalize". Only for regression
    outputs.
  training_params: dict
    Aggregates keyword parameters to pass to train_multitask_model
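
  Example
  -------
  A hypothetical invocation with made-up task names; train_data and test_data
  are assumed to come from the dataset-processing utilities:

  >>> task_types = {"task0": "classification", "task1": "classification"}
  >>> results = fit_multitask_mlp(train_data, test_data, task_types,
  ...                             n_hidden=500, nb_epoch=20)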
  """
  (train, X_train, y_train, W_train), (test, X_test, y_test, W_test) = (
      process_multitask(paths, task_transforms, splittype=splittype,
      weight_positives=weight_positives))
  print np.shape(y_train)
      train_data, test_data)
  model = train_multitask_model(X_train, y_train, W_train, task_types,
                                **training_params)
  results = eval_model(test, model, task_types,
@@ -139,44 +47,43 @@ def fit_multitask_mlp(paths, task_types, task_transforms, prediction_endpoint,
  r2s = compute_r2_scores(results, local_task_types)
  if r2s:
    print "Mean R^2: %f" % np.mean(np.array(r2s.values()))
  return results

def fit_singletask_mlp(paths, task_types, task_transforms,
                       prediction_endpoint,
                       splittype="random", weight_positives=True,
                       num_to_train=None, **training_params):
def fit_singletask_mlp(per_task_data, task_types, num_to_train=None, **training_params):
  """
  Perform stochastic gradient descent optimization for a keras MLP.

  paths: list 
    List of paths to Google vs datasets. 
  task_types: dict 
    dict mapping target names to output type. Each output type must be either
    "classification" or "regression".
  task_transforms: dict 
  output_transforms: dict 
    dict mapping target names to label transforms. Each transform must be
    either None or "log". Only for regression outputs.
  training_params: dict
    Aggregates keyword parameters to pass to train_multitask_model
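
  Example
  -------
  A hypothetical invocation; the target name is a placeholder and
  per_task_data is assumed to come from the dataset-processing utilities:

  >>> results = fit_singletask_mlp(per_task_data, {"target0": "regression"},
  ...                              num_to_train=1, nb_epoch=10)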
  """
  singletasks = process_singletask(paths, task_transforms,
    prediction_endpoint,
    splittype=splittype, weight_positives=weight_positives)
  ret_vals = {}
  aucs, r2s, rms = {}, {}, {}
  sorted_targets = sorted(singletasks.keys())
  sorted_targets = sorted(per_task_data.keys())
  if num_to_train:
    sorted_targets = sorted_targets[:num_to_train]
  all_results = {}
  for index, target in enumerate(sorted_targets):
    print "Training model %d" % index
    print "Target %s" % target
    (train, X_train, y_train, W_train), (test, X_test, y_test, W_test) = (
        singletasks[target])
        per_task_data[target])
    print "len(train)"
    print len(train)
    print "len(test)"
    print len(test)
    model = train_multitask_model(X_train, y_train, W_train,
        {target: task_types[target]}, **training_params)
    results = eval_model(test, model, {target: task_types[target]}, 
                         # We run singletask models as special cases of
                         # multitask.
                         modeltype="keras_multitask")
    all_results[target] = results[target]
    target_aucs = compute_roc_auc_scores(results, task_types)
    target_r2s = compute_r2_scores(results, task_types)
    target_rms = compute_rms_scores(results, task_types)
@@ -190,13 +97,11 @@ def fit_singletask_mlp(paths, task_types, task_transforms,
  if r2s:
    print r2s
    print "Mean R^2: %f" % np.mean(np.array(r2s.values()))
  if rms:
    print rms
    print "Mean RMS: %f" % np.mean(np.array(rms.values()))
  return all_results

def train_multitask_model(X, y, W, task_types,
  learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True, activation="relu",
  dropout=0.5, nb_epoch=20, batch_size=50, n_hidden=500, n_input=1024,
  dropout=0.5, nb_epoch=20, batch_size=50, n_hidden=500,
  validation_split=0.1):
  """
  Perform stochastic gradient descent optimization for a keras multitask MLP.
@@ -229,12 +134,13 @@ def train_multitask_model(X, y, W, task_types,
  sorted_targets = sorted(task_types.keys())
  local_task_types = task_types.copy()
  endpoints = sorted_targets
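  # Infer the input dimensionality from the feature matrix rather than
  # relying on a fixed n_input hyperparameter.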
  (_, n_inputs) = np.shape(X)
  # Add eps weight to avoid minibatches with zero weight (causes theano to crash).
  W = W + eps * np.ones(np.shape(W))
  model = Graph()
  model.add_input(name="input", ndim=n_input)
  model.add_input(name="input", ndim=n_inputs)
  model.add_node(
      Dense(n_input, n_hidden, init='uniform', activation=activation),
      Dense(n_inputs, n_hidden, init='uniform', activation=activation),
      name="dense", input="input")
  model.add_node(Dropout(dropout), name="dropout", input="dense")
  top_layer = "dropout"
+5 −31
@@ -7,45 +7,18 @@ from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution3D, MaxPooling3D
from keras.utils import np_utils
from deep_chem.utils.preprocess import train_test_random_split
from deep_chem.utils.preprocess import split_dataset
from deep_chem.utils.load import load_and_transform_dataset
from deep_chem.utils.preprocess import tensor_dataset_to_numpy
from deep_chem.utils.evaluate import eval_model
from deep_chem.utils.evaluate import compute_r2_scores

# TODO(rbharath): Factor this out into a separate function in utils. Duplicates
# code in deep.py
# TODO(rbharath): paths is to handle sharded input pickle files. Might be
# better to use hdf5 datasets like in MSMBuilder
def process_3D_convolutions(paths, task_transforms, prediction_endpoint, seed=None, splittype="random"):
  """Loads 3D Convolution datasets.

  Parameters
  ----------
  paths: list
    List of paths to convolution datasets.
  task_transforms: dict
    dict mapping target names to label transforms.
  prediction_endpoint: string
    Name of measured endpoint to predict.
  splittype: string
    Must be "random" or "scaffold"
  seed: int
    Seed used for random splits.
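
  Example
  -------
  A hypothetical call; the path and endpoint name are illustrative:

  >>> train_split, test_split = process_3D_convolutions(
  ...     ["pdbbind.pkl.gz"], {}, "Ki", splittype="random")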
  """
  dataset = load_and_transform_dataset(paths, task_transforms,
    prediction_endpoint, datatype="pdbbind")
  # TODO(rbharath): Factor this code splitting out into a util function.
  if splittype == "random":
    train, test = train_test_random_split(dataset, seed=seed)
  elif splittype == "scaffold":
    train, test = train_test_scaffold_split(dataset)
  else:
    raise ValueError("Improper splittype. Must be random/scaffold.")
  X_train, y_train, W_train = tensor_dataset_to_numpy(train)
  X_test, y_test, W_test = tensor_dataset_to_numpy(test)
  return (X_train, y_train, W_train, train), (X_test, y_test, W_test, test)

def fit_3D_convolution(paths, task_types, task_transforms, prediction_endpoint,
    axis_length=32, **training_params):
def fit_3D_convolution(train_data, test_data, task_types, axis_length=32, **training_params):
  """
  Perform stochastic gradient descent for a 3D CNN.
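
  Example
  -------
  A hypothetical call with a made-up endpoint name; train_data and test_data
  are assumed to come from the grid featurization pipeline:

  >>> results = fit_3D_convolution(train_data, test_data,
  ...                              {"Ki": "regression"}, axis_length=32,
  ...                              nb_epoch=1, batch_size=50)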
  """
  (X_train, y_train, W_train, train), (X_test, y_test, W_test, test) = process_3D_convolutions(
    paths, task_transforms, prediction_endpoint)

  print "np.shape(X_train): " + str(np.shape(X_train))
  print "np.shape(y_train): " + str(np.shape(y_train))
  (X_train, y_train, W_train, train), (X_test, y_test, W_test, test) = (
      train_data, test_data)

  nb_classes = 2
  model = train_3D_convolution(X_train, y_train, axis_length, **training_params)
@@ -54,6 +27,7 @@ def fit_3D_convolution(paths, task_types, task_transforms, prediction_endpoint,
  local_task_types = task_types.copy()
  r2s = compute_r2_scores(results, local_task_types)
  print "Mean R^2: %f" % np.mean(np.array(r2s.values()))
  return results

def train_3D_convolution(X, y, axis_length=32, batch_size=50, nb_epoch=1):
  """
+16 −39
@@ -4,9 +4,7 @@ Code for processing datasets using scikit-learn.
import numpy as np
from deep_chem.utils.analysis import results_to_csv
from deep_chem.utils.load import load_and_transform_dataset
from deep_chem.utils.preprocess import multitask_to_singletask
from deep_chem.utils.preprocess import train_test_random_split
from deep_chem.utils.preprocess import train_test_scaffold_split
from deep_chem.utils.preprocess import split_dataset
from deep_chem.utils.preprocess import dataset_to_numpy
from deep_chem.utils.evaluate import eval_model
from deep_chem.utils.evaluate import compute_r2_scores
@@ -23,8 +21,8 @@ from sklearn.linear_model import ElasticNetCV
from sklearn.linear_model import LassoLarsCV
from sklearn.svm import SVR

def fit_singletask_models(paths, modeltype, task_types, task_transforms,
    splittype="random", seed=None, num_to_train=None):
def fit_singletask_models(per_task_data, modeltype, task_types,
    num_to_train=None):
  """Fits singletask linear regression models to potency.

  Parameters
@@ -40,27 +38,19 @@ def fit_singletask_models(paths, modeltype, task_types, task_transforms,
  task_types: dict 
    dict mapping target names to output type. Each output type must be either
    "classification" or "regression".
  task_transforms: dict 
  output_transforms: dict 
    dict mapping target names to label transforms. Each transform must be
    either None or "log". Only for regression outputs.
  """
  dataset = load_and_transform_dataset(paths, task_transforms)
  singletask = multitask_to_singletask(dataset)
  all_results = {}
  aucs, r2s, rms = {}, {}, {}
  sorted_targets = sorted(singletask.keys())
  sorted_targets = sorted(per_task_data.keys())
  if num_to_train:
    sorted_targets = sorted_targets[:num_to_train]
  for index, target in enumerate(sorted_targets):
    print "Building model %d" % index
    data = singletask[target]
    if splittype == "random":
      train, test = train_test_random_split(data, seed=seed)
    elif splittype == "scaffold":
      train, test = train_test_scaffold_split(data)
    else:
      raise ValueError("Improper splittype. Must be random/scaffold.")
    X_train, y_train, W_train = dataset_to_numpy(train)
    X_test, y_test, W_test = dataset_to_numpy(test)
    (train, X_train, y_train, W_train), (test, X_test, y_test, W_test) = (
        per_task_data[target])
    if modeltype == "rf_regressor":
      model = RandomForestRegressor(n_estimators=500, n_jobs=-1,
          warm_start=True, max_features="sqrt")
@@ -84,6 +74,7 @@ def fit_singletask_models(paths, modeltype, task_types, task_transforms,
    model.fit(X_train, y_train.ravel())
    results = eval_model(test, model, {target: task_types[target]},
        modeltype="sklearn")
    all_results[target] = results[target]

    target_aucs = compute_roc_auc_scores(results, task_types)
    target_r2s = compute_r2_scores(results, task_types)
@@ -101,30 +92,16 @@ def fit_singletask_models(paths, modeltype, task_types, task_transforms,
  if rms:
    print results_to_csv(rms)
    print "Mean RMS: %f" % np.mean(np.array(rms.values()))
  return all_results


def fit_multitask_rf(dataset, splittype="random"):
def fit_multitask_rf(train_data, test_data, task_types):
  """Fits a multitask RF model to provided dataset.

  Performs a random 80-20 train/test split.

  Parameters
  ----------
  dataset: dict 
    A dictionary of type produced by load_datasets. 
  splittype: string
    Type of split for train/test. Either random or scaffold.
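
  Example
  -------
  A hypothetical call with a made-up task name:

  >>> fit_multitask_rf(train_data, test_data, {"task0": "classification"})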
  """
  if splittype == "random":
    train, test = train_test_random_split(dataset, seed=0)
  elif splittype == "scaffold":
    train, test = train_test_scaffold_split(dataset)
  else:
    raise ValueError("Improper splittype. Must be random/scaffold.")
  X_train, y_train, W_train = dataset_to_numpy(train)
  classifier = RandomForestClassifier(n_estimators=100, n_jobs=-1,
  (train, X_train, y_train, W_train), (test, X_test, y_test, W_test) = (
      train_data, test_data) 
  model = RandomForestClassifier(n_estimators=100, n_jobs=-1,
      class_weight="auto")
  classifier.fit(X_train, y_train)
  results = eval_model(test, classifier)
  model.fit(X_train, y_train)
  results = eval_model(test, model, task_types)
  scores = compute_roc_auc_scores(results, task_types)
  print "Mean AUC: %f" % np.mean(np.array(scores.values()))
+63 −29
@@ -7,26 +7,50 @@ from deep_chem.models.deep import fit_singletask_mlp
from deep_chem.models.deep import fit_multitask_mlp
from deep_chem.models.deep3d import fit_3D_convolution
from deep_chem.models.standard import fit_singletask_models
from deep_chem.utils.load import get_default_task_types_and_transforms
from deep_chem.utils.load import get_target_names
from deep_chem.utils.load import process_datasets
from deep_chem.utils.evaluate import results_to_csv

# TODO(rbharath): Factor this into subcommands. The interface is too
# complicated now to effectively use.
def parse_args(input_args=None):
  """Parse command-line arguments."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--datasets', nargs="+", required=1,
                      choices=['muv', 'pcba', 'dude', 'pfizer', 'globavir', 'pdbbind'],
                      help='Name of dataset to process.')
  parser.add_argument("--task-type", default="classification",
                      choices=["classification", "regression"],
                      help="Type of learning task.")
  parser.add_argument("--input-transforms", nargs="+", default=[],
                      choices=["normalize", "truncate-outliers"],
                      help="Transforms to apply to input data.")
  parser.add_argument("--output-transforms", nargs="+", default=[],
                      choices=["log", "normalize"],
                      help="Transforms to apply to output data.")
  parser.add_argument("--feature-types", nargs="+", required=1,
                      choices=["fingerprints", "descriptors", "grid"],
                      help="Types of featurizations to use.")
  parser.add_argument("--paths", nargs="+", required=1,
                      help="Paths to input datasets.")
  parser.add_argument("--mode", default="singletask",
                      choices=["singletask", "multitask"],
                      help="Type of model being built.")
  parser.add_argument('--model', required=1,
  parser.add_argument("--model", required=1,
                      choices=["logistic", "rf_classifier", "rf_regressor",
                      "linear", "ridge", "lasso", "lasso_lars", "elastic_net",
                      "singletask_deep_network", "multitask_deep_network",
                      "3D_cnn"])
  parser.add_argument("--splittype", type=str, default="scaffold",
                       choices=["scaffold", "random"],
                       help="Type of cross-validation data-splitting.")
  parser.add_argument("--prediction-endpoint", type=str, default="IC50",
                       choices=["scaffold", "random", "specified"],
                       help="Type of train/test data-splitting.\n"
                            "scaffold uses Bemis-Murcko scaffolds.\n"
                            "specified requires that split be in original data.")
  parser.add_argument("--csv-out", type=str, default=None,
                  help="Outputted predictions on the test set.")
  #TODO(rbharath): These two arguments (prediction/split-endpoint) should be
  #moved to process_datataset to simplify the invocation here.
  parser.add_argument("--prediction-endpoint", type=str, required=1,
                       help="Name of measured endpoint to predict.")
  parser.add_argument("--split-endpoint", type=str, default=None,
                       help="Name of endpoint specifying train/test split.")
  parser.add_argument("--n-hidden", type=int, default=500,
                      help="Number of hidden neurons for NN models.")
  parser.add_argument("--learning-rate", type=float, default=0.01,
@@ -48,44 +72,54 @@ def parse_args(input_args=None):
                  help="Number of datasets to train on. Only for debug.")
  parser.add_argument("--axis-length", type=int, default=32,
                  help="Size of a grid axis for 3D CNN input.")
      
  return parser.parse_args(input_args)
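
# Example invocation (the module path and data file below are hypothetical;
# the flags are those defined in parse_args above):
#   python -m deep_chem.scripts.main --paths data/muv.pkl.gz \
#       --task-type classification --feature-types fingerprints \
#       --prediction-endpoint pIC50 --mode multitask \
#       --model multitask_deep_network --splittype scaffold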

def main():
  args = parse_args()
  paths = {}

  for dataset, path in zip(args.datasets, args.paths):
    paths[dataset] = path
  paths = args.paths

  task_types, task_transforms = get_default_task_types_and_transforms(paths)
  targets = get_target_names(paths)
  task_types = {target: args.task_type for target in targets}
  input_transforms = args.input_transforms 
  output_transforms = {target: args.output_transforms for target in targets}

  if args.model == "singletask_deep_network":
    fit_singletask_mlp(paths.values(), task_types, task_transforms,
      splittype=args.splittype, 
      n_hidden=args.n_hidden,
  datatype = "tensor" if args.model == "3D_cnn" else "vector"
  processed = process_datasets(paths,
      input_transforms, output_transforms, feature_types=args.feature_types, 
      prediction_endpoint=args.prediction_endpoint,
      split_endpoint=args.split_endpoint,
      splittype=args.splittype, weight_positives=args.weight_positives,
      datatype=datatype, mode=args.mode)
  if args.mode == "multitask":
    train_data, test_data = processed
  else:
    per_task_data = processed
  # TODO(rbharath): Bundle training params into a training_param dict that's passed
  # down to these functions.
  if args.model == "singletask_deep_network":
    results = fit_singletask_mlp(per_task_data, task_types, n_hidden=args.n_hidden,
      learning_rate=args.learning_rate, dropout=args.dropout,
      nb_epoch=args.n_epochs, decay=args.decay, batch_size=args.batch_size,
      validation_split=args.validation_split,
      weight_positives=args.weight_positives, num_to_train=args.num_to_train)
      num_to_train=args.num_to_train)
  elif args.model == "multitask_deep_network":
    fit_multitask_mlp(paths.values(), task_types, task_transforms,
      prediction_endpoint=args.prediction_endpoint,
      splittype=args.splittype,
      n_hidden=args.n_hidden, learning_rate =
      args.learning_rate, dropout = args.dropout, batch_size=args.batch_size,
    results = fit_multitask_mlp(train_data, test_data, task_types,
      n_hidden=args.n_hidden, learning_rate = args.learning_rate,
      dropout = args.dropout, batch_size=args.batch_size,
      nb_epoch=args.n_epochs, decay=args.decay,
      validation_split=args.validation_split,
      weight_positives=args.weight_positives)
      validation_split=args.validation_split)
  elif args.model == "3D_cnn":
    fit_3D_convolution(paths.values(), task_types, task_transforms,
        prediction_endpoint=args.prediction_endpoint,
    results = fit_3D_convolution(train_data, test_data, task_types,
        axis_length=args.axis_length, nb_epoch=args.n_epochs,
        batch_size=args.batch_size)
  else:
    fit_singletask_models(paths.values(), args.model, task_types,
        task_transforms, splittype=args.splittype, num_to_train=args.num_to_train)
    results = fit_singletask_models(per_task_data, args.model, task_types,
                                    num_to_train=args.num_to_train)
  
  if args.csv_out is not None:
    results_to_csv(results, args.csv_out, task_type=args.task_type)

if __name__ == "__main__":
  main()
+2 −2
# Usage ./process_bace.sh INPUT_SDF_FILE
python -m deep_chem.scripts.process_dataset --input-file $1 --input-type sdf --fields Name smiles pIC50 --field-types string string concentration --name BACE --out /tmp/
# Usage ./process_bace.sh INPUT_SDF_FILE OUT_DIR DATASET_NAME
python -m deep_chem.scripts.process_dataset --input-file $1 --input-type sdf --fields Name smiles pIC50 Model --field-types string string float string --name $3 --out $2 --prediction-endpoint pIC50