Commit 1f756744 authored by Bharath Ramsundar

Specified train/test splits now work. Thresholding for regression datasets now works.

parent 467aa60e
+5 −4
@@ -14,7 +14,6 @@ from deep_chem.utils.preprocess import multitask_to_singletask
from deep_chem.utils.preprocess import split_dataset
from deep_chem.utils.preprocess import dataset_to_numpy
from deep_chem.utils.preprocess import to_one_hot
from deep_chem.utils.preprocess import process_multitask_dataset
from deep_chem.utils.evaluate import eval_model
from deep_chem.utils.evaluate import compute_r2_scores
from deep_chem.utils.evaluate import compute_rms_scores
@@ -62,6 +61,7 @@ def fit_singletask_mlp(per_task_data, task_types, num_to_train=None, **training_
  training_params: dict
    Aggregates keyword parameters to pass to train_multitask_model
  """
  print "ENTERING FIT_SINGLETASK_MLP"
  ret_vals = {}
  aucs, r2s, rms = {}, {}, {}
  sorted_targets = sorted(per_task_data.keys())
@@ -72,6 +72,10 @@ def fit_singletask_mlp(per_task_data, task_types, num_to_train=None, **training_
    print "Target %s" % target
    (train, X_train, y_train, W_train), (test, X_test, y_test, W_test) = (
        per_task_data[target])
    print "len(train)"
    print len(train)
    print "len(test)"
    print len(test)
    model = train_multitask_model(X_train, y_train, W_train,
        {target: task_types[target]}, **training_params)
    results = eval_model(test, model, {target: task_types[target]}, 
@@ -91,9 +95,6 @@ def fit_singletask_mlp(per_task_data, task_types, num_to_train=None, **training_
  if r2s:
    print r2s
    print "Mean R^2: %f" % np.mean(np.array(r2s.values()))
  if rms:
    print rms
    print "Mean RMS: %f" % np.mean(np.array(rms.values()))

def train_multitask_model(X, y, W, task_types,
  learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True, activation="relu",
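For orientation, here is a minimal sketch of the per_task_data mapping this loop consumes; the tuple layout comes from the unpacking above, while the target name, sizes, and values are placeholders:

import numpy as np

# Each target maps to ((train, X_train, y_train, W_train),
#                      (test, X_test, y_test, W_test)).
n_train, n_test, n_feat = 80, 20, 1024  # placeholder sizes
def _split(n):
    ids = ["mol%d" % i for i in range(n)]   # placeholder identifiers
    X = np.random.rand(n, n_feat)           # placeholder features
    y = np.random.randint(0, 2, (n, 1))     # placeholder labels
    W = np.ones((n, 1))                     # uniform example weights
    return ids, X, y, W
per_task_data = {"pIC50": (_split(n_train), _split(n_test))}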
+13 −11
@@ -8,6 +8,7 @@ from deep_chem.models.deep import fit_multitask_mlp
from deep_chem.models.deep3d import fit_3D_convolution
from deep_chem.models.standard import fit_singletask_models
from deep_chem.utils.load import get_target_names
from deep_chem.utils.load import process_datasets

# TODO(rbharath): Factor this into subcommands. The interface is too
# complicated now to use effectively.
@@ -30,7 +31,7 @@ def parse_args(input_args=None):
                      help="Paths to input datasets.")
  parser.add_argument("--mode", default="singletask",
                      choices=["singletask", "multitask"],
                      "Type of model being built.")
                      help="Type of model being built.")
  parser.add_argument("--model", required=1,
                      choices=["logistic", "rf_classifier", "rf_regressor",
                      "linear", "ridge", "lasso", "lasso_lars", "elastic_net",
@@ -41,6 +42,8 @@ def parse_args(input_args=None):
                       help="Type of train/test data-splitting.\n"
                            "scaffold uses Bemis-Murcko scaffolds.\n"
                            "specified requires that split be in original data.")
  #TODO(rbharath): These two arguments (prediction/split-endpoint) should be
  #moved to process_datasets to simplify the invocation here.
  parser.add_argument("--prediction-endpoint", type=str, required=1,
                       help="Name of measured endpoint to predict.")
  parser.add_argument("--split-endpoint", type=str, default=None,
@@ -82,11 +85,11 @@ def main():

  datatype = "tensor" if args.model == "3D_cnn" else "vector"
  processed = process_datasets(paths,
      input_transforms, output_transforms, feature_types, 
      prediction_endpoint=prediction_endpoint,
      split_endpoint=split_endpoint,
      splittype=splittype, weight_positives=weight_positives,
      datatype=datatype)
      input_transforms, output_transforms, feature_types=args.feature_types, 
      prediction_endpoint=args.prediction_endpoint,
      split_endpoint=args.split_endpoint,
      splittype=args.splittype, weight_positives=args.weight_positives,
      datatype=datatype, mode=args.mode)
  if args.mode == "multitask":
    train_data, test_data = processed
  else:
@@ -98,14 +101,13 @@ def main():
      learning_rate=args.learning_rate, dropout=args.dropout,
      nb_epoch=args.n_epochs, decay=args.decay, batch_size=args.batch_size,
      validation_split=args.validation_split,
      weight_positives=args.weight_positives, num_to_train=args.num_to_train)
      num_to_train=args.num_to_train)
  elif args.model == "multitask_deep_network":
    fit_multitask_mlp(train_data, test_data, task_types,
      n_hidden=args.n_hidden, learning_rate=args.learning_rate,
      dropout=args.dropout, batch_size=args.batch_size,
      nb_epoch=args.n_epochs, decay=args.decay,
      validation_split=args.validation_split,
      weight_positives=args.weight_positives)
      validation_split=args.validation_split)
  elif args.model == "3D_cnn":
    fit_3D_convolution(train_data, test_data, task_types,
        axis_length=args.axis_length, nb_epoch=args.n_epochs,
+2 −2
# Usage ./process_bace.sh INPUT_SDF_FILE OUT_DIR
python -m deep_chem.scripts.process_dataset --input-file $1 --input-type sdf --fields Name smiles pIC50 --field-types string string concentration --name BACE --out $2
# Usage ./process_bace.sh INPUT_SDF_FILE OUT_DIR DATASET_NAME
python -m deep_chem.scripts.process_dataset --input-file $1 --input-type sdf --fields Name smiles pIC50 Model --field-types string string float string --name $3 --out $2 --prediction-endpoint pIC50
+13 −13
@@ -26,13 +26,16 @@ def parse_args(input_args=None):
  parser.add_argument("--fields", required=1, nargs="+",
                      help="Names of fields.")
  parser.add_argument("--field-types", required=1, nargs="+",
                      choices=["string", "float", "list-string", "list-float",
                               "ndarray", "concentration"],
                      help="Type of data in fields. Concentration is for molar concentrations.")
                      choices=["string", "float", "list-string", "list-float", "ndarray"],
                      help="Type of data in fields.")
  parser.add_argument("--name", required=1,
                      help="Name of the dataset.")
  parser.add_argument("--out", required=1,
                      help="Folder to generate processed dataset in.")
  parser.add_argument("--prediction-endpoint", type=str, required=1,
                      help="Name of measured endpoint to predict.")
  parser.add_argument("--threshold", type=float, default=None,
                      help="Used to turn real-valued data into binary.")
  return parser.parse_args(input_args)

def generate_directories(name, out):
@@ -160,12 +163,6 @@ def process_field(data, field_type):
    return data 
  elif field_type == "float":
    return parse_float_input(data)
  elif field_type == "concentration":
    fl = parse_float_input(data)
    if fl is not None:
      return parse_float_input(data) / 1e-7
    else:
      return None
  elif field_type == "list-string":
    return data.split(",")
  elif field_type == "list-float":
@@ -174,17 +171,20 @@ def process_field(data, field_type):
    return data 

def generate_targets(input_file, input_type, fields, field_types, out_pkl,
    out_sdf):
    out_sdf, prediction_endpoint, threshold):
  """Process input data file."""
  rows, mols, smiles = [], [], SmilesGenerator()
  for row_index, raw_row in enumerate(get_rows(input_file, input_type)):
    print row_index
    print raw_row
    # Skip row labels.
    if row_index == 0 or raw_row is None:
      continue
    row, row_data = {}, get_row_data(raw_row, input_type, fields, field_types)
    for ind, (field, field_type) in enumerate(zip(fields, field_types)):
      if field == prediction_endpoint and threshold is not None:
        raw_val = process_field(row_data[ind], field_type)
        row[field] = 1 if raw_val > threshold else 0 
      else:
        row[field] = process_field(row_data[ind], field_type)
    # TODO(rbharath): This patch is only in place until the smiles/sequence
    # support is fixed.
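The new threshold branch above binarizes the prediction endpoint while the data is read. An equivalent standalone sketch (the cutoff value is a placeholder; the None guard is added here for safety and is not in the hunk):

THRESHOLD = 7.0  # placeholder cutoff, e.g. for pIC50
raw_values = [5.1, 7.4, None, 8.2]
binary = [None if v is None else (1 if v > THRESHOLD else 0)
          for v in raw_values]
# binary == [0, 1, None, 1]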
@@ -211,7 +211,7 @@ def main():
  args = parse_args()
  out_pkl, out_sdf = generate_directories(args.name, args.out)
  generate_targets(args.input_file, args.input_type, args.fields,
      args.field_types, out_pkl, out_sdf)
      args.field_types, out_pkl, out_sdf, args.prediction_endpoint, args.threshold)
  generate_fingerprints(args.name, args.out)
  generate_descriptors(args.name, args.out)
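For reference, the updated generate_targets signature in use; every value below is a placeholder that mirrors the call in main() above:

# Hypothetical values; mirrors main() with the two new arguments.
out_pkl, out_sdf = generate_directories("BACE", "/tmp/processed")
generate_targets(
    "bace.sdf", "sdf",
    ["Name", "smiles", "pIC50"],
    ["string", "string", "float"],
    out_pkl, out_sdf,
    prediction_endpoint="pIC50",
    threshold=7.0)  # threshold=None keeps the endpoint real-valued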

+47 −2
@@ -13,6 +13,52 @@ from deep_chem.utils.preprocess import transform_outputs
from deep_chem.utils.preprocess import transform_inputs
from deep_chem.utils.preprocess import dataset_to_numpy
from deep_chem.utils.preprocess import tensor_dataset_to_numpy
from deep_chem.utils.preprocess import multitask_to_singletask
from deep_chem.utils.preprocess import split_dataset
from deep_chem.utils.preprocess import to_arrays

def process_datasets(paths, input_transforms, output_transforms,
    prediction_endpoint=None, split_endpoint=None, datatype="vector",
    feature_types=["fingerprints"], mode="multitask", splittype="random",
    seed=None, weight_positives=True):
  """Extracts datasets and split into train/test.

  Returns a dict that maps target names to tuples.

  Parameters
  ----------
  paths: list 
    List of paths to Google vs datasets. 
  output_transforms: dict 
    dict mapping target names to label transform. Each output type must be either
    None or "log". Only for regression outputs.
  splittype: string
    Must be "random" or "scaffold"
  seed: int
    Seed used for random splits.
  """
  dataset = load_and_transform_dataset(paths, input_transforms, output_transforms,
      prediction_endpoint, split_endpoint=split_endpoint,
      feature_types=feature_types, weight_positives=weight_positives)
  if mode == "singletask":
    singletask = multitask_to_singletask(dataset)
    arrays = {}
    for target in singletask:
      data = singletask[target]
      if len(data) == 0:
        continue
      train, test = split_dataset(data, splittype)  # split this target's data, not the full dataset
      train_data, test_data = to_arrays(train, test, datatype)
      arrays[target] = train_data, test_data 
    return arrays
  elif mode == "multitask":
    sorted_targets = sorted(dataset.keys())
    train, test = split_dataset(dataset, splittype)
    train_data, test_data = to_arrays(train, test, datatype)
    return train_data, test_data
  else:
    raise ValueError("Unsupported mode for process_datasets.")
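A sketch of how the two modes are consumed downstream; the paths, endpoint, and transform values are placeholders (compare main() in the script diff above):

# Hypothetical calls; argument values are placeholders.
paths = ["/data/BACE"]
train_data, test_data = process_datasets(
    paths, input_transforms=[], output_transforms={"pIC50": None},
    prediction_endpoint="pIC50", split_endpoint="Model",
    splittype="specified", mode="multitask")

per_task = process_datasets(
    paths, input_transforms=[], output_transforms={"pIC50": None},
    prediction_endpoint="pIC50", splittype="random", mode="singletask")
# per_task maps each target name to its (train_data, test_data) pair.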


def load_molecules(paths, feature_types=["fingerprints"]):
  """Load dataset fingerprints and return fingerprints.
@@ -210,7 +256,7 @@ def load_vs_datasets(paths, prediction_endpoint, split_endpoint, target_dir_name
    data[smiles] = {"fingerprint": mol["fingerprint"],
                    "scaffold": mol["scaffold"],
                    "labels": labels[smiles],
                    "splits": splits[smiles]}
                    "split": splits[smiles]}
  return data
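Each record in the returned dict now carries the singular "split" key; an illustrative entry (all values are placeholders):

data["CC(=O)O"] = {
    "fingerprint": [0, 1, 0, 1],  # placeholder bit vector
    "scaffold": "c1ccccc1",       # placeholder Bemis-Murcko scaffold
    "labels": {"pIC50": 1},       # placeholder (possibly thresholded) label
    "split": "train",             # assignment read from --split-endpoint
}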

def ensure_balanced(y, W):
@@ -254,7 +300,6 @@ def load_and_transform_dataset(paths, input_transforms, output_transforms,
  sorted_targets = sorted(output_transforms.keys())
  for s_index, smiles in enumerate(sorted_smiles):
    datapoint = dataset[smiles]
    #print datapoint
    labels = {}
    for t_index, target in enumerate(sorted_targets):
      if W[s_index][t_index] == 0: