Commit a0d83754 authored by Bharath Ramsundar

UV changes

parent e17f245a
+43 −0
"""
Script that computes pairwise Pearson R^2 correlations between UV tasks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import numpy as np
import deepchem as dc
import pandas as pd
from UV_datasets import load_uv

###Load data###
np.random.seed(123)
shard_size = 2000
print("About to load UV data.")
UV_tasks, datasets, transformers = load_uv(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets

y_train = train_dataset.y
n_tasks = y_train.shape[1]

# Pairwise Pearson R^2 between every pair of training tasks.
all_results = []
for task in range(n_tasks):
  y_task = y_train[:, task]
  task_results = []
  for other_task in range(n_tasks):
    if task == other_task:
      # A task is perfectly correlated with itself.
      task_results.append(1.)
      continue
    y_other = y_train[:, other_task]
    r2 = dc.metrics.pearson_r2_score(y_task, y_other)
    print("r2 for %d-%d is %f" % (task, other_task, r2))
    task_results.append(r2)
  print("Task %d" % task)
  print(task_results)
  all_results.append(task_results)
print("Writing results to uv_corr.csv")
df = pd.DataFrame(all_results)
df.to_csv("uv_corr.csv")
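
Aside, not part of the commit: since dc.metrics.pearson_r2_score is, to my understanding, the squared Pearson correlation, the whole task-by-task matrix above can be computed in one vectorized call. A minimal sketch reusing the script's y_train, assuming no missing values or constant columns (the output filename is hypothetical):

# Vectorized equivalent of the double loop above (sketch, not committed code).
corr = np.corrcoef(y_train, rowvar=False)  # columns are tasks; entries are Pearson r
r2_matrix = corr ** 2                      # square to match dc.metrics.pearson_r2_score
pd.DataFrame(r2_matrix).to_csv("uv_corr_vectorized.csv")  # hypothetical filename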
+71 −31
@@ -11,19 +11,25 @@ import tempfile
 import shutil
 import numpy as np
 import deepchem as dc
-from MERCK_datasets import load_uv
+from UV_datasets import load_uv
 
 # Set numpy seed
 np.random.seed(123)
 
 ###Load data###
 shard_size = 2000
-num_shards_per_batch = 4
-print("About to load MERCK data.")
-UV_tasks, datasets, transformers = load_uv(
-    shard_size=shard_size, num_shards_per_batch=num_shards_per_batch)
+num_trials = 1
+print("About to load UV data.")
+UV_tasks, datasets, transformers = load_uv(shard_size=shard_size)
 train_dataset, valid_dataset, test_dataset = datasets
 
+#############################################################  DEBUG
+print("np.amin(train_dataset.y)")
+print(np.amin(train_dataset.y))
+print("np.amax(train_dataset.y)")
+print(np.amax(train_dataset.y))
+#############################################################  DEBUG
+
 print("Number of compounds in train set")
 print(len(train_dataset))
 print("Number of compounds in validation set")
@@ -31,10 +37,10 @@ print(len(valid_dataset))
 print("Number of compounds in test set")
 print(len(test_dataset))
 
-#n_layers = 3
-n_layers = 2
-#nb_epoch = 30
+all_results = []
+for trial in range(num_trials):
+  ###Create model###
+  n_layers = 3
+  nb_epoch = 50
+  model = dc.models.TensorflowMultiTaskRegressor(
+      len(UV_tasks), train_dataset.get_data_shape()[0],
@@ -42,7 +48,7 @@ model = dc.models.TensorflowMultiTaskRegressor(
       weight_init_stddevs=[.02]*n_layers,
       bias_init_consts=[1.]*n_layers, learning_rate=.0003,
       penalty=.0001, penalty_type="l2", optimizer="adam", batch_size=100,
-    seed=123, verbosity="high")
+      seed=123, logdir="UV_tf_model")
 
   #Use R2 classification metric
   metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
@@ -50,16 +56,50 @@ metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
   print("Training model")
   model.fit(train_dataset, nb_epoch=nb_epoch)
 
-train_scores = model.evaluate(train_dataset, [metric], transformers)
-valid_scores = model.evaluate(valid_dataset, [metric], transformers)
-#Only use for final evaluation
-test_scores = model.evaluate(test_dataset, [metric], transformers)
+  print("Evaluating models")
+  train_score, train_task_scores = model.evaluate(
+      train_dataset, [metric], transformers, per_task_metrics=True)
+  valid_score, valid_task_scores = model.evaluate(
+      valid_dataset, [metric], transformers, per_task_metrics=True)
+  test_score, test_task_scores = model.evaluate(
+      test_dataset, [metric], transformers, per_task_metrics=True)
+
+  all_results.append((train_score, train_task_scores,
+                      valid_score, valid_task_scores,
+                      test_score, test_task_scores))
+
+  print("Scores for trial %d" % trial)
+  print("----------------------------------------------------------------")
+  print("train_task_scores")
+  print(train_task_scores)
+  print("Mean Train score")
+  print(train_score)
+  print("valid_task_scores")
+  print(valid_task_scores)
+  print("Mean Validation score")
+  print(valid_score)
+  print("test_task_scores")
+  print(test_task_scores)
+  print("Mean Test score")
+  print(test_score)
 
-print("Train scores")
-print(train_scores)
+print("####################################################################")
 
-print("Validation scores")
-print(valid_scores)
+for trial in range(num_trials):
+  (train_score, train_task_scores, valid_score, valid_task_scores,
+   test_score, test_task_scores) = all_results[trial]
 
-print("Test scores")
-print(test_scores)
+  print("Scores for trial %d" % trial)
+  print("----------------------------------------------------------------")
+  print("train_task_scores")
+  print(train_task_scores)
+  print("Mean Train score")
+  print(train_score)
+  print("valid_task_scores")
+  print(valid_task_scores)
+  print("Mean Validation score")
+  print(valid_score)
+  print("test_task_scores")
+  print(test_task_scores)
+  print("Mean Test score")
+  print(test_score)
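
The new trial loop stores everything in all_results, so once num_trials is raised above 1 it is natural to summarize across trials. A minimal sketch, assuming each *_score slot holds the dict keyed by metric name that model.evaluate returns in this DeepChem version (summarize_trials is a hypothetical helper, not part of the commit):

import numpy as np

def summarize_trials(all_results, slot=2):
  # slot 2 picks valid_score out of each (train_score, train_task_scores,
  # valid_score, valid_task_scores, test_score, test_task_scores) tuple.
  vals = [list(result[slot].values())[0] for result in all_results]
  return np.mean(vals), np.std(vals)

valid_mean, valid_std = summarize_trials(all_results)
print("Validation R^2 across trials: %.3f +/- %.3f" % (valid_mean, valid_std))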
+63 −26
@@ -11,17 +11,17 @@ import numpy as np
 import tempfile
 import shutil
 import deepchem as dc
-from MERCK_datasets import load_uv
+from UV_datasets import load_uv
 
 # Set numpy seed
 np.random.seed(123)
 
 ###Load data###
 shard_size = 2000
-num_shards_per_batch = 4
-print("About to load MERCK data.")
-UV_tasks, datasets, transformers = load_uv(
-    shard_size=shard_size, num_shards_per_batch=num_shards_per_batch)
+#num_trials = 5
+num_trials = 1
+print("About to load data.")
+UV_tasks, datasets, transformers = load_uv(shard_size=shard_size)
 train_dataset, valid_dataset, test_dataset = datasets
 
 print("Number of compounds in train set")
@@ -34,6 +34,12 @@ print(len(test_dataset))
 n_layers = 3
 n_bypass_layers = 3
 nb_epoch = 30
 
+#Use R2 classification metric
+metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
+
+all_results = []
+for trial in range(num_trials):
+  model = dc.models.RobustMultitaskRegressor(
+      len(UV_tasks), train_dataset.get_data_shape()[0],
+      layer_sizes=[500]*n_layers, bypass_layer_sizes=[40]*n_bypass_layers,
@@ -42,24 +48,55 @@ model = dc.models.RobustMultitaskRegressor(
       bypass_weight_init_stddevs=[.02]*n_bypass_layers,
       bypass_bias_init_consts=[.5]*n_bypass_layers,
       learning_rate=.0003, penalty=.0001, penalty_type="l2",
-    optimizer="adam", batch_size=100, verbosity="high")
+      optimizer="adam", batch_size=100, logdir="UV_tf_robust")
 
-#Use R2 classification metric
-metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
-
-print("Optimizing Hyperparameters")
+  print("Fitting Model")
   model.fit(train_dataset, nb_epoch=nb_epoch)
 
-train_scores = model.evaluate(train_dataset, [metric], transformers)
-valid_scores = model.evaluate(valid_dataset, [metric], transformers)
-#Only use for final evaluation
-test_scores = model.evaluate(test_dataset, [metric], transformers)
+  print("Evaluating models")
+  train_score, train_task_scores = model.evaluate(
+      train_dataset, [metric], transformers, per_task_metrics=True)
+  valid_score, valid_task_scores = model.evaluate(
+      valid_dataset, [metric], transformers, per_task_metrics=True)
+  test_score, test_task_scores = model.evaluate(
+      test_dataset, [metric], transformers, per_task_metrics=True)
+
+  all_results.append((train_score, train_task_scores,
+                      valid_score, valid_task_scores,
+                      test_score, test_task_scores))
+
+  print("Scores for trial %d" % trial)
+  print("----------------------------------------------------------------")
+  print("train_task_scores")
+  print(train_task_scores)
+  print("Mean Train score")
+  print(train_score)
+  print("valid_task_scores")
+  print(valid_task_scores)
+  print("Mean Validation score")
+  print(valid_score)
+  print("test_task_scores")
+  print(test_task_scores)
+  print("Mean Test score")
+  print(test_score)
 
-print("Train scores")
-print(train_scores)
+print("####################################################################")
 
-print("Validation scores")
-print(valid_scores)
+for trial in range(num_trials):
+  (train_score, train_task_scores, valid_score, valid_task_scores,
+   test_score, test_task_scores) = all_results[trial]
 
-print("Test scores")
-print(test_scores)
+  print("Scores for trial %d" % trial)
+  print("----------------------------------------------------------------")
+  print("train_task_scores")
+  print(train_task_scores)
+  print("Mean Train score")
+  print(train_score)
+  print("valid_task_scores")
+  print(valid_task_scores)
+  print("Mean Validation score")
+  print(valid_score)
+  print("test_task_scores")
+  print(test_task_scores)
+  print("Mean Test score")
+  print(test_score)
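
For context on this second script: RobustMultitaskRegressor combines a shared trunk (layer_sizes=[500]*n_layers) with small per-task bypass networks (bypass_layer_sizes=[40]*n_bypass_layers) fed directly from the input and concatenated before each task head. A schematic NumPy sketch of that idea, an assumption about the architecture rather than DeepChem's actual implementation (shapes and constants mirror the hyperparameters above; the trunk bias of 1.0 is assumed):

import numpy as np

def relu(x):
  return np.maximum(x, 0.0)

n_features, n_tasks = 128, 3
rng = np.random.RandomState(123)
x = rng.randn(n_features)

# Shared trunk: n_layers dense layers of width 500.
h = x
for _ in range(3):
  W = rng.randn(500, h.shape[0]) * 0.02  # weight_init_stddevs=[.02]*n_layers
  h = relu(W.dot(h) + 1.0)               # trunk bias, assumed 1.0

# Per-task bypass: n_bypass_layers dense layers of width 40, fed from the
# raw input and concatenated with the shared representation per task.
outputs = []
for _ in range(n_tasks):
  b = x
  for _ in range(3):
    Wb = rng.randn(40, b.shape[0]) * 0.02  # bypass_weight_init_stddevs=[.02]*...
    b = relu(Wb.dot(b) + 0.5)              # bypass_bias_init_consts=[.5]*...
  combined = np.concatenate([h, b])
  w_out = rng.randn(combined.shape[0]) * 0.02
  outputs.append(w_out.dot(combined))      # one regression head per task
print(outputs)  # one scalar prediction per task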