Commit 64072689 authored by Peter Eastman's avatar Peter Eastman
Browse files

Fixed broken PCBA example

parent d77ccf9b
Loading
Loading
Loading
Loading
+16 −10
Original line number Diff line number Diff line
@@ -16,33 +16,39 @@ from deepchem.metrics import Metric
from deepchem.metrics import to_one_hot
from deepchem.utils.evaluate import Evaluator
from deepchem.models.tensorflow_models.fcnet import TensorflowMultiTaskClassifier
from deepchem.models.tensorflow_models import TensorflowModel

# Reconstructed post-commit version of the PCBA benchmark script: the diff
# rendering had left both the old and new lines in place (duplicate metric
# definitions, duplicate constructor argument lists), which is not valid
# Python. Only the "after" lines of commit 64072689 are kept below.

# Fix the RNG seed so dataset shuffling / weight init are reproducible.
np.random.seed(123)

# load_pcba now returns a (train, valid, test) triple; the old example
# unpacked only two datasets, which is why it was broken.
pcba_tasks, pcba_datasets, transformers = load_pcba()
(train_dataset, valid_dataset, test_dataset) = pcba_datasets

# ROC-AUC averaged (np.mean) across the PCBA tasks.
metric = Metric(metrics.roc_auc_score, np.mean, mode="classification")

n_features = train_dataset.get_data_shape()[0]
model_dir = None  # let the model pick/allocate its own directory
model = TensorflowMultiTaskClassifier(
    len(pcba_tasks),
    n_features,
    model_dir,
    dropouts=[.25],
    learning_rate=0.001,
    weight_init_stddevs=[.1],
    batch_size=64,
    verbosity="high")

# Fit trained model
model.fit(train_dataset)
model.save()

# NOTE(review): `verbosity` is not defined anywhere in this visible chunk —
# the diff starts at file line 16, so it is presumably assigned earlier in
# the file; confirm against the full source.
train_evaluator = Evaluator(
    model, train_dataset, transformers, verbosity=verbosity)
train_scores = train_evaluator.compute_model_performance([metric])

print("Train scores")
print(train_scores)

valid_evaluator = Evaluator(
    model, valid_dataset, transformers, verbosity=verbosity)
valid_scores = valid_evaluator.compute_model_performance([metric])

print("Validation scores")