Commit 5eb92cbe authored by joegomes's avatar joegomes
Browse files

Add qm9 example

parent 55437e02
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
#!/usr/bin/env bash
# Download and unpack the QM9 (GDB-9) SDF structures used by the qm9 examples.
# Abort on the first failing command so we never try to extract a partial
# or missing archive after a failed download.
set -e
echo "Pulling qm9 dataset from deepchem"
wget http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/gdb9.tar.gz
echo "Extracting qm9 structures"
tar -zxvf gdb9.tar.gz
+38 −0
Original line number Diff line number Diff line
"""
qm9 dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import os
import numpy as np
import shutil
import deepchem as dc

def load_qm9(featurizer=None, split='random'):
  """Load QM9 datasets, featurized, split, and y-normalized.

  Expects ``gdb9.sdf`` to sit next to this file (fetched by the companion
  download script).

  Parameters
  ----------
  featurizer: dc.feat.Featurizer, optional
    Featurizer applied to each molecule. Defaults to a CoulombMatrix of
    size 29 (presumably the max atom count in QM9 — confirm against data).
  split: str
    One of 'index', 'random' (default), or 'stratified'.

  Returns
  -------
  tuple
    ``(qm9_tasks, (train, valid, test), transformers)`` where the
    transformers have already been applied to all three splits.

  Raises
  ------
  ValueError
    If ``split`` is not a recognized splitter name.
  """
  print("About to featurize qm9 dataset.")
  current_dir = os.path.dirname(os.path.realpath(__file__))
  # No "./" needed: os.path.join handles the relative component directly.
  dataset_file = os.path.join(current_dir, "gdb9.sdf")
  qm9_tasks = ["A", "B", "C", "mu", "alpha", "homo", "lumo", "gap", "r2", "zpve", "cv", 
                "u0_atom", "u298_atom", "h298_atom", "g298_atom"]
  if featurizer is None:
    featurizer = dc.feat.CoulombMatrix(29)
  loader = dc.data.SDFLoader(tasks=qm9_tasks, smiles_field="smiles", 
                             mol_field="mol", featurizer=featurizer)
  dataset = loader.featurize(dataset_file)
  splitters = {'index': dc.splits.IndexSplitter(),
               'random': dc.splits.RandomSplitter(),
               # task_number=11 stratifies on 'u0_atom' in qm9_tasks above.
               'stratified': dc.splits.SingletaskStratifiedSplitter(task_number=11)}
  if split not in splitters:
    # Fail with a clear message instead of a bare KeyError.
    raise ValueError("Unknown split '%s'; expected one of %s"
                     % (split, sorted(splitters)))
  splitter = splitters[split]
  train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(dataset)
  # Normalize y using statistics from the training split only.
  transformers = [dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
  for transformer in transformers:
    train_dataset = transformer.transform(train_dataset)
    valid_dataset = transformer.transform(valid_dataset)
    test_dataset = transformer.transform(test_dataset)
  return qm9_tasks, (train_dataset, valid_dataset, test_dataset), transformers
+39 −0
Original line number Diff line number Diff line
"""
Script that trains Tensorflow multitask models on QM9 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import os
import deepchem as dc
import numpy as np
from qm9_datasets import load_qm9

# Fix the RNG so featurization splits and weight init are reproducible.
np.random.seed(123)

# Load the pre-featurized, normalized QM9 splits and their y-transformers.
qm9_tasks, (train_dataset, valid_dataset, test_dataset), transformers = load_qm9()

# Coulomb-matrix randomization applied on the fly during fitting.
fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)]

regression_metric = [
    dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
    dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression"),
]

# Three hidden layers (400/100/100) with per-layer init scales and dropout.
layer_widths = [400, 100, 100]
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
    n_tasks=len(qm9_tasks),
    n_features=[29, 29],
    learning_rate=0.001,
    momentum=.8,
    batch_size=32,
    weight_init_stddevs=[1 / np.sqrt(w) for w in layer_widths],
    bias_init_consts=[0., 0., 0.],
    layer_sizes=layer_widths,
    dropouts=[0.01, 0.01, 0.01],
    fit_transformers=fit_transformers,
    n_evals=10,
    seed=123)

# Train, persist, then report metrics on every split.
model.fit(train_dataset, nb_epoch=50)
model.save()

for split_name, split_data in (("Train", train_dataset),
                               ("Valid", valid_dataset),
                               ("Test", test_dataset)):
  scores = model.evaluate(split_data, regression_metric, transformers)
  print("%s scores [kcal/mol]" % split_name)
  print(scores)