Unverified Commit 753d1ec0 authored by micimize's avatar micimize
Browse files

rm -r examples/sampl

parent a04773bf
Loading
Loading
Loading
Loading

examples/sampl/SAMPL.csv

deleted 100644 → 0
+0 −644

File deleted.

Preview size limit exceeded, changes collapsed.

+0 −37
Original line number Diff line number Diff line
"""
Script that trains graph-conv models on SAMPL(FreeSolv) dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.random.set_seed(123)
import deepchem as dc

# Load SAMPL(FreeSolv) dataset
SAMPL_tasks, SAMPL_datasets, transformers = dc.molnet.load_sampl(
    featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = SAMPL_datasets

# Define metric
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)

# Batch size of models
batch_size = 50
model = dc.models.GraphConvModel(len(SAMPL_tasks), mode='regression')

# Fit trained model
model.fit(train_dataset, nb_epoch=20)

print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)

print("Train scores")
print(train_scores)

print("Validation scores")
print(valid_scores)

examples/sampl/sampl_tf_models.py

deleted 100644 → 0
+0 −44
Original line number Diff line number Diff line
"""
Script that trains multitask models on SAMPL dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import os
import shutil
import numpy as np
import deepchem as dc
from deepchem.molnet import load_sampl

# Only for debug!
np.random.seed(123)

# Load SAMPL dataset
n_features = 1024
SAMPL_tasks, SAMPL_datasets, transformers = load_sampl()
train_dataset, valid_dataset, test_dataset = SAMPL_datasets

# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)

model = dc.models.MultitaskRegressor(
    len(SAMPL_tasks),
    n_features,
    layer_sizes=[1000],
    dropouts=[.25],
    learning_rate=0.001,
    batch_size=50)

# Fit trained model
model.fit(train_dataset)

print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)

print("Train scores")
print(train_scores)

print("Validation scores")
print(valid_scores)