Commit 1e646c4e authored by miaecle's avatar miaecle
Browse files

fixing bugs

parent 005942c3
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -9,7 +9,7 @@ from deepchem.models.models import Model
from deepchem.models.sklearn_models import SklearnModel
from deepchem.models.tf_new_models.multitask_classifier import MultitaskGraphClassifier
from deepchem.models.tf_new_models.multitask_regressor import MultitaskGraphRegressor
from deepchem.models.tf_new_models.DTNN_regressor import DTNNRegressor
from deepchem.models.tf_new_models.DTNN_regressor import DTNNGraphRegressor

from deepchem.models.tf_new_models.support_classifier import SupportGraphClassifier
from deepchem.models.multitask import SingletaskToMultitask
+4 −5
Original line number Diff line number Diff line
@@ -666,8 +666,6 @@ class TestOverfit(test_util.TensorFlowTestCase):
    """Test deep tensor neural net overfits tiny data."""
    np.random.seed(123)
    tf.set_random_seed(123)
    g = tf.Graph()
    sess = tf.Session(graph=g)

    # Load mini log-solubility dataset.
    input_file = os.path.join(self.current_dir, "example_DTNN.mat")
@@ -688,11 +686,12 @@ class TestOverfit(test_util.TensorFlowTestCase):
    graph_model.add(dc.nn.DTNNStep())
    graph_model.add(dc.nn.DTNNGather(n_tasks=n_tasks))

    model = dc.models.DTNNRegressor(
    model = dc.models.DTNNGraphRegressor(
        graph_model,
        n_tasks=n_tasks,
        n_tasks,
        n_feat,
        batch_size=batch_size,
        learning_rate=1e-2,
        learning_rate=1e-3,
        learning_rate_decay_time=1000,
        optimizer_type="adam",
        beta1=.9,
+28 −0
Original line number Diff line number Diff line
import tensorflow as tf
from deepchem.models.tf_new_models.multitask_regressor import MultitaskGraphRegressor


class DTNNGraphRegressor(MultitaskGraphRegressor):
  """Regression head for Deep Tensor Neural Network (DTNN) graph models.

  Specializes ``MultitaskGraphRegressor.build`` to attach one independent
  linear output layer per task on top of the features produced by the
  wrapped graph model.
  """

  def build(self):
    """Create target/weight placeholders and per-task linear outputs.

    Returns:
      A list of length ``self.n_tasks`` of 1-D tensors, each the squeezed
      linear prediction ``feat @ W + b`` for one task.
    """
    # Placeholders for regression labels and per-example task weights,
    # both shaped (batch, n_tasks).
    self.label_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="label_placeholder")
    self.weight_placeholder = tf.placeholder(
        dtype='float32',
        shape=(None, self.n_tasks),
        # Fixed graph-node name: was misspelled "weight_placholder".
        name="weight_placeholder")

    # Features emitted by the underlying graph model; assumed shape
    # (batch, feat_dim) -- TODO confirm against SequentialDTNNGraph output.
    feat = self.model.return_outputs()
    feat_size = self.feat_dim
    outputs = []
    W_list = []
    b_list = []
    for task in range(self.n_tasks):
      # One independent linear head (W: [feat_size, 1], b: [1]) per task.
      W_list.append(
          tf.Variable(
              tf.truncated_normal([feat_size, 1], stddev=0.01),
              name='w',
              dtype=tf.float32))
      b_list.append(tf.Variable(tf.zeros([1]), name='b', dtype=tf.float32))
      # Squeeze (batch, 1) -> (batch,) for this task's prediction.
      outputs.append(
          tf.squeeze(tf.nn.xw_plus_b(feat, W_list[task], b_list[task])))
    return outputs
+0 −3
Original line number Diff line number Diff line
@@ -128,9 +128,6 @@ class SequentialDTNNGraph(SequentialGraph):
        self.output = layer(self.output)
      self.layers.append(layer)

  def return_inputs(self):
    return self.graph_topology.get_atom_number_placeholders()


class SequentialSupportGraph(object):
  """An analog of Keras Sequential model for test/support models."""
+3 −3
Original line number Diff line number Diff line
@@ -23,14 +23,14 @@ metric = [

# Batch size of models
batch_size = 50
n_feat = [23, 23]
graph_model = dc.nn.SequentialDTNNGraph(max_n_atoms=23, n_distance=100)
graph_model.add(dc.nn.DTNNEmbedding(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20, n_distance=100))
graph_model.add(dc.nn.DTNNStep(n_embedding=20, n_distance=100))
graph_model.add(dc.nn.DTNNGather(n_embedding=20))
n_feat = 20

model = dc.models.MultitaskGraphRegressor(
model = dc.models.DTNNGraphRegressor(
    graph_model,
    len(tasks),
    n_feat,
@@ -42,7 +42,7 @@ model = dc.models.MultitaskGraphRegressor(
    beta2=.999)

# Fit trained model
model.fit(train_dataset, nb_epoch=10)
model.fit(train_dataset, nb_epoch=50)

print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)
Loading