Commit 2c33ff1a authored by miaecle
Browse files

minor changes

parent 380b2146
Loading
Loading
Loading
Loading
+8 −4
Original line number Diff line number Diff line
@@ -117,6 +117,7 @@ class WeaveTensorGraph(TensorGraph):
        similar to deepchem.models.tf_new_models.graph_topology.AlternateWeaveTopology.batch_to_feed_dict
        """
    for epoch in range(epochs):
      if not predict:
        print('Starting epoch %i' % epoch)
      for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
          batch_size=self.batch_size,
@@ -266,6 +267,7 @@ class DTNNTensorGraph(TensorGraph):
        similar to deepchem.models.tf_new_models.graph_topology.DTNNGraphTopology.batch_to_feed_dict
        """
    for epoch in range(epochs):
      if not predict:
        print('Starting epoch %i' % epoch)
      for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
          batch_size=self.batch_size,
@@ -411,6 +413,7 @@ class DAGTensorGraph(TensorGraph):
        similar to deepchem.models.tf_new_models.graph_topology.DAGGraphTopology.batch_to_feed_dict
        """
    for epoch in range(epochs):
      if not predict:
        print('Starting epoch %i' % epoch)
      for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
          batch_size=self.batch_size,
@@ -543,6 +546,7 @@ class GraphConvTensorGraph(TensorGraph):
                        predict=False,
                        pad_batches=True):
    for epoch in range(epochs):
      if not predict:
        print('Starting epoch %i' % epoch)
      for ind, (X_b, y_b, w_b, ids_b) in enumerate(
          dataset.iterbatches(
+20 −19
Original line number Diff line number Diff line
@@ -94,6 +94,8 @@ class BPSymmetryFunctionRegression(TensorGraph):
                        predict=False,
                        pad_batches=True):
    for epoch in range(epochs):
      if not predict:
        print('Starting epoch %i' % epoch)
      for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
          batch_size=self.batch_size,
          deterministic=True,
@@ -118,8 +120,8 @@ class ANIRegression(TensorGraph):
  def __init__(self,
               n_tasks,
               max_atoms,
               n_feat=1120,
               n_hidden=40,
               n_feat,
               layer_structures=[128, 64],
               atom_number_cases=[1, 6, 7, 8, 16],
               **kwargs):
    """
@@ -134,8 +136,8 @@ class ANIRegression(TensorGraph):
    """
    self.n_tasks = n_tasks
    self.max_atoms = max_atoms
    self.n_hidden = n_hidden
    self.n_feat = n_feat
    self.layer_structures = layer_structures
    self.atom_number_cases = atom_number_cases
    super(ANIRegression, self).__init__(**kwargs)
    self.build_graph()
@@ -144,26 +146,24 @@ class ANIRegression(TensorGraph):
    self.atom_numbers = Feature(shape=(None, self.max_atoms), dtype=tf.int32)
    self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
    self.atom_feats = Feature(shape=(None, self.max_atoms, self.n_feat))
    previous_layer = self.atom_feats

    Hiddens = []
    for n_hidden in self.layer_structures:
      Hidden = AtomicDifferentiatedDense(
          self.max_atoms,
        self.n_hidden,
          n_hidden,
          self.atom_number_cases,
          activation='tanh',
        in_layers=[self.atom_feats, self.atom_numbers])

    Hidden2 = AtomicDifferentiatedDense(
        self.max_atoms,
        self.n_hidden,
        self.atom_number_cases,
        activation='tanh',
        in_layers=[Hidden, self.atom_numbers])
          in_layers=[previous_layer, self.atom_numbers])
      Hiddens.append(Hidden)
      previous_layer = Hiddens[-1]

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[Hidden2])
          out_channels=1, activation_fn=None, in_layers=[Hiddens[-1]])
      output = BPGather(self.max_atoms, in_layers=[regression, self.atom_flags])
      self.add_output(output)

@@ -183,6 +183,7 @@ class ANIRegression(TensorGraph):
                        predict=False,
                        pad_batches=True):
    for epoch in range(epochs):
      if not predict:
        print('Starting epoch %i' % epoch)
      for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
          batch_size=self.batch_size,
+4 −7
Original line number Diff line number Diff line
@@ -18,9 +18,8 @@ train_dataset, valid_dataset, test_dataset = datasets

# Batch size of models
max_atoms = 23
n_hidden = 40
n_embedding = 0
batch_size = 16
layer_structures = [128, 128, 64]
atom_number_cases = [1, 6, 7, 8, 16]

ANItransformer = dc.trans.ANITransformer(max_atoms=max_atoms,
@@ -28,8 +27,6 @@ ANItransformer = dc.trans.ANITransformer(max_atoms=max_atoms,
train_dataset = ANItransformer.transform(train_dataset)
valid_dataset = ANItransformer.transform(valid_dataset)
test_dataset = ANItransformer.transform(test_dataset)

# The first column is atom numbers
n_feat = ANItransformer.get_num_feats() - 1

# Fit models
@@ -42,8 +39,8 @@ metric = [
model = dc.models.ANIRegression(
    len(tasks),
    max_atoms,
    n_feat=n_feat,
    n_hidden=n_hidden,
    n_feat,
    layer_structures=layer_structures,
    atom_number_cases=atom_number_cases,
    batch_size=batch_size,
    learning_rate=0.001,
@@ -51,7 +48,7 @@ model = dc.models.ANIRegression(
    mode="regression")

# Fit trained model
model.fit(train_dataset, nb_epoch=100, checkpoint_interval=100)
model.fit(train_dataset, nb_epoch=3000, checkpoint_interval=100)

print("Evaluating model")
train_scores = model.evaluate(train_dataset, metric, transformers)