Commit da703c9e authored by VIGNESHinZONE's avatar VIGNESHinZONE
Browse files

Merge branch 'master' into jax2

parents cee0531d 86042a06
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -93,7 +93,7 @@ class ValidationCallback(object):
      if self._best_score is None or score < self._best_score:
        model.save_checkpoint(model_dir=self.save_dir)
        self._best_score = score
    if model.wandb or (model.wandb_logger is not None):
    if model.wandb_logger is not None:
      # Log data to Wandb
      data = {'eval/' + k: v for k, v in scores.items()}
      model.wandb_logger.log_data(data, step)
      model.wandb_logger.log_data(data, step, dataset_id=id(self.dataset))
+0 −4
Original line number Diff line number Diff line
@@ -468,10 +468,6 @@ class KerasModel(Model):
        all_data = dict({'train/loss': batch_loss})
        self.wandb_logger.log_data(all_data, step=current_step)

    # Close WandbLogger
    if self.wandb_logger is not None:
      self.wandb_logger.finish()

    # Report final results.
    if averaged_batches > 0:
      avg_loss = float(avg_loss) / averaged_batches
+7 −3
Original line number Diff line number Diff line
@@ -335,15 +335,19 @@ def test_wandblogger():
       tf.keras.layers.Dense(1)])
  model = dc.models.KerasModel(
      keras_model, dc.models.losses.L2Loss(), wandb_logger=wandblogger)
  vc = dc.models.ValidationCallback(valid_dataset, 1, [metric])
  model.fit(train_dataset, nb_epoch=10, callbacks=[vc])
  vc_train = dc.models.ValidationCallback(train_dataset, 1, [metric])
  vc_valid = dc.models.ValidationCallback(valid_dataset, 1, [metric])
  model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
  # call model.fit again to test multiple fit() calls
  model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
  wandblogger.finish()

  run_data = wandblogger.run_history
  valid_score = model.evaluate(valid_dataset, [metric], transformers)

  assert math.isclose(
      valid_score["pearson_r2_score"],
      run_data['eval/pearson_r2_score'],
      run_data['eval/pearson_r2_score_(1)'],
      abs_tol=0.0005)


+18 −0
Original line number Diff line number Diff line
@@ -9,6 +9,13 @@ try:
except:
  has_tensorflow = False

try:
  import torch
  import deepchem.models.torch_models.layers as torch_layers
  has_torch = True
except:
  has_torch = False


@pytest.mark.tensorflow
def test_cosine_dist():
@@ -598,3 +605,14 @@ def test_DAG_gather():
  atom_features = np.random.rand(batch_size, n_atom_feat)
  membership = np.sort(np.random.randint(0, batch_size, size=(batch_size)))
  outputs = layer([atom_features, membership])


@pytest.mark.pytorch
def test_layer_norm():
  """Test invoking ScaleNorm.

  NOTE(review): despite the function name, this exercises
  ``torch_layers.ScaleNorm``, not LayerNorm — confirm the intended target
  and consider renaming.
  """
  # Fixed 2x3 input with widely varying magnitudes per row.
  input_ar = torch.tensor([[1., 99., 10000.], [0.003, 999.37, 23.]])
  layer = torch_layers.ScaleNorm(0.35)
  result1 = layer.forward(input_ar)
  # Expected values precomputed for this exact input; each output row's norm
  # comes out to ~sqrt(0.35), i.e. rows appear scaled by sqrt(0.35)/||row||
  # — presumably ScaleNorm's contract, verify against its implementation.
  output_ar = np.array([[5.9157897e-05, 5.8566318e-03, 5.9157896e-01],
                        [1.7754727e-06, 5.9145141e-01, 1.3611957e-02]])
  assert np.allclose(result1, output_ar)
+7 −3
Original line number Diff line number Diff line
@@ -360,15 +360,19 @@ def test_wandblogger():
      torch.nn.Linear(1000, 1))
  model = dc.models.TorchModel(
      pytorch_model, dc.models.losses.L2Loss(), wandb_logger=wandblogger)
  vc = dc.models.ValidationCallback(valid_dataset, 1, [metric])
  model.fit(train_dataset, nb_epoch=10, callbacks=[vc])
  vc_train = dc.models.ValidationCallback(train_dataset, 1, [metric])
  vc_valid = dc.models.ValidationCallback(valid_dataset, 1, [metric])
  model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
  # call model.fit again to test multiple fit() calls
  model.fit(train_dataset, nb_epoch=10, callbacks=[vc_train, vc_valid])
  wandblogger.finish()

  run_data = wandblogger.run_history
  valid_score = model.evaluate(valid_dataset, [metric], transformers)

  assert math.isclose(
      valid_score["pearson_r2_score"],
      run_data['eval/pearson_r2_score'],
      run_data['eval/pearson_r2_score_(1)'],
      abs_tol=0.0005)


Loading