Commit 06a45509 authored by Atreya Majumdar

Update

parent 518de85e
+4 −4
@@ -621,12 +621,12 @@ def test_scale_norm():
 @pytest.mark.torch
 def test_multi_headed_mat_attention():
   """Test invoking MultiHeadedMATAttention."""
-  import rdkit
+  from rdkit import Chem
   torch.manual_seed(0)
   input_smile = "CC"
-  mol = rdkit.Chem.rdmolfiles.MolFromSmiles(input_smile)
-  adj_matrix = rdkit.Chem.rdmolops.GetAdjacencyMatrix(mol)
-  distance_matrix = rdkit.Chem.rdmolops.GetDistanceMatrix(mol)
+  mol = Chem.MolFromSmiles(input_smile)
+  adj_matrix = Chem.GetAdjacencyMatrix(mol)
+  distance_matrix = Chem.GetDistanceMatrix(mol)
   layer = torch_layers.MultiHeadedMATAttention(
       dist_kernel='softmax',
       lambda_attention=0.33,
+4 −4
@@ -63,10 +63,10 @@ class MultiHeadedMATAttention(nn.Module):
   Examples
   --------
   >>> import deepchem as dc
-  >>> import rdkit
-  >>> mol = rdkit.Chem.rdmolfiles.MolFromSmiles("CC")
-  >>> adj_matrix = rdkit.Chem.rdmolops.GetAdjacencyMatrix(mol)
-  >>> distance_matrix = rdkit.Chem.rdmolops.GetDistanceMatrix(mol)
+  >>> from rdkit import Chem
+  >>> mol = Chem.MolFromSmiles("CC")
+  >>> adj_matrix = Chem.GetAdjacencyMatrix(mol)
+  >>> distance_matrix = Chem.GetDistanceMatrix(mol)
   >>> layer = dc.models.torch_models.layers.MultiHeadedMATAttention(dist_kernel='softmax', lambda_attention=0.33, lambda_distance=0.33, h=2, hsize=2, dropout_p=0.0)
   >>> input_tensor = torch.tensor([[1., 2.], [5., 6.]])
   >>> mask = torch.tensor([[1., 1.], [1., 1.]])
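Both hunks make the same substitution: rdkit.Chem re-exports the rdmolfiles and rdmolops helpers, so Chem.MolFromSmiles, Chem.GetAdjacencyMatrix, and Chem.GetDistanceMatrix are the same callables as the fully qualified names being removed. Note that the doctest as excerpted never imports torch, which the input_tensor line needs. A minimal standalone sketch of the updated example, assuming only that deepchem and rdkit are installed and that the constructor signature is as shown above (the excerpt cuts off before any forward call, so none is attempted here):

import torch
import deepchem as dc
from rdkit import Chem
import rdkit.Chem.rdmolops  # only to demonstrate the equivalence below

torch.manual_seed(0)
mol = Chem.MolFromSmiles("CC")
adj_matrix = Chem.GetAdjacencyMatrix(mol)
distance_matrix = Chem.GetDistanceMatrix(mol)

# Chem.GetAdjacencyMatrix is re-exported from rdkit.Chem.rdmolops,
# so the renamed call returns the identical matrix.
assert (adj_matrix == rdkit.Chem.rdmolops.GetAdjacencyMatrix(mol)).all()

layer = dc.models.torch_models.layers.MultiHeadedMATAttention(
    dist_kernel='softmax',
    lambda_attention=0.33,
    lambda_distance=0.33,
    h=2,
    hsize=2,
    dropout_p=0.0)
input_tensor = torch.tensor([[1., 2.], [5., 6.]])
mask = torch.tensor([[1., 1.], [1., 1.]])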