Commit 4257e5eb authored by Bharath Ramsundar

Fixing more doctests

parent 1bdbe0fd
+1 −0
@@ -22,6 +22,7 @@ class Linear(hk.Module):
  --------
  >>> import deepchem as dc
  >>> import haiku as hk
+ >>> import jax
  >>> import deepchem.models.jax_models.layers
  >>> def forward_model(x):
  ...   layer = dc.models.jax_models.layers.Linear(2)
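For context, the Linear doctest is cut off by the hunk boundary just after the layer is created. Below is a minimal sketch of how such an example would typically continue under the standard Haiku pattern (hk.transform followed by init/apply); the body of forward_model after the shown line, the dummy input, and the resulting output shape are assumptions and are not part of this commit.

# Sketch only: completes the truncated doctest under the standard Haiku workflow.
import deepchem as dc
import haiku as hk
import jax
import jax.numpy as jnp
import deepchem.models.jax_models.layers

def forward_model(x):
    layer = dc.models.jax_models.layers.Linear(2)  # output dimension of 2
    return layer(x)  # assumed continuation; the commit does not show this line

# hk.transform turns the module-building function into a pure (init, apply) pair.
model = hk.transform(forward_model)
rng = jax.random.PRNGKey(42)
x = jnp.ones((4, 3))               # assumed dummy batch: 4 samples, 3 features
params = model.init(rng, x)        # initialize the Linear layer's parameters
out = model.apply(params, rng, x)  # expected shape (4, 2) if Linear(2) maps to 2 features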
+2 −1
@@ -64,9 +64,10 @@ class MultiHeadedMATAttention(nn.Module):
  --------
  >>> import deepchem as dc
  >>> from rdkit import Chem
- >>> mol = rdkit.Chem.MolFromSmiles("CC")
+ >>> mol = Chem.MolFromSmiles("CC")
  >>> adj_matrix = Chem.GetAdjacencyMatrix(mol)
  >>> distance_matrix = Chem.GetDistanceMatrix(mol)
+ >>> import deepchem.models.torch_models.layers
  >>> layer = dc.models.torch_models.layers.MultiHeadedMATAttention(dist_kernel='softmax', lambda_attention=0.33, lambda_distance=0.33, h=2, hsize=2, dropout_p=0.0)
  >>> input_tensor = torch.tensor([[1., 2.], [5., 6.]])
  >>> mask = torch.tensor([[1., 1.], [1., 1.]])