Unverified commit 500467de authored by Bharath Ramsundar, committed by GitHub
Browse files

Merge pull request #2630 from arunppsg/tm_docs

Minor fixes to examples in torch models
parents 4e98fc0c 3b13162f
Loading
Loading
Loading
Loading
+12 −11
Original line number Diff line number Diff line
@@ -189,17 +189,18 @@ class AttentiveFPModel(TorchModel):

  Examples
  --------

  >>>
  >> import deepchem as dc
  >> from deepchem.models import AttentiveFPModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = AttentiveFPModel(mode='classification', n_tasks=len(tasks),
  ..                          batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)
  >>> import deepchem as dc
  >>> from deepchem.models import AttentiveFPModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> X = featurizer.featurize(smiles)
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> # training model
  >>> model = AttentiveFPModel(mode='classification', n_tasks=1,
  ...                          batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(dataset, nb_epoch=5)

  References
  ----------
+12 −11
Original line number Diff line number Diff line
@@ -227,17 +227,18 @@ class GATModel(TorchModel):

  Examples
  --------

  >>>
  >> import deepchem as dc
  >> from deepchem.models import GATModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer()
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = GATModel(mode='classification', n_tasks=len(tasks),
  ..                  batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)
  >>> import deepchem as dc
  >>> from deepchem.models import GATModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> X = featurizer.featurize(smiles)
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> # training model
  >>> model = GATModel(mode='classification', n_tasks=1,
  ...                  batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(dataset, nb_epoch=5)

  References
  ----------
+12 −11
Original line number Diff line number Diff line
@@ -211,17 +211,18 @@ class GCNModel(TorchModel):

  Examples
  --------

  >>>
  >> import deepchem as dc
  >> from deepchem.models import GCNModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer()
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = GCNModel(mode='classification', n_tasks=len(tasks),
  ..                  batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)
  >>> import deepchem as dc
  >>> from deepchem.models import GCNModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "CCC"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> X = featurizer.featurize(smiles)
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> # training model
  >>> model = GCNModel(mode='classification', n_tasks=1,
  ...                  batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(dataset, nb_epoch=5)

  References
  ----------
+12 −11
Original line number Diff line number Diff line
@@ -188,17 +188,18 @@ class MPNNModel(TorchModel):

  Examples
  --------

  >>>
  >> import deepchem as dc
  >> from deepchem.models.torch_models import MPNNModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = MPNNModel(mode='classification', n_tasks=len(tasks),
  ..                   batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)
  >>> import deepchem as dc
  >>> from deepchem.models.torch_models import MPNNModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "CCC"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> X = featurizer.featurize(smiles)
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> # training model
  >>> model = MPNNModel(mode='classification', n_tasks=1,
  ...                   batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(dataset, nb_epoch=5)

  References
  ----------
+12 −11
Original line number Diff line number Diff line
@@ -197,17 +197,18 @@ class PagtnModel(TorchModel):

  Examples
  --------

  >>>
  >> import deepchem as dc
  >> from deepchem.models import PagtnModel
  >> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = PagtnModel(mode='classification', n_tasks=len(tasks),
  ..                    batch_size=16, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)
  >>> import deepchem as dc
  >>> from deepchem.models import PagtnModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "CCC"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
  >>> X = featurizer.featurize(smiles)
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> # training model
  >>> model = PagtnModel(mode='classification', n_tasks=1,
  ...                    batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(dataset, nb_epoch=5)

  References
  ----------
Loading