Commit d3f8212a authored by Arun's avatar Arun
Browse files

fixed examples

parent d6368781
Loading
Loading
Loading
Loading
+9 −11
Original line number Original line Diff line number Diff line
@@ -189,17 +189,15 @@ class AttentiveFPModel(TorchModel):


  Examples
  --------

  Removed:

  >> import deepchem as dc
  >> from deepchem.models import AttentiveFPModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = AttentiveFPModel(mode='classification', n_tasks=len(tasks),
  ..                          batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)

  Added:

  >>> import deepchem as dc
  >>> from deepchem.models import AttentiveFPModel
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  ...    reload=False, featurizer=featurizer, transformers=[])
  >>> train, valid, test = datasets
  >>> model = AttentiveFPModel(mode='classification', n_tasks=len(tasks),
  ...    batch_size=32, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)


  References
  References
  ----------
  ----------
+9 −11
Original line number Original line Diff line number Diff line
@@ -227,17 +227,15 @@ class GATModel(TorchModel):


  Examples
  --------

  Removed:

  >> import deepchem as dc
  >> from deepchem.models import GATModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer()
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = GATModel(mode='classification', n_tasks=len(tasks),
  ..                  batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)

  Added:

  >>> import deepchem as dc
  >>> from deepchem.models import GATModel
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  ...    reload=False, featurizer=featurizer, transformers=[])
  >>> train, valid, test = datasets
  >>> model = GATModel(mode='classification', n_tasks=len(tasks),
  ...                  batch_size=32, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)


  References
  References
  ----------
  ----------
+9 −11
Original line number Original line Diff line number Diff line
@@ -211,17 +211,15 @@ class GCNModel(TorchModel):


  Examples
  --------

  Removed:

  >> import deepchem as dc
  >> from deepchem.models import GCNModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer()
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = GCNModel(mode='classification', n_tasks=len(tasks),
  ..                  batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)

  Added:

  >>> import deepchem as dc
  >>> from deepchem.models import GCNModel
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  ...     reload=False, featurizer=featurizer, transformers=[])
  >>> train, valid, test = datasets
  >>> model = GCNModel(mode='classification', n_tasks=len(tasks),
  ...                  batch_size=32, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)


  References
  References
  ----------
  ----------
+9 −11
Original line number Original line Diff line number Diff line
@@ -188,17 +188,15 @@ class MPNNModel(TorchModel):


  Examples
  --------

  Removed:

  >> import deepchem as dc
  >> from deepchem.models.torch_models import MPNNModel
  >> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = MPNNModel(mode='classification', n_tasks=len(tasks),
  ..                   batch_size=32, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)

  Added:

  >>> import deepchem as dc
  >>> from deepchem.models.torch_models import MPNNModel
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  ...     reload=False, featurizer=featurizer, transformers=[])
  >>> train, valid, test = datasets
  >>> model = MPNNModel(mode='classification', n_tasks=len(tasks),
  ...                  batch_size=32, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)


  References
  References
  ----------
  ----------
+9 −11
Original line number Original line Diff line number Diff line
@@ -197,17 +197,15 @@ class PagtnModel(TorchModel):


  Examples
  --------

  Removed:

  >> import deepchem as dc
  >> from deepchem.models import PagtnModel
  >> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
  >> tasks, datasets, transformers = dc.molnet.load_tox21(
  ..     reload=False, featurizer=featurizer, transformers=[])
  >> train, valid, test = datasets
  >> model = PagtnModel(mode='classification', n_tasks=len(tasks),
  ..                    batch_size=16, learning_rate=0.001)
  >> model.fit(train, nb_epoch=50)

  Added:

  >>> import deepchem as dc
  >>> from deepchem.models import PagtnModel
  >>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  ...     reload=False, featurizer=featurizer, transformers=[])
  >>> train, valid, test = datasets
  >>> model = PagtnModel(mode='classification', n_tasks=len(tasks),
  ...                    batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)


  References
  References
  ----------
  ----------
Loading