Commit 3b13162f authored by Arun's avatar Arun
Browse files

minor fixes

parent d3f8212a
Loading
Loading
Loading
Loading
+9 −6
Original line number Original line Diff line number Diff line
@@ -191,13 +191,16 @@ class AttentiveFPModel(TorchModel):
  --------
  --------
  >>> import deepchem as dc
  >>> import deepchem as dc
  >>> from deepchem.models import AttentiveFPModel
  >>> from deepchem.models import AttentiveFPModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  >>> X = featurizer.featurize(smiles)
  ...    reload=False, featurizer=featurizer, transformers=[])
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> train, valid, test = datasets
  >>> # training model
  >>> model = AttentiveFPModel(mode='classification', n_tasks=len(tasks),
  >>> model = AttentiveFPModel(mode='classification', n_tasks=1,
  ...    batch_size=32, learning_rate=0.001)
  ...    batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)
  >>> loss = model.fit(dataset, nb_epoch=5)


  References
  References
  ----------
  ----------
+9 −6
Original line number Original line Diff line number Diff line
@@ -229,13 +229,16 @@ class GATModel(TorchModel):
  --------
  --------
  >>> import deepchem as dc
  >>> import deepchem as dc
  >>> from deepchem.models import GATModel
  >>> from deepchem.models import GATModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  >>> X = featurizer.featurize(smiles)
  ...    reload=False, featurizer=featurizer, transformers=[])
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> train, valid, test = datasets
  >>> # training model
  >>> model = GATModel(mode='classification', n_tasks=len(tasks),
  >>> model = GATModel(mode='classification', n_tasks=1,
  ...                  batch_size=32, learning_rate=0.001)
  ...                  batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)
  >>> loss = model.fit(dataset, nb_epoch=5)


  References
  References
  ----------
  ----------
+9 −6
Original line number Original line Diff line number Diff line
@@ -213,13 +213,16 @@ class GCNModel(TorchModel):
  --------
  --------
  >>> import deepchem as dc
  >>> import deepchem as dc
  >>> from deepchem.models import GCNModel
  >>> from deepchem.models import GCNModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "CCC"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> featurizer = dc.feat.MolGraphConvFeaturizer()
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  >>> X = featurizer.featurize(smiles)
  ...     reload=False, featurizer=featurizer, transformers=[])
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> train, valid, test = datasets
  >>> # training model
  >>> model = GCNModel(mode='classification', n_tasks=len(tasks),
  >>> model = GCNModel(mode='classification', n_tasks=1,
  ...                  batch_size=32, learning_rate=0.001)
  ...                  batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)
  >>> loss = model.fit(dataset, nb_epoch=5)


  References
  References
  ----------
  ----------
+9 −6
Original line number Original line Diff line number Diff line
@@ -190,13 +190,16 @@ class MPNNModel(TorchModel):
  --------
  --------
  >>> import deepchem as dc
  >>> import deepchem as dc
  >>> from deepchem.models.torch_models import MPNNModel
  >>> from deepchem.models.torch_models import MPNNModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "CCC"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  >>> X = featurizer.featurize(smiles)
  ...     reload=False, featurizer=featurizer, transformers=[])
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> train, valid, test = datasets
  >>> # training model
  >>> model = MPNNModel(mode='classification', n_tasks=len(tasks),
  >>> model = MPNNModel(mode='classification', n_tasks=1,
  ...                  batch_size=32, learning_rate=0.001)
  ...                  batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)
  >>> loss = model.fit(dataset, nb_epoch=5)


  References
  References
  ----------
  ----------
+8 −5
Original line number Original line Diff line number Diff line
@@ -199,13 +199,16 @@ class PagtnModel(TorchModel):
  --------
  --------
  >>> import deepchem as dc
  >>> import deepchem as dc
  >>> from deepchem.models import PagtnModel
  >>> from deepchem.models import PagtnModel
  >>> # preparing dataset
  >>> smiles = ["C1CCC1", "CCC"]
  >>> labels = [0., 1.]
  >>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
  >>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
  >>> tasks, datasets, transformers = dc.molnet.load_tox21(
  >>> X = featurizer.featurize(smiles)
  ...     reload=False, featurizer=featurizer, transformers=[])
  >>> dataset = dc.data.NumpyDataset(X=X, y=labels)
  >>> train, valid, test = datasets
  >>> # training model
  >>> model = PagtnModel(mode='classification', n_tasks=len(tasks),
  >>> model = PagtnModel(mode='classification', n_tasks=1,
  ...                    batch_size=16, learning_rate=0.001)
  ...                    batch_size=16, learning_rate=0.001)
  >>> loss = model.fit(train, nb_epoch=10)
  >>> loss = model.fit(dataset, nb_epoch=5)


  References
  References
  ----------
  ----------
Loading