Commit 8f1d6a7c authored by nd-02110114's avatar nd-02110114
Browse files

🚨 Apply flake8 in rl and metalearning

parent b097eb3c
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
# flake8: noqa

from deepchem.metalearning.maml import MAML, MetaLearner
+8 −7
Original line number Diff line number Diff line
"""Model-Agnostic Meta-Learning (MAML) algorithm for low data learning."""

from deepchem.models.optimizers import Adam, GradientDescent
import numpy as np
import os
import shutil
import tempfile
import tensorflow as tf
import time

import tensorflow as tf

from deepchem.models.optimizers import Adam, GradientDescent


class MetaLearner(object):
  """Model and data to which the MAML algorithm can be applied.
@@ -37,12 +38,12 @@ class MetaLearner(object):
    (loss, outputs) where loss is the value of the model's loss function, and
    outputs is a list of the model's outputs
    """
    raise NotImplemented("Subclasses must implement this")
    raise NotImplementedError("Subclasses must implement this")

  @property
  def variables(self):
    """Get the list of TensorFlow variables to train.

    Subclasses must override this to return the model's trainable variables.
    """
    # NotImplementedError is the correct exception type; raising the
    # NotImplemented singleton would be a TypeError (it is not an exception).
    raise NotImplementedError("Subclasses must implement this")

  def select_task(self):
    """Select a new task to train on.

    If there are infinitely many training tasks, this can simply select a new
    one each time it is called.
    """
    # NotImplementedError is the correct exception type; raising the
    # NotImplemented singleton would be a TypeError (it is not an exception).
    raise NotImplementedError("Subclasses must implement this")

  def get_batch(self):
    """Get a batch of data for training.

    This will usually be called twice for each task, and should return a
    different batch on each call.
    """
    # NotImplementedError is the correct exception type; raising the
    # NotImplemented singleton would be a TypeError (it is not an exception).
    raise NotImplementedError("Subclasses must implement this")


class MAML(object):
+4 −3
Original line number Diff line number Diff line
from flaky import flaky
import unittest

import deepchem as dc
import numpy as np
import tensorflow as tf
import unittest
from flaky import flaky

import deepchem as dc


class TestMAML(unittest.TestCase):
+6 −5
Original line number Diff line number Diff line
"""Interface for reinforcement learning."""

from deepchem.rl.a2c import A2C
from deepchem.rl.ppo import PPO

from deepchem.rl.a2c import A2C  # noqa: F401
from deepchem.rl.ppo import PPO  # noqa: F401


class Environment(object):
@@ -120,7 +121,7 @@ class Environment(object):
    This must be called before calling step() or querying the state.  You can call it
    again later to reset the environment back to its original state.
    """
    raise NotImplemented("Subclasses must implement this")
    raise NotImplementedError("Subclasses must implement this")

  def step(self, action):
    """Take a time step by performing an action.

    Parameters
    ----------
    action: object
      the action to perform in the environment

    Returns
    -------
    the reward earned by taking the action, represented as a floating point
    number (higher values are better)
    """
    # NotImplementedError is the correct exception type; raising the
    # NotImplemented singleton would be a TypeError (it is not an exception).
    raise NotImplementedError("Subclasses must implement this")


class GymEnvironment(Environment):
@@ -225,4 +226,4 @@ class Policy(object):
    Depending on the algorithm being used, other inputs might get passed as
    well.  It is up to each algorithm to document that.
    """
    raise NotImplemented("Subclasses must implement this")
    raise NotImplementedError("Subclasses must implement this")
+6 −11
Original line number Diff line number Diff line
"""Advantage Actor-Critic (A2C) algorithm for reinforcement learning."""
import time
import collections

from deepchem.models import KerasModel
from deepchem.models.optimizers import Adam
import numpy as np
import tensorflow as tf
import collections
import copy
import multiprocessing
import os
import re
import threading
import time

from deepchem.models import KerasModel
from deepchem.models.optimizers import Adam


class A2CLossDiscrete(object):
@@ -49,7 +45,7 @@ class A2CLossContinuous(object):
  def __init__(self, value_weight, entropy_weight, mean_index, std_index,
               value_index):
    # tensorflow_probability is an optional dependency; probe for it here so
    # users get a clear error at construction time rather than a late
    # ImportError.  The import is intentionally unused (noqa: F401).
    try:
      import tensorflow_probability as tfp  # noqa: F401
    except ModuleNotFoundError:
      raise ValueError(
          "This class requires tensorflow-probability to be installed.")
@@ -384,7 +380,6 @@ class A2C(object):

  def _create_rollout(self, rnn_states):
    """Generate a rollout."""
    n_actions = self._env.n_actions
    states = []
    actions = []
    rewards = []
Loading