From a14ae69b6bf7e28e8b95c10479c4c454477adc7f Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Mon, 31 Jul 2023 16:07:08 +0200 Subject: [PATCH 1/8] Added first version of SAC Discrete, which is running but not learning currently --- sb3_contrib/sacd/__init__.py | 4 + sb3_contrib/sacd/policies.py | 521 +++++++++++++++++++++++++++++++++++ sb3_contrib/sacd/sacd.py | 337 ++++++++++++++++++++++ 3 files changed, 862 insertions(+) create mode 100644 sb3_contrib/sacd/__init__.py create mode 100644 sb3_contrib/sacd/policies.py create mode 100644 sb3_contrib/sacd/sacd.py diff --git a/sb3_contrib/sacd/__init__.py b/sb3_contrib/sacd/__init__.py new file mode 100644 index 00000000..98ec6779 --- /dev/null +++ b/sb3_contrib/sacd/__init__.py @@ -0,0 +1,4 @@ +from sb3_contrib.sacd.policies import CnnPolicy, MlpPolicy, MultiInputPolicy +from sb3_contrib.sacd.sacd import SACD + +__all__ = ["CnnPolicy", "MlpPolicy", "MultiInputPolicy", "SACD"] diff --git a/sb3_contrib/sacd/policies.py b/sb3_contrib/sacd/policies.py new file mode 100644 index 00000000..1676d585 --- /dev/null +++ b/sb3_contrib/sacd/policies.py @@ -0,0 +1,521 @@ +from typing import Any, Dict, List, Optional, Tuple, Type, Union + +import torch as th +from gymnasium import spaces +from torch import nn + +from torch.distributions import Categorical + +from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution +from stable_baselines3.common.policies import BasePolicy, BaseModel +from stable_baselines3.common.preprocessing import get_action_dim +from stable_baselines3.common.torch_layers import ( + BaseFeaturesExtractor, + CombinedExtractor, + FlattenExtractor, + NatureCNN, + create_mlp, + get_actor_critic_arch, +) +from stable_baselines3.common.type_aliases import Schedule + +class Actor(BasePolicy): + """ + Actor network (policy) for SAC. + + :param observation_space: Obervation space + :param action_space: Action space + :param net_arch: Network architecture + :param features_extractor: Network to extract features + (a CNN when using images, a nn.Flatten() layer otherwise) + :param features_dim: Number of features + :param activation_fn: Activation function + :param use_sde: Whether to use State Dependent Exploration or not + :param log_std_init: Initial value for the log standard deviation + :param full_std: Whether to use (n_features x n_actions) parameters + for the std instead of only (n_features,) when using gSDE. + :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure + a positive standard deviation (cf paper). It allows to keep variance + above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough. + :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability. 
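+        (Note: the gSDE-related parameters above are unused by this discrete actor;
+        they are kept only for interface compatibility with the continuous SAC policy.)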
+ :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + """ + + action_space: spaces.Box + + def __init__( + self, + observation_space: spaces.Space, + action_space: spaces.Box, + net_arch: List[int], + features_extractor: nn.Module, + features_dim: int, + activation_fn: Type[nn.Module] = nn.Softmax(dim=1), + use_sde: bool = False, + log_std_init: float = -3, + full_std: bool = True, + use_expln: bool = False, + clip_mean: float = 2.0, + normalize_images: bool = True, + ): + super().__init__( + observation_space, + action_space, + features_extractor=features_extractor, + normalize_images=normalize_images, + # squash_output=True, + squash_output=False, + ) + + # Save arguments to re-create object at loading + self.use_sde = use_sde + self.sde_features_extractor = None + self.net_arch = net_arch + self.features_dim = features_dim + self.activation_fn = activation_fn + self.log_std_init = log_std_init + self.use_expln = use_expln + self.full_std = full_std + self.clip_mean = clip_mean + + num_actions = self.action_space.n + + latent_pi_net = create_mlp(features_dim, num_actions, net_arch, activation_fn) + self.latent_pi = nn.Sequential(*latent_pi_net) + + self.output_activation = nn.Softmax(dim=1) + + def _get_constructor_parameters(self) -> Dict[str, Any]: + data = super()._get_constructor_parameters() + + data.update( + dict( + net_arch=self.net_arch, + features_dim=self.features_dim, + activation_fn=self.activation_fn, + use_sde=self.use_sde, + log_std_init=self.log_std_init, + full_std=self.full_std, + use_expln=self.use_expln, + features_extractor=self.features_extractor, + clip_mean=self.clip_mean, + ) + ) + return data + + def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: + features = self.extract_features(obs, self.features_extractor) + + action_probabilities = self.output_activation(self.latent_pi(features)) + + if deterministic: + action = th.argmax(action_probabilities) + else: + # random action according to policy + dist = Categorical(probs=action_probabilities) + action = dist.sample() + + return action + + def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]: + features = self.extract_features(obs, self.features_extractor) + + action_prob = self.output_activation(self.latent_pi(features)) + + # Have to deal with situation of 0.0 probabilities because we can't do log 0 + z = action_prob == 0.0 + z = z.float() * 1e-8 + log_action_prob = th.log(action_prob + z) + return action_prob, log_action_prob + + def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: + return self(observation, deterministic) + +class DiscreteCritic(BaseModel): + """ + Critic network(s) for DDPG/SAC/TD3. + It represents the action-state value function (Q-value function). + Compared to A2C/PPO critics, this one represents the Q-value + and takes the continuous action as input. It is concatenated with the state + and then fed to the network which outputs a single value: Q(s, a). + For more recent algorithms like SAC/TD3, multiple networks + are created to give different estimates. + + By default, it creates two critic networks used to reduce overestimation + thanks to clipped Q-learning (cf TD3 paper). 
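+    Unlike the continuous-action critic, this discrete variant takes only the
+    observation as input and outputs one Q-value per discrete action,
+    i.e. Q(s, .) as a tensor of shape (batch_size, n_actions) per critic.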
+ + :param observation_space: Obervation space + :param action_space: Action space + :param net_arch: Network architecture + :param features_extractor: Network to extract features + (a CNN when using images, a nn.Flatten() layer otherwise) + :param features_dim: Number of features + :param activation_fn: Activation function + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param n_critics: Number of critic networks to create. + :param share_features_extractor: Whether the features extractor is shared or not + between the actor and the critic (this saves computation time) + """ + + def __init__( + self, + observation_space: spaces.Space, + action_space: spaces.Box, + net_arch: List[int], + features_extractor: BaseFeaturesExtractor, + features_dim: int, + activation_fn: Type[nn.Module] = nn.ReLU, + normalize_images: bool = True, + n_critics: int = 2, + share_features_extractor: bool = True, + ): + super().__init__( + observation_space, + action_space, + features_extractor=features_extractor, + normalize_images=normalize_images, + ) + + action_dim = get_action_dim(self.action_space) + + self.share_features_extractor = share_features_extractor + self.n_critics = n_critics + self.q_networks = [] + for idx in range(n_critics): + q_net = create_mlp(features_dim, action_dim, net_arch, activation_fn) + q_net = nn.Sequential(*q_net) + self.add_module(f"qf{idx}", q_net) + self.q_networks.append(q_net) + + def get_crit_params(self, n): + return self.q_networks[n].parameters() + + def forward(self, obs: th.Tensor) -> Tuple[th.Tensor, ...]: + # Learn the features extractor using the policy loss only + # when the features_extractor is shared with the actor + with th.set_grad_enabled(not self.share_features_extractor): + features = self.extract_features(obs, self.features_extractor) + return tuple(q_net(features) for q_net in self.q_networks) + +class SACPolicy(BasePolicy): + """ + Policy class (with both actor and critic) for SAC. + + :param observation_space: Observation space + :param action_space: Action space + :param lr_schedule: Learning rate schedule (could be constant) + :param net_arch: The specification of the policy and value networks. + :param activation_fn: Activation function + :param use_sde: Whether to use State Dependent Exploration or not + :param log_std_init: Initial value for the log standard deviation + :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure + a positive standard deviation (cf paper). It allows to keep variance + above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough. + :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability. + :param features_extractor_class: Features extractor to use. + :param features_extractor_kwargs: Keyword arguments + to pass to the features extractor. + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param optimizer_class: The optimizer to use, + ``th.optim.Adam`` by default + :param optimizer_kwargs: Additional keyword arguments, + excluding the learning rate, to pass to the optimizer + :param n_critics: Number of critic networks to create. 
+ :param share_features_extractor: Whether to share or not the features extractor + between the actor and the critic (this saves computation time) + """ + + actor: Actor + critic: DiscreteCritic + critic_target: DiscreteCritic + + def __init__( + self, + observation_space: spaces.Space, + action_space: spaces.Box, + lr_schedule: Schedule, + net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + use_sde: bool = False, + log_std_init: float = -3, + use_expln: bool = False, + clip_mean: float = 2.0, + features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + n_critics: int = 2, + share_features_extractor: bool = False, + ): + super().__init__( + observation_space, + action_space, + features_extractor_class, + features_extractor_kwargs, + optimizer_class=optimizer_class, + optimizer_kwargs=optimizer_kwargs, + squash_output=True, + normalize_images=normalize_images, + ) + + if net_arch is None: + net_arch = [256, 256] + + actor_arch, critic_arch = get_actor_critic_arch(net_arch) + + self.net_arch = net_arch + self.activation_fn = activation_fn + self.net_args = { + "observation_space": self.observation_space, + "action_space": self.action_space, + "net_arch": actor_arch, + "activation_fn": self.activation_fn, + "normalize_images": normalize_images, + } + self.actor_kwargs = self.net_args.copy() + + sde_kwargs = { + "use_sde": use_sde, + "log_std_init": log_std_init, + "use_expln": use_expln, + "clip_mean": clip_mean, + } + self.actor_kwargs.update(sde_kwargs) + self.critic_kwargs = self.net_args.copy() + self.critic_kwargs.update( + { + "n_critics": n_critics, + "net_arch": critic_arch, + "share_features_extractor": share_features_extractor, + } + ) + + self.share_features_extractor = share_features_extractor + + self._build(lr_schedule) + + def _build(self, lr_schedule: Schedule) -> None: + self.actor = self.make_actor() + self.actor.optimizer = self.optimizer_class( + self.actor.parameters(), + lr=lr_schedule(1), # type: ignore[call-arg] + **self.optimizer_kwargs, + ) + + if self.share_features_extractor: + self.critic = self.make_critic(features_extractor=self.actor.features_extractor) + # Do not optimize the shared features extractor with the critic loss + # otherwise, there are gradient computation issues + critic_parameters = [param for name, param in self.critic.named_parameters() if "features_extractor" not in name] + else: + # Create a separate features extractor for the critic + # this requires more memory and computation + self.critic = self.make_critic(features_extractor=None) + critic_parameters = list(self.critic.parameters()) + + # Critic target should not share the features extractor with critic + self.critic_target = self.make_critic(features_extractor=None) + self.critic_target.load_state_dict(self.critic.state_dict()) + + self.critic.optimizer = self.optimizer_class( + critic_parameters, + lr=lr_schedule(1), # type: ignore[call-arg] + **self.optimizer_kwargs, + ) + + # Target networks should always be in eval mode + self.critic_target.set_training_mode(False) + + def _get_constructor_parameters(self) -> Dict[str, Any]: + data = super()._get_constructor_parameters() + + data.update( + dict( + net_arch=self.net_arch, + activation_fn=self.net_args["activation_fn"], + 
use_sde=self.actor_kwargs["use_sde"], + log_std_init=self.actor_kwargs["log_std_init"], + use_expln=self.actor_kwargs["use_expln"], + clip_mean=self.actor_kwargs["clip_mean"], + n_critics=self.critic_kwargs["n_critics"], + lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone + optimizer_class=self.optimizer_class, + optimizer_kwargs=self.optimizer_kwargs, + features_extractor_class=self.features_extractor_class, + features_extractor_kwargs=self.features_extractor_kwargs, + ) + ) + return data + + def reset_noise(self, batch_size: int = 1) -> None: + """ + Sample new weights for the exploration matrix, when using gSDE. + + :param batch_size: + """ + self.actor.reset_noise(batch_size=batch_size) + + def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor: + actor_kwargs = self._update_features_extractor(self.actor_kwargs, features_extractor) + return Actor(**actor_kwargs).to(self.device) + + def make_critic(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> DiscreteCritic: + critic_kwargs = self._update_features_extractor(self.critic_kwargs, features_extractor) + return DiscreteCritic(**critic_kwargs).to(self.device) + + def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor: + return self._predict(obs, deterministic=deterministic) + + def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: + return self.actor(observation, deterministic) + + def set_training_mode(self, mode: bool) -> None: + """ + Put the policy in either training or evaluation mode. + + This affects certain modules, such as batch normalisation and dropout. + + :param mode: if true, set to training mode, else set to evaluation mode + """ + self.actor.set_training_mode(mode) + self.critic.set_training_mode(mode) + self.training = mode + + +MlpPolicy = SACPolicy + + +class CnnPolicy(SACPolicy): + """ + Policy class (with both actor and critic) for SAC. + + :param observation_space: Observation space + :param action_space: Action space + :param lr_schedule: Learning rate schedule (could be constant) + :param net_arch: The specification of the policy and value networks. + :param activation_fn: Activation function + :param use_sde: Whether to use State Dependent Exploration or not + :param log_std_init: Initial value for the log standard deviation + :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure + a positive standard deviation (cf paper). It allows to keep variance + above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough. + :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability. + :param features_extractor_class: Features extractor to use. + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param optimizer_class: The optimizer to use, + ``th.optim.Adam`` by default + :param optimizer_kwargs: Additional keyword arguments, + excluding the learning rate, to pass to the optimizer + :param n_critics: Number of critic networks to create. 
+ :param share_features_extractor: Whether to share or not the features extractor + between the actor and the critic (this saves computation time) + """ + + def __init__( + self, + observation_space: spaces.Space, + action_space: spaces.Box, + lr_schedule: Schedule, + net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + use_sde: bool = False, + log_std_init: float = -3, + use_expln: bool = False, + clip_mean: float = 2.0, + features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + n_critics: int = 2, + share_features_extractor: bool = False, + ): + super().__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + use_sde, + log_std_init, + use_expln, + clip_mean, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + n_critics, + share_features_extractor, + ) + + +class MultiInputPolicy(SACPolicy): + """ + Policy class (with both actor and critic) for SAC. + + :param observation_space: Observation space + :param action_space: Action space + :param lr_schedule: Learning rate schedule (could be constant) + :param net_arch: The specification of the policy and value networks. + :param activation_fn: Activation function + :param use_sde: Whether to use State Dependent Exploration or not + :param log_std_init: Initial value for the log standard deviation + :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure + a positive standard deviation (cf paper). It allows to keep variance + above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough. + :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability. + :param features_extractor_class: Features extractor to use. + :param normalize_images: Whether to normalize images or not, + dividing by 255.0 (True by default) + :param optimizer_class: The optimizer to use, + ``th.optim.Adam`` by default + :param optimizer_kwargs: Additional keyword arguments, + excluding the learning rate, to pass to the optimizer + :param n_critics: Number of critic networks to create. 
+ :param share_features_extractor: Whether to share or not the features extractor + between the actor and the critic (this saves computation time) + """ + + def __init__( + self, + observation_space: spaces.Space, + action_space: spaces.Box, + lr_schedule: Schedule, + net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None, + activation_fn: Type[nn.Module] = nn.ReLU, + use_sde: bool = False, + log_std_init: float = -3, + use_expln: bool = False, + clip_mean: float = 2.0, + features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor, + features_extractor_kwargs: Optional[Dict[str, Any]] = None, + normalize_images: bool = True, + optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, + optimizer_kwargs: Optional[Dict[str, Any]] = None, + n_critics: int = 2, + share_features_extractor: bool = False, + ): + super().__init__( + observation_space, + action_space, + lr_schedule, + net_arch, + activation_fn, + use_sde, + log_std_init, + use_expln, + clip_mean, + features_extractor_class, + features_extractor_kwargs, + normalize_images, + optimizer_class, + optimizer_kwargs, + n_critics, + share_features_extractor, + ) diff --git a/sb3_contrib/sacd/sacd.py b/sb3_contrib/sacd/sacd.py new file mode 100644 index 00000000..1119bf00 --- /dev/null +++ b/sb3_contrib/sacd/sacd.py @@ -0,0 +1,337 @@ +from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, TypeVar, Union + +import numpy as np +import torch as th +from gymnasium import spaces +from torch.nn import functional as F + +from stable_baselines3.common.buffers import ReplayBuffer +from stable_baselines3.common.noise import ActionNoise +from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm +from stable_baselines3.common.policies import BasePolicy +from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule +from stable_baselines3.common.utils import get_parameters_by_name, polyak_update + +from sb3_contrib.sacd.policies import Actor, DiscreteCritic, CnnPolicy, MlpPolicy, MultiInputPolicy, SACPolicy + +SelfSACD = TypeVar("SelfSACD", bound="SACD") + + +class SACD(OffPolicyAlgorithm): + """ + Soft Actor-Critic (SAC) + Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor, + This implementation borrows code from original implementation (https://github.com/haarnoja/sac) + from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo + (https://github.com/rail-berkeley/softlearning/) + and from Stable Baselines (https://github.com/hill-a/stable-baselines) + Paper: https://arxiv.org/abs/1801.01290 + Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html + + Note: we use double q target and not value target as discussed + in https://github.com/hill-a/stable-baselines/issues/270 + + :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) 
+ :param env: The environment to learn from (if registered in Gym, can be str) + :param learning_rate: learning rate for adam optimizer, + the same learning rate will be used for all networks (Q-Values, Actor and Value function) + it can be a function of the current progress remaining (from 1 to 0) + :param buffer_size: size of the replay buffer + :param learning_starts: how many steps of the model to collect transitions for before learning starts + :param batch_size: Minibatch size for each gradient update + :param tau: the soft update coefficient ("Polyak update", between 0 and 1) + :param gamma: the discount factor + :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit + like ``(5, "step")`` or ``(2, "episode")``. + :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) + Set to ``-1`` means to do as many gradient steps as steps done in the environment + during the rollout. + :param action_noise: the action noise type (None by default), this can help + for hard exploration problem. Cf common.noise for the different action noise type. + :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). + If ``None``, it will be automatically selected. + :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. + :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer + at a cost of more complexity. + See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 + :param ent_coef: Entropy regularization coefficient. (Equivalent to + inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. + Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) + :param target_update_interval: update the target network every ``target_network_update_freq`` + gradient steps. + :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``) + :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) + instead of action noise exploration (default: False) + :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE + Default: -1 (only sample at the beginning of the rollout) + :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling + during the warm up phase (before learning starts) + :param stats_window_size: Window size for the rollout logging, specifying the number of episodes to average + the reported success rate, mean episode length, and mean reward over + :param tensorboard_log: the log location for tensorboard (if None, no logging) + :param policy_kwargs: additional arguments to be passed to the policy on creation + :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for + debug messages + :param seed: Seed for the pseudo random generators + :param device: Device (cpu, cuda, ...) on which the code should be run. + Setting it to auto, the code will be run on the GPU if possible. 
+ :param _init_setup_model: Whether or not to build the network at the creation of the instance + """ + + policy_aliases: ClassVar[Dict[str, Type[BasePolicy]]] = { + "MlpPolicy": MlpPolicy, + "CnnPolicy": CnnPolicy, + "MultiInputPolicy": MultiInputPolicy, + } + policy: SACPolicy + actor: Actor + critic: DiscreteCritic + critic_target: DiscreteCritic + + def __init__( + self, + policy: Union[str, Type[SACPolicy]], + env: Union[GymEnv, str], + learning_rate: Union[float, Schedule] = 3e-4, + buffer_size: int = 1_000_000, # 1e6 + learning_starts: int = 400, + batch_size: int = 256, + tau: float = 0.005, + gamma: float = 0.99, + train_freq: Union[int, Tuple[int, str]] = 1, + gradient_steps: int = 1, + action_noise: Optional[ActionNoise] = None, + replay_buffer_class: Optional[Type[ReplayBuffer]] = None, + replay_buffer_kwargs: Optional[Dict[str, Any]] = None, + optimize_memory_usage: bool = False, + ent_coef: Union[str, float] = "auto", + target_update_interval: int = 1, + target_entropy: Union[str, float] = "auto", + use_sde: bool = False, + sde_sample_freq: int = -1, + use_sde_at_warmup: bool = False, + stats_window_size: int = 100, + tensorboard_log: Optional[str] = None, + policy_kwargs: Optional[Dict[str, Any]] = None, + verbose: int = 0, + seed: Optional[int] = None, + device: Union[th.device, str] = "auto", + _init_setup_model: bool = True, + ): + super().__init__( + policy, + env, + learning_rate, + buffer_size, + learning_starts, + batch_size, + tau, + gamma, + train_freq, + gradient_steps, + action_noise, + replay_buffer_class=replay_buffer_class, + replay_buffer_kwargs=replay_buffer_kwargs, + policy_kwargs=policy_kwargs, + stats_window_size=stats_window_size, + tensorboard_log=tensorboard_log, + verbose=verbose, + device=device, + seed=seed, + use_sde=use_sde, + sde_sample_freq=sde_sample_freq, + use_sde_at_warmup=use_sde_at_warmup, + optimize_memory_usage=optimize_memory_usage, + supported_action_spaces=(spaces.Discrete,), + support_multi_env=True, + ) + + self.target_entropy = target_entropy + self.log_ent_coef = None # type: Optional[th.Tensor] + # Entropy coefficient / Entropy temperature + # Inverse of the reward scale + self.ent_coef = ent_coef + self.target_update_interval = target_update_interval + self.ent_coef_optimizer: Optional[th.optim.Adam] = None + + if _init_setup_model: + self._setup_model() + + def _setup_model(self) -> None: + super()._setup_model() + self._create_aliases() + # Running mean and running var + self.batch_norm_stats = get_parameters_by_name(self.critic, ["running_"]) + self.batch_norm_stats_target = get_parameters_by_name(self.critic_target, ["running_"]) + # Target entropy is used when learning the entropy coefficient + if self.target_entropy == "auto": + # we set the max possible entropy as the target entropy + self.target_entropy = 0.98 * -np.log(1 / np.prod(self.env.action_space.shape)) + else: + # Force conversion + # this will also throw an error for unexpected string + self.target_entropy = float(self.target_entropy) + + # The entropy coefficient or entropy can be learned automatically + # see Automating Entropy Adjustment for Maximum Entropy RL section + # of https://arxiv.org/abs/1812.05905 + if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"): + # Default initial value of ent_coef when learned + init_value = 1.0 + if "_" in self.ent_coef: + init_value = float(self.ent_coef.split("_")[1]) + assert init_value > 0.0, "The initial value of ent_coef must be greater than 0" + + # Note: we optimize the log of the entropy coeff 
which is slightly different from the paper + # as discussed in https://github.com/rail-berkeley/softlearning/issues/37 + # self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True) + self.log_ent_coef = th.zeros(1, device=self.device, requires_grad=True) + self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1)) + else: + # Force conversion to float + # this will throw an error if a malformed string (different from 'auto') + # is passed + self.ent_coef_tensor = th.tensor(float(self.ent_coef), device=self.device) + + def _create_aliases(self) -> None: + self.actor = self.policy.actor + self.critic = self.policy.critic + self.critic_target = self.policy.critic_target + + def train(self, gradient_steps: int, batch_size: int = 64) -> None: + # Switch to train mode (this affects batch norm / dropout) + self.policy.set_training_mode(True) + # Update optimizers learning rate + optimizers = [self.actor.optimizer, self.critic.optimizer] + if self.ent_coef_optimizer is not None: + optimizers += [self.ent_coef_optimizer] + + # Update learning rate according to lr schedule + self._update_learning_rate(optimizers) + + ent_coef_losses, ent_coefs = [], [] + actor_losses, critic_losses = [], [] + + for gradient_step in range(gradient_steps): + # Sample replay buffer + replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # type: ignore[union-attr] + + # We need to sample because `log_std` may have changed between two gradient steps + # if self.use_sde: + # self.actor.reset_noise() + + # Action by the current actor for the sampled state + actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations) + + # Compute entropy loss + ent_coef_loss = None + if self.ent_coef_optimizer is not None and self.log_ent_coef is not None: + # Important: detach the variable from the graph + # so we don't change it with other losses + # see https://github.com/rail-berkeley/softlearning/issues/60 + ent_coef = th.exp(self.log_ent_coef.detach()) + ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean() + ent_coef_losses.append(ent_coef_loss.item()) + else: + ent_coef = self.ent_coef_tensor + + ent_coefs.append(ent_coef.item()) + + # print(f"Alpha Loss{ent_coef_loss.item()}") + + # Optimize entropy coefficient, also called + # entropy temperature or alpha in the paper + if ent_coef_loss is not None and self.ent_coef_optimizer is not None: + self.ent_coef_optimizer.zero_grad() + ent_coef_loss.backward() + self.ent_coef_optimizer.step() + + with th.no_grad(): + # Select action according to policy + action_prob, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) + # Compute the next Q values: min over all critics targets + next_q_values = th.cat(self.critic_target(replay_data.next_observations), dim=1) + next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) + + # add entropy term + next_q_values = (action_prob * next_q_values - ent_coef * next_log_prob).sum(dim=1).unsqueeze(-1) + # td error + entropy term + target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values + + # Get current Q-values estimates for each critic network + # using action from the replay buffer + current_q_values = self.critic(replay_data.observations) + + # Compute critic loss + critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values) + critic_losses.append(critic_loss.item()) # type: ignore[union-attr] + + # 
print(f"Critic Loss{critic_loss.item()}") + + # Optimize the critic + self.critic.optimizer.zero_grad() + critic_loss.backward() + th.nn.utils.clip_grad_norm(self.actor.parameters(), 5.0) + self.critic.optimizer.step() + + # Compute actor loss + # Min over all critic networks + q_values_pi = th.cat(self.critic(replay_data.observations), dim=1) + min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) + + inside_term = ent_coef * log_prob - min_qf_pi + actor_loss = (actions_pi * inside_term).sum(dim=1).mean() + actor_losses.append(actor_loss.item()) + + # print(f"Actor Loss{actor_loss.item()}") + + # Optimize the actor + self.actor.optimizer.zero_grad() + actor_loss.backward() + th.nn.utils.clip_grad_norm(self.critic.parameters(), 5.0) + self.actor.optimizer.step() + + # Update target networks + if gradient_step % self.target_update_interval == 0: + polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau) + # Copy running stats, see GH issue #996 + # polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0) + + self._n_updates += gradient_steps + + self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard") + self.logger.record("train/ent_coef", np.mean(ent_coefs)) + self.logger.record("train/actor_loss", np.mean(actor_losses)) + self.logger.record("train/critic_loss", np.mean(critic_losses)) + if len(ent_coef_losses) > 0: + self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses)) + + def learn( + self: SelfSACD, + total_timesteps: int, + callback: MaybeCallback = None, + log_interval: int = 4, + tb_log_name: str = "SACD", + reset_num_timesteps: bool = True, + progress_bar: bool = False, + ) -> SelfSACD: + return super().learn( + total_timesteps=total_timesteps, + callback=callback, + log_interval=log_interval, + tb_log_name=tb_log_name, + reset_num_timesteps=reset_num_timesteps, + progress_bar=progress_bar, + ) + + def _excluded_save_params(self) -> List[str]: + return super()._excluded_save_params() + ["actor", "critic", "critic_target"] # noqa: RUF005 + + def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: + state_dicts = ["policy", "actor.optimizer", "critic.optimizer"] + if self.ent_coef_optimizer is not None: + saved_pytorch_variables = ["log_ent_coef"] + state_dicts.append("ent_coef_optimizer") + else: + saved_pytorch_variables = ["ent_coef_tensor"] + return state_dicts, saved_pytorch_variables From 875b8bca0dce2656e433e31e43dde9a88bb9ea08 Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Tue, 1 Aug 2023 15:09:55 +0200 Subject: [PATCH 2/8] Fixed bugs in that lead to wrong results, currently only working with 2 critics --- sb3_contrib/__init__.py | 2 + sb3_contrib/sacd/policies.py | 4 +- sb3_contrib/sacd/sacd.py | 148 ++++++++++++++++++----------------- 3 files changed, 80 insertions(+), 74 deletions(-) diff --git a/sb3_contrib/__init__.py b/sb3_contrib/__init__.py index 3fbd28d8..9e5f4cde 100644 --- a/sb3_contrib/__init__.py +++ b/sb3_contrib/__init__.py @@ -6,6 +6,7 @@ from sb3_contrib.qrdqn import QRDQN from sb3_contrib.tqc import TQC from sb3_contrib.trpo import TRPO +from sb3_contrib.sacd import SACD # Read version from file version_file = os.path.join(os.path.dirname(__file__), "version.txt") @@ -19,4 +20,5 @@ "QRDQN", "TQC", "TRPO", + "SACD", ] diff --git a/sb3_contrib/sacd/policies.py b/sb3_contrib/sacd/policies.py index 1676d585..f4efef50 100644 --- a/sb3_contrib/sacd/policies.py +++ b/sb3_contrib/sacd/policies.py @@ -178,13 +178,13 @@ def __init__( normalize_images=normalize_images, ) - 
action_dim = get_action_dim(self.action_space) + num_actions = self.action_space.n self.share_features_extractor = share_features_extractor self.n_critics = n_critics self.q_networks = [] for idx in range(n_critics): - q_net = create_mlp(features_dim, action_dim, net_arch, activation_fn) + q_net = create_mlp(features_dim, num_actions, net_arch, activation_fn) q_net = nn.Sequential(*q_net) self.add_module(f"qf{idx}", q_net) self.q_networks.append(q_net) diff --git a/sb3_contrib/sacd/sacd.py b/sb3_contrib/sacd/sacd.py index 1119bf00..4b612a0b 100644 --- a/sb3_contrib/sacd/sacd.py +++ b/sb3_contrib/sacd/sacd.py @@ -186,6 +186,7 @@ def _setup_model(self) -> None: # as discussed in https://github.com/rail-berkeley/softlearning/issues/37 # self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True) self.log_ent_coef = th.zeros(1, device=self.device, requires_grad=True) + self.ent_coef = th.exp(self.log_ent_coef) self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1)) else: # Force conversion to float @@ -216,80 +217,19 @@ def train(self, gradient_steps: int, batch_size: int = 64) -> None: # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # type: ignore[union-attr] - # We need to sample because `log_std` may have changed between two gradient steps - # if self.use_sde: - # self.actor.reset_noise() - - # Action by the current actor for the sampled state - actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations) - - # Compute entropy loss - ent_coef_loss = None - if self.ent_coef_optimizer is not None and self.log_ent_coef is not None: - # Important: detach the variable from the graph - # so we don't change it with other losses - # see https://github.com/rail-berkeley/softlearning/issues/60 - ent_coef = th.exp(self.log_ent_coef.detach()) - ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean() - ent_coef_losses.append(ent_coef_loss.item()) - else: - ent_coef = self.ent_coef_tensor - - ent_coefs.append(ent_coef.item()) - - # print(f"Alpha Loss{ent_coef_loss.item()}") - - # Optimize entropy coefficient, also called - # entropy temperature or alpha in the paper - if ent_coef_loss is not None and self.ent_coef_optimizer is not None: - self.ent_coef_optimizer.zero_grad() - ent_coef_loss.backward() - self.ent_coef_optimizer.step() - - with th.no_grad(): - # Select action according to policy - action_prob, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) - # Compute the next Q values: min over all critics targets - next_q_values = th.cat(self.critic_target(replay_data.next_observations), dim=1) - next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) - - # add entropy term - next_q_values = (action_prob * next_q_values - ent_coef * next_log_prob).sum(dim=1).unsqueeze(-1) - # td error + entropy term - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates for each critic network - # using action from the replay buffer - current_q_values = self.critic(replay_data.observations) - - # Compute critic loss - critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values) - critic_losses.append(critic_loss.item()) # type: ignore[union-attr] - - # print(f"Critic Loss{critic_loss.item()}") - - # Optimize the critic - self.critic.optimizer.zero_grad() - critic_loss.backward() - 
th.nn.utils.clip_grad_norm(self.actor.parameters(), 5.0) - self.critic.optimizer.step() - - # Compute actor loss - # Min over all critic networks - q_values_pi = th.cat(self.critic(replay_data.observations), dim=1) - min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) - - inside_term = ent_coef * log_prob - min_qf_pi - actor_loss = (actions_pi * inside_term).sum(dim=1).mean() - actor_losses.append(actor_loss.item()) + # Compute the critic loss + critic_loss = self.calc_critic_loss(replay_data) + critic_losses.append(critic_loss.item()) + self.take_optimisation_step(self.critic.optimizer, self.critic, critic_loss, 5.0) - # print(f"Actor Loss{actor_loss.item()}") + # Compute the actor loss + actor_loss, log_action_prob = self.calc_actor_loss(replay_data) + actor_losses.append(actor_loss.item()) + self.take_optimisation_step(self.actor.optimizer, self.actor, actor_loss, 5.0) - # Optimize the actor - self.actor.optimizer.zero_grad() - actor_loss.backward() - th.nn.utils.clip_grad_norm(self.critic.parameters(), 5.0) - self.actor.optimizer.step() + # Compute entropy loss and optimize + ent_coeff = self.calc_entropy_loss(log_action_prob) + ent_coefs.append(self.ent_coef.item()) # Update target networks if gradient_step % self.target_update_interval == 0: @@ -306,6 +246,70 @@ def train(self, gradient_steps: int, batch_size: int = 64) -> None: if len(ent_coef_losses) > 0: self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses)) + def take_optimisation_step(self, optimizer, network, loss, clipping_norm=None): + optimizer.zero_grad() + loss.backward() + if clipping_norm is not None: + th.nn.utils.clip_grad_norm_(network.parameters(), clipping_norm) #clip gradients to help stabilise training + optimizer.step() + + def calc_critic_loss(self, replay_data): + with th.no_grad(): + # Select action according to policy + action_prob, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) + # Compute the next Q values: min over all critics targets + # next_q_values = th.cat(self.critic_target(replay_data.next_observations), dim=1) + next_q_values = self.critic_target(replay_data.next_observations) + # print(self.critic_target(replay_data.next_observations)) + # exit(0) + # print(next_q_values) + + # next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) + next_q_values = th.min(*next_q_values) + # print(next_q_values) + # exit(0) + + next_q_values = (action_prob * (next_q_values - self.ent_coef * next_log_prob)).sum(dim=1).unsqueeze(-1) + target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values + + # Get current Q-values estimates for each critic network + # using action from the replay buffer + current_q_values = self.critic(replay_data.observations) + + # Compute critic loss + critic_loss = 0.5 * sum(F.mse_loss(current_q.gather(1, replay_data.actions), target_q_values) for current_q in current_q_values) + + return critic_loss + + def calc_actor_loss(self, replay_data): + action_prob, log_prob = self.actor.action_log_prob(replay_data.observations) + + # Min over all critic networks + # q_values_pi = th.cat(self.critic(replay_data.observations), dim=1) + q_values_pi = self.critic(replay_data.observations) + # min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) + min_qf_pi = th.min(*q_values_pi) + + inside_term = self.ent_coef * log_prob - min_qf_pi + actor_loss = (action_prob * inside_term).sum(dim=1).mean() + return actor_loss, log_prob + + def calc_entropy_loss(self, log_action_prob): + ent_coef_loss = None + if 
self.ent_coef_optimizer is not None and self.log_ent_coef is not None: + # Important: detach the variable from the graph + # so we don't change it with other losses + # see https://github.com/rail-berkeley/softlearning/issues/60 + ent_coef_loss = -(self.log_ent_coef * (log_action_prob + self.target_entropy).detach()).mean() + # ent_coef_losses.append(ent_coef_loss.item()) + ent_coef_loss.backward() + self.ent_coef_optimizer.step() + self.ent_coef = th.exp(self.log_ent_coef.detach()) + else: + self.ent_coef = self.ent_coef_tensor + + return self.ent_coef + def learn( self: SelfSACD, total_timesteps: int, From 7711813dbb5f2f289622f2120124d6444f18b3d0 Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Wed, 2 Aug 2023 13:17:22 +0200 Subject: [PATCH 3/8] Reworked code to work whith more than 2 critic networks --- sb3_contrib/sacd/sacd.py | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/sb3_contrib/sacd/sacd.py b/sb3_contrib/sacd/sacd.py index 4b612a0b..9e485139 100644 --- a/sb3_contrib/sacd/sacd.py +++ b/sb3_contrib/sacd/sacd.py @@ -68,6 +68,7 @@ class SACD(OffPolicyAlgorithm): during the warm up phase (before learning starts) :param stats_window_size: Window size for the rollout logging, specifying the number of episodes to average the reported success rate, mean episode length, and mean reward over + :param max_grad_norm: The maximum value for the gradient clipping :param tensorboard_log: the log location for tensorboard (if None, no logging) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for @@ -111,6 +112,7 @@ def __init__( sde_sample_freq: int = -1, use_sde_at_warmup: bool = False, stats_window_size: int = 100, + max_grad_norm = 5.0, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, @@ -153,6 +155,7 @@ def __init__( self.ent_coef = ent_coef self.target_update_interval = target_update_interval self.ent_coef_optimizer: Optional[th.optim.Adam] = None + self.gradient_clip_norm = max_grad_norm if _init_setup_model: self._setup_model() @@ -220,12 +223,12 @@ def train(self, gradient_steps: int, batch_size: int = 64) -> None: # Compute the critic loss critic_loss = self.calc_critic_loss(replay_data) critic_losses.append(critic_loss.item()) - self.take_optimisation_step(self.critic.optimizer, self.critic, critic_loss, 5.0) + self.take_optimisation_step(self.critic.optimizer, self.critic, critic_loss, self.gradient_clip_norm) # Compute the actor loss actor_loss, log_action_prob = self.calc_actor_loss(replay_data) actor_losses.append(actor_loss.item()) - self.take_optimisation_step(self.actor.optimizer, self.actor, actor_loss, 5.0) + self.take_optimisation_step(self.actor.optimizer, self.actor, actor_loss, self.gradient_clip_norm) # Compute entropy loss and optimize ent_coeff = self.calc_entropy_loss(log_action_prob) @@ -235,7 +238,7 @@ def train(self, gradient_steps: int, batch_size: int = 64) -> None: if gradient_step % self.target_update_interval == 0: polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau) # Copy running stats, see GH issue #996 - # polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0) + polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0) self._n_updates += gradient_steps @@ -257,17 +260,10 @@ def calc_critic_loss(self, replay_data): with th.no_grad(): # Select action 
according to policy action_prob, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) - # Compute the next Q values: min over all critics targets - # next_q_values = th.cat(self.critic_target(replay_data.next_observations), dim=1) - next_q_values = self.critic_target(replay_data.next_observations) - # print(self.critic_target(replay_data.next_observations)) - # exit(0) - # print(next_q_values) - # next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) - next_q_values = th.min(*next_q_values) - # print(next_q_values) - # exit(0) + # Compute the next Q values: min over all critics targets + next_q_values = th.stack(self.critic_target(replay_data.next_observations), dim=2) + next_q_values, _ = th.min(next_q_values, dim=2) next_q_values = (action_prob * (next_q_values - self.ent_coef * next_log_prob)).sum(dim=1).unsqueeze(-1) target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values @@ -285,10 +281,8 @@ def calc_actor_loss(self, replay_data): action_prob, log_prob = self.actor.action_log_prob(replay_data.observations) # Min over all critic networks - # q_values_pi = th.cat(self.critic(replay_data.observations), dim=1) - q_values_pi = self.critic(replay_data.observations) - # min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) - min_qf_pi = th.min(*q_values_pi) + q_values_pi = th.stack(self.critic(replay_data.observations), dim=2) + min_qf_pi, _ = th.min(q_values_pi, dim=2) inside_term = self.ent_coef * log_prob - min_qf_pi actor_loss = (action_prob * inside_term).sum(dim=1).mean() From 4a37f58259148faf12540badc99521dac16b101f Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Wed, 2 Aug 2023 13:52:40 +0200 Subject: [PATCH 4/8] Code style changes --- sb3_contrib/__init__.py | 2 +- sb3_contrib/sacd/policies.py | 45 +++++++++++++++--------------------- sb3_contrib/sacd/sacd.py | 37 ++++++++++++++--------------- 3 files changed, 37 insertions(+), 47 deletions(-) diff --git a/sb3_contrib/__init__.py b/sb3_contrib/__init__.py index 9e5f4cde..f49a7256 100644 --- a/sb3_contrib/__init__.py +++ b/sb3_contrib/__init__.py @@ -4,9 +4,9 @@ from sb3_contrib.ppo_mask import MaskablePPO from sb3_contrib.ppo_recurrent import RecurrentPPO from sb3_contrib.qrdqn import QRDQN +from sb3_contrib.sacd import SACD from sb3_contrib.tqc import TQC from sb3_contrib.trpo import TRPO -from sb3_contrib.sacd import SACD # Read version from file version_file = os.path.join(os.path.dirname(__file__), "version.txt") diff --git a/sb3_contrib/sacd/policies.py b/sb3_contrib/sacd/policies.py index f4efef50..49f560a2 100644 --- a/sb3_contrib/sacd/policies.py +++ b/sb3_contrib/sacd/policies.py @@ -2,13 +2,7 @@ import torch as th from gymnasium import spaces -from torch import nn - -from torch.distributions import Categorical - -from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution -from stable_baselines3.common.policies import BasePolicy, BaseModel -from stable_baselines3.common.preprocessing import get_action_dim +from stable_baselines3.common.policies import BaseModel, BasePolicy from stable_baselines3.common.torch_layers import ( BaseFeaturesExtractor, CombinedExtractor, @@ -18,10 +12,13 @@ get_actor_critic_arch, ) from stable_baselines3.common.type_aliases import Schedule +from torch import nn +from torch.distributions import Categorical + class Actor(BasePolicy): """ - Actor network (policy) for SAC. 
+ Actor network (policy) for SACD :param observation_space: Obervation space :param action_space: Action space @@ -42,7 +39,7 @@ class Actor(BasePolicy): dividing by 255.0 (True by default) """ - action_space: spaces.Box + action_space: spaces.Discrete def __init__( self, @@ -51,7 +48,7 @@ def __init__( net_arch: List[int], features_extractor: nn.Module, features_dim: int, - activation_fn: Type[nn.Module] = nn.Softmax(dim=1), + activation_fn: Type[nn.Module] = nn.Softmax, use_sde: bool = False, log_std_init: float = -3, full_std: bool = True, @@ -132,6 +129,7 @@ def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]: def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: return self(observation, deterministic) + class DiscreteCritic(BaseModel): """ Critic network(s) for DDPG/SAC/TD3. @@ -159,10 +157,12 @@ class DiscreteCritic(BaseModel): between the actor and the critic (this saves computation time) """ + action_space: spaces.Discrete + def __init__( self, observation_space: spaces.Space, - action_space: spaces.Box, + action_space: spaces.Discrete, net_arch: List[int], features_extractor: BaseFeaturesExtractor, features_dim: int, @@ -199,9 +199,10 @@ def forward(self, obs: th.Tensor) -> Tuple[th.Tensor, ...]: features = self.extract_features(obs, self.features_extractor) return tuple(q_net(features) for q_net in self.q_networks) -class SACPolicy(BasePolicy): + +class SACDPolicy(BasePolicy): """ - Policy class (with both actor and critic) for SAC. + Policy class (with both actor and critic) for SACD. :param observation_space: Observation space :param action_space: Action space @@ -351,14 +352,6 @@ def _get_constructor_parameters(self) -> Dict[str, Any]: ) return data - def reset_noise(self, batch_size: int = 1) -> None: - """ - Sample new weights for the exploration matrix, when using gSDE. - - :param batch_size: - """ - self.actor.reset_noise(batch_size=batch_size) - def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor: actor_kwargs = self._update_features_extractor(self.actor_kwargs, features_extractor) return Actor(**actor_kwargs).to(self.device) @@ -386,12 +379,12 @@ def set_training_mode(self, mode: bool) -> None: self.training = mode -MlpPolicy = SACPolicy +MlpPolicy = SACDPolicy -class CnnPolicy(SACPolicy): +class CnnPolicy(SACDPolicy): """ - Policy class (with both actor and critic) for SAC. + Policy class (with both actor and critic) for SACD. :param observation_space: Observation space :param action_space: Action space @@ -455,9 +448,9 @@ def __init__( ) -class MultiInputPolicy(SACPolicy): +class MultiInputPolicy(SACDPolicy): """ - Policy class (with both actor and critic) for SAC. + Policy class (with both actor and critic) for SACD. 
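+    Identical to ``MlpPolicy``/``SACDPolicy`` except that it defaults to a
+    ``CombinedExtractor`` features extractor to handle Dict observation spaces.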
:param observation_space: Observation space :param action_space: Action space diff --git a/sb3_contrib/sacd/sacd.py b/sb3_contrib/sacd/sacd.py index 9e485139..8433728a 100644 --- a/sb3_contrib/sacd/sacd.py +++ b/sb3_contrib/sacd/sacd.py @@ -3,23 +3,22 @@ import numpy as np import torch as th from gymnasium import spaces -from torch.nn import functional as F - from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import get_parameters_by_name, polyak_update +from torch.nn import functional as F -from sb3_contrib.sacd.policies import Actor, DiscreteCritic, CnnPolicy, MlpPolicy, MultiInputPolicy, SACPolicy +from sb3_contrib.sacd.policies import Actor, CnnPolicy, DiscreteCritic, MlpPolicy, MultiInputPolicy, SACDPolicy SelfSACD = TypeVar("SelfSACD", bound="SACD") class SACD(OffPolicyAlgorithm): """ - Soft Actor-Critic (SAC) + Soft Actor-Critic (SACD) Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor, This implementation borrows code from original implementation (https://github.com/haarnoja/sac) from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo @@ -84,14 +83,14 @@ class SACD(OffPolicyAlgorithm): "CnnPolicy": CnnPolicy, "MultiInputPolicy": MultiInputPolicy, } - policy: SACPolicy + policy: SACDPolicy actor: Actor critic: DiscreteCritic critic_target: DiscreteCritic def __init__( self, - policy: Union[str, Type[SACPolicy]], + policy: Union[str, Type[SACDPolicy]], env: Union[GymEnv, str], learning_rate: Union[float, Schedule] = 3e-4, buffer_size: int = 1_000_000, # 1e6 @@ -112,7 +111,7 @@ def __init__( sde_sample_freq: int = -1, use_sde_at_warmup: bool = False, stats_window_size: int = 100, - max_grad_norm = 5.0, + max_grad_norm=5.0, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, @@ -189,7 +188,7 @@ def _setup_model(self) -> None: # as discussed in https://github.com/rail-berkeley/softlearning/issues/37 # self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True) self.log_ent_coef = th.zeros(1, device=self.device, requires_grad=True) - self.ent_coef = th.exp(self.log_ent_coef) + self.ent_coef_tensor = th.exp(self.log_ent_coef) self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1)) else: # Force conversion to float @@ -231,8 +230,8 @@ def train(self, gradient_steps: int, batch_size: int = 64) -> None: self.take_optimisation_step(self.actor.optimizer, self.actor, actor_loss, self.gradient_clip_norm) # Compute entropy loss and optimize - ent_coeff = self.calc_entropy_loss(log_action_prob) - ent_coefs.append(self.ent_coef.item()) + self.ent_coef_tensor = self.calc_entropy_loss(log_action_prob) + ent_coefs.append(self.ent_coef_tensor.item()) # Update target networks if gradient_step % self.target_update_interval == 0: @@ -253,7 +252,7 @@ def take_optimisation_step(self, optimizer, network, loss, clipping_norm=None): optimizer.zero_grad() loss.backward() if clipping_norm is not None: - th.nn.utils.clip_grad_norm_(network.parameters(), clipping_norm) #clip gradients to help stabilise training + th.nn.utils.clip_grad_norm_(network.parameters(), clipping_norm) # clip gradients to help stabilise training 
optimizer.step() def calc_critic_loss(self, replay_data): @@ -265,7 +264,7 @@ def calc_critic_loss(self, replay_data): next_q_values = th.stack(self.critic_target(replay_data.next_observations), dim=2) next_q_values, _ = th.min(next_q_values, dim=2) - next_q_values = (action_prob * (next_q_values - self.ent_coef * next_log_prob)).sum(dim=1).unsqueeze(-1) + next_q_values = (action_prob * (next_q_values - self.ent_coef_tensor * next_log_prob)).sum(dim=1).unsqueeze(-1) target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values # Get current Q-values estimates for each critic network @@ -273,7 +272,9 @@ def calc_critic_loss(self, replay_data): current_q_values = self.critic(replay_data.observations) # Compute critic loss - critic_loss = 0.5 * sum(F.mse_loss(current_q.gather(1, replay_data.actions), target_q_values) for current_q in current_q_values) + critic_loss = 0.5 * sum( + F.mse_loss(current_q.gather(1, replay_data.actions), target_q_values) for current_q in current_q_values + ) return critic_loss @@ -284,25 +285,21 @@ def calc_actor_loss(self, replay_data): q_values_pi = th.stack(self.critic(replay_data.observations), dim=2) min_qf_pi, _ = th.min(q_values_pi, dim=2) - inside_term = self.ent_coef * log_prob - min_qf_pi + inside_term = self.ent_coef_tensor * log_prob - min_qf_pi actor_loss = (action_prob * inside_term).sum(dim=1).mean() return actor_loss, log_prob def calc_entropy_loss(self, log_action_prob): - ent_coef_loss = None if self.ent_coef_optimizer is not None and self.log_ent_coef is not None: # Important: detach the variable from the graph # so we don't change it with other losses # see https://github.com/rail-berkeley/softlearning/issues/60 ent_coef_loss = -(self.log_ent_coef * (log_action_prob + self.target_entropy).detach()).mean() - # ent_coef_losses.append(ent_coef_loss.item()) ent_coef_loss.backward() self.ent_coef_optimizer.step() - self.ent_coef = th.exp(self.log_ent_coef.detach()) + return th.exp(self.log_ent_coef.detach()) else: - self.ent_coef = self.ent_coef_tensor - - return self.ent_coef + return self.ent_coef_tensor def learn( self: SelfSACD, From fca2c6d4901e412cf95269bd55b869a9be823d9e Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Fri, 4 Aug 2023 18:19:34 +0200 Subject: [PATCH 5/8] Prepared files for merge request (minor cleanup) --- sb3_contrib/sacd/policies.py | 12 ++-- sb3_contrib/sacd/sacd.py | 113 +++++++++++++++-------------------- 2 files changed, 55 insertions(+), 70 deletions(-) diff --git a/sb3_contrib/sacd/policies.py b/sb3_contrib/sacd/policies.py index 49f560a2..fa5b3fd2 100644 --- a/sb3_contrib/sacd/policies.py +++ b/sb3_contrib/sacd/policies.py @@ -61,7 +61,6 @@ def __init__( action_space, features_extractor=features_extractor, normalize_images=normalize_images, - # squash_output=True, squash_output=False, ) @@ -78,7 +77,7 @@ def __init__( num_actions = self.action_space.n - latent_pi_net = create_mlp(features_dim, num_actions, net_arch, activation_fn) + latent_pi_net = create_mlp(features_dim, num_actions.item(), net_arch, activation_fn) self.latent_pi = nn.Sequential(*latent_pi_net) self.output_activation = nn.Softmax(dim=1) @@ -184,8 +183,8 @@ def __init__( self.n_critics = n_critics self.q_networks = [] for idx in range(n_critics): - q_net = create_mlp(features_dim, num_actions, net_arch, activation_fn) - q_net = nn.Sequential(*q_net) + q_net_list = create_mlp(features_dim, num_actions.item(), net_arch, activation_fn) + q_net = nn.Sequential(*q_net_list) self.add_module(f"qf{idx}", q_net) 
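             # Note: self.q_networks is a plain Python list, so the add_module call above is
             # what actually registers each Q-network's parameters with this nn.Module.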
self.q_networks.append(q_net) @@ -195,8 +194,9 @@ def get_crit_params(self, n): def forward(self, obs: th.Tensor) -> Tuple[th.Tensor, ...]: # Learn the features extractor using the policy loss only # when the features_extractor is shared with the actor - with th.set_grad_enabled(not self.share_features_extractor): - features = self.extract_features(obs, self.features_extractor) + if self.features_extractor is not None: + with th.set_grad_enabled(not self.share_features_extractor): + features = self.extract_features(obs, self.features_extractor) return tuple(q_net(features) for q_net in self.q_networks) diff --git a/sb3_contrib/sacd/sacd.py b/sb3_contrib/sacd/sacd.py index 8433728a..f1796b59 100644 --- a/sb3_contrib/sacd/sacd.py +++ b/sb3_contrib/sacd/sacd.py @@ -18,17 +18,14 @@ class SACD(OffPolicyAlgorithm): """ - Soft Actor-Critic (SACD) - Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor, - This implementation borrows code from original implementation (https://github.com/haarnoja/sac) - from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo - (https://github.com/rail-berkeley/softlearning/) - and from Stable Baselines (https://github.com/hill-a/stable-baselines) - Paper: https://arxiv.org/abs/1801.01290 - Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html - - Note: we use double q target and not value target as discussed - in https://github.com/hill-a/stable-baselines/issues/270 + Discrete Soft Actor-Critic (SACD) + The Soft Actor-Critic algorithm modified for discrete action spaces. + This implementation borrows code from the paper author's original implementation + (https://github.com/p-christ/Deep-Reinforcement-Learning-Algorithms-with-PyTorch), + from toshikwa's implementation (https://github.com/toshikwa/sac-discrete.pytorch#references) + and from Stable Baselines3 (https://github.com/DLR-RM/stable-baselines3) + + Paper: https://arxiv.org/abs/1910.07207 :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str) @@ -168,7 +165,7 @@ def _setup_model(self) -> None: # Target entropy is used when learning the entropy coefficient if self.target_entropy == "auto": # we set the max possible entropy as the target entropy - self.target_entropy = 0.98 * -np.log(1 / np.prod(self.env.action_space.shape)) + self.target_entropy = 0.98 * -np.log(1 / np.prod(np.array(self.action_space.shape))) else: # Force conversion # this will also throw an error for unexpected string @@ -220,17 +217,45 @@ def train(self, gradient_steps: int, batch_size: int = 64) -> None: replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # type: ignore[union-attr] # Compute the critic loss - critic_loss = self.calc_critic_loss(replay_data) + with th.no_grad(): + # Select action according to policy + action_prob, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) + + # Compute the next Q values: min over all critics targets + next_q_values = th.stack(self.critic_target(replay_data.next_observations), dim=2) + next_q_values, _ = th.min(next_q_values, dim=2) + + next_q_values = (action_prob * (next_q_values - self.ent_coef_tensor * next_log_prob)).sum(dim=1).unsqueeze(-1) + target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values + + # Get current Q-values estimates for each critic network + current_q_values = self.critic(replay_data.observations) + critic_loss = 0.5 * sum( + F.mse_loss(current_q.gather(1, replay_data.actions), target_q_values) for current_q in current_q_values + ) + assert isinstance(critic_loss, th.Tensor) # for type checker critic_losses.append(critic_loss.item()) self.take_optimisation_step(self.critic.optimizer, self.critic, critic_loss, self.gradient_clip_norm) # Compute the actor loss - actor_loss, log_action_prob = self.calc_actor_loss(replay_data) + action_prob, log_action_prob = self.actor.action_log_prob(replay_data.observations) + + # Min over all critic networks + q_values_pi = th.stack(self.critic(replay_data.observations), dim=2) + min_qf_pi, _ = th.min(q_values_pi, dim=2) + + inside_term = self.ent_coef_tensor * log_action_prob - min_qf_pi + actor_loss = (action_prob * inside_term).sum(dim=1).mean() actor_losses.append(actor_loss.item()) self.take_optimisation_step(self.actor.optimizer, self.actor, actor_loss, self.gradient_clip_norm) - # Compute entropy loss and optimize - self.ent_coef_tensor = self.calc_entropy_loss(log_action_prob) + # Compute entropy loss + if self.ent_coef_optimizer is not None and self.log_ent_coef is not None: + ent_coef_loss = -(self.log_ent_coef * (log_action_prob + self.target_entropy).detach()).mean() + ent_coef_losses.append(ent_coef_loss.item()) + self.take_optimisation_step(self.ent_coef_optimizer, None, ent_coef_loss, None) + self.ent_coef_tensor = th.exp(self.log_ent_coef.detach()) + ent_coefs.append(self.ent_coef_tensor.item()) # Update target networks @@ -248,59 +273,19 @@ def train(self, gradient_steps: int, batch_size: int = 64) -> None: if len(ent_coef_losses) > 0: self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses)) - def take_optimisation_step(self, optimizer, network, loss, clipping_norm=None): + def take_optimisation_step( + self, + optimizer: th.optim.Optimizer, + network: Optional[th.nn.Module], + loss: th.Tensor, + clipping_norm: Optional[float] = None, + ) -> None: optimizer.zero_grad() loss.backward() - if clipping_norm is not None: + if clipping_norm is not None and network is not None: 
th.nn.utils.clip_grad_norm_(network.parameters(), clipping_norm) # clip gradients to help stabilise training optimizer.step() - def calc_critic_loss(self, replay_data): - with th.no_grad(): - # Select action according to policy - action_prob, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) - - # Compute the next Q values: min over all critics targets - next_q_values = th.stack(self.critic_target(replay_data.next_observations), dim=2) - next_q_values, _ = th.min(next_q_values, dim=2) - - next_q_values = (action_prob * (next_q_values - self.ent_coef_tensor * next_log_prob)).sum(dim=1).unsqueeze(-1) - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates for each critic network - # using action from the replay buffer - current_q_values = self.critic(replay_data.observations) - - # Compute critic loss - critic_loss = 0.5 * sum( - F.mse_loss(current_q.gather(1, replay_data.actions), target_q_values) for current_q in current_q_values - ) - - return critic_loss - - def calc_actor_loss(self, replay_data): - action_prob, log_prob = self.actor.action_log_prob(replay_data.observations) - - # Min over all critic networks - q_values_pi = th.stack(self.critic(replay_data.observations), dim=2) - min_qf_pi, _ = th.min(q_values_pi, dim=2) - - inside_term = self.ent_coef_tensor * log_prob - min_qf_pi - actor_loss = (action_prob * inside_term).sum(dim=1).mean() - return actor_loss, log_prob - - def calc_entropy_loss(self, log_action_prob): - if self.ent_coef_optimizer is not None and self.log_ent_coef is not None: - # Important: detach the variable from the graph - # so we don't change it with other losses - # see https://github.com/rail-berkeley/softlearning/issues/60 - ent_coef_loss = -(self.log_ent_coef * (log_action_prob + self.target_entropy).detach()).mean() - ent_coef_loss.backward() - self.ent_coef_optimizer.step() - return th.exp(self.log_ent_coef.detach()) - else: - return self.ent_coef_tensor - def learn( self: SelfSACD, total_timesteps: int, From 610fd3dcf66adb22470799644f460f45e0ed1dc1 Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Mon, 7 Aug 2023 13:16:00 +0200 Subject: [PATCH 6/8] Added run test for SACD --- tests/test_run.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/test_run.py b/tests/test_run.py index 90b5703b..9ce4c712 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -3,7 +3,7 @@ from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.vec_env import VecNormalize -from sb3_contrib import ARS, QRDQN, TQC, TRPO, MaskablePPO +from sb3_contrib import ARS, QRDQN, SACD, TQC, TRPO, MaskablePPO from sb3_contrib.common.envs import InvalidActionEnvDiscrete from sb3_contrib.common.vec_env import AsyncEval @@ -61,6 +61,17 @@ def test_qrdqn(): model.learn(total_timesteps=500) +def test_sacd(): + model = SACD( + "MlpPolicy", + "CartPole-v1", + policy_kwargs=dict(net_arch=[64, 64], n_critics=2), + learning_starts=100, + verbose=1, + ) + model.learn(total_timesteps=3000) + + @pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v1"]) def test_trpo(env_id): model = TRPO("MlpPolicy", env_id, n_steps=128, seed=0, policy_kwargs=dict(net_arch=[16]), verbose=1) From d97dbc727c20a8a0ebbd59fcbe42464e0aa0129f Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Mon, 7 Aug 2023 14:03:12 +0200 Subject: [PATCH 7/8] Added doc page for SACD --- docs/index.rst | 1 + docs/modules/sacd.rst | 99 
+++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 docs/modules/sacd.rst diff --git a/docs/index.rst b/docs/index.rst index 5e322652..e49e5824 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -35,6 +35,7 @@ RL Baselines3 Zoo also offers a simple interface to train, evaluate agents and d modules/ppo_mask modules/ppo_recurrent modules/qrdqn + modules/sacd modules/tqc modules/trpo diff --git a/docs/modules/sacd.rst b/docs/modules/sacd.rst new file mode 100644 index 00000000..5f261ddf --- /dev/null +++ b/docs/modules/sacd.rst @@ -0,0 +1,99 @@ +.. _sacd: + +.. automodule:: sb3_contrib.sacd + + +SACD +==== + + +`Soft Actor Critic Discrete (SACD) <https://arxiv.org/abs/1910.07207>`_ is a modification of the original Soft Actor Critic algorithm for discrete action spaces. + +.. rubric:: Available Policies + +.. autosummary:: + :nosignatures: + + MlpPolicy + CnnPolicy + MultiInputPolicy + + +Notes +----- + +- Original paper: https://arxiv.org/abs/1910.07207 +- Original Implementation: https://github.com/p-christ/Deep-Reinforcement-Learning-Algorithms-with-PyTorch + + +Can I use? +---------- + +- Recurrent policies: ❌ +- Multi processing: ✔️ +- Gym spaces: + + +============= ====== =========== +Space Action Observation +============= ====== =========== +Discrete ✔️ ✔️ +Box ❌ ✔️ +MultiDiscrete ❌ ✔️ +MultiBinary ❌ ✔️ +Dict ❌ ✔️ +============= ====== =========== + + +Example +------- +.. code-block:: python + + import gymnasium as gym + + from sb3_contrib import SACD + + env = gym.make("CartPole-v1", render_mode="rgb_array") + + model = SACD("MlpPolicy", env, verbose=1, policy_kwargs=dict(net_arch=[64, 64])) + model.learn(total_timesteps=20_000) + model.save("sacd_cartpole") + + del model # remove to demonstrate saving and loading + + model = SACD.load("sacd_cartpole") + + obs, info = env.reset() + while True: + action, _states = model.predict(obs, deterministic=True) + obs, reward, terminated, truncated, info = env.step(action) + if terminated or truncated: + obs, info = env.reset() + + + +Parameters +---------- + +.. autoclass:: SACD + :members: + :inherited-members: + +.. _sacd_policies: + +SACD Policies +------------- + +.. autoclass:: MlpPolicy + :members: + :inherited-members: + +.. autoclass:: sb3_contrib.sacd.policies.SACDPolicy + :members: + :noindex: + +.. autoclass:: CnnPolicy + :members: + +..
autoclass:: MultiInputPolicy + :members: From bc08ee985e191156faf8fe224e026edd9819f8e1 Mon Sep 17 00:00:00 2001 From: Paul Auerbach Date: Mon, 7 Aug 2023 14:23:07 +0200 Subject: [PATCH 8/8] Added save_load test for SACD --- tests/test_save_load.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/test_save_load.py b/tests/test_save_load.py index 502d2394..5528b232 100644 --- a/tests/test_save_load.py +++ b/tests/test_save_load.py @@ -12,16 +12,16 @@ from stable_baselines3.common.utils import get_device from stable_baselines3.common.vec_env import DummyVecEnv -from sb3_contrib import ARS, QRDQN, TQC, TRPO +from sb3_contrib import ARS, QRDQN, SACD, TQC, TRPO -MODEL_LIST = [ARS, QRDQN, TQC, TRPO] +MODEL_LIST = [ARS, QRDQN, SACD, TQC, TRPO] def select_env(model_class: BaseAlgorithm) -> gym.Env: """ Selects an environment with the correct action space as QRDQN only supports discrete action space """ - if model_class == QRDQN: + if model_class in [QRDQN, SACD]: return IdentityEnv(10) else: return IdentityEnvBox(-10, 10) @@ -281,12 +281,16 @@ def test_save_load_policy(tmp_path, model_class, policy_str): learning_starts=100, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), ) + elif model_class == SACD: + kwargs = dict( + policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), + ) else: kwargs = dict( n_steps=128, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)), ) - env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=model_class == QRDQN) + env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=model_class in [QRDQN, SACD]) # Reduce number of quantiles for faster tests if model_class in [TQC, QRDQN]:
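
Note on the update rule: for readers who want to see the SACD losses outside of the Stable-Baselines3 machinery, the short sketch below mirrors the three losses computed in train() of sb3_contrib/sacd/sacd.py on a dummy batch. It is an illustrative sketch only, not part of the patch: the network definitions, batch tensors, and names such as n_actions and target_entropy are placeholder assumptions, and the target-entropy line follows the 0.98 * -log(1/|A|) heuristic from the SAC-Discrete paper, using the number of discrete actions.

    # Illustrative sketch of the discrete SAC losses (not the SB3 implementation).
    # Shapes: obs (B, obs_dim), actions (B, 1) integer indices, Q-values (B, n_actions).
    import torch as th
    import torch.nn.functional as F

    batch_size, obs_dim, n_actions, gamma = 32, 4, 2, 0.99

    def make_net():
        # Small MLP mapping observations to one value per discrete action.
        return th.nn.Sequential(th.nn.Linear(obs_dim, 64), th.nn.ReLU(), th.nn.Linear(64, n_actions))

    actor = make_net()                                   # outputs action logits
    critics = [make_net() for _ in range(2)]             # two critics (clipped Q-learning)
    critic_targets = [make_net() for _ in range(2)]      # their target copies

    # Dummy replay batch.
    obs = th.randn(batch_size, obs_dim)
    next_obs = th.randn(batch_size, obs_dim)
    actions = th.randint(0, n_actions, (batch_size, 1))
    rewards = th.randn(batch_size, 1)
    dones = th.zeros(batch_size, 1)

    # Learned temperature, initialised like in the patch (log_ent_coef = 0).
    log_ent_coef = th.zeros(1, requires_grad=True)
    ent_coef = log_ent_coef.exp().detach()
    # Target entropy: 0.98 * entropy of a uniform policy over n_actions (paper heuristic).
    target_entropy = 0.98 * -th.log(th.tensor(1.0 / n_actions))

    def action_log_prob(observations):
        # Categorical action probabilities and their logs (epsilon avoids log(0)).
        probs = F.softmax(actor(observations), dim=1)
        return probs, th.log(probs + 1e-8)

    # Critic loss: expected min target Q under the policy, with entropy bonus.
    with th.no_grad():
        next_prob, next_log_prob = action_log_prob(next_obs)
        next_q = th.min(th.stack([q(next_obs) for q in critic_targets], dim=2), dim=2).values
        next_values = (next_prob * (next_q - ent_coef * next_log_prob)).sum(dim=1, keepdim=True)
        target_q = rewards + (1 - dones) * gamma * next_values
    critic_loss = 0.5 * sum(F.mse_loss(q(obs).gather(1, actions), target_q) for q in critics)

    # Actor loss: expectation of (ent_coef * log_prob - min Q) under the categorical policy.
    prob, log_prob = action_log_prob(obs)
    min_q = th.min(th.stack([q(obs) for q in critics], dim=2), dim=2).values
    actor_loss = (prob * (ent_coef * log_prob - min_q)).sum(dim=1).mean()

    # Temperature loss: push the policy entropy towards target_entropy.
    ent_coef_loss = -(log_ent_coef * (log_prob + target_entropy).detach()).mean()

    print(critic_loss.item(), actor_loss.item(), ent_coef_loss.item())

Because the action space is discrete, every expectation over actions is computed exactly from the categorical probabilities, which is why no sampled actions, reparameterisation trick, or squashed Gaussian distribution appear in these losses.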