Skip to content

Commit

Permalink
Fix coverage issue in BC tests (#830)
Browse files Browse the repository at this point in the history
* Remove custom logger from hypothesis strategies to make the number of choices smaller, also suppress the warning about too large number of choices.

* Test BC with different batch_size/minibatch_size combinations.

* Add some no cover pragmas to unimportant edge-cases of BC.

* Fix formatting

* Add a note explaining why we suppress certain hypothesis health checks.
  • Loading branch information
ernestum authored Dec 15, 2023
1 parent ab45b47 commit d74e903
Show file tree
Hide file tree
Showing 2 changed files with 24 additions and 12 deletions.
4 changes: 2 additions & 2 deletions src/imitation/algorithms/bc.py
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,7 @@ def __init__(
self._demo_data_loader: Optional[Iterable[types.TransitionMapping]] = None
self.batch_size = batch_size
self.minibatch_size = minibatch_size or batch_size
if self.batch_size % self.minibatch_size != 0:
if self.batch_size % self.minibatch_size != 0: # pragma: no cover
raise ValueError("Batch size must be a multiple of minibatch size.")
super().__init__(
demonstrations=demonstrations,
Expand Down Expand Up @@ -358,7 +358,7 @@ def __init__(
assert self.policy.action_space == self.action_space

if optimizer_kwargs:
if "weight_decay" in optimizer_kwargs:
if "weight_decay" in optimizer_kwargs: # pragma: no cover
raise ValueError("Use the parameter l2_weight instead of weight_decay.")
optimizer_kwargs = optimizer_kwargs or {}
self.optimizer = optimizer_cls(
Expand Down
32 changes: 22 additions & 10 deletions tests/algorithms/test_bc.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,6 @@ def make_bc_train_args(
rng=rngs,
)
batch_sizes = st.integers(min_value=1, max_value=50)
loggers = st.sampled_from([None, logger.configure()])
expert_data_types = st.sampled_from(
["data_loader", "ducktyped_data_loader", "transitions"],
)
Expand All @@ -95,17 +94,17 @@ def make_bc_train_args(
log_rollouts_venv=st.one_of(rollout_envs, st.just(None)),
)
bc_args = st.builds(
lambda env, batch_size, custom_logger, rng: dict(
lambda env, minibatch_size, rng, minibatch_fraction: dict(
observation_space=env.observation_space,
action_space=env.action_space,
batch_size=batch_size,
custom_logger=custom_logger,
batch_size=minibatch_size * minibatch_fraction,
minibatch_size=minibatch_size,
rng=rng,
),
env=envs,
batch_size=batch_sizes,
custom_logger=loggers,
minibatch_size=batch_sizes,
rng=rngs,
minibatch_fraction=st.integers(1, 10),
)


Expand Down Expand Up @@ -136,7 +135,7 @@ def test_smoke_bc_creation(
**bc_args,
demonstrations=make_expert_transition_loader(
cache_dir=cache.mkdir("experts"),
batch_size=bc_args["batch_size"],
batch_size=bc_args["minibatch_size"],
expert_data_type=expert_data_type,
env_name=env_name,
rng=rng,
Expand All @@ -152,7 +151,21 @@ def test_smoke_bc_creation(
expert_data_type=expert_data_types,
rng=rngs,
)
@hypothesis.settings(deadline=20000, max_examples=15)
@hypothesis.settings(
deadline=20000,
max_examples=15,
# TODO: one day consider removing this. For now we are good.
# Note: Hypothesis automatically generates input examples. The "size" of
# the examples is determined by the number of decisions it has to make when
# generating each example. E.g. a list of 100 random integers has a size of 100 but
# choosing between one of three different lists of length 100 has a size of 1.
# If the number of choices becomes too large we risk not properly covering the
# search space and hypothesis will complain. In this particular case we are not
# too concerned with covering the entire search space so we suppress the warning.
# Read this for more info:
# https://hypothesis.readthedocs.io/en/latest/settings.html#hypothesis.HealthCheck.data_too_large
suppress_health_check=[hypothesis.HealthCheck.data_too_large],
)
def test_smoke_bc_training(
env_name: str,
bc_args: dict,
Expand All @@ -168,7 +181,7 @@ def test_smoke_bc_training(
**bc_args,
demonstrations=make_expert_transition_loader(
cache_dir=cache.mkdir("experts"),
batch_size=bc_args["batch_size"],
batch_size=bc_args["minibatch_size"],
expert_data_type=expert_data_type,
env_name=env_name,
rng=rng,
Expand Down Expand Up @@ -246,7 +259,6 @@ def make_trainer(**kwargs: Any) -> bc.BC:
action_space=cartpole_venv.action_space,
batch_size=batch_size,
demonstrations=demonstrations,
custom_logger=None,
rng=rng,
**kwargs,
)
Expand Down

0 comments on commit d74e903

Please sign in to comment.