Skip to content

Commit

Permalink
test PL examples (Lightning-AI#4551)
Browse files Browse the repository at this point in the history
* test PL examples

* minor formatting

* skip failing

* skip failing

* args

* mnist datamodule

* refactor tests

* refactor tests

* skip

* skip

* drop DM

* drop DM

Co-authored-by: Sean Naren <[email protected]>
  • Loading branch information
Borda and SeanNaren authored Nov 17, 2020
1 parent b8a1916 commit 9a5d40a
Show file tree
Hide file tree
Showing 5 changed files with 49 additions and 74 deletions.
2 changes: 1 addition & 1 deletion .drone.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ steps:
- pip install --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda100 --upgrade-strategy only-if-needed
- pip list
- coverage run --source pytorch_lightning -m pytest pytorch_lightning tests -v --color=yes --durations=25 # --flake8
- python -m pytest benchmarks pl_examples -v --color=yes --maxfail=7 --durations=0 # --flake8
#- cd docs; make doctest; make coverage
- coverage report
# see: https://docs.codecov.io/docs/merging-reports
Expand Down
4 changes: 2 additions & 2 deletions docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -316,8 +316,8 @@ def package_list_from_file(file):
if SPHINX_MOCK_REQUIREMENTS:
    # mock also base packages when we are on RTD since we don't install them there
    MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements.txt'))
    # pass each path segment separately so os.path.join uses the OS separator
    MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'extra.txt'))
    MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'loggers.txt'))
# map distribution names to importable module names where they differ
MOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]

autodoc_mock_imports = MOCK_PACKAGES
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,10 @@
from argparse import ArgumentParser

import torch
from torch.utils.data import random_split, DataLoader

import pytorch_lightning as pl
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split

try:
from torchvision.datasets.mnist import MNIST
Expand Down Expand Up @@ -105,7 +106,7 @@ def cli_main():
# ------------
# testing
# ------------
trainer.test(test_dataloaders=test_loader)
result = trainer.test(test_dataloaders=test_loader)


if __name__ == '__main__':
Expand Down
File renamed without changes.
112 changes: 43 additions & 69 deletions pl_examples/test_examples.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import importlib
import platform
from unittest import mock

Expand All @@ -11,109 +12,82 @@
else:
DALI_AVAILABLE = True

# Shared CLI argument sets for the example scripts under test.
# The trailing backslashes inside the triple-quoted strings are line
# continuations, so each constant collapses to a single argv-style line
# that the tests split on whitespace.
ARGS_DEFAULT = """
--max_epochs 1 \
--batch_size 32 \
--limit_train_batches 2 \
--limit_val_batches 2 \
"""

# Single-GPU run.
ARGS_GPU = ARGS_DEFAULT + """
--gpus 1 \
"""

# Multi-GPU DataParallel with 16-bit (AMP) precision.
ARGS_DP_AMP = ARGS_DEFAULT + """
--gpus 2 \
--distributed_backend dp \
--precision 16 \
"""

# Multi-GPU DistributedDataParallel with 16-bit (AMP) precision.
ARGS_DDP_AMP = ARGS_DEFAULT + """
--gpus 2 \
--distributed_backend ddp \
--precision 16 \
"""


# ToDo: fix this failing example
# @pytest.mark.parametrize('import_cli', [
#     'pl_examples.basic_examples.mnist_classifier',
#     'pl_examples.basic_examples.image_classifier',
#     'pl_examples.basic_examples.autoencoder',
# ])
# @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
# @pytest.mark.parametrize('cli_args', [ARGS_DP_AMP])
# def test_examples_dp(import_cli, cli_args):
#
#     module = importlib.import_module(import_cli)
#
#     with mock.patch("argparse._sys.argv", ["any.py"] + cli_args.strip().split()):
#         module.cli_main()


# ToDo: fix this failing example
# @pytest.mark.parametrize('import_cli', [
#     'pl_examples.basic_examples.mnist_classifier',
#     'pl_examples.basic_examples.image_classifier',
#     'pl_examples.basic_examples.autoencoder',
# ])
# @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
# @pytest.mark.parametrize('cli_args', [ARGS_DDP_AMP])
# def test_examples_ddp(import_cli, cli_args):
#
#     module = importlib.import_module(import_cli)
#
#     with mock.patch("argparse._sys.argv", ["any.py"] + cli_args.strip().split()):
#         module.cli_main()

@pytest.mark.parametrize('import_cli', [
    'pl_examples.basic_examples.mnist_classifier',
    'pl_examples.basic_examples.image_classifier',
    'pl_examples.basic_examples.autoencoder',
])
@pytest.mark.parametrize('cli_args', [ARGS_DEFAULT])
def test_examples_cpu(import_cli, cli_args):
    """Run each basic example's ``cli_main`` on CPU with the default args.

    ``import_cli`` is the dotted module path of the example to run;
    ``cli_args`` is a single string of CLI flags that is split on whitespace
    and patched into the argv argparse reads.
    """
    module = importlib.import_module(import_cli)

    # argparse resolves sys.argv through ``argparse._sys``, so patching that
    # attribute confines the fake command line to this call only.
    with mock.patch("argparse._sys.argv", ["any.py"] + cli_args.strip().split()):
        module.cli_main()


@pytest.mark.skipif(not DALI_AVAILABLE, reason="Nvidia DALI required")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
@pytest.mark.skipif(platform.system() != 'Linux', reason='Only applies to Linux platform.')
@pytest.mark.parametrize('cli_args', [ARGS_GPU])
def test_examples_mnist_dali(cli_args):
    """Run the DALI-backed MNIST example on a single GPU (Linux + DALI only)."""
    # import lazily so collection does not fail on machines without DALI
    from pl_examples.basic_examples.mnist_classifier_dali import cli_main

    with mock.patch("argparse._sys.argv", ["any.py"] + cli_args.strip().split()):
        cli_main()

0 comments on commit 9a5d40a

Please sign in to comment.