[Fix] Skip the filtered_lrelu unit test when the CUDA version is below 10.2 (#2677)
zhouzaida committed Mar 19, 2023
1 parent d31b221 commit e7adffb
Showing 1 changed file with 5 additions and 1 deletion.
tests/test_ops/test_filtered_lrelu.py (5 additions, 1 deletion)
@@ -1,6 +1,7 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import pytest
 import torch
+from mmengine.utils import digit_version
 
 from mmcv.ops import filtered_lrelu
 
@@ -113,7 +114,10 @@ def test_filtered_lrelu_cpu(self):
             self.input_tensor, bias=self.bias, flip_filter=True)
         assert out.shape == (1, 3, 16, 16)
 
-    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
+    @pytest.mark.skipif(
+        not torch.cuda.is_available()
+        or digit_version(torch.version.cuda) < digit_version('10.2'),
+        reason='requires cuda>=10.2')
     def test_filtered_lrelu_cuda(self):
         out = filtered_lrelu(self.input_tensor.cuda(), bias=self.bias.cuda())
         assert out.shape == (1, 3, 16, 16)
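
For context, here is a minimal standalone sketch (not part of the commit) of how the new skipif condition evaluates; `cuda_test_should_skip` is a hypothetical helper name, and the snippet assumes torch and mmengine are installed:

# Sketch only: mirrors the skipif condition added in the diff above.
import torch
from mmengine.utils import digit_version


def cuda_test_should_skip() -> bool:
    """Return True when the CUDA filtered_lrelu test should be skipped."""
    # Check availability first: on CPU-only builds torch.version.cuda may be
    # None, and comparing it would raise before the skip could apply.
    if not torch.cuda.is_available():
        return True
    # digit_version parses a version string such as '10.1' into a comparable
    # tuple, avoiding lexicographic string comparison pitfalls (e.g. '9.2' > '10.2').
    return digit_version(torch.version.cuda) < digit_version('10.2')

Because pytest evaluates skipif conditions at collection time and Python's `or` short-circuits, the `torch.cuda.is_available()` check must come first, exactly as in the decorator.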
