Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
[v1.x][FEATURE] Add MKLDNN Deconvolution 1D and 3D support (#20137) (#…
Browse files Browse the repository at this point in the history
…20726)

Co-authored-by: Paweł Głomski <[email protected]>
  • Loading branch information
samskalicky and PawelGlomski-Intel committed Nov 4, 2021
1 parent 200b284 commit 8173b85
Show file tree
Hide file tree
Showing 4 changed files with 35 additions and 15 deletions.
6 changes: 5 additions & 1 deletion src/operator/nn/deconvolution-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,11 @@ class DeconvolutionOp {
using namespace mshadow::expr;

if (param_.kernel.ndim() > 2) {
LOG(FATAL) << "If not using CUDNN, only 1D or 2D Deconvolution is supported";
LOG(FATAL) << "Only 1D or 2D Deconvolution is natively supported. "
<< ((MXNET_USE_MKLDNN || MXNET_USE_CUDNN)
? "Fallback to native implementation (if occurred) is therefore "
"impossible for 3D Deconvolution."
: "");
}

CHECK_EQ(req[deconv::kOut], kWriteTo);
Expand Down
28 changes: 19 additions & 9 deletions src/operator/nn/deconvolution.cc
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,14 @@ static void DeconvolutionComputeExCPU(const nnvm::NodeAttrs& attrs,
const std::vector<NDArray>& outputs) {
const DeconvolutionParam& params = nnvm::get<DeconvolutionParam>(attrs.parsed);
if (SupportMKLDNNDeconv(params, inputs[0])) {
MKLDNN_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
MKLDNNRun(MKLDNNDeconvolutionForward, attrs, ctx, inputs, req, outputs);
MKLDNN_OPCHECK_RUN(DeconvolutionCompute<cpu>, attrs, ctx, inputs, req, outputs);
if (params.kernel.ndim() == 3) {
// we cannot check the output, as 3D deconvolution is not natively supported yet
MKLDNNRun(MKLDNNDeconvolutionForward, attrs, ctx, inputs, req, outputs);
} else {
MKLDNN_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
MKLDNNRun(MKLDNNDeconvolutionForward, attrs, ctx, inputs, req, outputs);
MKLDNN_OPCHECK_RUN(DeconvolutionCompute<cpu>, attrs, ctx, inputs, req, outputs);
}
return;
}
FallBackCompute(DeconvolutionCompute<cpu>, attrs, ctx, inputs, req, outputs);
Expand All @@ -57,9 +62,14 @@ static void DeconvolutionGradComputeExCPU(const nnvm::NodeAttrs& attrs,
const std::vector<NDArray>& outputs) {
const DeconvolutionParam& params = nnvm::get<DeconvolutionParam>(attrs.parsed);
if (SupportMKLDNNDeconv(params, inputs[0])) {
MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
MKLDNNRun(MKLDNNDeconvolutionBackward, attrs, ctx, inputs, req, outputs);
MKLDNN_OPCHECK_RUN(DeconvolutionGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
if (params.kernel.ndim() == 3) {
// we cannot check the output, as 3D deconvolution is not natively supported yet
MKLDNNRun(MKLDNNDeconvolutionBackward, attrs, ctx, inputs, req, outputs);
} else {
MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
MKLDNNRun(MKLDNNDeconvolutionBackward, attrs, ctx, inputs, req, outputs);
MKLDNN_OPCHECK_RUN(DeconvolutionGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
}
return;
}
FallBackCompute(DeconvolutionGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
Expand Down Expand Up @@ -99,12 +109,12 @@ static bool DeconvolutionShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_shape,
mxnet::ShapeVector *out_shape) {
const DeconvolutionParam& param_ = nnvm::get<DeconvolutionParam>(attrs.parsed);
#if MXNET_USE_CUDNN == 0
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 0
if (param_.kernel.ndim() > 2) {
LOG(FATAL) << "If not using CUDNN, only 1D or 2D Deconvolution is supported";
LOG(FATAL) << "If not using CUDNN or MKLDNN, only 1D or 2D Deconvolution is supported";
return false;
}
#endif // CUDNN
#endif

using namespace mshadow;
if (!param_.no_bias) {
Expand Down
7 changes: 4 additions & 3 deletions src/operator/nn/mkldnn/mkldnn_deconvolution.cc
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@ namespace mxnet {
namespace op {

// Returns true when the MKLDNN (oneDNN) deconvolution primitive can handle
// this operator instance; otherwise the caller falls back to the native CPU
// implementation.
//
// Requirements checked here:
//  - 1D, 2D, or 3D spatial kernel (this commit extends support beyond 2D);
//  - input rank matches the kernel: spatial dims + batch + channel axes
//    (e.g. a 2D kernel requires a 4-D input);
//  - data type is fp32 or bf16, the only types the oneDNN primitive accepts.
bool SupportMKLDNNDeconv(const DeconvolutionParam &params, const NDArray &input) {
  return params.kernel.ndim() >= 1 && params.kernel.ndim() <= 3 &&
         input.shape().ndim() == (params.kernel.ndim() + 2) &&
         (input.dtype() == mshadow::kFloat32 || input.dtype() == mshadow::kBfloat16);
}

Expand Down Expand Up @@ -322,10 +323,10 @@ DeconvDescCreator::DeconvDescCreator(const DeconvolutionParam &param, const NDAr
strides(param.stride.ndim()),
padding(param.pad.ndim()),
dilates(param.dilate.ndim()) {
// assuming only deconv2D is supported for now
CHECK_EQ(param.stride.ndim(), param.pad.ndim());
CHECK_EQ(param.stride.ndim(), param.dilate.ndim());
CHECK_EQ(param.stride.ndim(), 2);
CHECK_GE(param.stride.ndim(), 1);
CHECK_LE(param.stride.ndim(), 3);
for (int i = 0; i < param.stride.ndim(); ++i) {
strides[i] = param.stride[i];
padding[i] = param.pad[i];
Expand Down
9 changes: 7 additions & 2 deletions tests/python/mkl/test_mkldnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -471,8 +471,8 @@ def check_convolution_training(stype):
@with_seed()
def test_Deconvolution():
def check_Deconvolution_training(stype):
for shape in [(3, 3, 10, 10)]: # testing only 2D for now
data_tmp = np.random.randint(256, size=shape)
for shape in [(3, 3, 10), (3, 3, 10, 10), (3, 3, 10, 10, 10)]:
data_tmp = np.random.normal(-0.1, 1, size=shape)
data = mx.symbol.Variable('data', stype=stype)

if np.array(shape).shape[0] == 3:
Expand All @@ -481,6 +481,11 @@ def check_Deconvolution_training(stype):
elif np.array(shape).shape[0] == 4:
test = mx.symbol.Deconvolution(data=data, kernel=(3, 3), stride=(2, 2), num_filter=4)
weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3, 3))
elif np.array(shape).shape[0] == 5 and stype == "default":
# Unable to test fallback to native implementation for non-default storage types
# as 3D deconvolution is not natively supported
test = mx.symbol.Deconvolution(data=data, kernel=(3,3,3), stride=(2,2,2), num_filter=4)
weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3, 3, 3))
else:
return 0
bias_tmp = np.random.normal(0.1, 0.1, size=(4,))
Expand Down

0 comments on commit 8173b85

Please sign in to comment.