Add support for bool data type for condition in where operator (#21103)
* Add support for bool data type for condition in where operator

* Update src/operator/nn/dnnl/dnnl_where.cc

Co-authored-by: bartekkuncer <[email protected]>

* Update src/operator/nn/dnnl/dnnl_where.cc

Co-authored-by: bartekkuncer <[email protected]>

* apply review comment
bgawrych committed Aug 12, 2022
1 parent 6d1fbe3 commit 736313f
Showing 2 changed files with 40 additions and 9 deletions.
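For orientation before the diffs: where performs an element-wise select, and this commit lets the condition tensor be bool, which the DNNL path views as one-byte 0/1 data. A minimal reference sketch of those semantics (plain C++, not the MXNet implementation):

#include <cstddef>
#include <cstdint>
#include <vector>

// Reference semantics: out[i] = cond[i] ? lhs[i] : rhs[i]. The bool condition
// is assumed to arrive as one byte per element, i.e. uint8_t values 0 or 1.
std::vector<float> where_ref(const std::vector<std::uint8_t>& cond,
                             const std::vector<float>& lhs,
                             const std::vector<float>& rhs) {
  std::vector<float> out(cond.size());
  for (std::size_t i = 0; i < cond.size(); ++i) {
    out[i] = cond[i] ? lhs[i] : rhs[i];
  }
  return out;
}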
src/operator/nn/dnnl/dnnl_base-inl.h (2 additions, 1 deletion)
@@ -276,7 +276,7 @@ void DNNLMemorySum(const dnnl::memory& arr1, const dnnl::memory& arr2, const dnn

 static int GetTypeSize(int dtype) {
   int size = -1;
-  MSHADOW_TYPE_SWITCH(dtype, DType, { size = sizeof(DType); });
+  MSHADOW_TYPE_SWITCH_WITH_BOOL(dtype, DType, { size = sizeof(DType); });
   return size;
 }
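The only change here is switching to the _WITH_BOOL variant of the mshadow type switch, so GetTypeSize(mshadow::kBool) now resolves instead of falling through to the macro's fatal default case. A condensed, hypothetical sketch of the expanded switch (the real macro lives in mshadow and enumerates more dtypes):

// Hypothetical condensed expansion; only the extra kBool case matters here.
switch (dtype) {
  case mshadow::kFloat32: { typedef float DType;   size = sizeof(DType); } break;
  case mshadow::kUint8:   { typedef uint8_t DType; size = sizeof(DType); } break;
  // ... remaining numeric dtypes ...
  case mshadow::kBool:    { typedef bool DType;    size = sizeof(DType); } break;  // added by _WITH_BOOL
  default: LOG(FATAL) << "Unknown type enum " << dtype;
}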

@@ -298,6 +298,7 @@ static inline dnnl::memory::data_type get_dnnl_type(int dtype) {
     case mshadow::kInt8:
       return dnnl::memory::data_type::s8;
     case mshadow::kUint8:
+    case mshadow::kBool:
       return dnnl::memory::data_type::u8;
     default:
       LOG(FATAL) << "unknown type for oneDNN :" << static_cast<int>(dtype);
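Folding kBool into the kUint8 case works because oneDNN has no boolean data type, while C++ bool storage is one byte on the platforms MXNet supports, so a bool buffer can be described as u8 without conversion. A sanity check one could add (my illustration, not part of the commit):

// If bool were ever wider than one byte, describing its buffer as oneDNN u8
// would misread the data; sizeof(bool) == 1 is what the aliasing relies on.
static_assert(sizeof(bool) == 1, "bool buffers are aliased as oneDNN u8");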
src/operator/nn/dnnl/dnnl_where.cc (38 additions, 8 deletions)
@@ -34,7 +34,15 @@ namespace op {

 // Support for https://oneapi-src.github.io/oneDNN/v2.6/dev_guide_binary.html
 bool SupportDNNLWhere(const std::vector<NDArray>& inputs) {
-  return SupportDNNL<DNNLTypeMode::NoInt32, DNNLTensorsDtypes::Mixed>(inputs);
+  if (inputs[0].dtype() == mshadow::kBool) {
+    // oneDNN natively doesn't support bool type, however this operator was written
+    // to allow using bool type for 'condition' tensor - data will be treated as uint8
+    return SupportDNNLShape<1, 12>(inputs[0].shape()) &&
+           SupportDNNL<DNNLTypeMode::NoInt32, DNNLTensorsDtypes::AllSame>({inputs[1], inputs[2]});
+  }
+
+  return SupportDNNL<DNNLTypeMode::NoInt32>(inputs[0]) &&
+         SupportDNNL<DNNLTypeMode::NoInt32, DNNLTensorsDtypes::AllSame>({inputs[1], inputs[2]});
 }

 void DNNLWhereForward(const nnvm::NodeAttrs& attrs,
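SupportDNNLShape<1, 12> presumably checks that the condition tensor's rank falls within the range oneDNN memory descriptors handle (up to 12 dimensions). A hypothetical sketch of such a guard, since the real helper lives elsewhere in MXNet's DNNL headers:

// Hypothetical illustration, not the actual MXNet helper: accept non-empty
// tensors whose rank lies in [MIN_NDIM, MAX_NDIM]. Assumes mxnet::TShape.
template <int MIN_NDIM, int MAX_NDIM>
bool SupportShapeSketch(const mxnet::TShape& shape) {
  return shape.ndim() >= MIN_NDIM && shape.ndim() <= MAX_NDIM && shape.Size() > 0;
}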
@@ -95,6 +103,25 @@ static mxnet::TShape GetBroadcastableShape(const mxnet::TShape& in_shape,
   return broadcastable_in_shape;
 }

+/*!
+ * \brief Create shape vector basing on two input shapes
+ * \param first_shape first input shape
+ * \param second_shape second input shape
+ * \return deduced broadcasted shape based on first_shape and second_shape
+ */
+static mxnet::TShape GetBroadcastedShape(const mxnet::TShape& first_shape,
+                                         const mxnet::TShape& second_shape) {
+  if (first_shape == second_shape) {
+    return first_shape;
+  }
+
+  mxnet::TShape dst_shape(first_shape.ndim(), 1);
+  for (int i = 0; i < first_shape.ndim(); ++i) {
+    dst_shape[i] = first_shape[i] == 1 ? second_shape[i] : first_shape[i];
+  }
+  return dst_shape;
+}
+
 DNNLWhereFwd::DNNLWhereFwd(const Tensors& tensors) {
   const auto cpu_engine = CpuEngine::Get()->get_engine();
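Both arguments reach GetBroadcastedShape already padded to the output rank by GetBroadcastableShape, so the deduction is purely per-axis: an extent of 1 in the first shape defers to the second shape. A standalone sketch of the same rule with plain vectors (not the TShape code above):

#include <cstddef>
#include <vector>

// Per-axis deduction, e.g. broadcasted({1, 5, 3}, {4, 5, 3}) -> {4, 5, 3}:
// axis 0 takes the non-unit extent from b, the other axes keep a's extents.
std::vector<int> broadcasted(const std::vector<int>& a, const std::vector<int>& b) {
  std::vector<int> out(a.size());
  for (std::size_t i = 0; i < a.size(); ++i) {
    out[i] = (a[i] == 1) ? b[i] : a[i];
  }
  return out;
}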

@@ -107,7 +134,8 @@ DNNLWhereFwd::DNNLWhereFwd(const Tensors& tensors) {
   const auto lhs_shape = GetBroadcastableShape(lhs.shape(), out.shape());
   const auto rhs_shape = GetBroadcastableShape(rhs.shape(), out.shape());

-  const auto& cnd_dtype = get_dnnl_type(cnd.dtype());
+  const auto& cnd_dtype =
+      cnd.dtype() != mshadow::kBool ? get_dnnl_type(cnd.dtype()) : dnnl::memory::data_type::u8;
   const auto& inp_dtype = get_dnnl_type(lhs.dtype());
   const auto& def_ft = static_cast<dnnl::memory::format_tag>(GetDefaultFormat(lhs_shape.ndim()));

@@ -129,14 +157,16 @@ DNNLWhereFwd::DNNLWhereFwd(const Tensors& tensors) {
       dnnl::binary::desc(dnnl::algorithm::binary_eq, cnd_md, scalar_md, cnd_md), cpu_engine);

   // if broadcast is needed output must be larger in size
-  auto lmask_dim = lhs_shape.Size() > cnd_shape.Size() ? lhs_dims : cnd_dims;
-  auto lmask_md = dnnl::memory::desc(lmask_dim, inp_dtype, def_ft);
-  binary_mul_l_pd = dnnl::binary::primitive_desc(
+  const auto lmask_shape = GetBroadcastedShape(lhs_shape, cnd_shape);
+  const auto lmask_dim = dnnl::memory::dims(lmask_shape.begin(), lmask_shape.end());
+  auto lmask_md = dnnl::memory::desc(lmask_dim, inp_dtype, def_ft);
+  binary_mul_l_pd = dnnl::binary::primitive_desc(
       dnnl::binary::desc(dnnl::algorithm::binary_mul, lhs_md, cnd_md, lmask_md), cpu_engine);

-  auto rmask_dim = rhs_shape.Size() > cnd_shape.Size() ? rhs_dims : cnd_dims;
-  auto rmask_md = dnnl::memory::desc(rmask_dim, inp_dtype, def_ft);
-  binary_mul_r_pd = dnnl::binary::primitive_desc(
+  const auto rmask_shape = GetBroadcastedShape(rhs_shape, cnd_shape);
+  const auto rmask_dim = dnnl::memory::dims(rmask_shape.begin(), rmask_shape.end());
+  auto rmask_md = dnnl::memory::desc(rmask_dim, inp_dtype, def_ft);
+  binary_mul_r_pd = dnnl::binary::primitive_desc(
       dnnl::binary::desc(dnnl::algorithm::binary_mul, rhs_md, cnd_md, rmask_md), cpu_engine);

   binary_sum_pd = dnnl::binary::primitive_desc(
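Pieced together, the constructor implements where as mask arithmetic: compare the condition against zero to get 0/1 masks (the binary_eq primitive above, with a complementary comparison for the other mask), multiply each operand by its mask at the broadcasted shapes deduced earlier (binary_mul), then add the two partial results (binary_sum). A scalar sketch of that decomposition, as I read the kernel rather than its literal code:

#include <cstdint>

// where(c, l, r) == l * (c != 0) + r * (c == 0); the two masks are
// complementary, so the sum selects exactly one operand per element.
float where_decomposed(std::uint8_t c, float l, float r) {
  const float mask_l = (c != 0) ? 1.0f : 0.0f;
  const float mask_r = (c == 0) ? 1.0f : 0.0f;
  return l * mask_l + r * mask_r;
}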
