This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[Bug] fused_op does not support boolean type #16723

Closed · sxjscience opened this issue Nov 5, 2019 · 1 comment · Fixed by #16796

@sxjscience (Member)

@ptrendx I find that FusedOp does not support the boolean type. The following script triggers the error:

import mxnet as mx
import numpy as np
from mxnet.gluon import HybridBlock
mx.npx.set_np()

class Foo(HybridBlock):
    def __init__(self, prefix=None, params=None):
        super(Foo, self).__init__(prefix=prefix, params=params)

    def hybrid_forward(self, F, valid_length):
        # '<' returns a boolean-typed intermediate under NumPy semantics,
        # which is then cast back to float32.
        mask = (F.np.ones((10,)) < valid_length).astype(np.float32)
        mask2 = (F.np.ones((10,)) < valid_length).astype(np.float32)
        mask = mask * F.np.expand_dims(mask2, axis=-1)
        return mask

foo = Foo()
foo.hybridize()
out = foo(mx.np.ones((10,), ctx=mx.gpu()))
print(out)

Stack Trace:

MXNetError: [02:32:00] src/operator/fusion/fused_op.cu:76: Unknown type enum 7
Stack trace:
  [bt] (0) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(dmlc::LogMessageFatal::~LogMessageFatal()+0x32) [0x7f310563bed2]
  [bt] (1) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(mxnet::FusedOp::CheckShapesAndTypes(std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&, std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&, std::vector<int, std::allocator<int> >*, std::vector<int, std::allocator<int> >*, std::vector<int, std::allocator<int> >*, std::vector<int, std::allocator<int> >*, int*)+0x17b3) [0x7f310b4743c3]
  [bt] (2) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(void mxnet::FusedOp::Forward<mshadow::gpu>(nnvm::NodeAttrs const&, mxnet::OpContext const&, std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&, std::vector<mxnet::OpReqType, std::allocator<mxnet::OpReqType> > const&, std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&)+0x1a0) [0x7f310b47df50]
  [bt] (3) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(mxnet::imperative::PushFCompute(std::function<void (nnvm::NodeAttrs const&, mxnet::OpContext const&, std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&, std::vector<mxnet::OpReqType, std::allocator<mxnet::OpReqType> > const&, std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&)> const&, nnvm::Op const*, nnvm::NodeAttrs const&, mxnet::Context const&, std::vector<mxnet::engine::Var*, std::allocator<mxnet::engine::Var*> > const&, std::vector<mxnet::engine::Var*, std::allocator<mxnet::engine::Var*> > const&, std::vector<mxnet::Resource, std::allocator<mxnet::Resource> > const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&, std::vector<unsigned int, std::allocator<unsigned int> > const&, std::vector<mxnet::OpReqType, std::allocator<mxnet::OpReqType> > const&)::{lambda(mxnet::RunContext)#1}::operator()(mxnet::RunContext) const+0x1423) [0x7f3108a01733]
  [bt] (4) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(std::_Function_handler<void (mxnet::RunContext), mxnet::imperative::PushFCompute(std::function<void (nnvm::NodeAttrs const&, mxnet::OpContext const&, std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&, std::vector<mxnet::OpReqType, std::allocator<mxnet::OpReqType> > const&, std::vector<mxnet::TBlob, std::allocator<mxnet::TBlob> > const&)> const&, nnvm::Op const*, nnvm::NodeAttrs const&, mxnet::Context const&, std::vector<mxnet::engine::Var*, std::allocator<mxnet::engine::Var*> > const&, std::vector<mxnet::engine::Var*, std::allocator<mxnet::engine::Var*> > const&, std::vector<mxnet::Resource, std::allocator<mxnet::Resource> > const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&, std::vector<unsigned int, std::allocator<unsigned int> > const&, std::vector<mxnet::OpReqType, std::allocator<mxnet::OpReqType> > const&)::{lambda(mxnet::RunContext)#1}>::_M_invoke(std::_Any_data const&, mxnet::RunContext&&)+0x17) [0x7f3108a01c17]
  [bt] (5) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(std::_Function_handler<void (mxnet::RunContext, mxnet::engine::CallbackOnComplete), mxnet::engine::ThreadedEngine::BulkFlush()::{lambda(mxnet::RunContext, mxnet::engine::CallbackOnComplete)#1}>::_M_invoke(std::_Any_data const&, mxnet::RunContext&&, mxnet::engine::CallbackOnComplete&&)+0x1bf) [0x7f310916266f]
  [bt] (6) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(mxnet::engine::ThreadedEngine::ExecuteOprBlock(mxnet::RunContext, mxnet::engine::OprBlock*)+0x995) [0x7f3109166475]
  [bt] (7) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(void mxnet::engine::ThreadedEnginePerDevice::GPUWorker<(dmlc::ConcurrentQueueType)0>(mxnet::Context, bool, mxnet::engine::ThreadedEnginePerDevice::ThreadWorkerBlock<(dmlc::ConcurrentQueueType)0>*, std::shared_ptr<dmlc::ManualEvent> const&)+0x11d) [0x7f310917ed7d]
  [bt] (8) /home/ubuntu/mxnet/python/mxnet/../../lib/libmxnet.so(std::_Function_handler<void (std::shared_ptr<dmlc::ManualEvent>), mxnet::engine::ThreadedEnginePerDevice::PushToExecute(mxnet::engine::OprBlock*, bool)::{lambda()#4}::operator()() const::{lambda(std::shared_ptr<dmlc::ManualEvent>)#1}>::_M_invoke(std::_Any_data const&, std::shared_ptr<dmlc::ManualEvent>&&)+0x4e) [0x7f310917f02e]
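
For context, type enum 7 is MXNet's boolean dtype, which was newly added for the NumPy-compatible interface: under mx.npx.set_np(), comparison operators return bool-typed arrays, and that intermediate is what the fused kernel generator cannot map to a supported type. A minimal sketch showing the boolean intermediate (no hybridization or GPU needed; the enum-to-dtype reading is an inference from the error message):

import mxnet as mx
mx.npx.set_np()

# Under NumPy semantics, '<' yields a bool-typed array; this is the
# intermediate (type enum 7) that trips FusedOp::CheckShapesAndTypes.
a = mx.np.ones((10,))
print((a < 2).dtype)  # bool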

We can also manually disable operator fusion via the MXNET_USE_FUSION environment variable, which generates the correct answer:

import mxnet as mx
import numpy as np
import os
from mxnet.gluon import HybridBlock
mx.npx.set_np()

# Disable the pointwise operator fusion pass.
os.environ['MXNET_USE_FUSION'] = '0'

class Foo(HybridBlock):
    def __init__(self, prefix=None, params=None):
        super(Foo, self).__init__(prefix=prefix, params=params)

    def hybrid_forward(self, F, valid_length):
        mask = (F.np.ones((10,)) < valid_length).astype(np.float32)
        mask2 = (F.np.ones((10,)) < valid_length).astype(np.float32)
        mask = mask * F.np.expand_dims(mask2, axis=-1)
        return mask

foo = Foo()
foo.hybridize()
out = foo(mx.np.ones((10,), ctx=mx.gpu()))
print(out)
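
A usage note (an assumption about ordering, not verified here): MXNET_USE_FUSION appears to be consulted when the graph is fused at bind time, which is why setting it after importing mxnet still works in the script above. Setting it before the import, or exporting it in the shell, sidesteps the ordering question entirely:

import os
os.environ['MXNET_USE_FUSION'] = '0'  # set before importing mxnet to be safe

import mxnet as mx
mx.npx.set_np()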
@sxjscience sxjscience added the Bug label Nov 5, 2019
@sxjscience sxjscience changed the title Fuse_op does not support boolean type [Bug] fused_op does not support boolean type Nov 5, 2019
@ptrendx ptrendx self-assigned this Nov 5, 2019
@ptrendx (Member) commented Nov 5, 2019

I see, this is a newly added type. We will fix this.
