@@ -10,9 +10,9 @@
 * implied.
 */
#include "megbrain/utils/debug.h"
#include <cerrno>
#include <cmath>
#include "megbrain/utils/debug.h"
#include "megdnn/tensor_iter.h"
using namespace mgb;
@@ -127,7 +127,7 @@ void get_mem_map(
}
#ifndef WIN32
//FIXME: imp SigHandlerInit backtrace for windows
// FIXME: imp SigHandlerInit backtrace for windows
class SigHandlerInit {
    static void death_handler(int signum) {
        char msg0[] =
@@ -146,7 +146,7 @@ class SigHandlerInit {
            mgb_log_error("%s: caught deadly signal %d(%s)", msg0, signum,
                          strsignal(signum));
        }
        //FIXME: imp backtrace for macos
        // FIXME: imp backtrace for macos
#ifndef __APPLE__
        std::string bp;
        debug::backtrace(2).fmt_to_str(bp);
@@ -279,7 +279,7 @@ BacktraceResult mgb::debug::backtrace(int nr_exclude) {
    recursive_call = false;
    return result;
#else
    //FIXME: imp Backtrace for windows
    // FIXME: imp Backtrace for windows
    BacktraceResult result;
    return result;
#endif
@@ -352,6 +352,16 @@ std::string num2str(float val) {
    return ret;
}
#endif
template <typename dnn_ctype>
struct RealCtype {
    using ctype = dnn_ctype;
    static dnn_ctype trans(dnn_ctype val) { return val; }
};
template <>
struct RealCtype<dt_qint8> {
    using ctype = int;
    static int trans(dt_qint8 val) { return val.as_int8(); }
};
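// A minimal, self-contained sketch (not MegBrain code) of the idea behind the
// RealCtype trait above: quantized storage types such as dt_qint8 do not support
// the plain arithmetic used by the comparison loop, so a trait promotes them to a
// built-in type first. `QInt8Stub` is a hypothetical stand-in for dt_qint8.
#include <cmath>
#include <cstdint>

struct QInt8Stub {
    int8_t v;
    int8_t as_int8() const { return v; }
};

template <typename T>
struct PromoteForCompare {  // generic case: type already supports arithmetic
    using ctype = T;
    static T trans(T val) { return val; }
};

template <>
struct PromoteForCompare<QInt8Stub> {  // quantized case: compare as plain int
    using ctype = int;
    static int trans(QInt8Stub val) { return val.as_int8(); }
};

template <typename T>
double abs_diff(T a, T b) {
    typename PromoteForCompare<T>::ctype x = PromoteForCompare<T>::trans(a),
                                         y = PromoteForCompare<T>::trans(b);
    return std::abs(static_cast<double>(x) - static_cast<double>(y));
}
// abs_diff(QInt8Stub{-3}, QInt8Stub{4}) yields 7.0; abs_diff(1.5f, 1.25f) yields 0.25.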
template <typename ctype>
Maybe<std::string> do_compare_tensor_value(const char* expr0, const char* expr1,
@@ -361,7 +371,8 @@ Maybe<std::string> do_compare_tensor_value(const char* expr0, const char* expr1,
    auto it0 = megdnn::tensor_iter<ctype>(v0.as_megdnn()).begin(),
         it1 = megdnn::tensor_iter<ctype>(v1.as_megdnn()).begin();
    for (size_t i = 0, it = v0.shape().total_nr_elems(); i < it; ++i) {
        ctype iv0 = *it0, iv1 = *it1;
        typename RealCtype<ctype>::ctype iv0 = RealCtype<ctype>::trans(*it0),
                                         iv1 = RealCtype<ctype>::trans(*it1);
        double err = std::abs(iv0 - iv1) /
                     std::max<double>(
                             1, std::min(std::abs(static_cast<double>(iv0)),
@@ -424,7 +435,8 @@ Maybe<std::string> debug::compare_tensor_value(const HostTensorND& v0,
        return do_compare_tensor_value<DTypeTrait<_dt>::ctype>( \
                expr0, expr1, v0, v1, maxerr);
        MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
        cb(::megdnn::dtype::Bool)
        cb(::megdnn::dtype::QuantizedS8);
        cb(::megdnn::dtype::Bool);
#undef cb
        default:
            mgb_throw(MegBrainError, "unhandled dtype: %s", dtype.name());
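// For reference, a sketch (not library code) of the error metric computed in the
// loop above: the difference is scaled by the smaller of the two magnitudes, but
// never by anything below 1, so values near zero fall back to absolute error.
#include <algorithm>
#include <cmath>

static double tensor_compare_err(double iv0, double iv1) {
    return std::abs(iv0 - iv1) /
           std::max<double>(1, std::min(std::abs(iv0), std::abs(iv1)));
}
// tensor_compare_err(1000, 1001) gives 0.001 (relative error), while
// tensor_compare_err(0.25, 0.26) gives 0.01 (absolute, since min(|iv0|, |iv1|) < 1).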
@@ -10,9 +10,9 @@
 * implied.
 */
#include "megbrain/opr/imgproc.h"
#include "./internal/megdnn_opr_wrapper.inl"
#include "megbrain/graph/grad_impl.h"
#include "megbrain/opr/imgproc.h"
#include "megbrain/opr/utility.h"
using namespace mgb;
@@ -267,9 +267,10 @@ void WarpPerspectiveBackwardMat::scn_do_execute() {
    }
}
SymbolVar WarpPerspectiveBackwardMat::make(
        SymbolVar i0, SymbolVar i1, SymbolVar i2, SymbolVar i3,
        const Param& param, const OperatorNodeConfig& config) {
SymbolVar WarpPerspectiveBackwardMat::make(SymbolVar i0, SymbolVar i1,
                                           SymbolVar i2, SymbolVar i3,
                                           const Param& param,
                                           const OperatorNodeConfig& config) {
    intl::MegDNNOprInitInputsModifier<WarpPerspectiveBackwardMat>::apply(
            param, {&i0, &i1, &i2, &i3});
    return i0.insert_single_output_opr<WarpPerspectiveBackwardMat>(
@@ -447,14 +448,12 @@ void RemapForward::init_output_dtype() {
MGB_IMPL_OPR_GRAD(RemapForward) {
    mgb_assert(opr.input().size() == 2);
    if (wrt_idx == 0) {
        SymbolVar grad =
                RemapBackwardData::make(opr.input(1), out_grad[0],
                                        opr.input(0), opr.param());
        SymbolVar grad = RemapBackwardData::make(opr.input(1), out_grad[0],
                                                 opr.input(0), opr.param());
        return grad.node();
    } else if (wrt_idx == 1) {
        SymbolVar grad =
                RemapBackwardMat::make(opr.input(0), opr.input(1),
                                       out_grad[0], opr.param());
        SymbolVar grad = RemapBackwardMat::make(opr.input(0), opr.input(1),
                                                out_grad[0], opr.param());
        return grad.node();
    } else
        return InvalidGrad::make(opr, wrt_idx);
@@ -468,4 +467,73 @@ MEGDNN_OPR_INIT3(RemapBackwardData, "remap_bwd_data", 2, false);
MGB_DYN_TYPE_OBJ_FINAL_IMPL(RemapBackwardMat);
MEGDNN_OPR_INIT3(RemapBackwardMat, "remap_bwd_mat", 1, true);
/* ======================= DctChannelSelectForward ======================= */
MGB_DYN_TYPE_OBJ_FINAL_IMPL(DctChannelSelectForward);
namespace mgb {
namespace opr {
namespace intl {
template <>
struct MegDNNOprInitPostCtor<DctChannelSelectForward> {
    static void apply(cg::OperatorNodeBase& opr) {
        if (opr.config().output_dtype().valid()) {
            opr.output(0)->dtype(opr.config().output_dtype());
        } else {
            opr.output(0)->dtype(dtype::Float32());
        }
    }
};
} // namespace intl
} // namespace opr
} // namespace mgb
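// Usage sketch for the dtype rule above (illustrative; `src` is assumed to be an
// existing uint8 NCHW SymbolVar and `param` / `param_nchw4` suitable Param values,
// mirroring the test added later in this diff):
//
//     // no output_dtype in the config -> output(0) becomes dtype::Float32()
//     auto y_f32 = opr::DctChannelSelect::make(src, param);
//
//     // explicit output dtype in the config -> output(0) becomes QuantizedS8
//     auto y_q8 = opr::DctChannelSelect::make(
//             src, param_nchw4, OperatorNodeConfig(dtype::QuantizedS8(10.f)));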
void DctChannelSelectForward::get_output_var_shape(
        const TensorShapeArray& inp_shape, TensorShapeArray& out_shape) const {
    auto mo = megdnn_opr();
    TensorLayout dst;
    dst.dtype = output(0)->dtype();
    if (inp_shape.size() == 1) {
        mo->deduce_layout({inp_shape[0], input(0)->dtype(), input(0)->format()},
                          {}, {}, dst);
    } else {
        mgb_assert(inp_shape.size() == 3, "unsupported input tensor num: %zu",
                   inp_shape.size());
        mo->deduce_layout({inp_shape[0], input(0)->dtype(), input(0)->format()},
                          {inp_shape[1], input(1)->dtype(), input(1)->format()},
                          {inp_shape[2], input(2)->dtype(), input(2)->format()},
                          dst);
    }
    out_shape[0] = dst;
}
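// Shape relation the deduction above is expected to produce for the single-input
// (no mask) case with the default NCHW format, derived from the test expectations
// later in this diff and the megdnn kernel's fixed 8x8 DCT block (an illustration,
// not part of the operator):
//   uint8 (N, C, H, W) -> float32 (N, C * 64, H / 8, W / 8)
// and for Param::Format::NCHW4 with a quantized output dtype:
//   uint8 (N, C, H, W) -> qint8 (N, C * 64 / 4, H / 8, W / 8, 4)
static inline TensorShape dct_nchw_out_shape_sketch(const TensorShape& in) {
    return {in[0], in[1] * 64, in[2] / 8, in[3] / 8};
}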
size_t DctChannelSelectForward::get_workspace_size_bytes(
        const TensorShapeArray& input_shapes,
        const TensorShapeArray& output_shapes) const {
    auto mo = megdnn_opr();
    return mo->get_workspace_in_bytes(
            {input_shapes[0], input(0)->dtype(), input(0)->format()}, {}, {},
            {output_shapes[0], output(0)->dtype(), output(0)->format()});
}
void DctChannelSelectForward::scn_do_execute() {
    auto&& inp = input();
    auto mo = megdnn_opr();
    if (inp.size() == 1) {
        mo->exec(inp[0]->dev_tensor().as_megdnn(), {}, {},
                 output(0)->dev_tensor().as_megdnn(),
                 intl::get_megdnn_workspace_from_var(output().back()));
    } else {
        mgb_assert(inp.size() == 3, "unsupported input tensor num: %zu",
                   inp.size());
        mo->exec(inp[0]->dev_tensor().as_megdnn(),
                 inp[1]->dev_tensor().as_megdnn(),
                 inp[2]->dev_tensor().as_megdnn(),
                 output(0)->dev_tensor().as_megdnn(),
                 intl::get_megdnn_workspace_from_var(output().back()));
    }
}
MEGDNN_OPR_INIT3(DctChannelSelectForward, "dct_channel_select")
MEGDNN_OPR_INIT1(DctChannelSelectForward, "dct_channel_select")
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
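// End-to-end usage sketch for the new operator (illustrative only, not part of the
// change; assumes a HostTensorND `host_src` holding a uint8 NCHW batch and the
// graph helpers used elsewhere in the MegBrain tests):
//
//     auto graph = ComputingGraph::make();
//     auto src = opr::Host2DeviceCopy::make(*graph, host_src);
//     opr::DctChannelSelect::Param param;   // defaults: NCHW, full channel set
//     auto dct = opr::DctChannelSelect::make(src, param);
//     HostTensorND host_dct;
//     auto func = graph->compile({make_callback_copy(dct, host_dct)});
//     func->execute();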
@@ -90,4 +90,30 @@ decl_opr(
    desc='Remap transformation to batched 2D images; '
    'see https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html?highlight=remap'
    'for details on remap transformations.')
decl_raw_opr(
    'dct_channel_select',
    inputs=[
        Doc('src', 'input image, uint8 data type with NCHW format'),
        Doc('mask_offset', 'out channel offset array'),
        Doc('mask_val', 'out index per channel'),
    ],
    params='DctChannelSelect',
    body=[
        'if mask_offset is None:',
        '    all_inputs = _helper.canonize_input_vars([src], comp_graph=comp_graph, config=config)',
        'else:',
        '    all_inputs = _helper.canonize_input_vars([src, mask_offset, mask_val], comp_graph=comp_graph, config=config)',
        'cvt_result_kwargs = {}',
        'param = _helper.cvt_to_opr_param_def(param, _opr_param_defs.DctChannelSelect, kwargs)',
        'assert not kwargs, "extra kwargs: {}".format(kwargs)',
        'all_params = []',
        'all_params.append(param.serialize())',
        'output = _mgb._create_opr("DctChannelSelect", all_inputs, all_params, config)',
    ],
    has_out_dtype=True,
    desc='DctChannelSelect does DCT with channel selection; '
    'see https://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html?highlight=dct#dct '
    'for details on DCT transformations. The output dtype is float32 or qint8.')
# vim: ft=python
@@ -38,6 +38,26 @@ namespace serialization {
        }
    };
    template <>
    struct OprMaker<opr::DctChannelSelectForward, 0> {
        using Opr = opr::DctChannelSelectForward;
        using Param = Opr::Param;
        static cg::OperatorNodeBase* make(const Param& param,
                                          const cg::VarNodeArray& inputs,
                                          ComputingGraph& graph,
                                          const OperatorNodeConfig& config) {
            MGB_MARK_USED_VAR(graph);
            if (inputs.size() == 3) {
                return Opr::make(inputs[0], inputs[1], inputs[2], param, config)
                        .node()
                        ->owner_opr();
            } else {
                mgb_assert(inputs.size() == 1);
                return Opr::make(inputs[0], param, config).node()->owner_opr();
            }
        }
    };
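    // Why arity 0: DctChannelSelect takes either 1 input (src) or 3 inputs
    // (src, mask_offset, mask_val), so the input count cannot be fixed at
    // registration time and the maker above dispatches on inputs.size() instead.
    // A sketch of what the loader effectively invokes during deserialization
    // (`loaded_param`, `loaded_inputs` and `loaded_config` are hypothetical):
    //
    //     cg::OperatorNodeBase* opr =
    //             OprMaker<opr::DctChannelSelectForward, 0>::make(
    //                     loaded_param, loaded_inputs, graph, loaded_config);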
    template<>
    struct OprMaker<opr::WarpPerspectiveBackwardData, 0> {
        using Opr = opr::WarpPerspectiveBackwardData;
@@ -107,6 +127,8 @@ namespace opr {
    //! current resize version
    using ResizeV1 = opr::Resize;
    MGB_SEREG_OPR(ResizeV1, 2);
    MGB_SEREG_OPR(DctChannelSelect, 0);
} // namespace opr
@@ -259,6 +259,37 @@ void record_execute_deps(ExecDependencyArray& deps) override;
};
using WarpAffine = WarpAffineForward;
/*!
 * \brief apply DCT transformation to batched 2D images
 */
MGB_DEFINE_OPR_CLASS(
        DctChannelSelectForward,
        intl::MegDNNOprWrapperFwd<megdnn::DctChannelSelectForward>) // {
public:
    DctChannelSelectForward(VarNode* src, VarNode* mask_offset, VarNode* mask_val,
                            const Param& param, const OperatorNodeConfig& config);
    static SymbolVar make(SymbolVar src, SymbolVar mask_offset, SymbolVar mask_val,
                          const Param& param,
                          const OperatorNodeConfig& config = {});
    DctChannelSelectForward(VarNode* src, const Param& param,
                            const OperatorNodeConfig& config);
    static SymbolVar make(SymbolVar src, const Param& param,
                          const OperatorNodeConfig& config = {});
    void get_output_var_shape(const TensorShapeArray& inp_shape,
                              TensorShapeArray& out_shape) const override;
    size_t get_workspace_size_bytes(
            const TensorShapeArray& input_shapes,
            const TensorShapeArray& output_shapes) const override;
    void scn_do_execute() override;
};
using DctChannelSelect = DctChannelSelectForward;
} // opr
} // mgb
@@ -294,7 +294,7 @@ namespace intl {
    void scn_do_execute() override;
    void get_output_var_shape(
            const TensorShapeArray &inp_shape,
            TensorShapeArray &out_shape) const override final;
            TensorShapeArray &out_shape) const override;
    void record_execute_deps(
            cg::GraphExecutable::ExecDependencyArray& deps) override {
@@ -713,4 +713,94 @@ TEST(TestOprImgproc, Remap_NHWC) {
            .run({TensorShape{N, 20, 20, C}, TensorShape{N, 10, 10, 2}}, opt);
}
TEST(TestOprImgproc, DCT) {
    REQUIRE_GPU(1);
    using Checker3 = AutoOprChecker<3, 1>;
    using Checker1 = AutoOprChecker<1, 1>;
    opr::DctChannelSelectForward::Param param;
    opr::DctChannelSelectForward::Param param_nchw4;
    param_nchw4.format = opr::DctChannelSelectForward::Param::Format::NCHW4;
    auto make_graph3 =
            [&](const Checker3::SymInpArray& inputs) -> Checker3::SymOutArray {
        return {opr::DctChannelSelectForward::make(inputs[0], inputs[1],
                                                   inputs[2], param)};
    };
    auto fwd3 = [&](Checker3::NumOutArray& dest, Checker3::NumInpArray inp) {
        auto opr = megdnn_naive_handle()
                           ->create_operator<megdnn::DctChannelSelectForward>();
        auto& in_shape = inp[0]->shape();
        TensorShape out_shp{in_shape[0], in_shape[1] * 64, in_shape[2] / 8,
                            in_shape[3] / 8};
        dest[0].comp_node(inp[0]->comp_node()).resize(out_shp);
        opr->param() = param;
        opr->exec(inp[0]->as_megdnn(), inp[1]->as_megdnn(), inp[2]->as_megdnn(),
                  dest[0].as_megdnn(), {});
    };
    auto make_graph1 =
            [&](const Checker1::SymInpArray& inputs) -> Checker1::SymOutArray {
        return {opr::DctChannelSelectForward::make(inputs[0], param)};
    };
    auto make_graph1_s8 =
            [&](const Checker1::SymInpArray& inputs) -> Checker1::SymOutArray {
        return {opr::DctChannelSelectForward::make(
                inputs[0], param_nchw4,
                OperatorNodeConfig(dtype::QuantizedS8(10.f)))};
    };
    auto fwd1 = [&](Checker1::NumOutArray& dest, Checker1::NumInpArray inp) {
        auto opr = megdnn_naive_handle()
                           ->create_operator<megdnn::DctChannelSelectForward>();
        auto& in_shape = inp[0]->shape();
        TensorShape out_shp{in_shape[0], in_shape[1] * 64, in_shape[2] / 8,
                            in_shape[3] / 8};
        dest[0].comp_node(inp[0]->comp_node()).resize(out_shp);
        opr->param() = param;
        opr->exec(inp[0]->as_megdnn(), {}, {}, dest[0].as_megdnn(), {});
    };
    auto fwd1_s8 = [&](Checker1::NumOutArray& dest, Checker1::NumInpArray inp) {
        auto opr = megdnn_naive_handle()
                           ->create_operator<megdnn::DctChannelSelectForward>();
        auto& in_shape = inp[0]->shape();
        TensorShape out_shp{in_shape[0], in_shape[1] * 64 / 4, in_shape[2] / 8,
                            in_shape[3] / 8, 4};
        dest[0].comp_node(inp[0]->comp_node()).resize(out_shp);
        opr->param() = param_nchw4;
        opr->exec(inp[0]->as_megdnn(), {}, {}, dest[0].as_megdnn(), {});
    };
    Checker3::RunOptions opt3;
    Checker1::RunOptions opt1;
    Checker1::RunOptions opt1_qint8;
    opt3.outputs_max_err = 1e-3;
    opt1.outputs_max_err = 1e-3;
    opt1_qint8.outputs_max_err = 1.001;
    auto gen_input = [](HostTensorND& dest) {
        HostTensorGenerator<dtype::Uint8, RandomDistribution::UNIFORM>
                mask_generator{0, 255};
        dest = *mask_generator(dest.shape(), dest.comp_node());
    };
    auto gen_mask = [](HostTensorND& dest) {
        HostTensorGenerator<dtype::Int32, RandomDistribution::UNIFORM>
                mask_generator{0, 8};
        dest = *mask_generator(dest.shape(), dest.comp_node());
    };
    Checker1(make_graph1, fwd1, CompNode::load("gpu0"))
            .disable_grad_check()
            .set_input_generator(0, gen_input)
            .set_input_dtype(0, dtype::Uint8())
            .run({TensorShape{1, 1, 16, 16}}, opt1)
            .run({TensorShape{1, 3, 256, 256}}, opt1)
            .run({TensorShape{4, 3, 512, 512}}, opt1);
    Checker1(make_graph1_s8, fwd1_s8, CompNode::load("gpu0"))
            .disable_grad_check()
            .set_input_generator(0, gen_input)
            .set_input_dtype(0, dtype::Uint8())
            .run({TensorShape{1, 1, 16, 16}}, opt1_qint8)
            .run({TensorShape{1, 3, 256, 256}}, opt1_qint8)
            .run({TensorShape{4, 3, 512, 512}}, opt1_qint8);
    MGB_MARK_USED_VAR(make_graph3);
    MGB_MARK_USED_VAR(fwd3);
    MGB_MARK_USED_VAR(gen_mask);
}
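// Worked example of the shapes the two checkers above exercise, using the
// (N, C, H, W) -> (N, C * 64, H / 8, W / 8) relation from fwd1 and the NCHW4
// variant from fwd1_s8:
//   {1, 3, 256, 256} uint8 -> fwd1:    {1, 192, 32, 32}   float32
//                          -> fwd1_s8: {1, 48, 32, 32, 4} qint8 (NCHW4)
//   {4, 3, 512, 512} uint8 -> fwd1:    {4, 192, 64, 64}   float32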
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -101,6 +101,7 @@ union OperatorParam {
    param.NMSKeep = 69,
    param.AdaptivePooling = 70,
    param.NvOf = 71,
    param.DctChannelSelect = 72,
}
table Operator