
fix(mge): fix some warnings

GitOrigin-RevId: 38b285f991

commit be205727bc (tags/v0.5.0)
Megvii Engine Team, Xu Xinran, 5 years ago
6 changed files with 48 additions and 54 deletions
  1. dnn/src/arm_common/conv_bias/int8/direct_dotprod.cpp (+40, -48)
  2. dnn/src/arm_common/conv_bias/int8/direct_dotprod_nchw44.cpp (+3, -1)
  3. dnn/src/arm_common/conv_bias/int8/direct_dotprod_nchw44_algo.cpp (+3, -1)
  4. dnn/src/arm_common/conv_bias/int8/direct_dotprod_nchw44_kern.h (+1, -0)
  5. dnn/src/common/conv_bias.cpp (+0, -3)
  6. src/core/impl/comp_node/cpu/comp_node.cpp (+1, -1)

dnn/src/arm_common/conv_bias/int8/direct_dotprod.cpp (+40, -48)

@@ -90,12 +90,11 @@ inline int8x16_t vqtbl1q_s8_v7(int8x16_t a, uint8x16_t index) {
_sum1##_c_idx = vdotq_s32(_sum1##_c_idx, _k##_k2_idx, _elem);

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_2x2_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_2x2_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - OW;
const uint8x16_t _idx0 = {0, 1, 16, 16, 1, 2, 16, 16,
2, 3, 16, 16, 3, 4, 16, 16};
@@ -326,12 +325,11 @@ void conv_bias::conv_direct_stride1_2x2_int8_dot(const int8_t* src,
}

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_3x3_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_3x3_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - OW;

const uint8x16_t _idx0 = {0, 1, 2, 16, 1, 2, 3, 16,
@@ -562,12 +560,11 @@ void conv_bias::conv_direct_stride1_3x3_int8_dot(const int8_t* src,
}

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_2x2_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_2x2_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - 2 * OW + IW;

const uint8x16_t _idx0 = {0, 1, 16, 16, 2, 3, 16, 16,
@@ -658,12 +655,11 @@ void conv_bias::conv_direct_stride2_2x2_int8_dot(const int8_t* src,
}

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_3x3_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_3x3_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - 2 * OW + IW;

const uint8x16_t _idx0 = {0, 1, 2, 16, 2, 3, 4, 16,
@@ -814,12 +810,11 @@ void conv_bias::conv_direct_stride2_3x3_int8_dot(const int8_t* src,
_sum1##_c_idx = vdotq_s32(_sum1##_c_idx, _k##_k11_idx, _elem);

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_5x5_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_5x5_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - 2 * OW + IW;

const uint8x16_t _idx00 = {0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9};
@@ -1113,12 +1108,11 @@ void conv_bias::conv_direct_stride2_5x5_int8_dot(const int8_t* src,
}

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride2_7x7_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride2_7x7_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - 2 * OW + IW;

const uint8x16_t _idx00 = {0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9};
@@ -1476,12 +1470,11 @@ void conv_bias::conv_direct_stride2_7x7_int8_dot(const int8_t* src,
}

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_5x5_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_5x5_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - OW;

const uint8x16_t _idx00 = {0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6};
@@ -1777,12 +1770,11 @@ void conv_bias::conv_direct_stride1_5x5_int8_dot(const int8_t* src,
}

template <bool first_ic, bool last_ic, BiasMode bias_mode, typename Op>
-void conv_bias::conv_direct_stride1_7x7_int8_dot(const int8_t* src,
-const int8_t* filter,
-const int32_t* bias, int32_t* temp,
-int8_t* dst, const size_t IH,
-const size_t IW, const size_t OH,
-const size_t OW, const Op& op) {
+void conv_bias::conv_direct_stride1_7x7_int8_dot(
+const int8_t* src, const int8_t* filter, const int32_t* bias,
+int32_t* temp, int8_t* dst, const size_t IH, const size_t IW,
+const size_t OH, const size_t OW, const Op& op) {
+MEGDNN_MARK_USED_VAR(IH);
const size_t tail_step = IW - OW;

const uint8x16_t _idx00 = {0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6};
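Every hunk in this file applies the same fix: the wrapped parameter list is reflowed, and MEGDNN_MARK_USED_VAR(IH) is added so the IH parameter, which these kernel bodies apparently never read, stops triggering an unused-parameter warning. A minimal sketch of the idiom, assuming the usual cast-to-void definition; the macro and function below are illustrative stand-ins, not MegDNN's actual ones:

#include <cstddef>

// Hypothetical stand-in for MEGDNN_MARK_USED_VAR: a cast to void counts as a
// "use" for the compiler and generates no code.
#define MARK_USED_VAR(v) static_cast<void>(v)

// IH stays in the signature so every kernel variant shares one interface,
// even when a particular kernel does not need it.
void conv_kernel_stub(const std::size_t IH, const std::size_t IW,
                      const std::size_t OW) {
    MARK_USED_VAR(IH);  // silences -Wunused-parameter for IH
    const std::size_t tail_step = IW - OW;
    MARK_USED_VAR(tail_step);  // the same trick works for locals
}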


dnn/src/arm_common/conv_bias/int8/direct_dotprod_nchw44.cpp (+3, -1)

@@ -29,6 +29,7 @@ void copy_packed_src_int8_nchw44<1>(int8_t* dst, const int dst_step,
const int ih, const int pad_left,
const int pad_right, const int pad_top,
const int pad_bottom) {
+MEGDNN_MARK_USED_VAR(pad_right);
constexpr int IC_PACK_SIZE = 4;
rep_step(ic_idx, ic, IC_PACK_SIZE) {
const int8_t* i_src = src + ic_idx * ic_step;
@@ -66,6 +67,7 @@ void copy_packed_src_int8_nchw44<2>(int8_t* dst, const int dst_step,
const int ih, const int pad_left,
const int pad_right, const int pad_top,
const int pad_bottom) {
+MEGDNN_MARK_USED_VAR(pad_right);
constexpr int IC_PACK_SIZE = 4;
int odd_start = megdnn::div_ceil(dst_step, 2);
bool nochange = pad_left % 2 == 0;
@@ -367,4 +369,4 @@ FOR_FILTER(2)
} // namespace megdnn
#endif

-//vim: syntax=cpp.doxygen
+//vim: syntax=cpp.doxygen

dnn/src/arm_common/conv_bias/int8/direct_dotprod_nchw44_algo.cpp (+3, -1)

@@ -163,6 +163,7 @@ static void conv_kern(WorkspaceBundle bundle,
bool ConvBiasImpl::AlgoDotS8Direct_NCHW44::usable(
FallbackConvBiasImpl*, const NCBKernSizeParam& param,
AlgoSelectionStrategy algo_selection_strategy) const {
+MEGDNN_MARK_USED_VAR(algo_selection_strategy);
auto&& fm = param.filter_meta;
auto FH = fm.spatial[0];
auto FW = fm.spatial[1];
@@ -199,6 +200,7 @@ bool ConvBiasImpl::AlgoDotS8Direct_NCHW44::usable(

bool ConvBiasImpl::AlgoDotS8Direct_NCHW44::is_preferred(
megdnn::fallback::ConvBiasImpl*, const NCBKernSizeParam& param) const {
+MEGDNN_MARK_USED_VAR(param);
return true;
}

@@ -338,4 +340,4 @@ ConvBiasImpl::AlgoDotS8Direct_NCHW44::dispatch_kerns(

#endif

-//vim: syntax=cpp.doxygen
+//vim: syntax=cpp.doxygen

dnn/src/arm_common/conv_bias/int8/direct_dotprod_nchw44_kern.h (+1, -0)

@@ -98,6 +98,7 @@ template <int ow_remain, typename Op, typename T>
struct StoreOCxOWx<1, ow_remain, Op, T> {
static void impl(int32x4_t res[][8], const Op& op, T* dst_ptr,
const int ld_dst_oc) {
+MEGDNN_MARK_USED_VAR(ld_dst_oc);
switch (ow_remain) {
case 8:
UNROLL_CALL_RAW(4, cb12);


dnn/src/common/conv_bias.cpp (+0, -3)

@@ -337,14 +337,11 @@ ConvBias::WinogradParam ConvBias::parse_winograd_name(
&(ret.channel_block_size), &(ret.output_block_size),
&(ret.tile_size));
if (strcmp(name, pre.c_str())) {
megdnn_log_warn("algo %s is not %s algo", name, pre.c_str());
ret = INVALID_WINOGRAD_PARAM;
return false;
}
if (ret.tile_size == 0 || ret.output_block_size == 0 ||
ret.channel_block_size == 0) {
megdnn_log_warn("the algo name %s is not suitable for %s",
algo_name.c_str(), pre.c_str());
ret = INVALID_WINOGRAD_PARAM;
return false;
}
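With the two megdnn_log_warn calls dropped, parse_winograd_name now rejects a malformed name silently: a prefix mismatch or a zero-sized block resets the result to INVALID_WINOGRAD_PARAM and returns false. A rough sketch of that validate-or-reset shape, with an assumed "prefix:c:o:t" name layout and simplified field types rather than the real MegDNN format:

#include <cstdio>
#include <cstring>
#include <string>

struct WinogradParam {
    unsigned channel_block_size = 0, output_block_size = 0, tile_size = 0;
};
const WinogradParam INVALID_WINOGRAD_PARAM{};

// Returns false and resets `ret` on any malformed name, without logging.
bool parse_winograd_name_sketch(const std::string& algo_name,
                                const std::string& pre, WinogradParam& ret) {
    char name[64] = {};
    std::sscanf(algo_name.c_str(), "%63[^:]:%u:%u:%u", name,
                &ret.channel_block_size, &ret.output_block_size,
                &ret.tile_size);
    if (std::strcmp(name, pre.c_str()) || ret.tile_size == 0 ||
        ret.output_block_size == 0 || ret.channel_block_size == 0) {
        ret = INVALID_WINOGRAD_PARAM;
        return false;
    }
    return true;
}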


src/core/impl/comp_node/cpu/comp_node.cpp (+1, -1)

@@ -795,7 +795,7 @@ bool CpuCompNode::CompNodeImpl::check_global_finalized(const char* reason) {
/* ======================== CompNode methods ======================== */

CompNode CompNode::default_cpu() {
-static Locator locator{DeviceType::CPU, Locator::DEVICE_CPU_DEFAULT, -1};
+static Locator locator{DeviceType::CPU, Locator::DEVICE_CPU_DEFAULT, {-1}};
static auto empty_queue =
std::make_shared<CpuCompNode::WorkerQueue>(locator);
static CpuCompNodeImpl impl{locator, locator, empty_queue};

