|
|
@@ -519,6 +519,38 @@ TEST_F(CUDA, CONVOLUTION_BACKWARD_FILTER_MATMUL) { |
|
|
|
.set_param(arg.param) |
|
|
|
.exec(TensorLayoutArray{src, dst, filter}); |
|
|
|
} |
|
|
|
//! noncontiguous case |
|
|
|
{ |
|
|
|
NormalRNG default_rng; |
|
|
|
param::Convolution param; |
|
|
|
param.pad_h = param.pad_w = 1; |
|
|
|
checker.set_rng(0, &default_rng) |
|
|
|
.set_rng(1, &default_rng) |
|
|
|
.set_param(param) |
|
|
|
.execl(TensorLayoutArray{ |
|
|
|
{{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()}, |
|
|
|
{{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()}, |
|
|
|
{{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()}}); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
TEST_F(CUDA, CONVOLUTION_BACKWARD_FILTER_CUDNN) {
    // NOTE(review): presumably this skips the test on devices of compute
    // capability >= 7.0 — confirm the semantics of
    // is_compute_capability_required against its definition.
    if (cuda::is_compute_capability_required(7, 0))
        return;

    using namespace convolution;

    // Run the backward-filter gradient through the checker, pinned to the
    // plain cuDNN algorithm so no other backend can be silently selected.
    Checker<ConvolutionBackwardFilter> checker(handle_cuda());
    checker.set_before_exec_callback(
            AlgoChecker<ConvolutionBackwardFilter>("CUDNN_CONVOLUTION"));

    //! noncontiguous case
    {
        param::Convolution conv_param;
        conv_param.pad_h = conv_param.pad_w = 1;

        // src/diff layouts: shape {2,16,7,7} with batch stride 1568 instead of
        // the contiguous 784 — i.e. every other batch slice is skipped, which
        // exercises the noncontiguous-input path. grad layout is contiguous.
        checker.set_param(conv_param);
        checker.execl(TensorLayoutArray{
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                {{2, 16, 7, 7}, {1568, 49, 7, 1}, dtype::Float32()},
                {{16, 16, 3, 3}, {144, 9, 3, 1}, dtype::Float32()}});
    }
}
|
|
|
|
|
|
|
TEST_F(CUDA, CONV_CONFIG_COMBINATIONS) { |
|
|
|