Diffstat (limited to 'src/gpu/cl')
 src/gpu/cl/ClKernelLibrary.cpp                                    |  50
 src/gpu/cl/kernels/ClCastKernel.cpp                               |   4
 src/gpu/cl/kernels/ClDirectConv2dKernel.cpp                       | 356
 src/gpu/cl/kernels/ClDirectConv2dKernel.h                         |   4
 src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp           |  23
 src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h             |   3
 src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp         |  22
 src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h           |   4
 src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp  |  17
 src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h    |   5
 src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp               |  64
 src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h                 |   3
 src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp               |  12
 src/gpu/cl/kernels/ClIm2ColKernel.cpp                             |   6
 src/gpu/cl/kernels/ClPool2dKernel.cpp                             |   3
 src/gpu/cl/kernels/ClScaleKernel.cpp                              |  22
 src/gpu/cl/operators/ClConv2d.cpp                                 |  34
 src/gpu/cl/operators/ClDirectConv2d.cpp                           |   2
 src/gpu/cl/operators/ClGemm.cpp                                   |  10
 src/gpu/cl/operators/ClGemm.h                                     |   1
 20 files changed, 217 insertions(+), 428 deletions(-)
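
A pattern recurs throughout this change: the GEMM kernels stop baking M, N and K into the program build options (-DM=..., -DN=..., -DK=...) and instead receive them as trailing kernel arguments at enqueue time, so one compiled binary can serve every shape. A minimal host-side sketch of that pattern, assuming the OpenCL C++ wrapper API (the helper name is illustrative, not part of the library):

#include <CL/cl2.hpp>

// Bind the GEMM shape as the last three kernel arguments. They are passed as
// signed ints so that subtractions involving them inside the kernel stay signed.
void set_gemm_shape_args(cl::Kernel &kernel, unsigned int idx, int m, int n, int k)
{
    kernel.setArg<cl_int>(idx++, m);
    kernel.setArg<cl_int>(idx++, n);
    kernel.setArg<cl_int>(idx++, k);
}
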
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index c47cf8ef1..bab534216 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -290,10 +290,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "gemm_mm_reshaped_only_rhs_t_post_act_eltwise_op_act", "common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_reshaped_only_rhs.cl" },
{ "gemm_mm_reshaped_only_rhs_t_texture_post_act_eltwise_op_act", "common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_reshaped_only_rhs.cl" },
{ "gemm_lc_vm_f32", "common/gemm.cl" },
- { "gemm_reshape_lhs_matrix_nt", "common/gemm.cl" },
- { "gemm_reshape_lhs_matrix_t", "common/gemm.cl" },
- { "gemm_reshape_rhs_matrix_nt", "common/gemm.cl" },
- { "gemm_reshape_rhs_matrix_t", "common/gemm.cl" },
+ { "gemm_reshape_lhs_matrix_nt", "common/gemm_utils.cl" },
+ { "gemm_reshape_lhs_matrix_t", "common/gemm_utils.cl" },
+ { "gemm_reshape_rhs_matrix_nt", "common/gemm_utils.cl" },
+ { "gemm_reshape_rhs_matrix_t", "common/gemm_utils.cl" },
{ "gemmlowp_matrix_a_reduction", "common/gemmlowp.cl" },
{ "gemmlowp_matrix_a_reduction_dot8", "common/gemmlowp.cl" },
{ "gemmlowp_matrix_b_reduction", "common/gemmlowp.cl" },
@@ -363,12 +363,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "depth_to_space_nchw", "nchw/depth_to_space.cl" },
{ "dequantization_layer_per_channel_nchw", "nchw/dequantization_layer.cl" },
{ "direct_convolution1x1", "nchw/direct_convolution1x1.cl" },
- { "direct_convolution1x1_f32_bifrost", "nchw/direct_convolution1x1.cl" },
- { "direct_convolution3x3", "nchw/direct_convolution3x3.cl" },
- { "direct_convolution3x3_f32_bifrost", "nchw/direct_convolution3x3.cl" },
- { "direct_convolution5x5", "nchw/direct_convolution5x5.cl" },
- { "direct_convolution5x5_f32_bifrost", "nchw/direct_convolution5x5.cl" },
- { "direct_convolution_quantized", "nchw/direct_convolution_quantized.cl" },
+ { "direct_convolution_nchw", "nchw/direct_convolution.cl" },
+
{ "im2col1x1_stridex1_nchw", "nchw/im2col.cl" },
{ "im2col3x3_nchw", "nchw/im2col.cl" },
{ "im2col5x5_nchw", "nchw/im2col.cl" },
@@ -382,8 +378,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "pooling_layer_MxN_nchw", "nchw/pooling_layer.cl" },
{ "pooling_layer_2_nchw_indices", "nchw/pooling_layer.cl" },
{ "prior_box_layer_nchw", "nchw/prior_box_layer.cl" },
- { "remap_nearest_neighbour_nchw", "nchw/remap.cl" },
- { "remap_bilinear_nchw", "nchw/remap.cl" },
{ "reorg_layer_nchw", "nchw/reorg_layer.cl" },
{ "scale_nearest_neighbour_nchw", "nchw/scale.cl" },
{ "scale_bilinear_nchw", "nchw/scale.cl" },
@@ -443,8 +437,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "pooling_layer_MxN_nhwc", "nhwc/pooling_layer.cl" },
{ "pooling_layer_2x2_nhwc", "nhwc/pooling_layer.cl" },
{ "pooling_layer_MxN_quantized_nhwc", "nhwc/pooling_layer_quantized.cl" },
- { "remap_nearest_neighbour_nhwc", "nhwc/remap.cl" },
- { "remap_bilinear_nhwc", "nhwc/remap.cl" },
{ "reorg_layer_nhwc", "nhwc/reorg_layer.cl" },
{ "scale_nearest_neighbour_nhwc", "nhwc/scale.cl" },
{ "scale_bilinear_nhwc", "nhwc/scale.cl" },
@@ -590,6 +582,10 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/common/gemm.clembed"
},
{
+ "common/gemm_utils.cl",
+#include "./cl_kernels/common/gemm_utils.clembed"
+ },
+ {
"common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl",
#include "./cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.clembed"
},
@@ -763,20 +759,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/nchw/dequantization_layer.clembed"
},
{
- "nchw/direct_convolution1x1.cl",
-#include "./cl_kernels/nchw/direct_convolution1x1.clembed"
- },
- {
- "nchw/direct_convolution3x3.cl",
-#include "./cl_kernels/nchw/direct_convolution3x3.clembed"
- },
- {
- "nchw/direct_convolution5x5.cl",
-#include "./cl_kernels/nchw/direct_convolution5x5.clembed"
- },
- {
- "nchw/direct_convolution_quantized.cl",
-#include "./cl_kernels/nchw/direct_convolution_quantized.clembed"
+ "nchw/direct_convolution.cl",
+#include "./cl_kernels/nchw/direct_convolution.clembed"
},
{
"nchw/im2col.cl",
@@ -807,10 +791,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/nchw/prior_box_layer.clembed"
},
{
- "nchw/remap.cl",
-#include "./cl_kernels/nchw/remap.clembed"
- },
- {
"nchw/reorg_layer.cl",
#include "./cl_kernels/nchw/reorg_layer.clembed"
},
@@ -906,10 +886,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/nhwc/pooling_layer_quantized.clembed"
},
{
- "nhwc/remap.cl",
-#include "./cl_kernels/nhwc/remap.clembed"
- },
- {
"nhwc/reorg_layer.cl",
#include "./cl_kernels/nhwc/reorg_layer.clembed"
},
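
For context, ClKernelLibrary resolves a kernel name to the program file that must be built, which is why the reshape kernels are re-pointed at common/gemm_utils.cl and the four NCHW direct-convolution entries collapse into a single direct_convolution_nchw entry. A minimal sketch of that lookup, assuming a plain std::map like _kernel_program_map above (the helper itself is hypothetical, not the real ClKernelLibrary interface):

#include <map>
#include <stdexcept>
#include <string>

std::string program_for_kernel(const std::string &kernel_name,
                               const std::map<std::string, std::string> &kernel_program_map)
{
    const auto it = kernel_program_map.find(kernel_name);
    if(it == kernel_program_map.end())
    {
        throw std::runtime_error("Unknown kernel name: " + kernel_name);
    }
    // e.g. "gemm_reshape_lhs_matrix_nt" now resolves to "common/gemm_utils.cl"
    return it->second;
}
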
diff --git a/src/gpu/cl/kernels/ClCastKernel.cpp b/src/gpu/cl/kernels/ClCastKernel.cpp
index 48caf21d1..bfcd15229 100644
--- a/src/gpu/cl/kernels/ClCastKernel.cpp
+++ b/src/gpu/cl/kernels/ClCastKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,7 +52,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, Conver
ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src,
1,
- DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
+ DataType::U8, DataType::S8, DataType::QASYMM8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
DataType::U16, DataType::U32, DataType::S32, DataType::F16,
DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst,
diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
index 2d851a698..ff8c2c32a 100644
--- a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
+++ b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp
@@ -122,209 +122,6 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, co
return Status{};
}
-inline bool can_run_optimized_kernel_for_bifrost_nchw(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
- DataType data_type, DataLayout data_layout)
-{
- return gpu_target_is_in(gpu_target,
- GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
- GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
- GPUTarget::G52, GPUTarget::G52LIT)
- && (kernel_size <= 5)
- && (conv_stride_x == 1) && (conv_stride_y == 1)
- && (data_type == DataType::F32)
- && (data_layout == DataLayout::NCHW);
-}
-
-inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
- unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
- unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *src)
-{
- const DataType data_type = src->data_type();
- const DataLayout data_layout = src->data_layout();
- unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-
- const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost_nchw(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);
-
- if(run_optimized_bifrost)
- {
- // Configure kernel window
- switch(kernel_size)
- {
- case 1:
- {
- num_elems_read_per_iteration_x = 4;
- num_elems_read_per_iteration_y = 4;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 4;
- break;
- }
- case 3:
- {
- num_elems_read_per_iteration_x = 6;
- num_elems_read_per_iteration_y = 5;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 3;
- break;
- }
- case 5:
- {
- num_elems_read_per_iteration_x = 8;
- num_elems_read_per_iteration_y = 6;
- num_elems_written_per_iteration_x = 4;
- num_elems_written_per_iteration_y = 2;
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Kernel size not optimized for Bifrost");
- }
- }
- }
- else
- {
- num_elems_read_per_iteration_y = kernel_size;
- num_elems_written_per_iteration_x = 8;
- num_elems_written_per_iteration_y = 1;
- switch(kernel_size)
- {
- case 1:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 8;
- break;
- case 2:
- num_elems_read_per_iteration_x = 16;
- break;
- case 3:
- switch(src->element_size())
- {
- case 1:
- num_elems_read_per_iteration_x = 28;
- break;
- case 2:
- num_elems_read_per_iteration_x = 24;
- break;
- case 4:
- num_elems_read_per_iteration_x = 22;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid data size");
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 3:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 10;
- break;
- case 2:
- num_elems_read_per_iteration_x = 17;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 5:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 12;
- break;
- case 2:
- num_elems_read_per_iteration_x = 20;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 9:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_x = 16;
- break;
- case 2:
- num_elems_read_per_iteration_x = 24;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid direct convolution size");
- }
- }
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *weights, ITensorInfo *dst, const PadStrideInfo &conv_info, const GPUTarget target)
-{
- const DataLayout data_layout = src->data_layout();
-
- // Get dst shape
- TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
-
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*dst, output_shape,
- 1,
- src->data_type(),
- src->quantization_info());
-
- if(data_layout == DataLayout::NHWC)
- {
- const unsigned int vec_size = std::min(static_cast<unsigned int>(dst->tensor_shape()[0]), 4u);
- unsigned int num_rows = 1U;
- if(dst->tensor_shape()[0] > 16)
- {
- num_rows = src->data_type() == DataType::F32 ? 2U : 4U;
- }
-
- // Create window and update padding
- Window win = calculate_max_window(output_shape, Steps(vec_size, num_rows));
- return std::make_pair(Status{}, win);
- }
- else if(data_layout == DataLayout::NCHW)
- {
- const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int kernel_size = weights->dimension(width_idx);
-
- unsigned int num_elems_read_per_iteration_x = 0;
- unsigned int num_elems_read_per_iteration_y = 0;
- unsigned int num_elems_written_per_iteration_x = 0;
- unsigned int num_elems_written_per_iteration_y = 0;
-
- unsigned int conv_pad_left = conv_info.pad_left();
- unsigned int conv_pad_top = conv_info.pad_top();
- unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-
- setup_num_elems_nchw(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
- num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
- kernel_size, conv_info, target, src);
-
- // Create window and update padding
- bool window_changed = false;
- Window win = calculate_max_window(*dst, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
-
- AccessWindowRectangle input_access(src, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
- AccessWindowStatic weights_access(weights, 0, 0, kernel_size, kernel_size);
- AccessWindowRectangle output_access(dst, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
- window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
-}
-
bool export_to_cl_image_support(ITensorInfo *tensor, GPUTarget gpu_target, DataLayout data_layout)
{
if(tensor->tensor_shape()[0] % 4 || (data_layout != DataLayout::NHWC))
@@ -370,11 +167,6 @@ bool export_to_cl_image_support(ITensorInfo *tensor, GPUTarget gpu_target, DataL
} // namespace
-BorderSize ClDirectConv2dKernel::border_size() const
-{
- return _border_size;
-}
-
ClDirectConv2dKernel::ClDirectConv2dKernel()
{
_type = CLKernelType::DIRECT;
@@ -400,24 +192,49 @@ void ClDirectConv2dKernel::configure(const CLCompileContext &compile_context, IT
const unsigned int kernel_size = weights->dimension(width_idx);
const DataType data_type = src->data_type();
- const GPUTarget gpu_target = get_target();
+ const GPUTarget gpu_target = get_target();
+ unsigned int _num_elems_processed_per_iteration = 0;
+
+ // Get dst shape
+ TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*dst, output_shape,
+ 1,
+ src->data_type(),
+ src->quantization_info());
// Configure kernel window
- auto win_config = validate_and_configure_window(src, weights, dst, conv_info, gpu_target);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ Window win;
+ if(_data_layout == DataLayout::NHWC)
+ {
+ const unsigned int vec_size = std::min(static_cast<unsigned int>(dst->tensor_shape()[0]), 4u);
+ unsigned int num_rows = 1U;
+ if(dst->tensor_shape()[0] > 16)
+ {
+ num_rows = src->data_type() == DataType::F32 ? 2U : 4U;
+ }
+
+ // Create window and update padding
+ win = calculate_max_window(output_shape, Steps(vec_size, num_rows));
+ }
+ else if(_data_layout == DataLayout::NCHW)
+ {
+ _num_elems_processed_per_iteration = 1u;
+ win = calculate_max_window(*dst, Steps(_num_elems_processed_per_iteration));
+ }
+
+ ICLKernel::configure_internal(win);
std::stringstream kernel_name;
CLBuildOptions build_options;
if(_data_layout == DataLayout::NHWC)
{
- _border_size = BorderSize();
-
kernel_name << "direct_convolution_nhwc";
- const unsigned int n0 = win_config.second.x().step();
- const unsigned int m0 = win_config.second.y().step();
+ const unsigned int n0 = win.x().step();
+ const unsigned int m0 = win.y().step();
const unsigned int k0 = adjust_vec_size(is_data_type_quantized(data_type) ? 16u : 8u, src->dimension(channel_idx));
const unsigned int partial_store_n0 = dst->dimension(channel_idx) % n0;
const unsigned int pad_left = conv_info.pad_left();
@@ -438,14 +255,8 @@ void ClDirectConv2dKernel::configure(const CLCompileContext &compile_context, IT
build_options.add_option("-cl-fast-relaxed-math");
build_options.add_option("-DSRC_TENSOR_TYPE=BUFFER");
- build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(width_idx)));
- build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(height_idx)));
- build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src->dimension(channel_idx)));
build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
build_options.add_option("-DDST_TENSOR_TYPE=BUFFER");
- build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst->dimension(width_idx)));
- build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(height_idx)));
- build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst->dimension(channel_idx)));
build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
build_options.add_option_if_else(export_to_cl_image, "-DWEI_TENSOR_TYPE=IMAGE", "-DWEI_TENSOR_TYPE=BUFFER");
build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(weights->dimension(width_idx)));
@@ -459,6 +270,7 @@ void ClDirectConv2dKernel::configure(const CLCompileContext &compile_context, IT
build_options.add_option("-DM0=" + support::cpp11::to_string(m0));
build_options.add_option("-DK0=" + support::cpp11::to_string(k0));
build_options.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_store_n0));
+ build_options.add_option_if((src->dimension(channel_idx) % k0) != 0, "-DLEFTOVER_LOOP");
build_options.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
if(is_data_type_quantized(data_type))
@@ -497,47 +309,42 @@ void ClDirectConv2dKernel::configure(const CLCompileContext &compile_context, IT
}
else
{
- _border_size = BorderSize(src->padding());
-
- kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
-
+ kernel_name << "direct_convolution_nchw";
build_options.add_option_if(biases != nullptr, std::string("-DHAS_BIAS"));
+ build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(width_idx)));
+ build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(height_idx)));
+ build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src->dimension(channel_idx)));
+ build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
+ build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
+ build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x));
+ build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_stride_y));
+ build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(weights->dimension(width_idx)));
+ build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(weights->dimension(height_idx)));
+ build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
+ build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
+ build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
+ build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x)));
+ build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
+ build_options.add_option(std::string("-DVEC_SIZE=" + support::cpp11::to_string(_num_elems_processed_per_iteration)));
+ build_options.add_option(std::string("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % _num_elems_processed_per_iteration)));
- const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, conv_stride_x, conv_stride_y, kernel_size, data_type, _data_layout);
-
- if(run_optimized_for_bifrost)
+ if(is_data_type_quantized(data_type))
{
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
+ const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
+ const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
- kernel_name << "_f32_bifrost";
- }
- else
- {
- build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
- build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
- build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x)));
- build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
-
- if(is_data_type_quantized(data_type))
- {
- const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
- const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
- const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();
-
- float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
- build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
- build_options.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
- build_options.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
- build_options.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
-
- kernel_name.str("direct_convolution_quantized");
- }
+ float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
+ int output_multiplier = 0;
+ int output_shift = 0;
+ quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
+ build_options.add_option("-DIS_QUANTIZED");
+ build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
+ build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
+ build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
+ build_options.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
+ build_options.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
+ build_options.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
}
}
@@ -570,11 +377,9 @@ void ClDirectConv2dKernel::configure(const CLCompileContext &compile_context, IT
}
Status ClDirectConv2dKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const GPUTarget target)
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv_info, act_info));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), weights->clone().get(), dst->clone().get(), conv_info, target).first);
-
return Status{};
}
@@ -613,13 +418,13 @@ void ClDirectConv2dKernel::run_op(ITensorPack &tensors, const Window &window, cl
}
unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, slice);
- add_4D_tensor_argument(idx, dst, slice);
+ add_4d_tensor_nhwc_argument(idx, src);
+ add_4d_tensor_nhwc_argument(idx, dst);
if(export_to_cl_image)
{
_kernel.setArg(idx++, weights_cl_image);
}
- add_4D_tensor_argument(idx, weights, slice);
+ add_4d_tensor_nhwc_argument(idx, weights);
if(biases != nullptr)
{
add_1D_tensor_argument(idx, biases, slice);
@@ -628,22 +433,7 @@ void ClDirectConv2dKernel::run_op(ITensorPack &tensors, const Window &window, cl
}
else
{
- Window win_in = window;
-
- win_in.adjust(Window::DimX, -_conv_info.pad_left(), true);
- win_in.adjust(Window::DimY, -_conv_info.pad_top(), true);
-
- const int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
-
- const int conv_stride_x = std::get<0>(_conv_info.stride());
- const int conv_stride_y = std::get<1>(_conv_info.stride());
-
- win_in.set_dimension_step(width_idx, window[width_idx].step() * conv_stride_x);
- win_in.set_dimension_step(height_idx, window[height_idx].step() * conv_stride_y);
-
- Window slice_in = win_in.first_slice_window_3D();
- unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
+ unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
add_3D_tensor_argument(idx1, weights, slice);
if(biases != nullptr)
@@ -658,11 +448,11 @@ void ClDirectConv2dKernel::run_op(ITensorPack &tensors, const Window &window, cl
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice_in);
+ add_3D_tensor_argument(idx, src, slice);
add_3D_tensor_argument(idx, dst, slice);
enqueue(queue, *this, slice, lws_hint());
}
- while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
+ while(window.slide_window_slice_3D(slice));
}
}
} // namespace kernels
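
To summarize the window logic that replaces validate_and_configure_window(): NHWC keeps its vectorised window, while NCHW drops the per-GPU element counts in favour of a one-element step, since the new direct_convolution_nchw kernel handles borders itself rather than relying on tensor padding. A hedged sketch of the NHWC step choice, mirroring the values in configure() (helper names are illustrative):

#include <algorithm>

// Vector size along the channel dimension: at most 4, clamped to the output depth.
unsigned int nhwc_vec_size(unsigned int dst_channels)
{
    return std::min(dst_channels, 4u);
}

// Rows processed per work-item: unroll only when there are enough output
// channels, and less aggressively for F32 than for F16/quantized types.
unsigned int nhwc_num_rows(unsigned int dst_channels, bool is_f32)
{
    return (dst_channels > 16) ? (is_f32 ? 2u : 4u) : 1u;
}
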
diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.h b/src/gpu/cl/kernels/ClDirectConv2dKernel.h
index 5624f3a0a..568192781 100644
--- a/src/gpu/cl/kernels/ClDirectConv2dKernel.h
+++ b/src/gpu/cl/kernels/ClDirectConv2dKernel.h
@@ -72,15 +72,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
- const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const GPUTarget target);
+ const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info);
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
public:
DataLayout _data_layout{};
- BorderSize _border_size{};
PadStrideInfo _conv_info{};
};
} // namespace kernels
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp
index af794354c..05988997e 100644
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp
+++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp
@@ -275,6 +275,9 @@ void ClGemmMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
// Shrink M0 to be always <= M (internal_m) to prevent out-of-bounds reads.
// NOTE: This might have implications on heuristics and performance
const unsigned int internal_m0 = std::min(internal_m, lhs_info.m0);
+ _m = internal_m;
+ _n = gemm_info.n;
+ _k = gemm_info.k;
// Create build options
CLBuildOptions build_opts;
@@ -289,9 +292,6 @@ void ClGemmMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(src1->dimension(2)));
build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
@@ -312,6 +312,9 @@ void ClGemmMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
std::string kernel_name("gemm_mm_native");
post_op_utils.set_post_ops_cl_kernel_name(kernel_name, gemm_info.post_ops);
+ // A macro guard to compile ONLY the kernel of interest
+ build_opts.add_option("-D" + upper_string(kernel_name));
+
// Create kernel
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
@@ -392,11 +395,11 @@ void ClGemmMatrixMultiplyNativeKernel::run_op(ITensorPack &tensors, const Window
unsigned int idx0;
if(_add_bias)
{
- idx0 = (4 + _num_post_op_args) * num_arguments_per_2D_tensor() + (4 + _num_post_op_args);
+ idx0 = (4 + _num_post_op_args) * num_arguments_per_2D_tensor() + (7 + _num_post_op_args);
}
else
{
- idx0 = (3 + _num_post_op_args) * num_arguments_per_2D_tensor() + (3 + _num_post_op_args);
+ idx0 = (3 + _num_post_op_args) * num_arguments_per_2D_tensor() + (6 + _num_post_op_args);
}
const unsigned int total_cross_plane_pad = src0->info()->padding().top + src0->info()->padding().bottom;
_kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
@@ -408,11 +411,11 @@ void ClGemmMatrixMultiplyNativeKernel::run_op(ITensorPack &tensors, const Window
unsigned int idx0;
if(_add_bias)
{
- idx0 = (4 + _num_post_op_args) * num_arguments_per_2D_tensor() + 4 + (_reinterpret_input_as_3d ? 1 : 0) + _num_post_op_args;
+ idx0 = (4 + _num_post_op_args) * num_arguments_per_2D_tensor() + 7 + (_reinterpret_input_as_3d ? 1 : 0) + _num_post_op_args;
}
else
{
- idx0 = (3 + _num_post_op_args) * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0) + _num_post_op_args;
+ idx0 = (3 + _num_post_op_args) * num_arguments_per_2D_tensor() + 6 + (_reinterpret_input_as_3d ? 1 : 0) + _num_post_op_args;
}
const unsigned int total_cross_plane_pad = dst->info()->padding().top + dst->info()->padding().bottom;
_kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
@@ -455,6 +458,12 @@ void ClGemmMatrixMultiplyNativeKernel::run_op(ITensorPack &tensors, const Window
const auto post_op_arg = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(experimental::get_post_op_arg_type(i)));
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(post_op_arg->info()->strides_in_bytes()[2]));
}
+
+ // Pass m, n and k at runtime
+ _kernel.setArg<cl_int>(idx++, _m);
+ _kernel.setArg<cl_int>(idx++, _n);
+ _kernel.setArg<cl_int>(idx++, _k);
+
enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
}
while(window.slide_window_slice_3D(slice));
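
The +3 shifts in the idx0 computations above follow from the new trailing arguments: each 2D tensor contributes num_arguments_per_2D_tensor() arguments plus one stride_z scalar, and m, n, k now sit in the scalar tail before the cross-plane pad, moving its position among the scalars from 4/3 to 7/6 (plus post-op arguments). A small sketch of that arithmetic, under those assumptions:

// Absolute argument index of the cross-plane pad scalar after this change.
unsigned int cross_plane_pad_index(bool add_bias, unsigned int num_post_op_args,
                                   unsigned int args_per_2d_tensor)
{
    const unsigned int num_tensors = (add_bias ? 4u : 3u) + num_post_op_args;
    const unsigned int num_scalars = (add_bias ? 7u : 6u) + num_post_op_args; // stride_z per tensor, then m, n, k
    return num_tensors * args_per_2d_tensor + num_scalars;
}
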
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h
index 415eb7bf3..e478df727 100644
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h
+++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h
@@ -81,6 +81,9 @@ private:
bool _reinterpret_output_as_3d{ false };
bool _use_dummy_work_items{ false };
bool _add_bias{ false };
+ signed int _m{ 1 };
+ signed int _n{ 1 };
+ signed int _k{ 1 };
unsigned int _num_post_op_args{ 0 }; // (EXPERIMENTAL_POST_OPS) total number of post op arguments
};
} // namespace kernels
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
index 64e99332f..6a450b652 100644
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
+++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp
@@ -201,7 +201,6 @@ void ClGemmMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
_use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device());
_add_bias = src2 != nullptr;
_export_to_cl_image = rhs_info.export_to_cl_image;
- _k = gemm_info.k;
_num_post_op_args = gemm_info.post_ops.total_num_arguments();
// Check if we need to slide the matrix B
@@ -230,6 +229,9 @@ void ClGemmMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
const unsigned int partial_store_m0 = internal_m % lhs_info.m0;
const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
+ _m = gemm_info.m;
+ _n = gemm_info.n;
+ _k = gemm_info.k;
// Create build options
CLBuildOptions build_opts;
@@ -250,9 +252,6 @@ void ClGemmMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
build_opts.add_option("-DRHS_HEIGHT=" + support::cpp11::to_string(src1->dimension(1)));
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
build_opts.add_option("-DDATA_TYPE_ACCUMULATOR=" + (enable_mixed_precision ? get_cl_type_from_data_type(DataType::F32) : get_cl_type_from_data_type(data_type)));
- build_opts.add_option("-DM=" + support::cpp11::to_string(gemm_info.m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
@@ -278,6 +277,9 @@ void ClGemmMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
kernel_name += rhs_info.export_to_cl_image ? "_texture" : "";
post_op_utils.set_post_ops_cl_kernel_name(kernel_name, gemm_info.post_ops);
+ // A macro guard to compile ONLY the kernel of interest
+ build_opts.add_option("-D" + upper_string(kernel_name));
+
// Create kernel
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
@@ -399,9 +401,6 @@ void ClGemmMatrixMultiplyReshapedKernel::run_op(ITensorPack &tensors, const Wind
add_2D_tensor_argument(idx, post_op_arg, slice);
}
- // K dimension (not used if _export_to_cl_image == true)
- _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_k));
-
// LHS stride_z
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src0->info()->strides_in_bytes()[2]));
@@ -429,6 +428,13 @@ void ClGemmMatrixMultiplyReshapedKernel::run_op(ITensorPack &tensors, const Wind
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad));
}
+ // Pass m, n and k at runtime
+ _kernel.setArg<cl_int>(idx++, _m);
+ _kernel.setArg<cl_int>(idx++, _n);
+
+ // K dimension (not used if _export_to_cl_image == true)
+ _kernel.setArg<cl_int>(idx++, _k);
+
// Dispatch kernel
enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
}
@@ -436,4 +442,4 @@ void ClGemmMatrixMultiplyReshapedKernel::run_op(ITensorPack &tensors, const Wind
}
} // namespace kernels
} // namespace opencl
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h
index 09160ec0d..2d668b91a 100644
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h
+++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h
@@ -105,7 +105,9 @@ private:
bool _use_dummy_work_items{ false };
bool _add_bias{ false };
bool _export_to_cl_image{ false };
- unsigned int _k{ 1 };
+ signed int _m{ 1 };
+ signed int _n{ 1 };
+ signed int _k{ 1 };
unsigned int _num_post_op_args{ 0 }; // (EXPERIMENTAL_POST_OPS) total number of post op arguments
};
} // namespace kernels
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp
index aa806978e..a8bcf8d6a 100644
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp
+++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -240,7 +240,9 @@ void ClGemmMatrixMultiplyReshapedOnlyRhsKernel::configure(const CLCompileContext
// Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
const unsigned int partial_store_m0 = internal_m % internal_m0;
const unsigned int partial_store_n0 = gemm_info.n % rhs_info.n0;
-
+ _m = internal_m;
+ _n = gemm_info.n;
+ _k = gemm_info.k;
// Create build options
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src0->data_type()));
@@ -253,9 +255,6 @@ void ClGemmMatrixMultiplyReshapedOnlyRhsKernel::configure(const CLCompileContext
build_opts.add_option_if(_use_dummy_work_items, "-DDUMMY_WORK_ITEMS");
build_opts.add_option_if(rhs_info.export_to_cl_image, "-DOPENCL_IMAGE_SUPPORT");
build_opts.add_option("-DRHS_HEIGHT=" + support::cpp11::to_string(src1->dimension(1)));
- build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
- build_opts.add_option("-DN=" + support::cpp11::to_string(gemm_info.n));
- build_opts.add_option("-DK=" + support::cpp11::to_string(gemm_info.k));
build_opts.add_option("-DM0=" + support::cpp11::to_string(internal_m0));
build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
@@ -286,6 +285,9 @@ void ClGemmMatrixMultiplyReshapedOnlyRhsKernel::configure(const CLCompileContext
kernel_name += rhs_info.export_to_cl_image ? "_texture" : "";
post_op_utils.set_post_ops_cl_kernel_name(kernel_name, gemm_info.post_ops);
+ // A macro guard to compile ONLY the kernel of interest
+ build_opts.add_option("-D" + upper_string(kernel_name));
+
// Create kernel
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
@@ -447,6 +449,11 @@ void ClGemmMatrixMultiplyReshapedOnlyRhsKernel::run_op(ITensorPack &tensors, con
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad_out));
}
+ // Pass m, n and k at runtime as signed ints, so that the result of any subtraction they appear in remains signed.
+ _kernel.setArg<cl_int>(idx++, _m);
+ _kernel.setArg<cl_int>(idx++, _n);
+ _kernel.setArg<cl_int>(idx++, _k);
+
enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items);
}
while(window.slide_window_slice_3D(slice));
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h
index a8f0c4c3a..00cdb299c 100644
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h
+++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -97,6 +97,9 @@ private:
bool _add_bias{ false };
bool _export_to_cl_image{ false };
bool _has_pad_y{ false };
+ signed int _m{ 1 };
+ signed int _n{ 1 };
+ signed int _k{ 1 };
unsigned int _num_post_op_args{ 0 }; // (EXPERIMENTAL_POST_OPS) total number of post op arguments
};
} // namespace kernels
diff --git a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
index 4a01c77d0..413c70ae1 100644
--- a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
+++ b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
@@ -55,6 +55,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
+ ARM_COMPUTE_RETURN_ERROR_ON((lhs_info.m0 > 4 && lhs_info.m0 < 8) && lhs_info.transpose);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
@@ -70,11 +71,10 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
+Window configure_window(ITensorInfo *src, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
{
const unsigned int num_elems_processed_per_iteration_x = lhs_info.k0;
const unsigned int num_elems_processed_per_iteration_y = lhs_info.m0;
- bool window_changed = false;
TensorInfo tmp_info(*src);
@@ -91,23 +91,13 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITenso
auto_init_if_empty(*dst, src->clone()->set_tensor_shape(misc::shape_calculator::compute_lhs_reshaped_shape(*src, lhs_info, reinterpret_input_as_3d)));
// Configure window
- Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- Window win_in = calculate_max_window(*src, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic src_access(src, 0, 0,
- src->dimension(0),
- src->dimension(1));
- AccessWindowStatic dst_access(dst, 0, 0, dst->dimension(0), dst->dimension(1));
-
- window_changed = update_window_and_padding(win_in, src_access) || // window used by the execute_window_loop
- update_window_and_padding(win, dst_access); // window used to update the padding requirements of dst tensor
+ Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
// Collapse along the Z direction
// This collapse needs to be here in order to tune the Z dimension of LWS
Window collapsed = win.collapse(win, Window::DimZ);
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
+ return collapsed;
}
} // namespace
@@ -125,27 +115,20 @@ void ClGemmReshapeLhsMatrixKernel::configure(const CLCompileContext &compile_con
auto padding_info = get_padding_info({ src });
- _reinterpret_input_as_3d = reinterpret_input_as_3d;
-
- const unsigned int src_w = src->dimension(0);
- const unsigned int src_h = _reinterpret_input_as_3d ? src->dimension(1) * src->dimension(2) : src->dimension(1);
- const unsigned int partial_load_m0 = src_h % lhs_info.m0;
- const unsigned int partial_load_k0 = src_w % lhs_info.k0;
+ const unsigned int src_w = src->dimension(0);
+ const unsigned int m = reinterpret_input_as_3d ? src->dimension(1) * src->dimension(2) : src->dimension(1);
+ const unsigned int partial_m0 = m % lhs_info.m0;
+ const unsigned int partial_k0 = src_w % lhs_info.k0;
// Create build options
CLBuildOptions build_opts;
build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src_w));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src_h));
build_opts.add_option_if(lhs_info.interleave, "-DINTERLEAVE");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(src->dimension(1)));
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(src->dimension(2)));
+ build_opts.add_option_if_else(lhs_info.transpose, "-DRESHAPE_LHS_T", "-DRESHAPE_LHS_NT");
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()));
- build_opts.add_option("-DPARTIAL_LOAD_M0=" + support::cpp11::to_string(partial_load_m0));
- build_opts.add_option("-DPARTIAL_LOAD_K0=" + support::cpp11::to_string(partial_load_k0));
+ build_opts.add_option("-DPARTIAL_M0=" + support::cpp11::to_string(partial_m0));
+ build_opts.add_option("-DPARTIAL_K0=" + support::cpp11::to_string(partial_k0));
std::string kernel_name("gemm_reshape_lhs_matrix_");
kernel_name += lhs_info.transpose ? "t" : "nt";
@@ -154,13 +137,16 @@ void ClGemmReshapeLhsMatrixKernel::configure(const CLCompileContext &compile_con
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, lhs_info, reinterpret_input_as_3d);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ auto win_config = configure_window(src, dst, lhs_info, reinterpret_input_as_3d);
+ ICLKernel::configure_internal(win_config);
+
+ unsigned int idx = 2 * num_arguments_per_3d_tensor_nhw();
+ _kernel.setArg<cl_int>(idx++, m);
+ _kernel.setArg<cl_int>(idx++, lhs_info.v0);
// Set config_id for enabling LWS tuning
_config_id = "gemm_reshape_lhs_matrix_";
- _config_id += (_reinterpret_input_as_3d ? "3d_" : "");
+ _config_id += (reinterpret_input_as_3d ? "3d_" : "");
_config_id += lower_string(string_from_data_type(src->data_type()));
_config_id += "_";
_config_id += support::cpp11::to_string(dst->dimension(0));
@@ -185,8 +171,6 @@ void ClGemmReshapeLhsMatrixKernel::configure(const CLCompileContext &compile_con
Status ClGemmReshapeLhsMatrixKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, lhs_info, reinterpret_input_as_3d));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), lhs_info, reinterpret_input_as_3d).first);
-
return Status{};
}
@@ -202,19 +186,11 @@ void ClGemmReshapeLhsMatrixKernel::run_op(ITensorPack &tensors, const Window &wi
Window slice = window.first_slice_window_3D();
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the src has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 2 * num_arguments_per_3D_tensor();
- const unsigned int total_cross_plane_pad = src->info()->padding().top + src->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
+ add_3d_tensor_nhw_argument(idx, src);
+ add_3d_tensor_nhw_argument(idx, dst);
enqueue(queue, *this, slice, lws_hint());
}
while(window.slide_window_slice_3D(slice));
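
Unlike the matrix-multiply kernels, which rebind m, n and k on every run_op(), the reshape kernel's m and v0 are fixed per configuration, so the diff binds them once at the end of configure(), in the slots right after the two nhw tensor arguments. A sketch of that binding, assuming the OpenCL C++ wrapper (helper name illustrative):

#include <CL/cl2.hpp>

void bind_reshape_lhs_scalars(cl::Kernel &kernel, unsigned int args_per_nhw_tensor,
                              int m, int v0)
{
    unsigned int idx = 2 * args_per_nhw_tensor; // skip the src and dst tensor arguments
    kernel.setArg<cl_int>(idx++, m);
    kernel.setArg<cl_int>(idx++, v0);
}
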
diff --git a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
index 69ec8f04f..db88e0d73 100644
--- a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
+++ b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
@@ -68,9 +68,6 @@ public:
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- bool _reinterpret_input_as_3d{ false };
};
} // namespace kernels
} // namespace opencl
diff --git a/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp b/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp
index 778b9b9fa..b3a03880e 100644
--- a/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp
+++ b/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp
@@ -123,10 +123,9 @@ void ClGemmReshapeRhsMatrixKernel::configure(const CLCompileContext &compile_con
CLBuildOptions build_opts;
build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
- build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
- build_opts.add_option_if(rhs_info.transpose, "-DTRANSPOSE");
build_opts.add_option_if(rhs_info.interleave, "-DINTERLEAVE");
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(1)));
+ build_opts.add_option_if(rhs_info.transpose, "-DRESHAPE_RHS_T");
+ build_opts.add_option_if(!rhs_info.transpose, "-DRESHAPE_RHS_NT");
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()));
std::string kernel_name("gemm_reshape_rhs_matrix_");
@@ -139,6 +138,9 @@ void ClGemmReshapeRhsMatrixKernel::configure(const CLCompileContext &compile_con
auto win_config = validate_and_configure_window(src, dst, rhs_info);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
+
+ unsigned int idx = 2 * num_arguments_per_3d_tensor_nhw();
+ _kernel.setArg<cl_int>(idx++, rhs_info.h0);
}
Status ClGemmReshapeRhsMatrixKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info)
@@ -164,8 +166,8 @@ void ClGemmReshapeRhsMatrixKernel::run_op(ITensorPack &tensors, const Window &wi
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
+ add_3d_tensor_nhw_argument(idx, src);
+ add_3d_tensor_nhw_argument(idx, dst);
enqueue(queue, *this, slice, lws_hint());
}
while(window.slide_window_slice_3D(slice));
diff --git a/src/gpu/cl/kernels/ClIm2ColKernel.cpp b/src/gpu/cl/kernels/ClIm2ColKernel.cpp
index c42762b99..6d1271d24 100644
--- a/src/gpu/cl/kernels/ClIm2ColKernel.cpp
+++ b/src/gpu/cl/kernels/ClIm2ColKernel.cpp
@@ -195,10 +195,16 @@ Im2ColConfiguration configure_opencl_kernel(const ITensorInfo *src, const Size2D
if(kernel_dims == Size2D(3U, 3U))
{
kernel_name = "im2col3x3_";
+ build_opts.add_option("-DIM2COL_3X3");
}
else if(kernel_dims == Size2D(9U, 9U))
{
kernel_name = "im2col9x9_";
+ build_opts.add_option("-DIM2COL_9X9");
+ }
+ else
+ {
+ build_opts.add_option("-DIM2COL_GENERIC");
}
// Get boundary vector (the first/last vector with potentially a partial vector size) size
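
These -DIM2COL_* options follow the same macro-guard idea as the -D<KERNEL_NAME> options added to the GEMM kernels: the .cl source wraps each kernel body in a preprocessor guard so only the variant actually requested is compiled. An illustrative guard pattern (shown with the C/C++ preprocessor, which matches OpenCL C syntax; the function bodies are placeholders):

#if defined(IM2COL_3X3)
// 3x3 specialisation, compiled only when requested
void im2col3x3(void) { /* ... */ }
#endif

#if defined(IM2COL_GENERIC)
// generic fallback for any other kernel size
void im2col_generic(void) { /* ... */ }
#endif
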
diff --git a/src/gpu/cl/kernels/ClPool2dKernel.cpp b/src/gpu/cl/kernels/ClPool2dKernel.cpp
index 5e53799f3..2c98c5940 100644
--- a/src/gpu/cl/kernels/ClPool2dKernel.cpp
+++ b/src/gpu/cl/kernels/ClPool2dKernel.cpp
@@ -57,6 +57,9 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
unsigned int pool_size_y = is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
int output_width = 0;
int output_height = 0;
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_pool_region_entirely_outside_input(pool_info), "Pooling region that is entirely outside input tensor is unsupported");
+
std::tie(output_width, output_height) = scaled_dimensions_signed(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height],
pool_size_x, pool_size_y, pool_info.pad_stride_info);
ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1), "Calculated output dimension size is invalid");
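
An illustration of the condition the new guard rejects, with made-up numbers: a pooling region lies entirely outside the input once an edge padding reaches the pool size, because some output cells would then pool over padding only. A sketch under that assumption:

// With pool width 2, stride 2 and pad_left 2, the first output column's
// window spans x = -2..-1 and never touches real input data.
bool first_column_entirely_outside(int pad_left, int pool_w)
{
    return pad_left >= pool_w;
}
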
diff --git a/src/gpu/cl/kernels/ClScaleKernel.cpp b/src/gpu/cl/kernels/ClScaleKernel.cpp
index d63c0e175..6f16adc65 100644
--- a/src/gpu/cl/kernels/ClScaleKernel.cpp
+++ b/src/gpu/cl/kernels/ClScaleKernel.cpp
@@ -117,9 +117,7 @@ void ClScaleKernel::configure(const CLCompileContext &compile_context, ITensorIn
const int idx_channel = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
const unsigned int src_width = src->dimension(idx_width);
const unsigned int src_height = src->dimension(idx_height);
- const unsigned int src_channel = src->dimension(idx_channel);
const unsigned int dst_width = dst->dimension(idx_width);
- const unsigned int dst_height = dst->dimension(idx_height);
const unsigned int dst_channels = dst->dimension(idx_channel);
unsigned int vec_size = 0;
unsigned int vec_size_leftover = 0;
@@ -130,20 +128,13 @@ void ClScaleKernel::configure(const CLCompileContext &compile_context, ITensorIn
vec_size = adjust_vec_size(src->data_type() == DataType::F32 ? 4 : 8, dst_channels);
vec_size_leftover = dst_channels % vec_size;
build_opts.add_option("-DSRC_TENSOR_TYPE=BUFFER");
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src_width));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src_height));
- build_opts.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src_channel));
build_opts.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
build_opts.add_option("-DDST_TENSOR_TYPE=BUFFER");
- build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst_width));
- build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst_height));
- build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst_channels));
build_opts.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
build_opts.add_option("-DCONSTANT_VALUE=" + string_from_pixel_value(info.constant_border_value, src->data_type()));
- build_opts.add_option("-DSCALE_X=" + float_to_string_with_full_precision(scale_x));
- build_opts.add_option("-DSCALE_Y=" + float_to_string_with_full_precision(scale_y));
build_opts.add_option("-DN0=" + support::cpp11::to_string(vec_size));
build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(vec_size_leftover));
+ build_opts.add_option("-DSCALE_" + string_from_interpolation_policy(interpolation_policy_to_use));
build_opts.add_option_if(src->num_dimensions() > 3, "-DBATCHED_EXECUTION");
build_opts.add_option_if(info.border_mode == BorderMode::REPLICATE, "-DBORDER_MODE_REPLICATE");
build_opts.add_option_if(info.border_mode == BorderMode::CONSTANT, "-DBORDER_MODE_CONSTANT");
@@ -203,6 +194,13 @@ void ClScaleKernel::configure(const CLCompileContext &compile_context, ITensorIn
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+ // Pass scale kernel arguments
+ if(is_nhwc)
+ {
+ unsigned int idx = 2 * num_arguments_per_4d_tensor_nhwc();
+ _kernel.setArg<cl_float>(idx++, scale_x);
+ _kernel.setArg<cl_float>(idx++, scale_y);
+ }
// Set config_id for enabling LWS tuning
_config_id = "scale_";
_config_id += (info.border_mode == BorderMode::REPLICATE ? "Bord_rep" : "");
@@ -248,8 +246,8 @@ void ClScaleKernel::run_op(ITensorPack &tensors, const Window &window, cl::Comma
Window slice = collapsed.first_slice_window_4D();
unsigned int idx = 0;
- add_4D_tensor_argument(idx, src, slice);
- add_4D_tensor_argument(idx, dst, slice);
+ add_4d_tensor_nhwc_argument(idx, src);
+ add_4d_tensor_nhwc_argument(idx, dst);
enqueue(queue, *this, slice, lws_hint());
break;
}
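
In the same spirit as the GEMM changes, the NHWC scale kernel now takes its ratios at run time: -DSCALE_X/-DSCALE_Y disappear in favour of two cl_float arguments bound after the two NHWC tensor-argument blocks, and the interpolation policy is selected with a single -DSCALE_<POLICY> style guard. A hedged host-side sketch (helper name illustrative):

#include <CL/cl2.hpp>

void bind_scale_ratios(cl::Kernel &kernel, unsigned int args_per_nhwc_tensor,
                       float scale_x, float scale_y)
{
    unsigned int idx = 2 * args_per_nhwc_tensor; // src and dst already bound
    kernel.setArg<cl_float>(idx++, scale_x);
    kernel.setArg<cl_float>(idx++, scale_y);
}
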
diff --git a/src/gpu/cl/operators/ClConv2d.cpp b/src/gpu/cl/operators/ClConv2d.cpp
index d633c8f73..23c1b8af9 100644
--- a/src/gpu/cl/operators/ClConv2d.cpp
+++ b/src/gpu/cl/operators/ClConv2d.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -255,19 +255,39 @@ ConvolutionMethod ClConv2d::get_convolution_method(const ITensorInfo *src, const
// Floating-point case: GeMM/Direct/Winograd
if(is_data_type_float(src->data_type()))
{
- const bool is_large_kernel_sz = (weights->dimension(idx_w) >= kernel_sz_direct_conv_thr) && (weights->dimension(idx_h) >= kernel_sz_direct_conv_thr);
- const bool is_ifm_ge_16 = src->dimension(idx_c) >= 16;
- const bool is_ifm_gt_ofm = src->dimension(idx_c) > weights->dimension(3U);
+ // Get dst shape
+ TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
+ const bool is_large_kernel_sz = (weights->dimension(idx_w) >= kernel_sz_direct_conv_thr) && (weights->dimension(idx_h) >= kernel_sz_direct_conv_thr);
+ const bool is_ifm_ge_16 = src->dimension(idx_c) >= 16;
+ const bool is_ofm_lte_8 = weights->dimension(3U) <= 8;
+ const bool workload_gte_8192 = (output_shape[0] * output_shape[1] * output_shape[2]) / 16 >= 8192;
+ const bool is_ifm_gt_ofm = src->dimension(idx_c) > weights->dimension(3U);
// Run Winograd if valid and IFM >= 16
if(is_wino_valid && is_ifm_ge_16)
{
return ConvolutionMethod::WINOGRAD;
}
- // Run Direct for Large kernel size
- if(is_large_kernel_sz && is_ifm_ge_16 && is_direct_valid && is_ifm_gt_ofm)
+
+ // Direct convolution case
+ if(is_direct_valid)
{
- return ConvolutionMethod::DIRECT;
+ if((gpu_target == arm_compute::GPUTarget::G71 ||
+ gpu_target == arm_compute::GPUTarget::G72 ||
+ gpu_target == arm_compute::GPUTarget::MIDGARD))
+ {
+ if(is_large_kernel_sz && is_ifm_ge_16 && is_ifm_gt_ofm)
+ {
+ return ConvolutionMethod::DIRECT;
+ }
+ }
+ else
+ {
+ if((is_large_kernel_sz && workload_gte_8192 && is_ifm_ge_16) || (is_ofm_lte_8 && is_ifm_ge_16))
+ {
+ return ConvolutionMethod::DIRECT;
+ }
+ }
}
// Default case
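
A worked example of the reworked heuristic on the non-G71/G72/Midgard path, with hypothetical shapes: a 9x9 kernel (large by the kernel_sz_direct_conv_thr test), 64 input channels (is_ifm_ge_16) and an output of 64 x 56 x 56 gives a workload of (64 * 56 * 56) / 16 = 12544 >= 8192, so DIRECT is selected; alternatively, any layer with at most 8 output feature maps and at least 16 input channels also goes DIRECT. As a sketch:

// Mirrors the new non-legacy branch of get_convolution_method().
bool prefer_direct(bool is_large_kernel_sz, bool is_ifm_ge_16, bool is_ofm_lte_8,
                   unsigned long long out_elems)
{
    const bool workload_gte_8192 = (out_elems / 16) >= 8192;
    return (is_large_kernel_sz && workload_gte_8192 && is_ifm_ge_16)
           || (is_ofm_lte_8 && is_ifm_ge_16);
}
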
diff --git a/src/gpu/cl/operators/ClDirectConv2d.cpp b/src/gpu/cl/operators/ClDirectConv2d.cpp
index d2e4049a0..53de6fc40 100644
--- a/src/gpu/cl/operators/ClDirectConv2d.cpp
+++ b/src/gpu/cl/operators/ClDirectConv2d.cpp
@@ -83,7 +83,7 @@ void ClDirectConv2d::configure(const CLCompileContext &compile_context, ITensorI
Status ClDirectConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClDirectConv2dKernel::validate(src, weights, biases, dst, conv_info, ActivationLayerInfo(), CLScheduler::get().target()));
+ ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClDirectConv2dKernel::validate(src, weights, biases, dst, conv_info, ActivationLayerInfo()));
if(act_info.enabled())
{
ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClActivationKernel::validate(dst, dst, act_info));
diff --git a/src/gpu/cl/operators/ClGemm.cpp b/src/gpu/cl/operators/ClGemm.cpp
index 50ecb214e..88f6b79b5 100644
--- a/src/gpu/cl/operators/ClGemm.cpp
+++ b/src/gpu/cl/operators/ClGemm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -191,7 +191,6 @@ ClGemm::ClGemm()
_mm_native_kernel(std::make_unique<ClGemmMatrixMultiplyNativeKernel>()),
_mm_reshaped_kernel(std::make_unique<ClGemmMatrixMultiplyReshapedKernel>()),
_mm_reshaped_only_rhs_kernel(std::make_unique<ClGemmMatrixMultiplyReshapedOnlyRhsKernel>()),
- _mm_reshaped_only_rhs_fallback_kernel(std::make_unique<ClGemmMatrixMultiplyReshapedOnlyRhsKernel>()),
_tmp_a(),
_tmp_b(),
_reshape_b_only_on_first_run(false),
@@ -303,7 +302,6 @@ void ClGemm::configure_reshaped_only_rhs(const CLCompileContext &compile_context
// Set the target for the kernels
_mm_reshaped_only_rhs_kernel->set_target(gpu_target);
- _mm_reshaped_only_rhs_fallback_kernel->set_target(gpu_target);
GEMMLHSMatrixInfo lhs_info{};
GEMMRHSMatrixInfo rhs_info{};
@@ -322,10 +320,6 @@ void ClGemm::configure_reshaped_only_rhs(const CLCompileContext &compile_context
kernel_info.has_pad_y = false;
_mm_reshaped_only_rhs_kernel->configure(compile_context, a, &_tmp_b, c, output, alpha, beta, lhs_info, rhs_info, kernel_info);
- // Configure matrix multiply kernel with y padding support
- kernel_info.has_pad_y = true;
- _mm_reshaped_only_rhs_fallback_kernel->configure(compile_context, a, &_tmp_b, c, output, alpha, beta, lhs_info, rhs_info, kernel_info);
-
// Request memory for RHS reshape matrix
_aux_mem[RhsReshape] = MemoryInfo(offset_int_vec(RhsReshape), _reshape_b_only_on_first_run ? MemoryLifetime::Persistent : MemoryLifetime::Temporary, _tmp_b.total_size());
}
@@ -625,7 +619,7 @@ void ClGemm::run(ITensorPack &tensors)
if(has_pad_y)
{
- CLScheduler::get().enqueue_op(*_mm_reshaped_only_rhs_fallback_kernel, gemm_reshaped_onlyrhs_pack, true);
+ ARM_COMPUTE_ERROR_ON(has_pad_y);
}
else
{
diff --git a/src/gpu/cl/operators/ClGemm.h b/src/gpu/cl/operators/ClGemm.h
index e084e53fe..3c0cad3ca 100644
--- a/src/gpu/cl/operators/ClGemm.h
+++ b/src/gpu/cl/operators/ClGemm.h
@@ -121,7 +121,6 @@ private:
std::unique_ptr<kernels::ClGemmMatrixMultiplyNativeKernel> _mm_native_kernel;
std::unique_ptr<kernels::ClGemmMatrixMultiplyReshapedKernel> _mm_reshaped_kernel;
std::unique_ptr<kernels::ClGemmMatrixMultiplyReshapedOnlyRhsKernel> _mm_reshaped_only_rhs_kernel;
- std::unique_ptr<kernels::ClGemmMatrixMultiplyReshapedOnlyRhsKernel> _mm_reshaped_only_rhs_fallback_kernel;
TensorInfo _tmp_a;
TensorInfo _tmp_b;
bool _reshape_b_only_on_first_run;