author    Benoit Jacob <benoitjacob@google.com>  2019-08-20 10:25:05 -0400
committer Benoit Jacob <benoitjacob@google.com>  2020-03-10 16:36:41 -0400
commit    fa69a4bbdf3b676156668842b5d2e042cd4cd1f7 (patch)
tree      2f19d08f4e70d5ab400aa64d0eaf341fa299dc01
parent    9a8ac17ea97b04776c6c0ab9f90ee4f9c3636afe (diff)
Some more fixes to arm32 asm:
- Use vld1.8, not vld1.32, to load 8-bit values. Especially in packing code,
  the source pointers are not guaranteed to have any alignment. In kernels
  they more or less are, but .8 is more idiomatic. If we ever notice a
  performance benefit of .32 (news to me) justifying this choice, we could
  then use .32 in kernels only, with a comment recording the performance
  rationale.
- One vld1 was passing a single d-register without enclosing it in {} to
  make it a register list.
- Pack8bitNeonOutOfOrder{LHS,RHS} renamed to
  Pack8bitNeonOutOfOrder{4Cols,2Cols}, because that is more descriptive of
  the actual difference between these functions.

PiperOrigin-RevId: 264378751
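To illustrate the first two bullets, here is a minimal standalone sketch (not
ruy code; the function name is made up for illustration) of a 16-byte NEON
copy using the .8 element size, for a 32-bit ARM target built with NEON
support. With vld1, the default alignment requirement is the element size, so
vld1.8 is safe for a byte pointer of any alignment, whereas vld1.32 expects
4-byte alignment. The comment also shows the register-list syntax that the
second bullet fixes.

```cpp
#include <cstdint>

// Hypothetical helper, for illustration only: copies 16 bytes from src to
// dst via q0 (= d0, d1), using an 8-bit element size so that src and dst
// may have any alignment.
void Copy16BytesUnaligned(const std::uint8_t* src, std::uint8_t* dst) {
  asm volatile(
      // .8 element size: no alignment requirement beyond 1 byte. The same
      // load written as vld1.32 would expect a 4-byte-aligned address.
      // Note the braces: operands of vld1/vst1 are register *lists*, so
      // even a single d-register must be written as e.g. {d12}.
      "vld1.8 {d0, d1}, [%[src]]\n"
      "vst1.8 {d0, d1}, [%[dst]]\n"
      :
      : [src] "r"(src), [dst] "r"(dst)
      : "memory", "d0", "d1");
}
```

On the little-endian targets these kernels run on, vld1.8 and vld1.32 leave
identical bytes in the registers; only the alignment expectation (and the
behavior on big-endian) differs, which is why the element-size change below
is behavior-preserving.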
-rw-r--r--  kernel_arm32.cc | 36
-rw-r--r--  pack_arm.cc     | 16
-rw-r--r--  pack_arm.h      |  8
3 files changed, 30 insertions, 30 deletions
diff --git a/kernel_arm32.cc b/kernel_arm32.cc
index 23e373d..e07e87e 100644
--- a/kernel_arm32.cc
+++ b/kernel_arm32.cc
@@ -633,12 +633,12 @@ void Kernel8bitNeonOutOfOrder(const KernelParams8bit<4, 2>& params) {
// clang-format off
// Load the first 64 bytes of LHS and RHS data.
- "vld1.32 {d0, d1}, [%[lhs_ptr]]!\n"
- "vld1.32 {d2, d3}, [%[lhs_ptr]]!\n"
- "vld1.32 {d4, d5}, [%[lhs_ptr]]!\n"
- "vld1.32 {d6, d7}, [%[lhs_ptr]]!\n"
- "vld1.32 {d8, d9}, [%[rhs_ptr]]!\n"
- "vld1.32 {d10, d11}, [%[rhs_ptr]]!\n"
+ "vld1.8 {d0, d1}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d2, d3}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d4, d5}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d6, d7}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d8, d9}, [%[rhs_ptr]]!\n"
+ "vld1.8 {d10, d11}, [%[rhs_ptr]]!\n"
"sub sp, sp, #" RUY_STR(RUY_STACK_OFFSET_SIZE) "\n"
@@ -729,13 +729,13 @@ void Kernel8bitNeonOutOfOrder(const KernelParams8bit<4, 2>& params) {
"vpadal.s16 q13, q15\n"
// Load the next 64 bytes of LHS and RHS data.
- "vld1.32 {d0, d1}, [%[lhs_ptr]]!\n"
- "vld1.32 {d2, d3}, [%[lhs_ptr]]!\n"
- "vld1.32 {d4, d5}, [%[lhs_ptr]]!\n"
- "vld1.32 {d6, d7}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d0, d1}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d2, d3}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d4, d5}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d6, d7}, [%[lhs_ptr]]!\n"
RUY_PREFETCH("pld [%[lhs_ptr]]\n")
- "vld1.32 {d8, d9}, [%[rhs_ptr]]!\n"
- "vld1.32 {d10, d11}, [%[rhs_ptr]]!\n"
+ "vld1.8 {d8, d9}, [%[rhs_ptr]]!\n"
+ "vld1.8 {d10, d11}, [%[rhs_ptr]]!\n"
RUY_PREFETCH("pld [%[rhs_ptr]]\n")
// Each iteration of this loop advances by 16 levels of depth.
@@ -897,12 +897,12 @@ void Kernel8bitNeonOutOfOrder(const KernelParams8bit<4, 2>& params) {
// main loop will need to load, we start loading the first 32 bytes of
// each of LHS and RHS, into v0 -- v3, as we don't need v0 -- v3 anymore
// in the rest of the work on the current block.
- "vld1.32 {d0, d1}, [%[lhs_ptr]]!\n"
- "vld1.32 {d2, d3}, [%[lhs_ptr]]!\n"
- "vld1.32 {d4, d5}, [%[lhs_ptr]]!\n"
- "vld1.32 {d6, d7}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d0, d1}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d2, d3}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d4, d5}, [%[lhs_ptr]]!\n"
+ "vld1.8 {d6, d7}, [%[lhs_ptr]]!\n"
RUY_PREFETCH("pld [%[lhs_ptr]]\n")
- "vld1.32 {d8, d9, d10, d11}, [%[rhs_ptr]]!\n"
+ "vld1.8 {d8, d9, d10, d11}, [%[rhs_ptr]]!\n"
RUY_PREFETCH("pld [%[rhs_ptr]]\n")
// Add to the bias values the product
@@ -927,7 +927,7 @@ void Kernel8bitNeonOutOfOrder(const KernelParams8bit<4, 2>& params) {
"ldr r4, [sp, #" RUY_STR(RUY_STACK_OFFSET_COL) "]\n"
// Offset by current col * number of bytes per value
"add r3, r3, r4, lsl #2\n"
- "vld1.32 d12, [r3]\n"
+ "vld1.32 { d12 }, [r3]\n"
"ldr r5, [%[params], #" RUY_STR(RUY_OFFSET_LHS_ZERO_POINT) "]\n"
"vdup.32 q10, r5\n" // create lhs_zero_point_vec
// Subtract rhs_sums * lhs_zero_point, per
diff --git a/pack_arm.cc b/pack_arm.cc
index a4ea2ab..1cd452e 100644
--- a/pack_arm.cc
+++ b/pack_arm.cc
@@ -223,7 +223,7 @@ void CheckOffsetsInPackParams8bit(const Params&) {
// Packing code for out-of-order ARMv7 CPUs like the Krait 400 or A9.
// No attempt made at making this code efficient on in-order cores yet.
-void Pack8bitNeonOutOfOrderLHS(const PackParams8bit& params) {
+void Pack8bitNeonOutOfOrder4Cols(const PackParams8bit& params) {
CheckOffsetsInPackParams8bit(params);
gemmlowp::ScopedProfilingLabel label(
"Pack (kNeon, optimized for out-of-order cores)");
@@ -258,11 +258,11 @@ void Pack8bitNeonOutOfOrderLHS(const PackParams8bit& params) {
"1:\n"
"add r1, r1, #16\n"
/* Load q0 */
- "vld1.32 {d0, d1}, [%[src_ptr0]]\n"
+ "vld1.8 {d0, d1}, [%[src_ptr0]]\n"
"add %[src_ptr0], %[src_ptr0], %[src_inc0]\n"
/* Load q1 */
- "vld1.32 {d2, d3}, [%[src_ptr1]]\n"
+ "vld1.8 {d2, d3}, [%[src_ptr1]]\n"
"add %[src_ptr1], %[src_ptr1], %[src_inc1]\n"
"veor.8 q4, q0, q11\n"
@@ -281,10 +281,10 @@ void Pack8bitNeonOutOfOrderLHS(const PackParams8bit& params) {
"vpadal.s16 q13, q9\n"
// Now do the same for src_ptr2 and src_ptr3.
- "vld1.32 {d0, d1}, [%[src_ptr2]]\n"
+ "vld1.8 {d0, d1}, [%[src_ptr2]]\n"
"add %[src_ptr2], %[src_ptr2], %[src_inc2]\n"
- "vld1.32 {d2, d3}, [%[src_ptr3]]\n"
+ "vld1.8 {d2, d3}, [%[src_ptr3]]\n"
"add %[src_ptr3], %[src_ptr3], %[src_inc3]\n"
"veor.8 q4, q0, q11\n"
@@ -445,7 +445,7 @@ void Pack8bitNeonOutOfOrderLHS(const PackParams8bit& params) {
// No attempt made at making this code efficient on in-order cores yet.
// This version differs from the above in that we only handle two columns
// at a time.
-void Pack8bitNeonOutOfOrderRHS(const PackParams8bit& params) {
+void Pack8bitNeonOutOfOrder2Cols(const PackParams8bit& params) {
CheckOffsetsInPackParams8bit(params);
gemmlowp::ScopedProfilingLabel label(
"Pack (kNeon, optimized for out-of-order cores)");
@@ -474,11 +474,11 @@ void Pack8bitNeonOutOfOrderRHS(const PackParams8bit& params) {
"1:\n"
"add r1, r1, #16\n"
/* Load q0 */
- "vld1.32 {d0, d1}, [%[src_ptr0]]\n"
+ "vld1.8 {d0, d1}, [%[src_ptr0]]\n"
"add %[src_ptr0], %[src_ptr0], %[src_inc0]\n"
/* Load q1 */
- "vld1.32 {d2, d3}, [%[src_ptr1]]\n"
+ "vld1.8 {d2, d3}, [%[src_ptr1]]\n"
"add %[src_ptr1], %[src_ptr1], %[src_inc1]\n"
"veor.8 q4, q0, q11\n"
diff --git a/pack_arm.h b/pack_arm.h
index 60aec73..b7532f2 100644
--- a/pack_arm.h
+++ b/pack_arm.h
@@ -128,8 +128,8 @@ void Pack8bitNeonDotprodInOrder(const void* src_ptr0, const void* src_ptr1,
int input_xor);
#elif RUY_PLATFORM(NEON_32) && RUY_OPT_ENABLED(RUY_OPT_ASM)
-void Pack8bitNeonOutOfOrderLHS(const PackParams8bit& params);
-void Pack8bitNeonOutOfOrderRHS(const PackParams8bit& params);
+void Pack8bitNeonOutOfOrder4Cols(const PackParams8bit& params);
+void Pack8bitNeonOutOfOrder2Cols(const PackParams8bit& params);
#endif // (RUY_PLATFORM(NEON_64)&& RUY_OPT_ENABLED(RUY_OPT_ASM)
#if (RUY_PLATFORM(NEON_32) || RUY_PLATFORM(NEON_64)) && \
@@ -205,7 +205,7 @@ struct PackImpl<Path::kNeon, FixedKernelLayout<Order::kColMajor, 16, 4>, Scalar,
packed_ptr, src_inc0, src_inc1, src_inc2, src_inc3,
src_matrix.layout.rows, src_matrix.zero_point,
kInputXor, &params);
- Pack8bitNeonOutOfOrderLHS(params);
+ Pack8bitNeonOutOfOrder4Cols(params);
#endif // RUY_PLATFORM(NEON_64)
}
}
@@ -259,7 +259,7 @@ struct PackImpl<Path::kNeon, FixedKernelLayout<Order::kColMajor, 16, 2>, Scalar,
packed_ptr, src_inc0, src_inc1, -1, -1,
src_matrix.layout.rows, src_matrix.zero_point,
kInputXor, &params);
- Pack8bitNeonOutOfOrderRHS(params);
+ Pack8bitNeonOutOfOrder2Cols(params);
}
}
};