Diffstat (limited to 'vpx_dsp/arm/mem_neon.h')
-rw-r--r--  vpx_dsp/arm/mem_neon.h  102
1 file changed, 102 insertions(+), 0 deletions(-)
diff --git a/vpx_dsp/arm/mem_neon.h b/vpx_dsp/arm/mem_neon.h
index 19cfc7c7f..586bfb85a 100644
--- a/vpx_dsp/arm/mem_neon.h
+++ b/vpx_dsp/arm/mem_neon.h
@@ -102,6 +102,16 @@ static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
#endif
}
+#if CONFIG_VP9_HIGHBITDEPTH
+static INLINE void store_s32q_to_tran_low(tran_low_t *buf, const int32x4_t a) {
+ vst1q_s32(buf, a);
+}
+
+static INLINE int32x4_t load_tran_low_to_s32q(const tran_low_t *buf) {
+ return vld1q_s32(buf);
+}
+#endif
+
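A minimal usage sketch (not part of this diff): with CONFIG_VP9_HIGHBITDEPTH enabled, tran_low_t is 32 bits wide, so four coefficients round-trip through an int32x4_t via the two helpers added above. negate_coeffs_4() is a hypothetical caller name, and the include of mem_neon.h is assumed to pull in tran_low_t through its usual headers.

  #include <arm_neon.h>
  #include "vpx_dsp/arm/mem_neon.h"  /* assumed to provide tran_low_t */

  /* Only valid when CONFIG_VP9_HIGHBITDEPTH is enabled (tran_low_t == int32_t). */
  static void negate_coeffs_4(tran_low_t *coeffs) {
    const int32x4_t a = load_tran_low_to_s32q(coeffs);
    store_s32q_to_tran_low(coeffs, vnegq_s32(a));  /* negate and write back */
  }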
// Propagate type information to the compiler. Without this the compiler may
// assume the required alignment of uint32_t (4 bytes) and add alignment hints
// to the memory access.
@@ -112,6 +122,34 @@ static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
memcpy(buf, &a, 4);
}
+// Load 4 contiguous bytes when alignment is not guaranteed.
+static INLINE uint8x8_t load_unaligned_u8_4x1(const uint8_t *buf) {
+ uint32_t a;
+ uint32x2_t a_u32;
+ memcpy(&a, buf, 4);
+ a_u32 = vdup_n_u32(0);
+ a_u32 = vset_lane_u32(a, a_u32, 0);
+ return vreinterpret_u8_u32(a_u32);
+}
+
+// Load 4 contiguous bytes and replicate across a vector when alignment is not
+// guaranteed.
+static INLINE uint8x8_t load_replicate_u8_4x1(const uint8_t *buf) {
+ uint32_t a;
+ memcpy(&a, buf, 4);
+ return vreinterpret_u8_u32(vdup_n_u32(a));
+}
+
+// Store 4 contiguous bytes from the low half of an 8x8 vector.
+static INLINE void store_u8_4x1(uint8_t *buf, uint8x8_t a) {
+ vst1_lane_u32((uint32_t *)buf, vreinterpret_u32_u8(a), 0);
+}
+
+// Store 4 contiguous bytes from the high half of an 8x8 vector.
+static INLINE void store_u8_4x1_high(uint8_t *buf, uint8x8_t a) {
+ vst1_lane_u32((uint32_t *)buf, vreinterpret_u32_u8(a), 1);
+}
+
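A minimal usage sketch (not part of this diff) of the 4-byte helpers added above: copy one 4-byte pixel group from src to dst. copy_4_pixels() is a hypothetical caller name.

  #include <arm_neon.h>
  #include "vpx_dsp/arm/mem_neon.h"

  static void copy_4_pixels(uint8_t *dst, const uint8_t *src) {
    /* load_unaligned_u8_4x1() places the 4 bytes in lanes 0-3 of a uint8x8_t. */
    const uint8x8_t a = load_unaligned_u8_4x1(src);
    /* store_u8_4x1() writes lanes 0-3 back out as a single 32-bit store. */
    store_u8_4x1(dst, a);
  }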
// Load 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
ptrdiff_t stride) {
@@ -126,6 +164,29 @@ static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
return vreinterpret_u8_u32(a_u32);
}
+// Load 8 bytes when alignment is not guaranteed.
+static INLINE uint16x4_t load_unaligned_u16(const uint16_t *buf) {
+ uint64_t a;
+ uint64x1_t a_u64 = vdup_n_u64(0);
+ memcpy(&a, buf, 8);
+ a_u64 = vset_lane_u64(a, a_u64, 0);
+ return vreinterpret_u16_u64(a_u64);
+}
+
+// Load 2 sets of 8 bytes when alignment is not guaranteed.
+static INLINE uint16x8_t load_unaligned_u16q(const uint16_t *buf,
+ ptrdiff_t stride) {
+ uint64_t a;
+ uint64x2_t a_u64;
+ if (stride == 4) return vld1q_u16(buf);
+ memcpy(&a, buf, 8);
+ buf += stride;
+ a_u64 = vdupq_n_u64(a);
+ memcpy(&a, buf, 8);
+ a_u64 = vsetq_lane_u64(a, a_u64, 1);
+ return vreinterpretq_u16_u64(a_u64);
+}
+
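A minimal usage sketch (not part of this diff): load two 4-element rows of 16-bit pixels separated by stride (in uint16_t units) with load_unaligned_u16q() and average them. avg_2x4_u16() is a hypothetical caller name.

  #include <arm_neon.h>
  #include "vpx_dsp/arm/mem_neon.h"

  static uint16x4_t avg_2x4_u16(const uint16_t *src, ptrdiff_t stride) {
    /* Lanes 0-3 hold row 0, lanes 4-7 hold row 1. */
    const uint16x8_t rows = load_unaligned_u16q(src, stride);
    return vrhadd_u16(vget_low_u16(rows), vget_high_u16(rows));
  }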
// Store 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8(uint8_t *buf, ptrdiff_t stride,
const uint8x8_t a) {
@@ -202,6 +263,16 @@ static INLINE void store_u8(uint8_t *buf, ptrdiff_t stride, const uint8x8_t a) {
vst1_lane_u32((uint32_t *)buf, a_u32, 1);
}
+static INLINE void store_u8_8x3(uint8_t *s, const ptrdiff_t p,
+ const uint8x8_t s0, const uint8x8_t s1,
+ const uint8x8_t s2) {
+ vst1_u8(s, s0);
+ s += p;
+ vst1_u8(s, s1);
+ s += p;
+ vst1_u8(s, s2);
+}
+
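A minimal usage sketch (not part of this diff): write the same 8-byte row to three consecutive rows of a destination buffer with pitch p bytes, using store_u8_8x3() added above. fill_3_rows() is a hypothetical caller name.

  #include <arm_neon.h>
  #include "vpx_dsp/arm/mem_neon.h"

  static void fill_3_rows(uint8_t *dst, ptrdiff_t p, const uint8_t *row) {
    const uint8x8_t r = vld1_u8(row);  /* one 8-byte source row */
    store_u8_8x3(dst, p, r, r, r);
  }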
static INLINE void load_u8_8x4(const uint8_t *s, const ptrdiff_t p,
uint8x8_t *const s0, uint8x8_t *const s1,
uint8x8_t *const s2, uint8x8_t *const s3) {
@@ -226,6 +297,16 @@ static INLINE void store_u8_8x4(uint8_t *s, const ptrdiff_t p,
vst1_u8(s, s3);
}
+static INLINE void load_u8_16x3(const uint8_t *s, const ptrdiff_t p,
+ uint8x16_t *const s0, uint8x16_t *const s1,
+ uint8x16_t *const s2) {
+ *s0 = vld1q_u8(s);
+ s += p;
+ *s1 = vld1q_u8(s);
+ s += p;
+ *s2 = vld1q_u8(s);
+}
+
static INLINE void load_u8_16x4(const uint8_t *s, const ptrdiff_t p,
uint8x16_t *const s0, uint8x16_t *const s1,
uint8x16_t *const s2, uint8x16_t *const s3) {
@@ -358,4 +439,25 @@ static INLINE void store_u8_16x8(uint8_t *s, const ptrdiff_t p,
vst1q_u8(s, s7);
}
+static INLINE void load_u16_8x8(const uint16_t *s, const ptrdiff_t p,
+ uint16x8_t *s0, uint16x8_t *s1, uint16x8_t *s2,
+ uint16x8_t *s3, uint16x8_t *s4, uint16x8_t *s5,
+ uint16x8_t *s6, uint16x8_t *s7) {
+ *s0 = vld1q_u16(s);
+ s += p;
+ *s1 = vld1q_u16(s);
+ s += p;
+ *s2 = vld1q_u16(s);
+ s += p;
+ *s3 = vld1q_u16(s);
+ s += p;
+ *s4 = vld1q_u16(s);
+ s += p;
+ *s5 = vld1q_u16(s);
+ s += p;
+ *s6 = vld1q_u16(s);
+ s += p;
+ *s7 = vld1q_u16(s);
+}
+
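A minimal usage sketch (not part of this diff): load an 8x8 block of 16-bit pixels with load_u16_8x8() added above and accumulate a per-column sum. column_sums_8x8() is a hypothetical caller name.

  #include <arm_neon.h>
  #include "vpx_dsp/arm/mem_neon.h"

  static uint16x8_t column_sums_8x8(const uint16_t *src, ptrdiff_t stride) {
    uint16x8_t s0, s1, s2, s3, s4, s5, s6, s7, sum;
    load_u16_8x8(src, stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
    /* Sum the eight rows lane-wise to get one sum per column. */
    sum = vaddq_u16(vaddq_u16(s0, s1), vaddq_u16(s2, s3));
    sum = vaddq_u16(sum, vaddq_u16(vaddq_u16(s4, s5), vaddq_u16(s6, s7)));
    return sum;
  }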
#endif // VPX_VPX_DSP_ARM_MEM_NEON_H_