1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
|
.text
.p2align 2
.global ixheaacd_sbr_qmfanal32_winadds_eld
@-----------------------------------------------------------------------------
@ ixheaacd_sbr_qmfanal32_winadds_eld  (ARM32 / NEON, AAPCS)
@
@ Windowed-add stage of the 32-band ELD QMF analysis filterbank.
@
@ Register arguments (inferred from usage in this block - confirm against
@ the C prototype):
@   R0 = tmpQ1     : int16 samples; taps read at n, n+64, n+128, n+192, n+256
@   R1 = tmpQ2     : int16 samples; same 5-tap layout as tmpQ1
@   R2 = tmpQmf_c1 : int16 window coefficients, one 128-byte group per tap
@   R3 = tmpQmf_c2 : int16 window coefficients, same layout as tmpQmf_c1
@ Stack arguments (located after the 40-byte GPR push + 64-byte NEON push,
@ hence the +104 base offset):
@   [SP,#104] winAdd       : int32 output; first half via R4, second half
@                            (winAdd + 128 bytes) via R11
@   [SP,#108] filterStates : int16 delay line, filled backwards from +64
@   [SP,#112] timeIn       : int16 input samples, read 'stride' apart
@   [SP,#116] stride       : input sample stride, in int16 units
@
@ Part 1 (LOOP): copies 32 strided int16 samples from timeIn into
@   filterStates, storing backwards from filterStates+64 (pre-decrement).
@ Part 2 (LOOP_1): for each of 8 groups of 4 outputs, accumulates a 5-tap
@   widening multiply-accumulate (VMULL/VMLAL.S16 -> int32) of tmpQ1
@   against c1 into winAdd, and of tmpQ2 against c2 into winAdd+128.
@   The loop is software-pipelined: the code before LOOP_1 primes the first
@   accumulations, each LOOP_1 pass stores the previous group while loading
@   and accumulating the next, and the tail after BGT stores the last two
@   groups.  NOTE: each coefficient load {Dk, Dk+1} consumes only Dk;
@   Dk+1 is intentionally overwritten by the next sample load.
@-----------------------------------------------------------------------------
ixheaacd_sbr_qmfanal32_winadds_eld:
STMFD sp!, {R4-R12, R14} @ save R4-R12 + LR (10 regs, 40 bytes)
VPUSH {D8 - D15} @ save callee-saved NEON regs (64 bytes)
LDR R5, [SP, #108] @ filterStates
LDR R6, [SP, #112] @ timeIn
LDR R7, [SP, #116] @ stride (int16 units)
MOV R9, R7, LSL #1 @ byte stride between successive input samples
ADD r5, r5, #64 @ start at filterStates+64; stores walk backwards
MOV r10, #3 @ 4 passes (3..0) x 8 samples = 32 samples
LOOP:
LDRSH r4 , [R6], r9 @ read 4 strided samples from timeIn
LDRSH r8 , [R6], r9
LDRSH r11 , [R6], r9
LDRSH r12 , [R6], r9
STRH r4 , [r5 , #-2]! @ store them backwards into filterStates
STRH r8 , [r5 , #-2]!
STRH r11 , [r5 , #-2]!
STRH r12 , [r5 , #-2]!
LDRSH r4 , [R6], r9 @ second 4 samples of this pass
LDRSH r8 , [R6], r9
LDRSH r11 , [R6], r9
LDRSH r12 , [R6], r9
STRH r4 , [r5 , #-2]!
STRH r8 , [r5 , #-2]!
STRH r11 , [r5 , #-2]!
STRH r12 , [r5 , #-2]!
SUBS r10, r10, #1
BPL LOOP @ BPL: loop body runs for r10 = 3,2,1,0
@--- Part 2: prime the pipeline (first tmpQ1/c1 and tmpQ2/c2 loads) --------
LDR R4, [SP, #104] @ winAdd (output, first half)
MOV R5, #8 @ 8 output groups of 4 int32 each
VLD1.16 D0, [R0]! @ tmpQ1[n + 0] load and incremented R0 by 8
MOV R6, #64
MOV R6, R6, LSL #1 @ R6 = 128 = byte step between taps (64 int16)
VLD1.16 {D1, D2}, [R2]! @ tmpQmf_c1[2*(n + 0)]; only D1 is used (D2 reloaded below)
MOV R7, #244 @ dead value: R7 is never used after this point
MOV R9, R0 @ remember next-group tmpQ1 pointer
ADD R0, R0, #120 @ R0 now at tap 1 (120 + 8 already consumed = 128)
MOV R11, R4 @ copy winAdd to R11
VLD1.16 D2, [R0], R6 @ tmpQ1[n + 64] load, advance one tap (128 bytes)
ADD R11, R11, #128 @ R11 = winAdd + 128 bytes (second output half)
MOV R10, R2 @ remember next-group c1 pointer
ADD R2, R2, #112 @ skip to next 128-byte coeff group (16 B already consumed)
VMULL.S16 Q15, D0, D1 @ acc = tmpQ1[n+0] * c1 tap 0 (widening s16->s32)
VLD1.16 {D3, D4}, [R2]! @ tmpQmf_c1[2*(n + 64)]; only D3 used
ADD R2, R2, #112 @ next coeff group
VLD1.16 D4, [R0], R6 @ tmpQ1[n + 128] load, advance one tap
VMLAL.S16 Q15, D2, D3 @ acc += tap 1
VLD1.16 {D5, D6}, [R2]! @ tmpQmf_c1[2*(n + 128)]; only D5 used
SUB R10, R10, #8 @ rewind saved c1 pointer to start of next group
ADD R2, R2, #112 @ next coeff group
VLD1.16 D6, [R0], R6 @ tmpQ1[n + 192] load, advance one tap
VMLAL.S16 Q15, D4, D5 @ acc += tap 2
VLD1.16 {D7, D8}, [R2]! @ tmpQmf_c1[2*(n + 192)]; only D7 used
ADD R2, R2, #112 @ next coeff group
VLD1.16 D8, [R0], R6 @ tmpQ1[n + 256] load, advance one tap
VMLAL.S16 Q15, D6, D7 @ acc += tap 3
MOV R0, R9 @ rewind tmpQ1 to next group's start
VLD1.16 {D9, D10}, [R2]! @ tmpQmf_c1[2*(n + 256)]; only D9 used
ADD R2, R2, #112 @ next coeff group
VLD1.16 D10, [R1]! @ tmpQ2[n + 0] load and incremented
VMLAL.S16 Q15, D8, D9 @ acc += tap 4; Q15 = first winAdd group
MOV R9, R1 @ remember next-group tmpQ2 pointer
VLD1.16 {D11, D12}, [R3]! @ tmpQmf_c2[2*(n + 0)]; only D11 used
ADD R1, R1, #120 @ R1 now at tmpQ2 tap 1 (120 + 8 = 128)
MOV R2, R10 @ rewind c1 pointer for LOOP_1
VLD1.16 D12, [R1], R6 @ tmpQ2[n + 64] load, advance one tap
MOV R10, R3 @ remember next-group c2 pointer
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 {D13, D14}, [R3]! @ tmpQmf_c2[2*(n + 64)]; only D13 used
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 {D15, D16}, [R3]! @ tmpQmf_c2[2*(n + 128)]; only D15 used
SUB R10, R10, #8 @ rewind saved c2 pointer to start of next group
VLD1.16 D14, [R1], R6 @ tmpQ2[n + 128] load, advance one tap
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 D16, [R1], R6 @ tmpQ2[n + 192] load, advance one tap
SUB R5, R5, #1 @ one group consumed by the pipeline prologue
VLD1.16 {D17, D18}, [R3]! @ tmpQmf_c2[2*(n + 192)]; only D17 used
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 D18, [R1], R6 @ tmpQ2[n + 256] load, advance one tap
MOV R1, R9 @ rewind tmpQ2 to next group's start
VLD1.16 {D19, D20}, [R3]! @ tmpQmf_c2[2*(n + 256)]; only D19 used
ADD R3, R3, #112 @ next c2 coeff group
MOV R3, R10 @ rewind c2 pointer for LOOP_1
@--- Steady state: store previous group, load + accumulate the next --------
LOOP_1:
VLD1.16 D0, [R0]! @ tmpQ1[n + 0], next group
MOV R9, R0 @ remember next-group tmpQ1 pointer
VLD1.16 {D1, D2}, [R2]! @ c1 tap 0; only D1 used
ADD R0, R0, #120 @ advance to tap 1 (120 + 8 = 128)
MOV R10, R2 @ remember next-group c1 pointer
VST1.32 {Q15}, [R4]! @ store previous tmpQ1/c1 group to winAdd
ADD R2, R2, #112 @ next c1 coeff group
VMULL.S16 Q15, D10, D11 @ previous tmpQ2/c2 group: tap 0
VLD1.16 D2, [R0], R6 @ tmpQ1[n + 64]
VMLAL.S16 Q15, D12, D13 @ tap 1
VMLAL.S16 Q15, D14, D15 @ tap 2
VLD1.16 {D3, D4}, [R2]! @ c1 tap 1; only D3 used
VMLAL.S16 Q15, D16, D17 @ tap 3
VMLAL.S16 Q15, D18, D19 @ tap 4; Q15 = tmpQ2/c2 group result
VLD1.16 D4, [R0], R6 @ tmpQ1[n + 128]
ADD R2, R2, #112 @ next c1 coeff group
VST1.32 {Q15}, [R11]! @ store tmpQ2/c2 group to winAdd + 128
SUB R10, R10, #8 @ rewind saved c1 pointer
VMULL.S16 Q15, D0, D1 @ current tmpQ1/c1 group: tap 0
VLD1.16 {D5, D6}, [R2]! @ c1 tap 2; only D5 used
VMLAL.S16 Q15, D2, D3 @ tap 1
ADD R2, R2, #112 @ next c1 coeff group
VLD1.16 D6, [R0], R6 @ tmpQ1[n + 192]
VMLAL.S16 Q15, D4, D5 @ tap 2
VLD1.16 {D7, D8}, [R2]! @ c1 tap 3; only D7 used
ADD R2, R2, #112 @ next c1 coeff group
VLD1.16 D8, [R0], R6 @ tmpQ1[n + 256]
VMLAL.S16 Q15, D6, D7 @ tap 3
MOV R0, R9 @ rewind tmpQ1 to next group's start
VLD1.16 {D9, D10}, [R2]! @ c1 tap 4; only D9 used
ADD R2, R2, #112 @ next c1 coeff group
VLD1.16 D10, [R1]! @ tmpQ2[n + 0], next group
MOV R2, R10 @ rewind c1 pointer
MOV R9, R1 @ remember next-group tmpQ2 pointer
VLD1.16 {D11, D12}, [R3]! @ c2 tap 0; only D11 used
ADD R1, R1, #120 @ advance to tmpQ2 tap 1
VMLAL.S16 Q15, D8, D9 @ tap 4; Q15 = tmpQ1/c1 group result
VLD1.16 D12, [R1], R6 @ tmpQ2[n + 64]
MOV R10, R3 @ remember next-group c2 pointer
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 {D13, D14}, [R3]! @ c2 tap 1; only D13 used
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 D14, [R1], R6 @ tmpQ2[n + 128]
SUB R10, R10, #8 @ rewind saved c2 pointer
VLD1.16 {D15, D16}, [R3]! @ c2 tap 2; only D15 used
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 D16, [R1], R6 @ tmpQ2[n + 192]
VLD1.16 {D17, D18}, [R3]! @ c2 tap 3; only D17 used
ADD R3, R3, #112 @ next c2 coeff group
VLD1.16 D18, [R1], R6 @ tmpQ2[n + 256]
SUBS R5, R5, #1
MOV R1, R9 @ rewind tmpQ2 to next group's start
VLD1.16 {D19, D20}, [R3]! @ c2 tap 4; only D19 used
ADD R3, R3, #112 @ next c2 coeff group
MOV R3, R10 @ rewind c2 pointer
BGT LOOP_1
@--- Pipeline epilogue: flush the last tmpQ1 and tmpQ2 groups --------------
VST1.32 {Q15}, [R4]! @ store final tmpQ1/c1 group
VMULL.S16 Q15, D10, D11 @ final tmpQ2/c2 group: tap 0
VMLAL.S16 Q15, D12, D13 @ tap 1
VMLAL.S16 Q15, D14, D15 @ tap 2
VMLAL.S16 Q15, D16, D17 @ tap 3
VMLAL.S16 Q15, D18, D19 @ tap 4
VST1.32 {Q15}, [R11]! @ store final tmpQ2/c2 group
VPOP {D8 - D15} @ restore callee-saved NEON regs
LDMFD sp!, {R4-R12, R15} @ restore GPRs and return (pop into PC)
|