/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/inv_txfm_msa.h"

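/* Load an 8-row x 32-column block of coefficients (stride 32), transpose it
 * in 8x8 tiles and store the result to tmp_buf so that each group of eight
 * contiguous values holds one column of the block. */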
static void idct32x8_row_transpose_store(const int16_t *input,
                                         int16_t *tmp_buf) {
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* 1st & 2nd 8x8 */
  LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
                     n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
                     n7);
  ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8);

  /* 3rd & 4th 8x8 */
  LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
                     n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
                     n7);
  ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8);
  ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 28 * 8), 8);
}

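/* Row pass, even half: process the even-indexed coefficients of the
 * transposed block and store the 16 intermediate even outputs to
 * tmp_eve_buf for the final butterfly. */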
static void idct32x8_row_even_process_store(int16_t *tmp_buf,
                                            int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 15 * 8));
  ST_SH(loc1, (tmp_eve_buf));
  ST_SH(loc2, (tmp_eve_buf + 14 * 8));
  ST_SH(loc3, (tmp_eve_buf + 8));

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 13 * 8));
  ST_SH(loc1, (tmp_eve_buf + 2 * 8));
  ST_SH(loc2, (tmp_eve_buf + 12 * 8));
  ST_SH(loc3, (tmp_eve_buf + 3 * 8));

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 11 * 8));
  ST_SH(loc1, (tmp_eve_buf + 4 * 8));
  ST_SH(loc2, (tmp_eve_buf + 10 * 8));
  ST_SH(loc3, (tmp_eve_buf + 5 * 8));

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 9 * 8));
  ST_SH(loc1, (tmp_eve_buf + 6 * 8));
  ST_SH(loc2, (tmp_eve_buf + 8 * 8));
  ST_SH(loc3, (tmp_eve_buf + 7 * 8));
}

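/* Row pass, odd half: process the odd-indexed coefficients of the
 * transposed block and store the 16 intermediate odd outputs to
 * tmp_odd_buf for the final butterfly. */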
static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
                                           int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 8);
  reg1 = LD_SH(tmp_buf + 7 * 8);
  reg2 = LD_SH(tmp_buf + 9 * 8);
  reg3 = LD_SH(tmp_buf + 15 * 8);
  reg4 = LD_SH(tmp_buf + 17 * 8);
  reg5 = LD_SH(tmp_buf + 23 * 8);
  reg6 = LD_SH(tmp_buf + 25 * 8);
  reg7 = LD_SH(tmp_buf + 31 * 8);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf), 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);

  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 8);
  reg1 = LD_SH(tmp_buf + 5 * 8);
  reg2 = LD_SH(tmp_buf + 11 * 8);
  reg3 = LD_SH(tmp_buf + 13 * 8);
  reg4 = LD_SH(tmp_buf + 19 * 8);
  reg5 = LD_SH(tmp_buf + 21 * 8);
  reg6 = LD_SH(tmp_buf + 27 * 8);
  reg7 = LD_SH(tmp_buf + 29 * 8);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);

  BUTTERFLY_4(loc3, loc2, loc0, loc1, vec1, vec0, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);

  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH(reg0, (tmp_odd_buf + 13 * 8));
  ST_SH(reg1, (tmp_odd_buf + 14 * 8));

  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */

  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}

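/* Final row butterfly: combine the even and odd intermediate results,
 * transpose the 8x32 output back to row order and store it to dst
 * (the 32-wide intermediate row buffer). */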
static void idct_butterfly_transpose_store(int16_t *tmp_buf,
                                           int16_t *tmp_eve_buf,
                                           int16_t *tmp_odd_buf, int16_t *dst) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);

  ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);

  ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);

  ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);

  ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));

  /* Transpose : 16 vectors */
  /* 1st & 2nd 8x8 */
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
                     n3);
  ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
                     n7);
  ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);

  /* 3rd & 4th 8x8 */
  LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3,
                     n3);
  ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7,
                     n7);
  ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
}

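/* 1-D inverse transform of eight 32-point rows: transpose, even/odd
 * processing, then the final butterfly and transpose back to rows. */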
static void idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) {
  DECLARE_ALIGNED(32, int16_t, tmp_buf[8 * 32]);
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct32x8_row_transpose_store(input, &tmp_buf[0]);
  idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]);
  idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]);
  idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], &tmp_odd_buf[0],
                                 output);
}

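/* Column pass, even half: same even-coefficient processing as the row pass,
 * but reading an 8-column slice directly from the 32-wide row buffer. */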
static void idct8x32_column_even_process_store(int16_t *tmp_buf,
                                               int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
  tmp_buf += (2 * 32);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  /* Load 8 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  /* Store 8 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, tmp_eve_buf, 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
}

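/* Column pass, odd half: same odd-coefficient processing as the row pass,
 * reading an 8-column slice directly from the 32-wide row buffer. */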
static void idct8x32_column_odd_process_store(int16_t *tmp_buf,
                                              int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 32);
  reg1 = LD_SH(tmp_buf + 7 * 32);
  reg2 = LD_SH(tmp_buf + 9 * 32);
  reg3 = LD_SH(tmp_buf + 15 * 32);
  reg4 = LD_SH(tmp_buf + 17 * 32);
  reg5 = LD_SH(tmp_buf + 23 * 32);
  reg6 = LD_SH(tmp_buf + 25 * 32);
  reg7 = LD_SH(tmp_buf + 31 * 32);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, tmp_odd_buf, 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 32);
  reg1 = LD_SH(tmp_buf + 5 * 32);
  reg2 = LD_SH(tmp_buf + 11 * 32);
  reg3 = LD_SH(tmp_buf + 13 * 32);
  reg4 = LD_SH(tmp_buf + 19 * 32);
  reg5 = LD_SH(tmp_buf + 21 * 32);
  reg6 = LD_SH(tmp_buf + 27 * 32);
  reg7 = LD_SH(tmp_buf + 29 * 32);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
  BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}

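/* Final column butterfly: combine the even and odd halves, round the result
 * (shift right by 6 with rounding) and add the reconstructed residual to the
 * destination block. */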
static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
                                             int16_t *tmp_odd_buf, uint8_t *dst,
                                             int32_t dst_stride) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
                      m6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
                      m7);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
                      n6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
                      n7);
}

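/* 1-D inverse transform of an 8-column x 32-row slice plus reconstruction:
 * even/odd processing followed by the final butterfly and add to dst. */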
static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride) {
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
  idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
  idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], dst,
                                   dst_stride);
}

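/* Full 32x32 inverse DCT: four 32x8 row passes into a 32x32 intermediate
 * buffer, then four 8x32 column passes that add the result to dst. */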
void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
                                int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  /* transform rows */
  for (i = 0; i < 4; ++i) {
    /* process 32 * 8 block */
    idct32x8_1d_rows_msa((input + (i << 8)), (out_ptr + (i << 8)));
  }

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}

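/* 32x32 inverse DCT when at most 34 coefficients are non-zero (all within
 * the upper-left 8x8): zero the intermediate buffer, run a single 32x8 row
 * pass, then the four 8x32 column passes. */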
void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  for (i = 32; i--;) {
    __asm__ __volatile__(
        "sw     $zero,      0(%[out_ptr])     \n\t"
        "sw     $zero,      4(%[out_ptr])     \n\t"
        "sw     $zero,      8(%[out_ptr])     \n\t"
        "sw     $zero,     12(%[out_ptr])     \n\t"
        "sw     $zero,     16(%[out_ptr])     \n\t"
        "sw     $zero,     20(%[out_ptr])     \n\t"
        "sw     $zero,     24(%[out_ptr])     \n\t"
        "sw     $zero,     28(%[out_ptr])     \n\t"
        "sw     $zero,     32(%[out_ptr])     \n\t"
        "sw     $zero,     36(%[out_ptr])     \n\t"
        "sw     $zero,     40(%[out_ptr])     \n\t"
        "sw     $zero,     44(%[out_ptr])     \n\t"
        "sw     $zero,     48(%[out_ptr])     \n\t"
        "sw     $zero,     52(%[out_ptr])     \n\t"
        "sw     $zero,     56(%[out_ptr])     \n\t"
        "sw     $zero,     60(%[out_ptr])     \n\t"

        :
        : [out_ptr] "r"(out_ptr));

    out_ptr += 32;
  }

  out_ptr = out_arr;

  /* rows: only upper-left 8x8 has non-zero coeff */
  idct32x8_1d_rows_msa(input, out_ptr);

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}

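/* 32x32 inverse DCT with only the DC coefficient non-zero: compute the
 * single reconstructed value and add it to every pixel of the block. */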
void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
  int32_t i;
  int16_t out;
  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
  v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;

  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out, 6);

  vec = __msa_fill_h(out);

  for (i = 16; i--;) {
    LD_UB2(dst, 16, dst0, dst1);
    LD_UB2(dst + dst_stride, 16, dst2, dst3);

    UNPCK_UB_SH(dst0, res0, res4);
    UNPCK_UB_SH(dst1, res1, res5);
    UNPCK_UB_SH(dst2, res2, res6);
    UNPCK_UB_SH(dst3, res3, res7);
    ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
    ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
    CLIP_SH4_0_255(res0, res1, res2, res3);
    CLIP_SH4_0_255(res4, res5, res6, res7);
    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1,
                tmp2, tmp3);

    ST_UB2(tmp0, tmp1, dst, 16);
    dst += dst_stride;
    ST_UB2(tmp2, tmp3, dst, 16);
    dst += dst_stride;
  }
}