1 // Copyright 2016 Google Inc. All Rights Reserved.
2 //
3 // Use of this source code is governed by a BSD-style license
4 // that can be found in the COPYING file in the root of the source
5 // tree. An additional intellectual property rights grant can be found
6 // in the file PATENTS. All contributing project authors may
7 // be found in the AUTHORS file in the root of the source tree.
8 // -----------------------------------------------------------------------------
9 //
10 // MSA common macros
11 //
12 // Author(s): Prashant Patil ([email protected])
13
14 #ifndef WEBP_DSP_MSA_MACRO_H_
15 #define WEBP_DSP_MSA_MACRO_H_
16
17 #include "src/dsp/dsp.h"
18
19 #if defined(WEBP_USE_MSA)
20
21 #include <stdint.h>
22 #include <msa.h>
23
// clang and gcc disagree on how MSA builtins are typed: clang requires an
// explicit cast of the vector operand to the builtin's expected lane type,
// while gcc accepts the generic vector operators directly. The two branches
// below therefore define the same immediate-operand helpers twice.
//
// Fixed here: SRLI_H is a *logical shift right* (srli); the portable branch
// wrongly used a left shift ('a << b'), which would scale values up instead
// of down everywhere SRLI_H is used.
#if defined(__clang__)
#define CLANG_BUILD
#endif

#ifdef CLANG_BUILD
#define ALPHAVAL (-1)
#define ADDVI_H(a, b) __msa_addvi_h((v8i16)a, b)
#define ADDVI_W(a, b) __msa_addvi_w((v4i32)a, b)
#define SRAI_B(a, b) __msa_srai_b((v16i8)a, b)
#define SRAI_H(a, b) __msa_srai_h((v8i16)a, b)
#define SRAI_W(a, b) __msa_srai_w((v4i32)a, b)
#define SRLI_H(a, b) __msa_srli_h((v8i16)a, b)
// NOTE(review): __msa_slli_b operates on byte lanes, so the (v4i32) cast
// looks wrong ((v16i8) expected) — TODO confirm against a clang MSA build.
#define SLLI_B(a, b) __msa_slli_b((v4i32)a, b)
#define ANDI_B(a, b) __msa_andi_b((v16u8)a, b)
#define ORI_B(a, b) __msa_ori_b((v16u8)a, b)
#else
#define ALPHAVAL (0xff)
#define ADDVI_H(a, b) (a + b)
#define ADDVI_W(a, b) (a + b)
#define SRAI_B(a, b) (a >> b)
#define SRAI_H(a, b) (a >> b)
#define SRAI_W(a, b) (a >> b)
#define SRLI_H(a, b) (a >> b)
#define SLLI_B(a, b) (a << b)
#define ANDI_B(a, b) (a & b)
#define ORI_B(a, b) (a | b)
#endif
51
// Aligned full-vector load: reinterpret 'psrc' as a pointer to the requested
// vector type RTYPE and read one 16-byte vector. The _B/_H/_W suffix names
// the lane width; the typed wrappers pick the signed/unsigned variant.
#define LD_B(RTYPE, psrc) *((RTYPE*)(psrc))
#define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
#define LD_SB(...) LD_B(v16i8, __VA_ARGS__)

#define LD_H(RTYPE, psrc) *((RTYPE*)(psrc))
#define LD_UH(...) LD_H(v8u16, __VA_ARGS__)
#define LD_SH(...) LD_H(v8i16, __VA_ARGS__)

#define LD_W(RTYPE, psrc) *((RTYPE*)(psrc))
#define LD_UW(...) LD_W(v4u32, __VA_ARGS__)
#define LD_SW(...) LD_W(v4i32, __VA_ARGS__)

// Aligned full-vector store: write vector 'in' of type RTYPE to 'pdst'.
#define ST_B(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
#define ST_SB(...) ST_B(v16i8, __VA_ARGS__)

#define ST_H(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
#define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
#define ST_SH(...) ST_H(v8i16, __VA_ARGS__)

#define ST_W(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
#define ST_UW(...) ST_W(v4u32, __VA_ARGS__)
#define ST_SW(...) ST_W(v4i32, __VA_ARGS__)
75
// Generates a 'static inline TYPE FUNC_NAME(const void* const)' that loads a
// TYPE value with the given MIPS instruction INSTR via inline asm. Routing
// the access through asm is what lets the unaligned variants (ulh/ulw/uld)
// be selected below; the "m" input constraint makes the compiler form a
// valid memory operand and the loaded value is returned in a GP register
// ("=r").
#define MSA_LOAD_FUNC(TYPE, INSTR, FUNC_NAME) \
static inline TYPE FUNC_NAME(const void* const psrc) { \
  const uint8_t* const psrc_m = (const uint8_t*)psrc; \
  TYPE val_m; \
  __asm__ volatile("" #INSTR " %[val_m], %[psrc_m] \n\t" \
                   : [val_m] "=r"(val_m) \
                   : [psrc_m] "m"(*psrc_m)); \
  return val_m; \
}

// Invokes a loader generated by MSA_LOAD_FUNC.
#define MSA_LOAD(psrc, FUNC_NAME) FUNC_NAME(psrc)

// Store-side counterpart of MSA_LOAD_FUNC: writes 'val' to 'pdst' with the
// given instruction. The destination is an output operand ("=m") so the
// compiler knows that memory location is written.
#define MSA_STORE_FUNC(TYPE, INSTR, FUNC_NAME) \
static inline void FUNC_NAME(TYPE val, void* const pdst) { \
  uint8_t* const pdst_m = (uint8_t*)pdst; \
  TYPE val_m = val; \
  __asm__ volatile(" " #INSTR " %[val_m], %[pdst_m] \n\t" \
                   : [pdst_m] "=m"(*pdst_m) \
                   : [val_m] "r"(val_m)); \
}

// Invokes a storer generated by MSA_STORE_FUNC.
#define MSA_STORE(val, pdst, FUNC_NAME) FUNC_NAME(val, pdst)
98
// Scalar (GP-register) load/store helpers, selected by ISA revision.
// MIPS r6 handles unaligned addresses in the plain lh/lw/ld/sh/sw/sd
// instructions, so those are used directly; pre-r6 cores need the dedicated
// unaligned forms (ulh/ulw/uld/ush/usw). When there is no 64-bit GP
// register (__mips != 64), a 64-bit access is synthesized from two 32-bit
// ones with the low word at the lower address (little-endian layout).
#if (__mips_isa_rev >= 6)
MSA_LOAD_FUNC(uint16_t, lh, msa_lh);
#define LH(psrc) MSA_LOAD(psrc, msa_lh)
MSA_LOAD_FUNC(uint32_t, lw, msa_lw);
#define LW(psrc) MSA_LOAD(psrc, msa_lw)
#if (__mips == 64)
MSA_LOAD_FUNC(uint64_t, ld, msa_ld);
#define LD(psrc) MSA_LOAD(psrc, msa_ld)
#else  // !(__mips == 64)
#define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_lw)) << 32) | \
                  MSA_LOAD(psrc, msa_lw))
#endif  // (__mips == 64)

MSA_STORE_FUNC(uint16_t, sh, msa_sh);
#define SH(val, pdst) MSA_STORE(val, pdst, msa_sh)
MSA_STORE_FUNC(uint32_t, sw, msa_sw);
#define SW(val, pdst) MSA_STORE(val, pdst, msa_sw)
MSA_STORE_FUNC(uint64_t, sd, msa_sd);
#define SD(val, pdst) MSA_STORE(val, pdst, msa_sd)
#else  // !(__mips_isa_rev >= 6)
MSA_LOAD_FUNC(uint16_t, ulh, msa_ulh);
#define LH(psrc) MSA_LOAD(psrc, msa_ulh)
MSA_LOAD_FUNC(uint32_t, ulw, msa_ulw);
#define LW(psrc) MSA_LOAD(psrc, msa_ulw)
#if (__mips == 64)
MSA_LOAD_FUNC(uint64_t, uld, msa_uld);
#define LD(psrc) MSA_LOAD(psrc, msa_uld)
#else  // !(__mips == 64)
#define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_ulw)) << 32) | \
                  MSA_LOAD(psrc, msa_ulw))
#endif  // (__mips == 64)

MSA_STORE_FUNC(uint16_t, ush, msa_ush);
#define SH(val, pdst) MSA_STORE(val, pdst, msa_ush)
MSA_STORE_FUNC(uint32_t, usw, msa_usw);
#define SW(val, pdst) MSA_STORE(val, pdst, msa_usw)
// Pre-r6 64-bit store without a 64-bit GP register: split into two 32-bit
// stores, low word first.
#define SD(val, pdst) do { \
    uint8_t* const pdst_sd_m = (uint8_t*)(pdst); \
    const uint32_t val0_m = (uint32_t)(val & 0x00000000FFFFFFFF); \
    const uint32_t val1_m = (uint32_t)((val >> 32) & 0x00000000FFFFFFFF); \
    SW(val0_m, pdst_sd_m); \
    SW(val1_m, pdst_sd_m + 4); \
  } while (0)
#endif  // (__mips_isa_rev >= 6)
143
144 /* Description : Load 4 words with stride
145 * Arguments : Inputs - psrc, stride
146 * Outputs - out0, out1, out2, out3
147 * Details : Load word in 'out0' from (psrc)
148 * Load word in 'out1' from (psrc + stride)
149 * Load word in 'out2' from (psrc + 2 * stride)
150 * Load word in 'out3' from (psrc + 3 * stride)
151 */
152 #define LW4(psrc, stride, out0, out1, out2, out3) do { \
153 const uint8_t* ptmp = (const uint8_t*)psrc; \
154 out0 = LW(ptmp); \
155 ptmp += stride; \
156 out1 = LW(ptmp); \
157 ptmp += stride; \
158 out2 = LW(ptmp); \
159 ptmp += stride; \
160 out3 = LW(ptmp); \
161 } while (0)
162
163 /* Description : Store words with stride
164 * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
165 * Details : Store word from 'in0' to (pdst)
166 * Store word from 'in1' to (pdst + stride)
167 * Store word from 'in2' to (pdst + 2 * stride)
168 * Store word from 'in3' to (pdst + 3 * stride)
169 */
170 #define SW4(in0, in1, in2, in3, pdst, stride) do { \
171 uint8_t* ptmp = (uint8_t*)pdst; \
172 SW(in0, ptmp); \
173 ptmp += stride; \
174 SW(in1, ptmp); \
175 ptmp += stride; \
176 SW(in2, ptmp); \
177 ptmp += stride; \
178 SW(in3, ptmp); \
179 } while (0)
180
181 #define SW3(in0, in1, in2, pdst, stride) do { \
182 uint8_t* ptmp = (uint8_t*)pdst; \
183 SW(in0, ptmp); \
184 ptmp += stride; \
185 SW(in1, ptmp); \
186 ptmp += stride; \
187 SW(in2, ptmp); \
188 } while (0)
189
190 #define SW2(in0, in1, pdst, stride) do { \
191 uint8_t* ptmp = (uint8_t*)pdst; \
192 SW(in0, ptmp); \
193 ptmp += stride; \
194 SW(in1, ptmp); \
195 } while (0)
196
197 /* Description : Store 4 double words with stride
198 * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
199 * Details : Store double word from 'in0' to (pdst)
200 * Store double word from 'in1' to (pdst + stride)
201 * Store double word from 'in2' to (pdst + 2 * stride)
202 * Store double word from 'in3' to (pdst + 3 * stride)
203 */
204 #define SD4(in0, in1, in2, in3, pdst, stride) do { \
205 uint8_t* ptmp = (uint8_t*)pdst; \
206 SD(in0, ptmp); \
207 ptmp += stride; \
208 SD(in1, ptmp); \
209 ptmp += stride; \
210 SD(in2, ptmp); \
211 ptmp += stride; \
212 SD(in3, ptmp); \
213 } while (0)
214
215 /* Description : Load vectors with 16 byte elements with stride
216 * Arguments : Inputs - psrc, stride
217 * Outputs - out0, out1
218 * Return Type - as per RTYPE
219 * Details : Load 16 byte elements in 'out0' from (psrc)
220 * Load 16 byte elements in 'out1' from (psrc + stride)
221 */
222 #define LD_B2(RTYPE, psrc, stride, out0, out1) do { \
223 out0 = LD_B(RTYPE, psrc); \
224 out1 = LD_B(RTYPE, psrc + stride); \
225 } while (0)
226 #define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
227 #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
228
229 #define LD_B3(RTYPE, psrc, stride, out0, out1, out2) do { \
230 LD_B2(RTYPE, psrc, stride, out0, out1); \
231 out2 = LD_B(RTYPE, psrc + 2 * stride); \
232 } while (0)
233 #define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__)
234 #define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)
235
236 #define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \
237 LD_B2(RTYPE, psrc, stride, out0, out1); \
238 LD_B2(RTYPE, psrc + 2 * stride , stride, out2, out3); \
239 } while (0)
240 #define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
241 #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
242
243 #define LD_B8(RTYPE, psrc, stride, \
244 out0, out1, out2, out3, out4, out5, out6, out7) do { \
245 LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3); \
246 LD_B4(RTYPE, psrc + 4 * stride, stride, out4, out5, out6, out7); \
247 } while (0)
248 #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)
249 #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
250
251 /* Description : Load vectors with 8 halfword elements with stride
252 * Arguments : Inputs - psrc, stride
253 * Outputs - out0, out1
254 * Details : Load 8 halfword elements in 'out0' from (psrc)
255 * Load 8 halfword elements in 'out1' from (psrc + stride)
256 */
257 #define LD_H2(RTYPE, psrc, stride, out0, out1) do { \
258 out0 = LD_H(RTYPE, psrc); \
259 out1 = LD_H(RTYPE, psrc + stride); \
260 } while (0)
261 #define LD_UH2(...) LD_H2(v8u16, __VA_ARGS__)
262 #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
263
264 /* Description : Load vectors with 4 word elements with stride
265 * Arguments : Inputs - psrc, stride
266 * Outputs - out0, out1, out2, out3
267 * Details : Load 4 word elements in 'out0' from (psrc + 0 * stride)
268 * Load 4 word elements in 'out1' from (psrc + 1 * stride)
269 * Load 4 word elements in 'out2' from (psrc + 2 * stride)
270 * Load 4 word elements in 'out3' from (psrc + 3 * stride)
271 */
272 #define LD_W2(RTYPE, psrc, stride, out0, out1) do { \
273 out0 = LD_W(RTYPE, psrc); \
274 out1 = LD_W(RTYPE, psrc + stride); \
275 } while (0)
276 #define LD_UW2(...) LD_W2(v4u32, __VA_ARGS__)
277 #define LD_SW2(...) LD_W2(v4i32, __VA_ARGS__)
278
279 #define LD_W3(RTYPE, psrc, stride, out0, out1, out2) do { \
280 LD_W2(RTYPE, psrc, stride, out0, out1); \
281 out2 = LD_W(RTYPE, psrc + 2 * stride); \
282 } while (0)
283 #define LD_UW3(...) LD_W3(v4u32, __VA_ARGS__)
284 #define LD_SW3(...) LD_W3(v4i32, __VA_ARGS__)
285
286 #define LD_W4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \
287 LD_W2(RTYPE, psrc, stride, out0, out1); \
288 LD_W2(RTYPE, psrc + 2 * stride, stride, out2, out3); \
289 } while (0)
290 #define LD_UW4(...) LD_W4(v4u32, __VA_ARGS__)
291 #define LD_SW4(...) LD_W4(v4i32, __VA_ARGS__)
292
293 /* Description : Store vectors of 16 byte elements with stride
294 * Arguments : Inputs - in0, in1, pdst, stride
295 * Details : Store 16 byte elements from 'in0' to (pdst)
296 * Store 16 byte elements from 'in1' to (pdst + stride)
297 */
298 #define ST_B2(RTYPE, in0, in1, pdst, stride) do { \
299 ST_B(RTYPE, in0, pdst); \
300 ST_B(RTYPE, in1, pdst + stride); \
301 } while (0)
302 #define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
303 #define ST_SB2(...) ST_B2(v16i8, __VA_ARGS__)
304
305 #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \
306 ST_B2(RTYPE, in0, in1, pdst, stride); \
307 ST_B2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
308 } while (0)
309 #define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
310 #define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
311
312 #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
313 pdst, stride) do { \
314 ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
315 ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
316 } while (0)
317 #define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__)
318
319 /* Description : Store vectors of 4 word elements with stride
320 * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
321 * Details : Store 4 word elements from 'in0' to (pdst + 0 * stride)
322 * Store 4 word elements from 'in1' to (pdst + 1 * stride)
323 * Store 4 word elements from 'in2' to (pdst + 2 * stride)
324 * Store 4 word elements from 'in3' to (pdst + 3 * stride)
325 */
326 #define ST_W2(RTYPE, in0, in1, pdst, stride) do { \
327 ST_W(RTYPE, in0, pdst); \
328 ST_W(RTYPE, in1, pdst + stride); \
329 } while (0)
330 #define ST_UW2(...) ST_W2(v4u32, __VA_ARGS__)
331 #define ST_SW2(...) ST_W2(v4i32, __VA_ARGS__)
332
333 #define ST_W3(RTYPE, in0, in1, in2, pdst, stride) do { \
334 ST_W2(RTYPE, in0, in1, pdst, stride); \
335 ST_W(RTYPE, in2, pdst + 2 * stride); \
336 } while (0)
337 #define ST_UW3(...) ST_W3(v4u32, __VA_ARGS__)
338 #define ST_SW3(...) ST_W3(v4i32, __VA_ARGS__)
339
340 #define ST_W4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \
341 ST_W2(RTYPE, in0, in1, pdst, stride); \
342 ST_W2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
343 } while (0)
344 #define ST_UW4(...) ST_W4(v4u32, __VA_ARGS__)
345 #define ST_SW4(...) ST_W4(v4i32, __VA_ARGS__)
346
347 /* Description : Store vectors of 8 halfword elements with stride
348 * Arguments : Inputs - in0, in1, pdst, stride
349 * Details : Store 8 halfword elements from 'in0' to (pdst)
350 * Store 8 halfword elements from 'in1' to (pdst + stride)
351 */
352 #define ST_H2(RTYPE, in0, in1, pdst, stride) do { \
353 ST_H(RTYPE, in0, pdst); \
354 ST_H(RTYPE, in1, pdst + stride); \
355 } while (0)
356 #define ST_UH2(...) ST_H2(v8u16, __VA_ARGS__)
357 #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
358
359 /* Description : Store 2x4 byte block to destination memory from input vector
360 * Arguments : Inputs - in, stidx, pdst, stride
361 * Details : Index 'stidx' halfword element from 'in' vector is copied to
362 * the GP register and stored to (pdst)
363 * Index 'stidx+1' halfword element from 'in' vector is copied to
364 * the GP register and stored to (pdst + stride)
365 * Index 'stidx+2' halfword element from 'in' vector is copied to
366 * the GP register and stored to (pdst + 2 * stride)
367 * Index 'stidx+3' halfword element from 'in' vector is copied to
368 * the GP register and stored to (pdst + 3 * stride)
369 */
370 #define ST2x4_UB(in, stidx, pdst, stride) do { \
371 uint8_t* pblk_2x4_m = (uint8_t*)pdst; \
372 const uint16_t out0_m = __msa_copy_s_h((v8i16)in, stidx); \
373 const uint16_t out1_m = __msa_copy_s_h((v8i16)in, stidx + 1); \
374 const uint16_t out2_m = __msa_copy_s_h((v8i16)in, stidx + 2); \
375 const uint16_t out3_m = __msa_copy_s_h((v8i16)in, stidx + 3); \
376 SH(out0_m, pblk_2x4_m); \
377 pblk_2x4_m += stride; \
378 SH(out1_m, pblk_2x4_m); \
379 pblk_2x4_m += stride; \
380 SH(out2_m, pblk_2x4_m); \
381 pblk_2x4_m += stride; \
382 SH(out3_m, pblk_2x4_m); \
383 } while (0)
384
385 /* Description : Store 4x4 byte block to destination memory from input vector
386 * Arguments : Inputs - in0, in1, pdst, stride
387 * Details : 'Idx0' word element from input vector 'in0' is copied to the
388 * GP register and stored to (pdst)
389 * 'Idx1' word element from input vector 'in0' is copied to the
390 * GP register and stored to (pdst + stride)
391 * 'Idx2' word element from input vector 'in0' is copied to the
392 * GP register and stored to (pdst + 2 * stride)
393 * 'Idx3' word element from input vector 'in0' is copied to the
394 * GP register and stored to (pdst + 3 * stride)
395 */
396 #define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) do { \
397 uint8_t* const pblk_4x4_m = (uint8_t*)pdst; \
398 const uint32_t out0_m = __msa_copy_s_w((v4i32)in0, idx0); \
399 const uint32_t out1_m = __msa_copy_s_w((v4i32)in0, idx1); \
400 const uint32_t out2_m = __msa_copy_s_w((v4i32)in1, idx2); \
401 const uint32_t out3_m = __msa_copy_s_w((v4i32)in1, idx3); \
402 SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \
403 } while (0)
404
405 #define ST4x8_UB(in0, in1, pdst, stride) do { \
406 uint8_t* const pblk_4x8 = (uint8_t*)pdst; \
407 ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \
408 ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \
409 } while (0)
410
411 /* Description : Immediate number of elements to slide
412 * Arguments : Inputs - in0, in1, slide_val
413 * Outputs - out
414 * Return Type - as per RTYPE
415 * Details : Byte elements from 'in1' vector are slid into 'in0' by
416 * value specified in the 'slide_val'
417 */
// Slide 'in1' byte elements into 'in0' by 'slide_val' positions and return
// the result cast to RTYPE.
// Fixed: a stray trailing backslash after the macro body spliced the
// following (blank) line into the definition; harmless today but fragile if
// code is ever added on that line.
#define SLDI_B(RTYPE, in0, in1, slide_val) \
  (RTYPE)__msa_sldi_b((v16i8)in0, (v16i8)in1, slide_val)

#define SLDI_UB(...) SLDI_B(v16u8, __VA_ARGS__)
#define SLDI_SB(...) SLDI_B(v16i8, __VA_ARGS__)
// NOTE(review): SLDI_SH also uses the byte-wise slide (__msa_sldi_b) on a
// halfword vector — presumably intentional (slide_val counted in bytes);
// confirm at call sites.
#define SLDI_SH(...) SLDI_B(v8i16, __VA_ARGS__)
424
425 /* Description : Shuffle byte vector elements as per mask vector
426 * Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
427 * Outputs - out0, out1
428 * Return Type - as per RTYPE
429 * Details : Byte elements from 'in0' & 'in1' are copied selectively to
430 * 'out0' as per control vector 'mask0'
431 */
432 #define VSHF_B(RTYPE, in0, in1, mask) \
433 (RTYPE)__msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0)
434
435 #define VSHF_UB(...) VSHF_B(v16u8, __VA_ARGS__)
436 #define VSHF_SB(...) VSHF_B(v16i8, __VA_ARGS__)
437 #define VSHF_UH(...) VSHF_B(v8u16, __VA_ARGS__)
438 #define VSHF_SH(...) VSHF_B(v8i16, __VA_ARGS__)
439
440 #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \
441 out0 = VSHF_B(RTYPE, in0, in1, mask0); \
442 out1 = VSHF_B(RTYPE, in2, in3, mask1); \
443 } while (0)
444 #define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
445 #define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
446 #define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
447 #define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__)
448
449 /* Description : Shuffle halfword vector elements as per mask vector
450 * Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
451 * Outputs - out0, out1
452 * Return Type - as per RTYPE
453 * Details : halfword elements from 'in0' & 'in1' are copied selectively to
454 * 'out0' as per control vector 'mask0'
455 */
456 #define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \
457 out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \
458 out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \
459 } while (0)
460 #define VSHF_H2_UH(...) VSHF_H2(v8u16, __VA_ARGS__)
461 #define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)
462
463 /* Description : Dot product of byte vector elements
464 * Arguments : Inputs - mult0, mult1, cnst0, cnst1
465 * Outputs - out0, out1
466 * Return Type - as per RTYPE
467 * Details : Signed byte elements from 'mult0' are multiplied with
468 * signed byte elements from 'cnst0' producing a result
469 * twice the size of input i.e. signed halfword.
470 * The multiplication result of adjacent odd-even elements
471 * are added together and written to the 'out0' vector
472 */
473 #define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
474 out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \
475 out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \
476 } while (0)
477 #define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
478
479 /* Description : Dot product of halfword vector elements
480 * Arguments : Inputs - mult0, mult1, cnst0, cnst1
481 * Outputs - out0, out1
482 * Return Type - as per RTYPE
483 * Details : Signed halfword elements from 'mult0' are multiplied with
484 * signed halfword elements from 'cnst0' producing a result
485 * twice the size of input i.e. signed word.
486 * The multiplication result of adjacent odd-even elements
487 * are added together and written to the 'out0' vector
488 */
489 #define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
490 out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \
491 out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \
492 } while (0)
493 #define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)
494
495 /* Description : Dot product of unsigned word vector elements
496 * Arguments : Inputs - mult0, mult1, cnst0, cnst1
497 * Outputs - out0, out1
498 * Return Type - as per RTYPE
499 * Details : Unsigned word elements from 'mult0' are multiplied with
500 * unsigned word elements from 'cnst0' producing a result
501 * twice the size of input i.e. unsigned double word.
502 * The multiplication result of adjacent odd-even elements
503 * are added together and written to the 'out0' vector
504 */
505 #define DOTP_UW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
506 out0 = (RTYPE)__msa_dotp_u_d((v4u32)mult0, (v4u32)cnst0); \
507 out1 = (RTYPE)__msa_dotp_u_d((v4u32)mult1, (v4u32)cnst1); \
508 } while (0)
509 #define DOTP_UW2_UD(...) DOTP_UW2(v2u64, __VA_ARGS__)
510
511 /* Description : Dot product & addition of halfword vector elements
512 * Arguments : Inputs - mult0, mult1, cnst0, cnst1
513 * Outputs - out0, out1
514 * Return Type - as per RTYPE
515 * Details : Signed halfword elements from 'mult0' are multiplied with
516 * signed halfword elements from 'cnst0' producing a result
517 * twice the size of input i.e. signed word.
518 * The multiplication result of adjacent odd-even elements
519 * are added to the 'out0' vector
520 */
521 #define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
522 out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
523 out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \
524 } while (0)
525 #define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
526
527 /* Description : Clips all signed halfword elements of input vector
528 * between 0 & 255
529 * Arguments : Input/output - val
530 * Return Type - signed halfword
531 */
532 #define CLIP_SH_0_255(val) do { \
533 const v8i16 max_m = __msa_ldi_h(255); \
534 val = __msa_maxi_s_h((v8i16)val, 0); \
535 val = __msa_min_s_h(max_m, (v8i16)val); \
536 } while (0)
537
538 #define CLIP_SH2_0_255(in0, in1) do { \
539 CLIP_SH_0_255(in0); \
540 CLIP_SH_0_255(in1); \
541 } while (0)
542
543 #define CLIP_SH4_0_255(in0, in1, in2, in3) do { \
544 CLIP_SH2_0_255(in0, in1); \
545 CLIP_SH2_0_255(in2, in3); \
546 } while (0)
547
548 /* Description : Clips all unsigned halfword elements of input vector
549 * between 0 & 255
550 * Arguments : Input - in
551 * Output - out_m
552 * Return Type - unsigned halfword
553 */
554 #define CLIP_UH_0_255(in) do { \
555 const v8u16 max_m = (v8u16)__msa_ldi_h(255); \
556 in = __msa_maxi_u_h((v8u16) in, 0); \
557 in = __msa_min_u_h((v8u16) max_m, (v8u16) in); \
558 } while (0)
559
560 #define CLIP_UH2_0_255(in0, in1) do { \
561 CLIP_UH_0_255(in0); \
562 CLIP_UH_0_255(in1); \
563 } while (0)
564
565 /* Description : Clips all signed word elements of input vector
566 * between 0 & 255
567 * Arguments : Input/output - val
568 * Return Type - signed word
569 */
570 #define CLIP_SW_0_255(val) do { \
571 const v4i32 max_m = __msa_ldi_w(255); \
572 val = __msa_maxi_s_w((v4i32)val, 0); \
573 val = __msa_min_s_w(max_m, (v4i32)val); \
574 } while (0)
575
576 #define CLIP_SW4_0_255(in0, in1, in2, in3) do { \
577 CLIP_SW_0_255(in0); \
578 CLIP_SW_0_255(in1); \
579 CLIP_SW_0_255(in2); \
580 CLIP_SW_0_255(in3); \
581 } while (0)
582
583 /* Description : Horizontal addition of 4 signed word elements of input vector
584 * Arguments : Input - in (signed word vector)
585 * Output - sum_m (i32 sum)
586 * Return Type - signed word (GP)
587 * Details : 4 signed word elements of 'in' vector are added together and
588 * the resulting integer sum is returned
589 */
func_hadd_sw_s32(v4i32 in)590 static WEBP_INLINE int32_t func_hadd_sw_s32(v4i32 in) {
591 const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);
592 const v2i64 res1_m = __msa_splati_d(res0_m, 1);
593 const v2i64 out = res0_m + res1_m;
594 int32_t sum_m = __msa_copy_s_w((v4i32)out, 0);
595 return sum_m;
596 }
597 #define HADD_SW_S32(in) func_hadd_sw_s32(in)
598
599 /* Description : Horizontal addition of 8 signed halfword elements
600 * Arguments : Input - in (signed halfword vector)
601 * Output - sum_m (s32 sum)
602 * Return Type - signed word
603 * Details : 8 signed halfword elements of input vector are added
604 * together and the resulting integer sum is returned
605 */
func_hadd_sh_s32(v8i16 in)606 static WEBP_INLINE int32_t func_hadd_sh_s32(v8i16 in) {
607 const v4i32 res = __msa_hadd_s_w(in, in);
608 const v2i64 res0 = __msa_hadd_s_d(res, res);
609 const v2i64 res1 = __msa_splati_d(res0, 1);
610 const v2i64 res2 = res0 + res1;
611 const int32_t sum_m = __msa_copy_s_w((v4i32)res2, 0);
612 return sum_m;
613 }
614 #define HADD_SH_S32(in) func_hadd_sh_s32(in)
615
616 /* Description : Horizontal addition of 8 unsigned halfword elements
617 * Arguments : Input - in (unsigned halfword vector)
618 * Output - sum_m (u32 sum)
619 * Return Type - unsigned word
620 * Details : 8 unsigned halfword elements of input vector are added
621 * together and the resulting integer sum is returned
622 */
func_hadd_uh_u32(v8u16 in)623 static WEBP_INLINE uint32_t func_hadd_uh_u32(v8u16 in) {
624 uint32_t sum_m;
625 const v4u32 res_m = __msa_hadd_u_w(in, in);
626 v2u64 res0_m = __msa_hadd_u_d(res_m, res_m);
627 v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);
628 res0_m = res0_m + res1_m;
629 sum_m = __msa_copy_s_w((v4i32)res0_m, 0);
630 return sum_m;
631 }
632 #define HADD_UH_U32(in) func_hadd_uh_u32(in)
633
634 /* Description : Horizontal addition of signed half word vector elements
635 Arguments : Inputs - in0, in1
636 Outputs - out0, out1
637 Return Type - as per RTYPE
638 Details : Each signed odd half word element from 'in0' is added to
639 even signed half word element from 'in0' (pairwise) and the
640 halfword result is written in 'out0'
641 */
642 #define HADD_SH2(RTYPE, in0, in1, out0, out1) do { \
643 out0 = (RTYPE)__msa_hadd_s_w((v8i16)in0, (v8i16)in0); \
644 out1 = (RTYPE)__msa_hadd_s_w((v8i16)in1, (v8i16)in1); \
645 } while (0)
646 #define HADD_SH2_SW(...) HADD_SH2(v4i32, __VA_ARGS__)
647
648 #define HADD_SH4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) do { \
649 HADD_SH2(RTYPE, in0, in1, out0, out1); \
650 HADD_SH2(RTYPE, in2, in3, out2, out3); \
651 } while (0)
652 #define HADD_SH4_SW(...) HADD_SH4(v4i32, __VA_ARGS__)
653
654 /* Description : Horizontal subtraction of unsigned byte vector elements
655 * Arguments : Inputs - in0, in1
656 * Outputs - out0, out1
657 * Return Type - as per RTYPE
658 * Details : Each unsigned odd byte element from 'in0' is subtracted from
659 * even unsigned byte element from 'in0' (pairwise) and the
660 * halfword result is written to 'out0'
661 */
662 #define HSUB_UB2(RTYPE, in0, in1, out0, out1) do { \
663 out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \
664 out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \
665 } while (0)
666 #define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__)
667 #define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
668 #define HSUB_UB2_SW(...) HSUB_UB2(v4i32, __VA_ARGS__)
669
670 /* Description : Set element n input vector to GPR value
671 * Arguments : Inputs - in0, in1, in2, in3
672 * Output - out
673 * Return Type - as per RTYPE
674 * Details : Set element 0 in vector 'out' to value specified in 'in0'
675 */
676 #define INSERT_W2(RTYPE, in0, in1, out) do { \
677 out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
678 out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
679 } while (0)
680 #define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__)
681 #define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)
682
683 #define INSERT_W4(RTYPE, in0, in1, in2, in3, out) do { \
684 out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
685 out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
686 out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \
687 out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \
688 } while (0)
689 #define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
690 #define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
691 #define INSERT_W4_SW(...) INSERT_W4(v4i32, __VA_ARGS__)
692
693 /* Description : Set element n of double word input vector to GPR value
694 * Arguments : Inputs - in0, in1
695 * Output - out
696 * Return Type - as per RTYPE
697 * Details : Set element 0 in vector 'out' to GPR value specified in 'in0'
698 * Set element 1 in vector 'out' to GPR value specified in 'in1'
699 */
700 #define INSERT_D2(RTYPE, in0, in1, out) do { \
701 out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
702 out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
703 } while (0)
704 #define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
705 #define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
706
707 /* Description : Interleave even byte elements from vectors
708 * Arguments : Inputs - in0, in1, in2, in3
709 * Outputs - out0, out1
710 * Return Type - as per RTYPE
711 * Details : Even byte elements of 'in0' and 'in1' are interleaved
712 * and written to 'out0'
713 */
714 #define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
715 out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
716 out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
717 } while (0)
718 #define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
719 #define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__)
720 #define ILVEV_B2_UH(...) ILVEV_B2(v8u16, __VA_ARGS__)
721 #define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
722 #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
723
724 /* Description : Interleave odd byte elements from vectors
725 * Arguments : Inputs - in0, in1, in2, in3
726 * Outputs - out0, out1
727 * Return Type - as per RTYPE
728 * Details : Odd byte elements of 'in0' and 'in1' are interleaved
729 * and written to 'out0'
730 */
731 #define ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
732 out0 = (RTYPE)__msa_ilvod_b((v16i8)in1, (v16i8)in0); \
733 out1 = (RTYPE)__msa_ilvod_b((v16i8)in3, (v16i8)in2); \
734 } while (0)
735 #define ILVOD_B2_UB(...) ILVOD_B2(v16u8, __VA_ARGS__)
736 #define ILVOD_B2_SB(...) ILVOD_B2(v16i8, __VA_ARGS__)
737 #define ILVOD_B2_UH(...) ILVOD_B2(v8u16, __VA_ARGS__)
738 #define ILVOD_B2_SH(...) ILVOD_B2(v8i16, __VA_ARGS__)
739 #define ILVOD_B2_SD(...) ILVOD_B2(v2i64, __VA_ARGS__)
740
741 /* Description : Interleave even halfword elements from vectors
742 * Arguments : Inputs - in0, in1, in2, in3
743 * Outputs - out0, out1
744 * Return Type - as per RTYPE
745 * Details : Even halfword elements of 'in0' and 'in1' are interleaved
746 * and written to 'out0'
747 */
748 #define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
749 out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
750 out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \
751 } while (0)
752 #define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
753 #define ILVEV_H2_UH(...) ILVEV_H2(v8u16, __VA_ARGS__)
754 #define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
755 #define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)
756
757 /* Description : Interleave odd halfword elements from vectors
758 * Arguments : Inputs - in0, in1, in2, in3
759 * Outputs - out0, out1
760 * Return Type - as per RTYPE
761 * Details : Odd halfword elements of 'in0' and 'in1' are interleaved
762 * and written to 'out0'
763 */
764 #define ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
765 out0 = (RTYPE)__msa_ilvod_h((v8i16)in1, (v8i16)in0); \
766 out1 = (RTYPE)__msa_ilvod_h((v8i16)in3, (v8i16)in2); \
767 } while (0)
768 #define ILVOD_H2_UB(...) ILVOD_H2(v16u8, __VA_ARGS__)
769 #define ILVOD_H2_UH(...) ILVOD_H2(v8u16, __VA_ARGS__)
770 #define ILVOD_H2_SH(...) ILVOD_H2(v8i16, __VA_ARGS__)
771 #define ILVOD_H2_SW(...) ILVOD_H2(v4i32, __VA_ARGS__)
772
773 /* Description : Interleave even word elements from vectors
774 * Arguments : Inputs - in0, in1, in2, in3
775 * Outputs - out0, out1
776 * Return Type - as per RTYPE
777 * Details : Even word elements of 'in0' and 'in1' are interleaved
778 * and written to 'out0'
779 */
780 #define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
781 out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
782 out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \
783 } while (0)
784 #define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)
785 #define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
786 #define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__)
787 #define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)
788
/* Description : Interleave even words of one pair, odd words of another
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : Even word elements of 'a0'/'a1' are interleaved into 'r0';
 *               odd word elements of 'a2'/'a3' are interleaved into 'r1'.
 */
#define ILVEVOD_W2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_ilvev_w((v4i32)a1, (v4i32)a0);      \
  r1 = (RTYPE)__msa_ilvod_w((v4i32)a3, (v4i32)a2);      \
} while (0)
#define ILVEVOD_W2_UB(...) ILVEVOD_W2(v16u8, __VA_ARGS__)
#define ILVEVOD_W2_UH(...) ILVEVOD_W2(v8u16, __VA_ARGS__)
#define ILVEVOD_W2_SH(...) ILVEVOD_W2(v8i16, __VA_ARGS__)
#define ILVEVOD_W2_SW(...) ILVEVOD_W2(v4i32, __VA_ARGS__)
806
/* Description : Interleave even halfwords of one pair, odd halfwords of
 *               another
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : Even halfword elements of 'a0'/'a1' are interleaved into
 *               'r0'; odd halfword elements of 'a2'/'a3' into 'r1'.
 */
#define ILVEVOD_H2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_ilvev_h((v8i16)a1, (v8i16)a0);      \
  r1 = (RTYPE)__msa_ilvod_h((v8i16)a3, (v8i16)a2);      \
} while (0)
#define ILVEVOD_H2_UB(...) ILVEVOD_H2(v16u8, __VA_ARGS__)
#define ILVEVOD_H2_UH(...) ILVEVOD_H2(v8u16, __VA_ARGS__)
#define ILVEVOD_H2_SH(...) ILVEVOD_H2(v8i16, __VA_ARGS__)
#define ILVEVOD_H2_SW(...) ILVEVOD_H2(v4i32, __VA_ARGS__)
824
/* Description : Interleave even double word elements from two vector pairs
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : Even double word elements of 'a0' and 'a1' are interleaved
 *               into 'r0'; likewise 'a2'/'a3' into 'r1'.
 */
#define ILVEV_D2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_ilvev_d((v2i64)a1, (v2i64)a0);    \
  r1 = (RTYPE)__msa_ilvev_d((v2i64)a3, (v2i64)a2);    \
} while (0)
#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
#define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__)
#define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__)
#define ILVEV_D2_SD(...) ILVEV_D2(v2i64, __VA_ARGS__)
840
/* Description : Interleave left halves of byte elements from two pairs
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : Left-half byte elements of 'a0' and 'a1' are interleaved
 *               into 'r0'; likewise 'a2'/'a3' into 'r1'.
 */
#define ILVL_B2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_ilvl_b((v16i8)a0, (v16i8)a1);    \
  r1 = (RTYPE)__msa_ilvl_b((v16i8)a2, (v16i8)a3);    \
} while (0)
#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
#define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
#define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
#define ILVL_B2_SW(...) ILVL_B2(v4i32, __VA_ARGS__)
857
/* Description : Interleave right halves of byte elements from vector pairs
 * Arguments   : Inputs  - a0..a3 (ILVR_B2) / a0..a7 (ILVR_B4)
 *               Outputs - r0, r1 / r0..r3
 *               Return Type - as per RTYPE
 * Details     : Right-half byte elements of 'a0' and 'a1' are interleaved
 *               into 'r0'; remaining pairs follow the same pattern.
 */
#define ILVR_B2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_ilvr_b((v16i8)a0, (v16i8)a1);    \
  r1 = (RTYPE)__msa_ilvr_b((v16i8)a2, (v16i8)a3);    \
} while (0)
#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
#define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
#define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)

#define ILVR_B4(RTYPE, a0, a1, a2, a3, a4, a5, a6, a7,  \
                r0, r1, r2, r3) do {                    \
  ILVR_B2(RTYPE, a0, a1, a2, a3, r0, r1);               \
  ILVR_B2(RTYPE, a4, a5, a6, a7, r2, r3);               \
} while (0)
#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
#define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
#define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
885
/* Description : Interleave right halves of halfword elements from pairs
 * Arguments   : Inputs  - a0..a3 (ILVR_H2) / a0..a7 (ILVR_H4)
 *               Outputs - r0, r1 / r0..r3
 *               Return Type - as per RTYPE
 * Details     : Right-half halfword elements of 'a0' and 'a1' are
 *               interleaved into 'r0'; remaining pairs likewise.
 */
#define ILVR_H2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_ilvr_h((v8i16)a0, (v8i16)a1);    \
  r1 = (RTYPE)__msa_ilvr_h((v8i16)a2, (v8i16)a3);    \
} while (0)
#define ILVR_H2_UB(...) ILVR_H2(v16u8, __VA_ARGS__)
#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)

#define ILVR_H4(RTYPE, a0, a1, a2, a3, a4, a5, a6, a7,  \
                r0, r1, r2, r3) do {                    \
  ILVR_H2(RTYPE, a0, a1, a2, a3, r0, r1);               \
  ILVR_H2(RTYPE, a4, a5, a6, a7, r2, r3);               \
} while (0)
#define ILVR_H4_UB(...) ILVR_H4(v16u8, __VA_ARGS__)
#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
909
/* Description : Interleave right halves of double word elements from pairs
 * Arguments   : Inputs  - a0..a3 (ILVR_D2) / a0..a7 (ILVR_D4)
 *               Outputs - r0, r1 / r0..r3
 *               Return Type - as per RTYPE
 * Details     : Right-half double word elements of 'a0' and 'a1' are
 *               interleaved into 'r0'; remaining pairs likewise.
 */
#define ILVR_D2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_ilvr_d((v2i64)a0, (v2i64)a1);    \
  r1 = (RTYPE)__msa_ilvr_d((v2i64)a2, (v2i64)a3);    \
} while (0)
#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)

#define ILVR_D4(RTYPE, a0, a1, a2, a3, a4, a5, a6, a7,  \
                r0, r1, r2, r3) do {                    \
  ILVR_D2(RTYPE, a0, a1, a2, a3, r0, r1);               \
  ILVR_D2(RTYPE, a4, a5, a6, a7, r2, r3);               \
} while (0)
#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
#define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
932
/* Description : Interleave both right and left halves of two input vectors
 * Arguments   : Inputs  - a, b
 *               Outputs - r_lo, r_hi
 *               Return Type - as per RTYPE
 * Details     : Right halves of 'a' and 'b' are interleaved into 'r_lo',
 *               left halves into 'r_hi'. Byte, halfword and word variants.
 */
#define ILVRL_B2(RTYPE, a, b, r_lo, r_hi) do {     \
  r_lo = (RTYPE)__msa_ilvr_b((v16i8)a, (v16i8)b);  \
  r_hi = (RTYPE)__msa_ilvl_b((v16i8)a, (v16i8)b);  \
} while (0)
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
#define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)

#define ILVRL_H2(RTYPE, a, b, r_lo, r_hi) do {     \
  r_lo = (RTYPE)__msa_ilvr_h((v8i16)a, (v8i16)b);  \
  r_hi = (RTYPE)__msa_ilvl_h((v8i16)a, (v8i16)b);  \
} while (0)
#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
#define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
#define ILVRL_H2_UW(...) ILVRL_H2(v4u32, __VA_ARGS__)

#define ILVRL_W2(RTYPE, a, b, r_lo, r_hi) do {     \
  r_lo = (RTYPE)__msa_ilvr_w((v4i32)a, (v4i32)b);  \
  r_hi = (RTYPE)__msa_ilvl_w((v4i32)a, (v4i32)b);  \
} while (0)
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
#define ILVRL_W2_UW(...) ILVRL_W2(v4u32, __VA_ARGS__)
968
/* Description : Pack even byte elements of vector pairs
 * Arguments   : Inputs  - a0..a3 (PCKEV_B2) / a0..a7 (PCKEV_B4)
 *               Outputs - r0, r1 / r0..r3
 *               Return Type - as per RTYPE
 * Details     : Even byte elements of 'a0' form the left half of 'r0' and
 *               even byte elements of 'a1' its right half; remaining
 *               pairs follow the same pattern.
 */
#define PCKEV_B2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_pckev_b((v16i8)a0, (v16i8)a1);    \
  r1 = (RTYPE)__msa_pckev_b((v16i8)a2, (v16i8)a3);    \
} while (0)
#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
#define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)

#define PCKEV_B4(RTYPE, a0, a1, a2, a3, a4, a5, a6, a7,  \
                 r0, r1, r2, r3) do {                    \
  PCKEV_B2(RTYPE, a0, a1, a2, a3, r0, r1);               \
  PCKEV_B2(RTYPE, a4, a5, a6, a7, r2, r3);               \
} while (0)
#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
#define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__)
995
/* Description : Pack even halfword elements of vector pairs
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : Even halfword elements of 'a0' form the left half of 'r0'
 *               and even halfword elements of 'a1' its right half;
 *               likewise 'a2'/'a3' into 'r1'.
 */
#define PCKEV_H2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_pckev_h((v8i16)a0, (v8i16)a1);    \
  r1 = (RTYPE)__msa_pckev_h((v8i16)a2, (v8i16)a3);    \
} while (0)
#define PCKEV_H2_UH(...) PCKEV_H2(v8u16, __VA_ARGS__)
#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
#define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)
#define PCKEV_H2_UW(...) PCKEV_H2(v4u32, __VA_ARGS__)

/* Description : Pack even word elements of vector pairs
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : Even word elements of 'a0' form the left half of 'r0' and
 *               even word elements of 'a1' its right half; likewise
 *               'a2'/'a3' into 'r1'.
 */
#define PCKEV_W2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_pckev_w((v4i32)a0, (v4i32)a1);    \
  r1 = (RTYPE)__msa_pckev_w((v4i32)a2, (v4i32)a3);    \
} while (0)
#define PCKEV_W2_UH(...) PCKEV_W2(v8u16, __VA_ARGS__)
#define PCKEV_W2_SH(...) PCKEV_W2(v8i16, __VA_ARGS__)
#define PCKEV_W2_SW(...) PCKEV_W2(v4i32, __VA_ARGS__)
#define PCKEV_W2_UW(...) PCKEV_W2(v4u32, __VA_ARGS__)
1029
/* Description : Pack odd halfword elements of vector pairs
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : Odd halfword elements of 'a0' form the left half of 'r0'
 *               and odd halfword elements of 'a1' its right half;
 *               likewise 'a2'/'a3' into 'r1'.
 */
#define PCKOD_H2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_pckod_h((v8i16)a0, (v8i16)a1);    \
  r1 = (RTYPE)__msa_pckod_h((v8i16)a2, (v8i16)a3);    \
} while (0)
#define PCKOD_H2_UH(...) PCKOD_H2(v8u16, __VA_ARGS__)
#define PCKOD_H2_SH(...) PCKOD_H2(v8i16, __VA_ARGS__)
#define PCKOD_H2_SW(...) PCKOD_H2(v4i32, __VA_ARGS__)
#define PCKOD_H2_UW(...) PCKOD_H2(v4u32, __VA_ARGS__)
1046
/* Description : Immediate arithmetic shift right of word vector elements
 * Arguments   : Inputs  - v0, v1 (and v2, v3 for SRAI_W4), shift_val
 *               Outputs - in-place
 *               Return Type - as per RTYPE
 * Details     : Each element is arithmetically shifted right by
 *               'shift_val' and written back in place.
 *               NOTE(review): clang builds expand SRAI_W to __msa_srai_w,
 *               so 'shift_val' must be a compile-time immediate there.
 */
#define SRAI_W2(RTYPE, v0, v1, shift_val) do {  \
  v0 = (RTYPE)SRAI_W(v0, shift_val);            \
  v1 = (RTYPE)SRAI_W(v1, shift_val);            \
} while (0)
#define SRAI_W2_SW(...) SRAI_W2(v4i32, __VA_ARGS__)
#define SRAI_W2_UW(...) SRAI_W2(v4u32, __VA_ARGS__)

#define SRAI_W4(RTYPE, v0, v1, v2, v3, shift_val) do {  \
  SRAI_W2(RTYPE, v0, v1, shift_val);                    \
  SRAI_W2(RTYPE, v2, v3, shift_val);                    \
} while (0)
#define SRAI_W4_SW(...) SRAI_W4(v4i32, __VA_ARGS__)
#define SRAI_W4_UW(...) SRAI_W4(v4u32, __VA_ARGS__)

/* Description : Immediate arithmetic shift right of halfword vector
 *               elements, in place (same immediate note as SRAI_W2).
 */
#define SRAI_H2(RTYPE, v0, v1, shift_val) do {  \
  v0 = (RTYPE)SRAI_H(v0, shift_val);            \
  v1 = (RTYPE)SRAI_H(v1, shift_val);            \
} while (0)
#define SRAI_H2_SH(...) SRAI_H2(v8i16, __VA_ARGS__)
#define SRAI_H2_UH(...) SRAI_H2(v8u16, __VA_ARGS__)
1081
/* Description : Rounded arithmetic shift right of word vector elements
 * Arguments   : Inputs  - v0, v1 (and v2, v3 for SRARI_W4), shift
 *               Outputs - in-place
 *               Return Type - as per RTYPE
 * Details     : Each element is shifted right arithmetically by 'shift';
 *               the last discarded bit is added back for rounding and the
 *               result is written in place.
 */
#define SRARI_W2(RTYPE, v0, v1, shift) do {     \
  v0 = (RTYPE)__msa_srari_w((v4i32)v0, shift);  \
  v1 = (RTYPE)__msa_srari_w((v4i32)v1, shift);  \
} while (0)
#define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)

#define SRARI_W4(RTYPE, v0, v1, v2, v3, shift) do {  \
  SRARI_W2(RTYPE, v0, v1, shift);                    \
  SRARI_W2(RTYPE, v2, v3, shift);                    \
} while (0)
#define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__)
#define SRARI_W4_UW(...) SRARI_W4(v4u32, __VA_ARGS__)
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
1102
/* Description : Rounded arithmetic shift right of double word elements
 * Arguments   : Inputs  - v0, v1 (and v2, v3 for SRAR_D4), shift (vector)
 *               Outputs - in-place
 *               Return Type - as per RTYPE
 * Details     : Each element of 'v0'/'v1' is shifted right arithmetically
 *               by the amount held in the corresponding element of vector
 *               'shift'; the last discarded bit is added back for
 *               rounding and the result is written in place.
 */
#define SRAR_D2(RTYPE, v0, v1, shift) do {            \
  v0 = (RTYPE)__msa_srar_d((v2i64)v0, (v2i64)shift);  \
  v1 = (RTYPE)__msa_srar_d((v2i64)v1, (v2i64)shift);  \
} while (0)
#define SRAR_D2_SW(...) SRAR_D2(v4i32, __VA_ARGS__)
#define SRAR_D2_SD(...) SRAR_D2(v2i64, __VA_ARGS__)
#define SRAR_D2_UD(...) SRAR_D2(v2u64, __VA_ARGS__)

#define SRAR_D4(RTYPE, v0, v1, v2, v3, shift) do {  \
  SRAR_D2(RTYPE, v0, v1, shift);                    \
  SRAR_D2(RTYPE, v2, v3, shift);                    \
} while (0)
#define SRAR_D4_SD(...) SRAR_D4(v2i64, __VA_ARGS__)
#define SRAR_D4_UD(...) SRAR_D4(v2u64, __VA_ARGS__)
1127
/* Description : Immediate addition on 2 pairs of halfword vectors
 * Arguments   : Inputs  - a0, i0, a1, i1
 *               Outputs - r0, r1
 * Details     : 'r0' gets each element of 'a0' plus 'i0'; 'r1' gets each
 *               element of 'a1' plus 'i1' (via the ADDVI_H wrapper).
 */
#define ADDVI_H2(RTYPE, a0, i0, a1, i1, r0, r1) do {  \
  r0 = (RTYPE)ADDVI_H(a0, i0);                        \
  r1 = (RTYPE)ADDVI_H(a1, i1);                        \
} while (0)
#define ADDVI_H2_SH(...) ADDVI_H2(v8i16, __VA_ARGS__)
#define ADDVI_H2_UH(...) ADDVI_H2(v8u16, __VA_ARGS__)

/* Description : Immediate addition on 2 pairs of word vectors
 * Arguments   : Inputs  - a0, i0, a1, i1
 *               Outputs - r0, r1
 * Details     : 'r0' gets each element of 'a0' plus 'i0'; 'r1' gets each
 *               element of 'a1' plus 'i1' (via the ADDVI_W wrapper).
 */
#define ADDVI_W2(RTYPE, a0, i0, a1, i1, r0, r1) do {  \
  r0 = (RTYPE)ADDVI_W(a0, i0);                        \
  r1 = (RTYPE)ADDVI_W(a1, i1);                        \
} while (0)
#define ADDVI_W2_SW(...) ADDVI_W2(v4i32, __VA_ARGS__)
1152
/* Description : Replicate two GP registers across word vectors
 * Arguments   : Inputs  - g0, g1 (general-purpose values)
 *               Outputs - r0, r1
 * Details     : Every word element of 'r0' is set to 'g0'; every word
 *               element of 'r1' is set to 'g1'.
 */
#define FILL_W2(RTYPE, g0, g1, r0, r1) do {  \
  r0 = (RTYPE)__msa_fill_w(g0);              \
  r1 = (RTYPE)__msa_fill_w(g1);              \
} while (0)
#define FILL_W2_SW(...) FILL_W2(v4i32, __VA_ARGS__)
1164
/* Description : Element-wise addition of 2 pairs of vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 * Details     : 'out0' = 'in0' + 'in1'; 'out1' = 'in2' + 'in3'.
 *               Arguments are parenthesized in the expansion so that
 *               compound expressions keep their intended precedence.
 */
#define ADD2(in0, in1, in2, in3, out0, out1) do {  \
  out0 = (in0) + (in1);                            \
  out1 = (in2) + (in3);                            \
} while (0)

/* Two ADD2 applications: 8 inputs added pairwise into 4 outputs. */
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7,  \
             out0, out1, out2, out3) do {             \
  ADD2(in0, in1, in2, in3, out0, out1);               \
  ADD2(in4, in5, in6, in7, out2, out3);               \
} while (0)
1181
/* Description : Element-wise subtraction of pairs of vectors
 * Arguments   : Inputs  - even-index minuend, odd-index subtrahend
 *               Outputs - out0.. (one per input pair)
 * Details     : 'out0' = 'in0' - 'in1', and so on for each pair.
 *               Arguments are parenthesized in the expansion so that
 *               compound expressions keep their intended precedence.
 */
#define SUB2(in0, in1, in2, in3, out0, out1) do {  \
  out0 = (in0) - (in1);                            \
  out1 = (in2) - (in3);                            \
} while (0)

/* Three pairwise differences. */
#define SUB3(in0, in1, in2, in3, in4, in5, out0, out1, out2) do {  \
  out0 = (in0) - (in1);                                            \
  out1 = (in2) - (in3);                                            \
  out2 = (in4) - (in5);                                            \
} while (0)

/* Four pairwise differences. */
#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7,  \
             out0, out1, out2, out3) do {             \
  out0 = (in0) - (in1);                               \
  out1 = (in2) - (in3);                               \
  out2 = (in4) - (in5);                               \
  out3 = (in6) - (in7);                               \
} while (0)
1206
/* Description : Sum and difference of an input vector pair
 * Arguments   : Inputs  - in0, in1
 *               Outputs - out0 (sum), out1 (difference)
 * Details     : 'out0' = 'in0' + 'in1'; 'out1' = 'in0' - 'in1'.
 *               Arguments are parenthesized in the expansion so compound
 *               expressions keep their intended precedence. Note that
 *               'in0' and 'in1' are each evaluated twice, so arguments
 *               must be free of side effects.
 */
#define ADDSUB2(in0, in1, out0, out1) do {  \
  out0 = (in0) + (in1);                     \
  out1 = (in0) - (in1);                     \
} while (0)
1219
/* Description : Element-wise multiplication of pairs of vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 * Details     : 'out0' = 'in0' * 'in1'; 'out1' = 'in2' * 'in3'.
 *               Arguments are parenthesized in the expansion so that
 *               compound expressions keep their intended precedence.
 */
#define MUL2(in0, in1, in2, in3, out0, out1) do {  \
  out0 = (in0) * (in1);                            \
  out1 = (in2) * (in3);                            \
} while (0)

/* Two MUL2 applications: 8 inputs multiplied pairwise into 4 outputs. */
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7,  \
             out0, out1, out2, out3) do {             \
  MUL2(in0, in1, in2, in3, out0, out1);               \
  MUL2(in4, in5, in6, in7, out2, out3);               \
} while (0)
1236
/* Description : Sign extend the right-half halfword elements to words
 * Arguments   : Input  - in  (halfword vector)
 *               Output - out (sign-extended word vector)
 *               Return Type - signed word
 * Details     : The sign bits of the halfword elements of 'in' are
 *               replicated (compare-less-than-immediate vs. 0) and
 *               interleaved right with 'in', yielding 4 sign-extended
 *               word elements.
 */
#define UNPCK_R_SH_SW(in, out) do {                  \
  const v8i16 msb_m = __msa_clti_s_h((v8i16)in, 0);  \
  out = (v4i32)__msa_ilvr_h(msb_m, (v8i16)in);       \
} while (0)
1249
/* Description : Sign extend all halfword elements of a vector into a
 *               pair of word vectors
 * Arguments   : Input   - in          (halfword vector)
 *               Outputs - out0, out1  (sign-extended word vectors)
 *               Return Type - signed word
 * Details     : The sign bits of the halfword elements of 'in' are
 *               replicated (compare-less-than-immediate vs. 0); the right
 *               interleave yields 4 sign-extended words in 'out0' and the
 *               left interleave the remaining 4 in 'out1'.
 */
#define UNPCK_SH_SW(in, out0, out1) do {              \
  const v8i16 sign_m = __msa_clti_s_h((v8i16)in, 0);  \
  ILVRL_H2_SW(sign_m, in, out0, out1);                \
} while (0)
1265
/* Description : Butterfly of 4 input vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1, out2, out3
 * Details     : out0 = in0 + in3, out1 = in1 + in2,
 *               out2 = in1 - in2, out3 = in0 - in3.
 *               Arguments are parenthesized in the expansion so compound
 *               expressions keep their intended precedence. Inputs are
 *               each evaluated twice, so arguments must be free of side
 *               effects.
 */
#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) do {  \
  out0 = (in0) + (in3);                                               \
  out1 = (in1) + (in2);                                               \
  out2 = (in1) - (in2);                                               \
  out3 = (in0) - (in3);                                               \
} while (0)
1277
/* Description : Transpose a 16x4 block of byte elements into 4x16
 * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7,
 *                         in8, in9, in10, in11, in12, in13, in14, in15
 *               Outputs - out0, out1, out2, out3
 *               Return Type - unsigned byte
 * Details     : Implemented as a fixed sequence of even/odd interleaves
 *               at word, double-word, byte and halfword granularity.
 *               The statement order is significant: out1/out3 and the
 *               tmp*_m temporaries hold intermediate results that are
 *               consumed by later steps before receiving final values.
 */
#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,        \
                            in8, in9, in10, in11, in12, in13, in14, in15,  \
                            out0, out1, out2, out3) do {                   \
  v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m, tmp4_m, tmp5_m;                    \
  ILVEV_W2_SD(in0, in4, in8, in12, tmp2_m, tmp3_m);                        \
  ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m);                        \
  ILVEV_D2_UB(tmp2_m, tmp3_m, tmp0_m, tmp1_m, out1, out3);                 \
  ILVEV_W2_SD(in2, in6, in10, in14, tmp4_m, tmp5_m);                       \
  ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m);                       \
  ILVEV_D2_SD(tmp4_m, tmp5_m, tmp0_m, tmp1_m, tmp2_m, tmp3_m);             \
  ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m);                 \
  ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out0, out2);               \
  ILVOD_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m);                 \
  ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out1, out3);               \
} while (0)
1299
/* Description : Transpose a 16x8 block of byte elements into 8x16
 * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7,
 *                         in8, in9, in10, in11, in12, in13, in14, in15
 *               Outputs - out0, out1, out2, out3, out4, out5, out6, out7
 *               Return Type - unsigned byte
 * Details     : Implemented as a fixed sequence of even/odd interleaves
 *               at double-word, byte, halfword and word granularity.
 *               The statement order is significant: the out* registers
 *               are used to hold intermediate values (e.g. out5/out7
 *               after ILVEV_B2_UB) before receiving their final results.
 */
#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,    \
                            in8, in9, in10, in11, in12, in13, in14,    \
                            in15, out0, out1, out2, out3, out4, out5,  \
                            out6, out7) do {                           \
  v8i16 tmp0_m, tmp1_m, tmp4_m, tmp5_m, tmp6_m, tmp7_m;                \
  v4i32 tmp2_m, tmp3_m;                                                \
  ILVEV_D2_UB(in0, in8, in1, in9, out7, out6);                         \
  ILVEV_D2_UB(in2, in10, in3, in11, out5, out4);                       \
  ILVEV_D2_UB(in4, in12, in5, in13, out3, out2);                       \
  ILVEV_D2_UB(in6, in14, in7, in15, out1, out0);                       \
  ILVEV_B2_SH(out7, out6, out5, out4, tmp0_m, tmp1_m);                 \
  ILVOD_B2_SH(out7, out6, out5, out4, tmp4_m, tmp5_m);                 \
  ILVEV_B2_UB(out3, out2, out1, out0, out5, out7);                     \
  ILVOD_B2_SH(out3, out2, out1, out0, tmp6_m, tmp7_m);                 \
  ILVEV_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m);             \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out0, out4);           \
  ILVOD_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m);             \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out2, out6);           \
  ILVEV_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m);         \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out1, out5);           \
  ILVOD_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m);         \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out3, out7);           \
} while (0)
1329
/* Description : Transpose a 4x4 block of word elements
 * Arguments   : Inputs  - r0, r1, r2, r3 (input rows)
 *               Outputs - c0, c1, c2, c3 (transposed rows)
 *               Return Type - as per RTYPE
 * Details     : Word interleaves of the row pairs followed by double-word
 *               interleaves of the intermediates produce the transpose.
 */
#define TRANSPOSE4x4_W(RTYPE, r0, r1, r2, r3,          \
                       c0, c1, c2, c3) do {            \
  v4i32 t0_m, t1_m, t2_m, t3_m;                        \
  ILVRL_W2_SW(r1, r0, t0_m, t1_m);                     \
  ILVRL_W2_SW(r3, r2, t2_m, t3_m);                     \
  c0 = (RTYPE)__msa_ilvr_d((v2i64)t2_m, (v2i64)t0_m);  \
  c1 = (RTYPE)__msa_ilvl_d((v2i64)t2_m, (v2i64)t0_m);  \
  c2 = (RTYPE)__msa_ilvr_d((v2i64)t3_m, (v2i64)t1_m);  \
  c3 = (RTYPE)__msa_ilvl_d((v2i64)t3_m, (v2i64)t1_m);  \
} while (0)
#define TRANSPOSE4x4_SW_SW(...) TRANSPOSE4x4_W(v4i32, __VA_ARGS__)
1346
/* Description : Add a 4x4 block of residuals to destination pixels,
 *               clip and store
 * Arguments   : Inputs - in0, in1, in2, in3 (halfword residual vectors),
 *                        pdst (destination pointer), stride (row stride)
 * Details     : The least significant 4 bytes of each destination row are
 *               loaded (LW4) and widened to halfwords by interleaving
 *               with zero; the residuals (packed pairwise via ILVR_D2_SH)
 *               are added, the sums clipped to [0, 255]
 *               (CLIP_SH2_0_255), packed back to bytes and stored as four
 *               4-byte rows with ST4x4_UB.
 */
#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do {  \
  uint32_t src0_m, src1_m, src2_m, src3_m;                      \
  v8i16 inp0_m, inp1_m, res0_m, res1_m;                         \
  v16i8 dst0_m = { 0 };                                         \
  v16i8 dst1_m = { 0 };                                         \
  const v16i8 zero_m = { 0 };                                   \
  ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m);               \
  LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m);            \
  INSERT_W2_SB(src0_m, src1_m, dst0_m);                         \
  INSERT_W2_SB(src2_m, src3_m, dst1_m);                         \
  ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);   \
  ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);         \
  CLIP_SH2_0_255(res0_m, res1_m);                               \
  PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m);  \
  ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride);           \
} while (0)
1368
/* Description : Pack even byte elements of two vector pairs and store
 *               word indices 0 and 2 of each result to memory
 * Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
 * Details     : PCKEV_B2_SB packs the even bytes of (in1, in0) and
 *               (in3, in2); ST4x4_UB then stores words 0 and 2 of each
 *               packed vector as four 4-byte rows at 'pdst' using
 *               'stride'.
 */
#define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do {  \
  v16i8 pck0_m, pck1_m;                                        \
  PCKEV_B2_SB(in1, in0, in3, in2, pck0_m, pck1_m);             \
  ST4x4_UB(pck0_m, pck1_m, 0, 2, 0, 2, pdst, stride);          \
} while (0)
1379
/* Description : Rounded average, (a + b + 1) / 2, per unsigned byte
 * Arguments   : Inputs  - a0, a1, a2, a3
 *               Outputs - r0, r1
 *               Return Type - as per RTYPE
 * Details     : 'r0' holds the rounded averages of the unsigned byte
 *               elements of 'a0' and 'a1'; 'r1' those of 'a2' and 'a3'.
 */
#define AVER_UB2(RTYPE, a0, a1, a2, a3, r0, r1) do {  \
  r0 = (RTYPE)__msa_aver_u_b((v16u8)a0, (v16u8)a1);   \
  r1 = (RTYPE)__msa_aver_u_b((v16u8)a2, (v16u8)a3);   \
} while (0)
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
1393
1394 #endif // WEBP_USE_MSA
1395 #endif // WEBP_DSP_MSA_MACRO_H_
1396