1 /*
2 * Copyright 2023 The LibYuv Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <assert.h>
12 #include <stdlib.h>
13 #include <time.h>
14
15 #include "libyuv/basic_types.h"
16 #include "libyuv/compare.h"
17 #include "libyuv/convert.h"
18 #include "libyuv/convert_argb.h"
19 #include "libyuv/convert_from.h"
20 #include "libyuv/convert_from_argb.h"
21 #include "libyuv/cpu_id.h"
22 #ifdef HAVE_JPEG
23 #include "libyuv/mjpeg_decoder.h"
24 #endif
25 #include "../unit_test/unit_test.h"
26 #include "libyuv/planar_functions.h"
27 #include "libyuv/rotate.h"
28 #include "libyuv/video_common.h"
29
30 #ifdef ENABLE_ROW_TESTS
31 #include "libyuv/row.h" /* For ARGBToAR30Row_AVX2 */
32 #endif
33
// Trim the test matrix when building for RISC-V with GCC (i.e. not clang):
// drop the slow, full and row-level variants and keep only a lean set.
#if defined(__riscv) && !defined(__clang__)
#define DISABLE_SLOW_TESTS
#undef ENABLE_FULL_TESTS
#undef ENABLE_ROW_TESTS
#define LEAN_TESTS
#endif

// Some functions fail on big endian. Enable these tests on all cpus except
// PowerPC, but they are not optimized so disabled by default.
#if !defined(DISABLE_SLOW_TESTS) && !defined(__powerpc__)
#define LITTLE_ENDIAN_ONLY_TEST 1
#endif
#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__)
// SLOW TESTS are those that are unoptimized C code.
// FULL TESTS are optimized but test many variations of the same code.
#define ENABLE_FULL_TESTS
#endif
51
52 namespace libyuv {
53
// Alias to copy pixels as is
#define AR30ToAR30 ARGBCopy
#define ABGRToABGR ARGBCopy

// subsample amount uses a divide.
// SUBSAMPLE(v, a): size of a plane dimension of v samples after chroma
// subsampling by factor |a| (i.e. ceil(v / a)).
#define SUBSAMPLE(v, a) ((((v) + (a)-1)) / (a))

// ALIGNINT(V, ALIGN): round V up to the nearest multiple of ALIGN.
#define ALIGNINT(V, ALIGN) (((V) + (ALIGN)-1) / (ALIGN) * (ALIGN))
62
// TESTBPTOPI generates one gtest that converts a biplanar source (Y plane +
// interleaved UV plane) to a triplanar destination (separate Y, U, V
// planes), e.g. NV12 -> I420, then requires the optimized path's output to
// match the unoptimized C path (selected via MaskCpuFlags) byte for byte.
// Parameters:
//   SRC_FMT_PLANAR, FMT_PLANAR: format names; the function under test is
//     SRC_FMT_PLANAR##To##FMT_PLANAR.
//   SRC_T/DST_T, SRC_BPC/DST_BPC: sample type and bytes per channel (1, 2).
//   *_SUBSAMP_X/Y: chroma subsampling factors (1 or 2).
//   W1280: width expression. N: test-name suffix. NEG: + or -; minus
//     negates the height to exercise inverted (bottom-up) conversion.
//   OFF: byte offset added to source pointers for unaligned-input coverage.
//   SRC_DEPTH: significant bits per sample; random source samples are
//     masked so values occupy the most-significant SRC_DEPTH bits of each
//     (8 * SRC_BPC)-bit sample (e.g. P010 stores 10 bits in the high bits).
//   TILE_WIDTH/TILE_HEIGHT: source planes are allocated padded up to this
//     tile size for tiled formats such as MM21 (1, 1 for linear formats).
#define TESTBPTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X,             \
                   SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X,  \
                   DST_SUBSAMP_Y, W1280, N, NEG, OFF, SRC_DEPTH, TILE_WIDTH,  \
                   TILE_HEIGHT)                                               \
  TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) {              \
    static_assert(SRC_BPC == 1 || SRC_BPC == 2, "SRC BPC unsupported");       \
    static_assert(DST_BPC == 1 || DST_BPC == 2, "DST BPC unsupported");       \
    static_assert(SRC_SUBSAMP_X == 1 || SRC_SUBSAMP_X == 2,                   \
                  "SRC_SUBSAMP_X unsupported");                               \
    static_assert(SRC_SUBSAMP_Y == 1 || SRC_SUBSAMP_Y == 2,                   \
                  "SRC_SUBSAMP_Y unsupported");                               \
    static_assert(DST_SUBSAMP_X == 1 || DST_SUBSAMP_X == 2,                   \
                  "DST_SUBSAMP_X unsupported");                               \
    static_assert(DST_SUBSAMP_Y == 1 || DST_SUBSAMP_Y == 2,                   \
                  "DST_SUBSAMP_Y unsupported");                               \
    const int kWidth = W1280;                                                 \
    const int kHeight = benchmark_height_;                                    \
    const int kSrcHalfWidth = SUBSAMPLE(kWidth, SRC_SUBSAMP_X);               \
    const int kDstHalfWidth = SUBSAMPLE(kWidth, DST_SUBSAMP_X);               \
    const int kDstHalfHeight = SUBSAMPLE(kHeight, DST_SUBSAMP_Y);             \
    const int kPaddedWidth = (kWidth + (TILE_WIDTH - 1)) & ~(TILE_WIDTH - 1); \
    const int kPaddedHeight =                                                 \
        (kHeight + (TILE_HEIGHT - 1)) & ~(TILE_HEIGHT - 1);                   \
    const int kSrcHalfPaddedWidth = SUBSAMPLE(kPaddedWidth, SRC_SUBSAMP_X);   \
    const int kSrcHalfPaddedHeight = SUBSAMPLE(kPaddedHeight, SRC_SUBSAMP_Y); \
    align_buffer_page_end(src_y, kPaddedWidth* kPaddedHeight* SRC_BPC + OFF); \
    align_buffer_page_end(                                                    \
        src_uv,                                                               \
        kSrcHalfPaddedWidth* kSrcHalfPaddedHeight* SRC_BPC * 2 + OFF);        \
    align_buffer_page_end(dst_y_c, kWidth* kHeight* DST_BPC);                 \
    align_buffer_page_end(dst_u_c, kDstHalfWidth* kDstHalfHeight* DST_BPC);   \
    align_buffer_page_end(dst_v_c, kDstHalfWidth* kDstHalfHeight* DST_BPC);   \
    align_buffer_page_end(dst_y_opt, kWidth* kHeight* DST_BPC);               \
    align_buffer_page_end(dst_u_opt, kDstHalfWidth* kDstHalfHeight* DST_BPC); \
    align_buffer_page_end(dst_v_opt, kDstHalfWidth* kDstHalfHeight* DST_BPC); \
    SRC_T* src_y_p = reinterpret_cast<SRC_T*>(src_y + OFF);                   \
    SRC_T* src_uv_p = reinterpret_cast<SRC_T*>(src_uv + OFF);                 \
    for (int i = 0; i < kPaddedWidth * kPaddedHeight; ++i) {                  \
      src_y_p[i] =                                                            \
          (fastrand() & (((SRC_T)(-1)) << ((8 * SRC_BPC) - SRC_DEPTH)));      \
    }                                                                         \
    for (int i = 0; i < kSrcHalfPaddedWidth * kSrcHalfPaddedHeight * 2;       \
         ++i) {                                                               \
      src_uv_p[i] =                                                           \
          (fastrand() & (((SRC_T)(-1)) << ((8 * SRC_BPC) - SRC_DEPTH)));      \
    }                                                                         \
    memset(dst_y_c, 1, kWidth* kHeight* DST_BPC);                             \
    memset(dst_u_c, 2, kDstHalfWidth* kDstHalfHeight* DST_BPC);               \
    memset(dst_v_c, 3, kDstHalfWidth* kDstHalfHeight* DST_BPC);               \
    memset(dst_y_opt, 101, kWidth* kHeight* DST_BPC);                         \
    memset(dst_u_opt, 102, kDstHalfWidth* kDstHalfHeight* DST_BPC);           \
    memset(dst_v_opt, 103, kDstHalfWidth* kDstHalfHeight* DST_BPC);           \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    SRC_FMT_PLANAR##To##FMT_PLANAR(                                           \
        src_y_p, kWidth, src_uv_p, kSrcHalfWidth * 2,                         \
        reinterpret_cast<DST_T*>(dst_y_c), kWidth,                            \
        reinterpret_cast<DST_T*>(dst_u_c), kDstHalfWidth,                     \
        reinterpret_cast<DST_T*>(dst_v_c), kDstHalfWidth, kWidth,             \
        NEG kHeight);                                                         \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      SRC_FMT_PLANAR##To##FMT_PLANAR(                                         \
          src_y_p, kWidth, src_uv_p, kSrcHalfWidth * 2,                       \
          reinterpret_cast<DST_T*>(dst_y_opt), kWidth,                        \
          reinterpret_cast<DST_T*>(dst_u_opt), kDstHalfWidth,                 \
          reinterpret_cast<DST_T*>(dst_v_opt), kDstHalfWidth, kWidth,         \
          NEG kHeight);                                                       \
    }                                                                         \
    for (int i = 0; i < kHeight * kWidth * DST_BPC; ++i) {                    \
      EXPECT_EQ(dst_y_c[i], dst_y_opt[i]);                                    \
    }                                                                         \
    for (int i = 0; i < kDstHalfWidth * kDstHalfHeight * DST_BPC; ++i) {      \
      EXPECT_EQ(dst_u_c[i], dst_u_opt[i]);                                    \
      EXPECT_EQ(dst_v_c[i], dst_v_opt[i]);                                    \
    }                                                                         \
    free_aligned_buffer_page_end(dst_y_c);                                    \
    free_aligned_buffer_page_end(dst_u_c);                                    \
    free_aligned_buffer_page_end(dst_v_c);                                    \
    free_aligned_buffer_page_end(dst_y_opt);                                  \
    free_aligned_buffer_page_end(dst_u_opt);                                  \
    free_aligned_buffer_page_end(dst_v_opt);                                  \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_uv);                                     \
  }
145
#if defined(ENABLE_FULL_TESTS)
// TESTBPTOP expands TESTBPTOPI into the standard variant set:
//   _Any       - width of benchmark_width_ + 1 (odd-width handling),
//   _Unaligned - source pointers offset by 2 bytes,
//   _Invert    - negative height (bottom-up image),
//   _Opt       - aligned, even-width benchmark case.
// Reduced builds keep only _Opt.
#define TESTBPTOP(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X,              \
                  SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X,   \
                  DST_SUBSAMP_Y, SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT)          \
  TESTBPTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,    \
             FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, DST_SUBSAMP_Y,        \
             benchmark_width_ + 1, _Any, +, 0, SRC_DEPTH, TILE_WIDTH,         \
             TILE_HEIGHT)                                                     \
  TESTBPTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,    \
             FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, DST_SUBSAMP_Y,        \
             benchmark_width_, _Unaligned, +, 2, SRC_DEPTH, TILE_WIDTH,       \
             TILE_HEIGHT)                                                     \
  TESTBPTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,    \
             FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, DST_SUBSAMP_Y,        \
             benchmark_width_, _Invert, -, 0, SRC_DEPTH, TILE_WIDTH,          \
             TILE_HEIGHT)                                                     \
  TESTBPTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,    \
             FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, DST_SUBSAMP_Y,        \
             benchmark_width_, _Opt, +, 0, SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT)
#else
#define TESTBPTOP(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X,              \
                  SRC_SUBSAMP_Y, FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X,   \
                  DST_SUBSAMP_Y, SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT)          \
  TESTBPTOPI(SRC_FMT_PLANAR, SRC_T, SRC_BPC, SRC_SUBSAMP_X, SRC_SUBSAMP_Y,    \
             FMT_PLANAR, DST_T, DST_BPC, DST_SUBSAMP_X, DST_SUBSAMP_Y,        \
             benchmark_width_, _Opt, +, 0, SRC_DEPTH, TILE_WIDTH, TILE_HEIGHT)
#endif
173
// Biplanar -> triplanar conversions. MM21 is a 16x32-tiled 8-bit format;
// P010/P012 carry 10/12 significant bits in the high bits of 16-bit samples.
TESTBPTOP(NV12, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8, 1, 1)
TESTBPTOP(NV21, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8, 1, 1)
TESTBPTOP(MM21, uint8_t, 1, 2, 2, I420, uint8_t, 1, 2, 2, 8, 16, 32)
TESTBPTOP(P010, uint16_t, 2, 2, 2, I010, uint16_t, 2, 2, 2, 10, 1, 1)
TESTBPTOP(P012, uint16_t, 2, 2, 2, I012, uint16_t, 2, 2, 2, 12, 1, 1)
179
// Provide matrix wrappers for full range bt.709
// The ABGR variants reuse the ARGB converter by swapping the U and V plane
// arguments (c,d <-> e,f) and using the mirrored (Yvu) coefficients, which
// reverses the output channel order.
#define F420ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I420ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuF709Constants, i, j)
#define F420ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I420ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvF709Constants, i, j)
#define F422ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I422ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuF709Constants, i, j)
#define F422ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I422ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvF709Constants, i, j)
#define F444ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I444ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuF709Constants, i, j)
#define F444ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I444ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvF709Constants, i, j)

// Provide matrix wrappers for full range bt.2020
#define V420ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I420ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuV2020Constants, i, j)
#define V420ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I420ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvV2020Constants, i, j)
#define V422ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I422ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuV2020Constants, i, j)
#define V422ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I422ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvV2020Constants, i, j)
#define V444ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I444ToARGBMatrix(a, b, e, f, c, d, g, h, &kYvuV2020Constants, i, j)
#define V444ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I444ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvV2020Constants, i, j)

// Wrappers binding the bilinear-filtered (chroma upsampling) converters to
// names usable with TESTPLANARTOB.
// NOTE(review): I422ToRGB24Filter maps onto I420ToRGB24MatrixFilter; the
// corresponding invocation below passes 2, 2 (4:2:0) subsampling to match -
// confirm this routing is intentional.
#define I420ToARGBFilter(a, b, c, d, e, f, g, h, i, j) \
  I420ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                         kFilterBilinear)
#define I422ToARGBFilter(a, b, c, d, e, f, g, h, i, j) \
  I422ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                         kFilterBilinear)
#define I420ToRGB24Filter(a, b, c, d, e, f, g, h, i, j) \
  I420ToRGB24MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                          kFilterBilinear)
#define I422ToRGB24Filter(a, b, c, d, e, f, g, h, i, j) \
  I420ToRGB24MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                          kFilterBilinear)
220
// TESTPLANARTOBI generates one gtest that converts planar YUV (FMT_PLANAR,
// chroma subsampled by SUBSAMP_X/SUBSAMP_Y) to packed format FMT_B (BPP_B
// bytes per pixel, stride aligned to ALIGN, height aligned to YALIGN). It
// runs the C path once and the optimized path benchmark_iterations_ times,
// prints both timings in microseconds, and requires the outputs to match
// byte for byte.
// W1280/N/NEG/OFF: width expression, test-name suffix, height sign (minus
// exercises inverted output) and buffer byte offset for unaligned coverage.
// NOTE(review): the result check indexes the destination linearly over
// kWidth * BPP_B * kHeight bytes; with a padded stride this relies on the
// converter writing every byte of each padded row - confirm for any new
// ALIGN/BPP_B combination.
#define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       YALIGN, W1280, N, NEG, OFF)                            \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                       \
    const int kWidth = W1280;                                                 \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                  \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                     \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                       \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);            \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_u, kSizeUV + OFF);                              \
    align_buffer_page_end(src_v, kSizeUV + OFF);                              \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF);               \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF);             \
    for (int i = 0; i < kWidth * kHeight; ++i) {                              \
      src_y[i + OFF] = (fastrand() & 0xff);                                   \
    }                                                                         \
    for (int i = 0; i < kSizeUV; ++i) {                                       \
      src_u[i + OFF] = (fastrand() & 0xff);                                   \
      src_v[i + OFF] = (fastrand() & 0xff);                                   \
    }                                                                         \
    memset(dst_argb_c + OFF, 1, kStrideB * kHeight);                          \
    memset(dst_argb_opt + OFF, 101, kStrideB * kHeight);                      \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    double time0 = get_time();                                                \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,        \
                          src_v + OFF, kStrideUV, dst_argb_c + OFF, kStrideB, \
                          kWidth, NEG kHeight);                               \
    double time1 = get_time();                                                \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,      \
                            src_v + OFF, kStrideUV, dst_argb_opt + OFF,       \
                            kStrideB, kWidth, NEG kHeight);                   \
    }                                                                         \
    double time2 = get_time();                                                \
    printf(" %8d us C - %8d us OPT\n",                                        \
           static_cast<int>((time1 - time0) * 1e6),                           \
           static_cast<int>((time2 - time1) * 1e6 / benchmark_iterations_));  \
    for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) {                      \
      EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_opt[i + OFF]);                  \
    }                                                                         \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_u);                                      \
    free_aligned_buffer_page_end(src_v);                                      \
    free_aligned_buffer_page_end(dst_argb_c);                                 \
    free_aligned_buffer_page_end(dst_argb_opt);                               \
  }
268
#if defined(ENABLE_FULL_TESTS)
// TESTPLANARTOB expands TESTPLANARTOBI into the standard variant set
// (_Any = odd width, _Unaligned = buffers offset by 4, _Invert = negative
// height, _Opt); reduced builds keep only _Opt.
#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                      YALIGN)                                                \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                 YALIGN, benchmark_width_ + 1, _Any, +, 0)                   \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                 YALIGN, benchmark_width_, _Unaligned, +, 4)                 \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                 YALIGN, benchmark_width_, _Invert, -, 0)                    \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                 YALIGN, benchmark_width_, _Opt, +, 0)
#else
#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                      YALIGN)                                                \
  TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,      \
                 YALIGN, benchmark_width_, _Opt, +, 0)
#endif
286
#if defined(ENABLE_FULL_TESTS)
// Planar YUV -> packed RGB test matrix. The format prefix selects the color
// matrix / range variant (F = full-range BT.709 and V = full-range BT.2020
// via the wrappers above; I/J/H/U use the library's built-in converters).
TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1)
TESTPLANARTOB(J420, 2, 2, ARGB, 4, 4, 1)
TESTPLANARTOB(J420, 2, 2, ABGR, 4, 4, 1)
TESTPLANARTOB(F420, 2, 2, ARGB, 4, 4, 1)
TESTPLANARTOB(F420, 2, 2, ABGR, 4, 4, 1)
TESTPLANARTOB(H420, 2, 2, ARGB, 4, 4, 1)
TESTPLANARTOB(H420, 2, 2, ABGR, 4, 4, 1)
TESTPLANARTOB(U420, 2, 2, ARGB, 4, 4, 1)
TESTPLANARTOB(U420, 2, 2, ABGR, 4, 4, 1)
TESTPLANARTOB(V420, 2, 2, ARGB, 4, 4, 1)
TESTPLANARTOB(V420, 2, 2, ABGR, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1)
TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1)
TESTPLANARTOB(J420, 2, 2, RAW, 3, 3, 1)
TESTPLANARTOB(J420, 2, 2, RGB24, 3, 3, 1)
TESTPLANARTOB(H420, 2, 2, RAW, 3, 3, 1)
TESTPLANARTOB(H420, 2, 2, RGB24, 3, 3, 1)
// 16-bit packed RGB destinations are endian-sensitive.
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1)
TESTPLANARTOB(J420, 2, 2, RGB565, 2, 2, 1)
TESTPLANARTOB(H420, 2, 2, RGB565, 2, 2, 1)
TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1)
TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1)
TESTPLANARTOB(I422, 2, 1, RGB565, 2, 2, 1)
#endif
TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(J422, 2, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(J422, 2, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(H422, 2, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(H422, 2, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(U422, 2, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(U422, 2, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(V422, 2, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(V422, 2, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1)
TESTPLANARTOB(I422, 1, 1, RGB24, 3, 3, 1)
TESTPLANARTOB(I422, 1, 1, RAW, 3, 3, 1)
TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(I444, 1, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(I444, 1, 1, RGB24, 3, 3, 1)
TESTPLANARTOB(I444, 1, 1, RAW, 3, 3, 1)
TESTPLANARTOB(J444, 1, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(J444, 1, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(H444, 1, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(H444, 1, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(U444, 1, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(U444, 1, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(V444, 1, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(V444, 1, 1, ABGR, 4, 4, 1)
// Packed YUV and single-plane destinations.
TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1)
TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1)
TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1)
TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1)
TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1)
TESTPLANARTOB(J420, 2, 2, J400, 1, 1, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTPLANARTOB(I420, 2, 2, AR30, 4, 4, 1)
TESTPLANARTOB(H420, 2, 2, AR30, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, AB30, 4, 4, 1)
TESTPLANARTOB(H420, 2, 2, AB30, 4, 4, 1)
#endif
// Bilinear-filtered converters (see the *Filter wrappers above).
// NOTE(review): the I422 RGB24Filter case uses 2, 2 subsampling because
// I422ToRGB24Filter is routed through the I420 variant.
TESTPLANARTOB(I420, 2, 2, ARGBFilter, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, ARGBFilter, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, RGB24Filter, 3, 3, 1)
TESTPLANARTOB(I422, 2, 2, RGB24Filter, 3, 3, 1)
#else  // FULL_TESTS
// Reduced matrix for constrained builds.
TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1)
TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1)
TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1)
TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1)
TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1)
TESTPLANARTOB(I422, 2, 1, RGB565, 2, 2, 1)
#endif
TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1)
TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1)
TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1)
TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1)
TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1)
TESTPLANARTOB(I420, 2, 2, ARGBFilter, 4, 4, 1)
TESTPLANARTOB(I422, 2, 1, ARGBFilter, 4, 4, 1)
TESTPLANARTOB(I420, 2, 2, RGB24Filter, 3, 3, 1)
TESTPLANARTOB(I444, 1, 1, ABGR, 4, 4, 1)
TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1)
#endif
386
// TESTBPTOBI generates one gtest that converts biplanar YUV (Y plane +
// interleaved UV) to packed format FMT_B and checks the optimized path
// against the C path. Because some destinations pack channels into 16 bits
// (e.g. RGB565), both results are first expanded to ARGB via FMT_C##ToARGB
// and compared in the expanded byte form.
#define TESTBPTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B,     \
                   W1280, N, NEG, OFF)                                        \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                       \
    const int kWidth = W1280;                                                 \
    const int kHeight = benchmark_height_;                                    \
    const int kStrideB = kWidth * BPP_B;                                      \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                       \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                      \
    align_buffer_page_end(src_uv,                                             \
                          kStrideUV* SUBSAMPLE(kHeight, SUBSAMP_Y) * 2 +      \
                              OFF);                                           \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight);                     \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight);                   \
    for (int i = 0; i < kHeight; ++i)                                         \
      for (int j = 0; j < kWidth; ++j)                                        \
        src_y[i * kWidth + j + OFF] = (fastrand() & 0xff);                    \
    for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) {                 \
      for (int j = 0; j < kStrideUV * 2; ++j) {                               \
        src_uv[i * kStrideUV * 2 + j + OFF] = (fastrand() & 0xff);            \
      }                                                                       \
    }                                                                         \
    memset(dst_argb_c, 1, kStrideB* kHeight);                                 \
    memset(dst_argb_opt, 101, kStrideB* kHeight);                             \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_uv + OFF, kStrideUV * 2,   \
                          dst_argb_c, kWidth * BPP_B, kWidth, NEG kHeight);   \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_uv + OFF, kStrideUV * 2, \
                            dst_argb_opt, kWidth * BPP_B, kWidth,             \
                            NEG kHeight);                                     \
    }                                                                         \
    /* Convert to ARGB so 565 is expanded to bytes that can be compared. */   \
    align_buffer_page_end(dst_argb32_c, kWidth * 4 * kHeight);                \
    align_buffer_page_end(dst_argb32_opt, kWidth * 4 * kHeight);              \
    memset(dst_argb32_c, 2, kWidth * 4 * kHeight);                            \
    memset(dst_argb32_opt, 102, kWidth * 4 * kHeight);                        \
    FMT_C##ToARGB(dst_argb_c, kStrideB, dst_argb32_c, kWidth * 4, kWidth,     \
                  kHeight);                                                   \
    FMT_C##ToARGB(dst_argb_opt, kStrideB, dst_argb32_opt, kWidth * 4, kWidth, \
                  kHeight);                                                   \
    for (int i = 0; i < kHeight; ++i) {                                       \
      for (int j = 0; j < kWidth * 4; ++j) {                                  \
        EXPECT_EQ(dst_argb32_c[i * kWidth * 4 + j],                           \
                  dst_argb32_opt[i * kWidth * 4 + j]);                        \
      }                                                                       \
    }                                                                         \
    free_aligned_buffer_page_end(src_y);                                      \
    free_aligned_buffer_page_end(src_uv);                                     \
    free_aligned_buffer_page_end(dst_argb_c);                                 \
    free_aligned_buffer_page_end(dst_argb_opt);                               \
    free_aligned_buffer_page_end(dst_argb32_c);                               \
    free_aligned_buffer_page_end(dst_argb32_opt);                             \
  }
440
#if defined(ENABLE_FULL_TESTS)
// TESTBPTOB expands TESTBPTOBI into the standard variant set (_Any = odd
// width, _Unaligned = source offset by 2, _Invert = negative height, _Opt);
// reduced builds keep only _Opt.
#define TESTBPTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B) \
  TESTBPTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B,      \
             benchmark_width_ + 1, _Any, +, 0)                           \
  TESTBPTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B,      \
             benchmark_width_, _Unaligned, +, 2)                         \
  TESTBPTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B,      \
             benchmark_width_, _Invert, -, 0)                            \
  TESTBPTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B,      \
             benchmark_width_, _Opt, +, 0)
#else
#define TESTBPTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B) \
  TESTBPTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, FMT_C, BPP_B,      \
             benchmark_width_, _Opt, +, 0)
#endif
456
// Matrix wrappers for full-range (JPEG) NV12/NV21 conversions. The ABGR and
// RAW variants are produced by swapping NV12 <-> NV21 (which swaps the U/V
// order inside the interleaved plane) combined with the mirrored (Yvu)
// coefficients, reversing the output channel order.
#define JNV12ToARGB(a, b, c, d, e, f, g, h) \
  NV12ToARGBMatrix(a, b, c, d, e, f, &kYuvJPEGConstants, g, h)
#define JNV21ToARGB(a, b, c, d, e, f, g, h) \
  NV21ToARGBMatrix(a, b, c, d, e, f, &kYuvJPEGConstants, g, h)
#define JNV12ToABGR(a, b, c, d, e, f, g, h) \
  NV21ToARGBMatrix(a, b, c, d, e, f, &kYvuJPEGConstants, g, h)
#define JNV21ToABGR(a, b, c, d, e, f, g, h) \
  NV12ToARGBMatrix(a, b, c, d, e, f, &kYvuJPEGConstants, g, h)
#define JNV12ToRGB24(a, b, c, d, e, f, g, h) \
  NV12ToRGB24Matrix(a, b, c, d, e, f, &kYuvJPEGConstants, g, h)
#define JNV21ToRGB24(a, b, c, d, e, f, g, h) \
  NV21ToRGB24Matrix(a, b, c, d, e, f, &kYuvJPEGConstants, g, h)
#define JNV12ToRAW(a, b, c, d, e, f, g, h) \
  NV21ToRGB24Matrix(a, b, c, d, e, f, &kYvuJPEGConstants, g, h)
#define JNV21ToRAW(a, b, c, d, e, f, g, h) \
  NV12ToRGB24Matrix(a, b, c, d, e, f, &kYvuJPEGConstants, g, h)
#define JNV12ToRGB565(a, b, c, d, e, f, g, h) \
  NV12ToRGB565Matrix(a, b, c, d, e, f, &kYuvJPEGConstants, g, h)
475
// Full-range (JPEG) NV12/NV21 -> packed RGB conversions.
TESTBPTOB(JNV12, 2, 2, ARGB, ARGB, 4)
TESTBPTOB(JNV21, 2, 2, ARGB, ARGB, 4)
TESTBPTOB(JNV12, 2, 2, ABGR, ABGR, 4)
TESTBPTOB(JNV21, 2, 2, ABGR, ABGR, 4)
TESTBPTOB(JNV12, 2, 2, RGB24, RGB24, 3)
TESTBPTOB(JNV21, 2, 2, RGB24, RGB24, 3)
TESTBPTOB(JNV12, 2, 2, RAW, RAW, 3)
TESTBPTOB(JNV21, 2, 2, RAW, RAW, 3)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTBPTOB(JNV12, 2, 2, RGB565, RGB565, 2)
#endif

// Limited-range NV12/NV21 -> packed RGB conversions. The YUV24 destination
// is compared via RAWToARGB expansion (FMT_C = RAW).
TESTBPTOB(NV12, 2, 2, ARGB, ARGB, 4)
TESTBPTOB(NV21, 2, 2, ARGB, ARGB, 4)
TESTBPTOB(NV12, 2, 2, ABGR, ABGR, 4)
TESTBPTOB(NV21, 2, 2, ABGR, ABGR, 4)
TESTBPTOB(NV12, 2, 2, RGB24, RGB24, 3)
TESTBPTOB(NV21, 2, 2, RGB24, RGB24, 3)
TESTBPTOB(NV12, 2, 2, RAW, RAW, 3)
TESTBPTOB(NV21, 2, 2, RAW, RAW, 3)
TESTBPTOB(NV21, 2, 2, YUV24, RAW, 3)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTBPTOB(NV12, 2, 2, RGB565, RGB565, 2)
#endif
500
// TESTATOBI generates one gtest that converts packed format FMT_A to packed
// format FMT_B and requires the optimized path to match the unoptimized C
// path byte for byte.
//   TYPE_x: element type (uint8_t or uint16_t). EPP_x: elements per pixel.
//   STRIDE_x: stride alignment granularity in elements. HEIGHT_x: height
//   alignment granularity.
//   W1280/N/NEG/OFF: width expression, test-name suffix, height sign (minus
//   exercises inverted output) and source byte offset for unaligned input.
// Fix vs previous revision: the canary memset of the destinations covered
// only kStrideB * kHeightB BYTES, while the buffers (and the comparison
// loop) span kStrideB * kHeightB * sizeof(TYPE_B) bytes - for 16-bit
// destinations (AR64/AB64) half of each buffer was left uninitialized. The
// fills now cover the whole allocation.
#define TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B,    \
                  EPP_B, STRIDE_B, HEIGHT_B, W1280, N, NEG, OFF)              \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##N) {                            \
    const int kWidth = W1280;                                                 \
    const int kHeight = benchmark_height_;                                    \
    const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A;      \
    const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B;      \
    const int kStrideA =                                                      \
        (kWidth * EPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;                \
    const int kStrideB =                                                      \
        (kWidth * EPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B;                \
    align_buffer_page_end(src_argb,                                           \
                          kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF);      \
    align_buffer_page_end(dst_argb_c,                                         \
                          kStrideB* kHeightB*(int)sizeof(TYPE_B));            \
    align_buffer_page_end(dst_argb_opt,                                       \
                          kStrideB* kHeightB*(int)sizeof(TYPE_B));            \
    for (int i = 0; i < kStrideA * kHeightA * (int)sizeof(TYPE_A); ++i) {     \
      src_argb[i + OFF] = (fastrand() & 0xff);                                \
    }                                                                         \
    /* Distinct canary fills over the FULL byte size of each destination so  \
     * a stale byte can never compare equal by accident. */                   \
    memset(dst_argb_c, 1, kStrideB* kHeightB*(int)sizeof(TYPE_B));            \
    memset(dst_argb_opt, 101, kStrideB* kHeightB*(int)sizeof(TYPE_B));        \
    MaskCpuFlags(disable_cpu_flags_);                                         \
    FMT_A##To##FMT_B((TYPE_A*)(src_argb + OFF), kStrideA,                     \
                     (TYPE_B*)dst_argb_c, kStrideB, kWidth, NEG kHeight);     \
    MaskCpuFlags(benchmark_cpu_info_);                                        \
    for (int i = 0; i < benchmark_iterations_; ++i) {                         \
      FMT_A##To##FMT_B((TYPE_A*)(src_argb + OFF), kStrideA,                   \
                       (TYPE_B*)dst_argb_opt, kStrideB, kWidth, NEG kHeight); \
    }                                                                         \
    for (int i = 0; i < kStrideB * kHeightB * (int)sizeof(TYPE_B); ++i) {     \
      EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]);                              \
    }                                                                         \
    free_aligned_buffer_page_end(src_argb);                                   \
    free_aligned_buffer_page_end(dst_argb_c);                                 \
    free_aligned_buffer_page_end(dst_argb_opt);                               \
  }
537
// TESTATOBRANDOM generates one gtest that exercises FMT_A##To##FMT_B over
// benchmark_iterations_ random small sizes (width 1..64, height 1..32),
// comparing the C path against the optimized path at every size. Source
// bytes are a constant 0xfe so any mismatch isolates size/edge handling
// rather than pixel content.
// Fix vs previous revision: the destination fills covered only
// kStrideB * kHeightB BYTES while the buffers and comparison loop span
// kStrideB * kHeightB * sizeof(TYPE_B) bytes; for 16-bit destinations the
// comparison could touch bytes neither path had initialized. The fills now
// cover the whole allocation.
#define TESTATOBRANDOM(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B,       \
                       TYPE_B, EPP_B, STRIDE_B, HEIGHT_B)                     \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##_Random) {                      \
    for (int times = 0; times < benchmark_iterations_; ++times) {             \
      const int kWidth = (fastrand() & 63) + 1;                               \
      const int kHeight = (fastrand() & 31) + 1;                              \
      const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A;    \
      const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B;    \
      const int kStrideA =                                                    \
          (kWidth * EPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;              \
      const int kStrideB =                                                    \
          (kWidth * EPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B;              \
      align_buffer_page_end(src_argb,                                         \
                            kStrideA* kHeightA*(int)sizeof(TYPE_A));          \
      align_buffer_page_end(dst_argb_c,                                       \
                            kStrideB* kHeightB*(int)sizeof(TYPE_B));          \
      align_buffer_page_end(dst_argb_opt,                                     \
                            kStrideB* kHeightB*(int)sizeof(TYPE_B));          \
      for (int i = 0; i < kStrideA * kHeightA * (int)sizeof(TYPE_A); ++i) {   \
        src_argb[i] = 0xfe;                                                   \
      }                                                                       \
      /* Fill the FULL byte size of both destinations so the comparison      \
       * never reads uninitialized memory. */                                 \
      memset(dst_argb_c, 123, kStrideB* kHeightB*(int)sizeof(TYPE_B));        \
      memset(dst_argb_opt, 123, kStrideB* kHeightB*(int)sizeof(TYPE_B));      \
      MaskCpuFlags(disable_cpu_flags_);                                       \
      FMT_A##To##FMT_B((TYPE_A*)src_argb, kStrideA, (TYPE_B*)dst_argb_c,      \
                       kStrideB, kWidth, kHeight);                            \
      MaskCpuFlags(benchmark_cpu_info_);                                      \
      FMT_A##To##FMT_B((TYPE_A*)src_argb, kStrideA, (TYPE_B*)dst_argb_opt,    \
                       kStrideB, kWidth, kHeight);                            \
      for (int i = 0; i < kStrideB * kHeightB * (int)sizeof(TYPE_B); ++i) {   \
        EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]);                            \
      }                                                                       \
      free_aligned_buffer_page_end(src_argb);                                 \
      free_aligned_buffer_page_end(dst_argb_c);                               \
      free_aligned_buffer_page_end(dst_argb_opt);                             \
    }                                                                         \
  }
574
#if defined(ENABLE_FULL_TESTS)
// TESTATOB expands TESTATOBI into the standard variant set (_Any = odd
// width, _Unaligned = source offset by 4, _Invert = negative height, _Opt)
// plus the random-small-size sweep (_Random); reduced builds keep only _Opt.
#define TESTATOB(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B,   \
                 EPP_B, STRIDE_B, HEIGHT_B)                                 \
  TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \
            STRIDE_B, HEIGHT_B, benchmark_width_ + 1, _Any, +, 0)           \
  TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \
            STRIDE_B, HEIGHT_B, benchmark_width_, _Unaligned, +, 4)         \
  TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \
            STRIDE_B, HEIGHT_B, benchmark_width_, _Invert, -, 0)            \
  TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \
            STRIDE_B, HEIGHT_B, benchmark_width_, _Opt, +, 0)               \
  TESTATOBRANDOM(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B,   \
                 EPP_B, STRIDE_B, HEIGHT_B)
#else
#define TESTATOB(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B,   \
                 EPP_B, STRIDE_B, HEIGHT_B)                                 \
  TESTATOBI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \
            STRIDE_B, HEIGHT_B, benchmark_width_, _Opt, +, 0)
#endif
594
// Packed-to-packed conversions. LITTLE_ENDIAN_ONLY_TEST guards destinations
// whose bit packing is endian-sensitive (AR30/AB30, RGB565, ARGB1555, ...).
TESTATOB(AB30, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
TESTATOB(AB30, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOB(ABGR, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1)
#endif
TESTATOB(ABGR, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOB(AR30, uint8_t, 4, 4, 1, AB30, uint8_t, 4, 4, 1)
#endif
TESTATOB(AR30, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOB(AR30, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1)
TESTATOB(AR30, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
#endif
TESTATOB(ARGB, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOB(ARGB, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1)
#endif
TESTATOB(ARGB, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, ARGB1555, uint8_t, 2, 2, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, ARGB4444, uint8_t, 2, 2, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, ARGBMirror, uint8_t, 4, 4, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, BGRA, uint8_t, 4, 4, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, I400, uint8_t, 1, 1, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1)
TESTATOB(ABGR, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1)
TESTATOB(RGBA, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, RAW, uint8_t, 3, 3, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, RGB24, uint8_t, 3, 3, 1)
TESTATOB(ABGR, uint8_t, 4, 4, 1, RAW, uint8_t, 3, 3, 1)
TESTATOB(ABGR, uint8_t, 4, 4, 1, RGB24, uint8_t, 3, 3, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOB(ARGB, uint8_t, 4, 4, 1, RGB565, uint8_t, 2, 2, 1)
#endif
TESTATOB(ARGB, uint8_t, 4, 4, 1, RGBA, uint8_t, 4, 4, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, UYVY, uint8_t, 2, 4, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, YUY2, uint8_t, 2, 4, 1)
TESTATOB(ARGB1555, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(ARGB4444, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(BGRA, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(I400, uint8_t, 1, 1, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(I400, uint8_t, 1, 1, 1, I400, uint8_t, 1, 1, 1)
TESTATOB(I400, uint8_t, 1, 1, 1, I400Mirror, uint8_t, 1, 1, 1)
TESTATOB(J400, uint8_t, 1, 1, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(J400, uint8_t, 1, 1, 1, J400, uint8_t, 1, 1, 1)
TESTATOB(RAW, uint8_t, 3, 3, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(RAW, uint8_t, 3, 3, 1, RGBA, uint8_t, 4, 4, 1)
TESTATOB(RAW, uint8_t, 3, 3, 1, RGB24, uint8_t, 3, 3, 1)
TESTATOB(RGB24, uint8_t, 3, 3, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(RGB24, uint8_t, 3, 3, 1, J400, uint8_t, 1, 1, 1)
TESTATOB(RGB24, uint8_t, 3, 3, 1, RGB24Mirror, uint8_t, 3, 3, 1)
TESTATOB(RAW, uint8_t, 3, 3, 1, J400, uint8_t, 1, 1, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOB(RGB565, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1)
#endif
TESTATOB(RGBA, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(UYVY, uint8_t, 2, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(YUY2, uint8_t, 2, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(YUY2, uint8_t, 2, 4, 1, Y, uint8_t, 1, 1, 1)
// 8-bit <-> 16-bit-per-channel (AR64/AB64, TYPE uint16_t) conversions.
TESTATOB(ARGB, uint8_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1)
TESTATOB(ARGB, uint8_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1)
TESTATOB(ABGR, uint8_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1)
TESTATOB(ABGR, uint8_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1)
TESTATOB(AR64, uint16_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(AB64, uint16_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOB(AR64, uint16_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
TESTATOB(AB64, uint16_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
TESTATOB(AR64, uint16_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1)
TESTATOB(AB64, uint16_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1)
664
// in place test
// TESTATOAI generates one gtest that runs FMT_A##To##FMT_B with the source
// and destination pointers aliased (src == dst), verifying converters
// tolerate fully in-place operation. The random source is copied into both
// destination buffers, each path converts its own buffer in place, and the
// results are compared. After the benchmark loop the opt buffer is reset
// from src and converted once more, so the comparison sees a single-pass
// result rather than the output of repeated in-place conversions.
// NOTE(review): buffers are allocated from the A-format geometry but
// compared over kStrideB * kHeightB * sizeof(TYPE_B) bytes; this is only
// safe while FMT_B is no larger than FMT_A, as in the invocations below -
// confirm before adding a widening conversion.
#define TESTATOAI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B,   \
                  EPP_B, STRIDE_B, HEIGHT_B, W1280, N, NEG, OFF)             \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##N) {                           \
    const int kWidth = W1280;                                                \
    const int kHeight = benchmark_height_;                                   \
    const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A;     \
    const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B;     \
    const int kStrideA =                                                     \
        (kWidth * EPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;               \
    const int kStrideB =                                                     \
        (kWidth * EPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B;               \
    align_buffer_page_end(src_argb,                                          \
                          kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF);     \
    align_buffer_page_end(dst_argb_c,                                        \
                          kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF);     \
    align_buffer_page_end(dst_argb_opt,                                      \
                          kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF);     \
    for (int i = 0; i < kStrideA * kHeightA * (int)sizeof(TYPE_A); ++i) {    \
      src_argb[i + OFF] = (fastrand() & 0xff);                               \
    }                                                                        \
    memcpy(dst_argb_c + OFF, src_argb,                                       \
           kStrideA * kHeightA * (int)sizeof(TYPE_A));                       \
    memcpy(dst_argb_opt + OFF, src_argb,                                     \
           kStrideA * kHeightA * (int)sizeof(TYPE_A));                       \
    MaskCpuFlags(disable_cpu_flags_);                                        \
    FMT_A##To##FMT_B((TYPE_A*)(dst_argb_c /* src */ + OFF), kStrideA,        \
                     (TYPE_B*)dst_argb_c, kStrideB, kWidth, NEG kHeight);    \
    MaskCpuFlags(benchmark_cpu_info_);                                       \
    for (int i = 0; i < benchmark_iterations_; ++i) {                        \
      FMT_A##To##FMT_B((TYPE_A*)(dst_argb_opt /* src */ + OFF), kStrideA,    \
                       (TYPE_B*)dst_argb_opt, kStrideB, kWidth, NEG kHeight); \
    }                                                                        \
    memcpy(dst_argb_opt + OFF, src_argb,                                     \
           kStrideA * kHeightA * (int)sizeof(TYPE_A));                       \
    FMT_A##To##FMT_B((TYPE_A*)(dst_argb_opt /* src */ + OFF), kStrideA,      \
                     (TYPE_B*)dst_argb_opt, kStrideB, kWidth, NEG kHeight);  \
    for (int i = 0; i < kStrideB * kHeightB * (int)sizeof(TYPE_B); ++i) {    \
      EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]);                             \
    }                                                                        \
    free_aligned_buffer_page_end(src_argb);                                  \
    free_aligned_buffer_page_end(dst_argb_c);                                \
    free_aligned_buffer_page_end(dst_argb_opt);                              \
  }
709
710 #define TESTATOA(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, \
711 EPP_B, STRIDE_B, HEIGHT_B) \
712 TESTATOAI(FMT_A, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, FMT_B, TYPE_B, EPP_B, \
713 STRIDE_B, HEIGHT_B, benchmark_width_, _Inplace, +, 0)
714
// In-place conversion instantiations. Commented-out lines mark conversions
// that cannot run in place (mirrors, or conversions that increase bpp) —
// see the TODOs below.
TESTATOA(AB30, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
TESTATOA(AB30, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOA(ABGR, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1)
#endif
TESTATOA(ABGR, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOA(AR30, uint8_t, 4, 4, 1, AB30, uint8_t, 4, 4, 1)
#endif
TESTATOA(AR30, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOA(AR30, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1)
TESTATOA(AR30, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
#endif
TESTATOA(ARGB, uint8_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOA(ARGB, uint8_t, 4, 4, 1, AR30, uint8_t, 4, 4, 1)
#endif
TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGB1555, uint8_t, 2, 2, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGB4444, uint8_t, 2, 2, 1)
// TODO(fbarchard): Support in place for mirror.
// TESTATOA(ARGB, uint8_t, 4, 4, 1, ARGBMirror, uint8_t, 4, 4, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, BGRA, uint8_t, 4, 4, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, I400, uint8_t, 1, 1, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1)
TESTATOA(RGBA, uint8_t, 4, 4, 1, J400, uint8_t, 1, 1, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, RAW, uint8_t, 3, 3, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, RGB24, uint8_t, 3, 3, 1)
TESTATOA(ABGR, uint8_t, 4, 4, 1, RAW, uint8_t, 3, 3, 1)
TESTATOA(ABGR, uint8_t, 4, 4, 1, RGB24, uint8_t, 3, 3, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOA(ARGB, uint8_t, 4, 4, 1, RGB565, uint8_t, 2, 2, 1)
#endif
TESTATOA(ARGB, uint8_t, 4, 4, 1, RGBA, uint8_t, 4, 4, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, UYVY, uint8_t, 2, 4, 1)
TESTATOA(ARGB, uint8_t, 4, 4, 1, YUY2, uint8_t, 2, 4, 1)
// TODO(fbarchard): Support in place for conversions that increase bpp.
// TESTATOA(ARGB1555, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1)
// TESTATOA(ARGB4444, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(BGRA, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
// TESTATOA(I400, uint8_t, 1, 1, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(I400, uint8_t, 1, 1, 1, I400, uint8_t, 1, 1, 1)
// TESTATOA(I400, uint8_t, 1, 1, 1, I400Mirror, uint8_t, 1, 1, 1)
// TESTATOA(J400, uint8_t, 1, 1, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(J400, uint8_t, 1, 1, 1, J400, uint8_t, 1, 1, 1)
// TESTATOA(RAW, uint8_t, 3, 3, 1, ARGB, uint8_t, 4, 4, 1)
// TESTATOA(RAW, uint8_t, 3, 3, 1, RGBA, uint8_t, 4, 4, 1)
TESTATOA(RAW, uint8_t, 3, 3, 1, RGB24, uint8_t, 3, 3, 1)
// TESTATOA(RGB24, uint8_t, 3, 3, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(RGB24, uint8_t, 3, 3, 1, J400, uint8_t, 1, 1, 1)
// TESTATOA(RGB24, uint8_t, 3, 3, 1, RGB24Mirror, uint8_t, 3, 3, 1)
TESTATOA(RAW, uint8_t, 3, 3, 1, J400, uint8_t, 1, 1, 1)
#ifdef LITTLE_ENDIAN_ONLY_TEST
// TESTATOA(RGB565, uint8_t, 2, 2, 1, ARGB, uint8_t, 4, 4, 1)
#endif
TESTATOA(RGBA, uint8_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
// TESTATOA(UYVY, uint8_t, 2, 4, 1, ARGB, uint8_t, 4, 4, 1)
// TESTATOA(YUY2, uint8_t, 2, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(YUY2, uint8_t, 2, 4, 1, Y, uint8_t, 1, 1, 1)
// TESTATOA(ARGB, uint8_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1)
// TESTATOA(ARGB, uint8_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1)
// TESTATOA(ABGR, uint8_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1)
// TESTATOA(ABGR, uint8_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1)
TESTATOA(AR64, uint16_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(AB64, uint16_t, 4, 4, 1, ARGB, uint8_t, 4, 4, 1)
TESTATOA(AR64, uint16_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
TESTATOA(AB64, uint16_t, 4, 4, 1, ABGR, uint8_t, 4, 4, 1)
TESTATOA(AR64, uint16_t, 4, 4, 1, AB64, uint16_t, 4, 4, 1)
TESTATOA(AB64, uint16_t, 4, 4, 1, AR64, uint16_t, 4, 4, 1)
785
// Dithered conversion test: runs FMT_A##To##FMT_B##Dither with a NULL
// dither table through the C path and the optimized path and requires
// byte-identical output.
#define TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,   \
                   HEIGHT_B, W1280, N, NEG, OFF)                               \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither##N) {                     \
    const int kWidth = W1280;                                                  \
    const int kHeight = benchmark_height_;                                     \
    const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A;       \
    const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B;       \
    const int kStrideA =                                                       \
        (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;                 \
    const int kStrideB =                                                       \
        (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B;                 \
    align_buffer_page_end(src_argb, kStrideA* kHeightA + OFF);                 \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeightB);                     \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB);                   \
    for (int i = 0; i < kStrideA * kHeightA; ++i) {                            \
      src_argb[i + OFF] = (fastrand() & 0xff);                                 \
    }                                                                          \
    /* Different fill values so a no-op conversion cannot pass by accident */  \
    memset(dst_argb_c, 1, kStrideB* kHeightB);                                 \
    memset(dst_argb_opt, 101, kStrideB* kHeightB);                             \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, dst_argb_c, kStrideB,   \
                             NULL, kWidth, NEG kHeight);                       \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, dst_argb_opt,         \
                               kStrideB, NULL, kWidth, NEG kHeight);           \
    }                                                                          \
    for (int i = 0; i < kStrideB * kHeightB; ++i) {                            \
      EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]);                               \
    }                                                                          \
    free_aligned_buffer_page_end(src_argb);                                    \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_opt);                                \
  }
820
// Dithered conversion test over random small sizes (width 1..64,
// height 1..32), comparing C vs optimized output each iteration.
#define TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B,        \
                        STRIDE_B, HEIGHT_B)                                    \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither_Random) {                 \
    for (int times = 0; times < benchmark_iterations_; ++times) {              \
      const int kWidth = (fastrand() & 63) + 1;                                \
      const int kHeight = (fastrand() & 31) + 1;                               \
      const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A;     \
      const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B;     \
      const int kStrideA =                                                     \
          (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;               \
      const int kStrideB =                                                     \
          (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B;               \
      align_buffer_page_end(src_argb, kStrideA* kHeightA);                     \
      align_buffer_page_end(dst_argb_c, kStrideB* kHeightB);                   \
      align_buffer_page_end(dst_argb_opt, kStrideB* kHeightB);                 \
      for (int i = 0; i < kStrideA * kHeightA; ++i) {                          \
        src_argb[i] = (fastrand() & 0xff);                                     \
      }                                                                        \
      memset(dst_argb_c, 123, kStrideB* kHeightB);                             \
      memset(dst_argb_opt, 123, kStrideB* kHeightB);                           \
      MaskCpuFlags(disable_cpu_flags_);                                        \
      FMT_A##To##FMT_B##Dither(src_argb, kStrideA, dst_argb_c, kStrideB, NULL, \
                               kWidth, kHeight);                               \
      MaskCpuFlags(benchmark_cpu_info_);                                       \
      FMT_A##To##FMT_B##Dither(src_argb, kStrideA, dst_argb_opt, kStrideB,     \
                               NULL, kWidth, kHeight);                         \
      for (int i = 0; i < kStrideB * kHeightB; ++i) {                          \
        EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]);                             \
      }                                                                        \
      free_aligned_buffer_page_end(src_argb);                                  \
      free_aligned_buffer_page_end(dst_argb_c);                                \
      free_aligned_buffer_page_end(dst_argb_opt);                              \
    }                                                                          \
  }
855
// TESTATOBD bundles the dither test variants. With ENABLE_FULL_TESTS it
// emits _Any (odd width), _Unaligned, _Invert (negative height), _Opt and
// _Random; otherwise only the random-size test.
#if defined(ENABLE_FULL_TESTS)
#define TESTATOBD(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,    \
                  HEIGHT_B)                                                    \
  TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,         \
             HEIGHT_B, benchmark_width_ + 1, _Any, +, 0)                       \
  TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,         \
             HEIGHT_B, benchmark_width_, _Unaligned, +, 2)                     \
  TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,         \
             HEIGHT_B, benchmark_width_, _Invert, -, 0)                        \
  TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,         \
             HEIGHT_B, benchmark_width_, _Opt, +, 0)                           \
  TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,    \
                  HEIGHT_B)
#else
#define TESTATOBD(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,    \
                  HEIGHT_B)                                                    \
  TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, FMT_B, BPP_B, STRIDE_B,    \
                  HEIGHT_B)
#endif

// RGB565 byte layout is endian-dependent, so the dither test is
// little-endian only.
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTATOBD(ARGB, 4, 4, 1, RGB565, 2, 2, 1)
#endif
879
// These conversions called twice, produce the original result.
// e.g. endian swap twice. Verifies that FMT_ATOB is an involution: the C
// and optimized paths each run once on src, then once more in place, and
// the final buffers must match src exactly.
#define TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, W1280, N, NEG,   \
                 OFF)                                                          \
  TEST_F(LibYUVConvertTest, FMT_ATOB##_Endswap##N) {                           \
    const int kWidth = W1280;                                                  \
    const int kHeight = benchmark_height_;                                     \
    const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A;       \
    const int kStrideA =                                                       \
        (kWidth * EPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;                 \
    align_buffer_page_end(src_argb,                                            \
                          kStrideA* kHeightA*(int)sizeof(TYPE_A) + OFF);       \
    align_buffer_page_end(dst_argb_c, kStrideA* kHeightA*(int)sizeof(TYPE_A)); \
    align_buffer_page_end(dst_argb_opt,                                        \
                          kStrideA* kHeightA*(int)sizeof(TYPE_A));             \
    for (int i = 0; i < kStrideA * kHeightA * (int)sizeof(TYPE_A); ++i) {      \
      src_argb[i + OFF] = (fastrand() & 0xff);                                 \
    }                                                                          \
    memset(dst_argb_c, 1, kStrideA* kHeightA);                                 \
    memset(dst_argb_opt, 101, kStrideA* kHeightA);                             \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_ATOB((TYPE_A*)(src_argb + OFF), kStrideA, (TYPE_A*)dst_argb_c,         \
             kStrideA, kWidth, NEG kHeight);                                   \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_ATOB((TYPE_A*)(src_argb + OFF), kStrideA, (TYPE_A*)dst_argb_opt,     \
               kStrideA, kWidth, NEG kHeight);                                 \
    }                                                                          \
    /* Apply the conversion a second time, in place, on each result */         \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_ATOB((TYPE_A*)dst_argb_c, kStrideA, (TYPE_A*)dst_argb_c, kStrideA,     \
             kWidth, NEG kHeight);                                             \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    FMT_ATOB((TYPE_A*)dst_argb_opt, kStrideA, (TYPE_A*)dst_argb_opt, kStrideA, \
             kWidth, NEG kHeight);                                             \
    for (int i = 0; i < kStrideA * kHeightA * (int)sizeof(TYPE_A); ++i) {      \
      EXPECT_EQ(src_argb[i + OFF], dst_argb_opt[i]);                           \
      EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]);                               \
    }                                                                          \
    free_aligned_buffer_page_end(src_argb);                                    \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_opt);                                \
  }

// Bundle of endswap variants; trimmed to _Opt when full tests are off.
#if defined(ENABLE_FULL_TESTS)
#define TESTEND(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A)                   \
  TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_ + 1,  \
           _Any, +, 0)                                                         \
  TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_,      \
           _Unaligned, +, 2)                                                   \
  TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_,      \
           _Opt, +, 0)
#else
#define TESTEND(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A)                   \
  TESTENDI(FMT_ATOB, TYPE_A, EPP_A, STRIDE_A, HEIGHT_A, benchmark_width_,      \
           _Opt, +, 0)
#endif
936
// Self-inverse conversions: channel swaps and the 16-bit endian swap.
TESTEND(ARGBToBGRA, uint8_t, 4, 4, 1)
TESTEND(ARGBToABGR, uint8_t, 4, 4, 1)
TESTEND(BGRAToARGB, uint8_t, 4, 4, 1)
TESTEND(ABGRToARGB, uint8_t, 4, 4, 1)
TESTEND(AB64ToAR64, uint16_t, 4, 4, 1)
942
// Four-plane (Y/U/V + Alpha) planar-to-packed conversion test. ATTEN
// selects alpha premultiplication. C and optimized outputs are compared
// byte-for-byte.
#define TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                        YALIGN, W1280, N, NEG, OFF, ATTEN)                     \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                        \
    const int kWidth = W1280;                                                  \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                   \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                      \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                        \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);             \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(src_u, kSizeUV + OFF);                               \
    align_buffer_page_end(src_v, kSizeUV + OFF);                               \
    align_buffer_page_end(src_a, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF);              \
    for (int i = 0; i < kWidth * kHeight; ++i) {                               \
      src_y[i + OFF] = (fastrand() & 0xff);                                    \
      src_a[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < kSizeUV; ++i) {                                        \
      src_u[i + OFF] = (fastrand() & 0xff);                                    \
      src_v[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    memset(dst_argb_c + OFF, 1, kStrideB * kHeight);                           \
    memset(dst_argb_opt + OFF, 101, kStrideB * kHeight);                       \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,         \
                          src_v + OFF, kStrideUV, src_a + OFF, kWidth,         \
                          dst_argb_c + OFF, kStrideB, kWidth, NEG kHeight,     \
                          ATTEN);                                              \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,       \
                            src_v + OFF, kStrideUV, src_a + OFF, kWidth,       \
                            dst_argb_opt + OFF, kStrideB, kWidth, NEG kHeight, \
                            ATTEN);                                            \
    }                                                                          \
    for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) {                       \
      EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_opt[i + OFF]);                   \
    }                                                                          \
    free_aligned_buffer_page_end(src_y);                                       \
    free_aligned_buffer_page_end(src_u);                                       \
    free_aligned_buffer_page_end(src_v);                                       \
    free_aligned_buffer_page_end(src_a);                                       \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_opt);                                \
  }

// Variant bundle; _Premult (ATTEN=1) exercises alpha premultiplication.
#if defined(ENABLE_FULL_TESTS)
#define TESTQPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                       YALIGN)                                                 \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_ + 1, _Any, +, 0, 0)                 \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Unaligned, +, 2, 0)               \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Invert, -, 0, 0)                  \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Opt, +, 0, 0)                     \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Premult, +, 0, 1)
#else
#define TESTQPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                       YALIGN)                                                 \
  TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Opt, +, 0, 0)
#endif
1009
// Colorspace aliases: map prefix letters onto the generic
// I4xxAlphaTo{ARGB,ABGR}Matrix entry points with the matching constants —
// J = JPEG (full range 601), F = full range 709, H = HD (709),
// U = 2020, V = full range 2020.
#define J420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k,   \
                        l, m)
#define J420AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k,   \
                        l, m)
#define F420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k,   \
                        l, m)
#define F420AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k,   \
                        l, m)
#define H420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k,   \
                        l, m)
#define H420AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k,   \
                        l, m)
#define U420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k,   \
                        l, m)
#define U420AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k,   \
                        l, m)
#define V420AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k,  \
                        l, m)
#define V420AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I420AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k,  \
                        l, m)
#define J422AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k,   \
                        l, m)
#define J422AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k,   \
                        l, m)
#define F422AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k,   \
                        l, m)
#define F422AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k,   \
                        l, m)
#define H422AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k,   \
                        l, m)
#define H422AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k,   \
                        l, m)
#define U422AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k,   \
                        l, m)
#define U422AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k,   \
                        l, m)
#define V422AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k,  \
                        l, m)
#define V422AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I422AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k,  \
                        l, m)
#define J444AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k,   \
                        l, m)
#define J444AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k,   \
                        l, m)
#define F444AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k,   \
                        l, m)
#define F444AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k,   \
                        l, m)
#define H444AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k,   \
                        l, m)
#define H444AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k,   \
                        l, m)
#define U444AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k,   \
                        l, m)
#define U444AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k,   \
                        l, m)
#define V444AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k,  \
                        l, m)
#define V444AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)                 \
  I444AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k,  \
                        l, m)

// Bilinear-filtered variants of the alpha converters.
#define I420AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m)           \
  I420AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j,                    \
                              &kYuvI601Constants, k, l, m, kFilterBilinear)
#define I422AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m)           \
  I422AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j,                    \
                              &kYuvI601Constants, k, l, m, kFilterBilinear)
1107
// Alpha planar-to-packed instantiations: full matrix of colorspaces and
// outputs when ENABLE_FULL_TESTS, otherwise a representative subset.
#if defined(ENABLE_FULL_TESTS)
TESTQPLANARTOB(I420Alpha, 2, 2, ARGB, 4, 4, 1)
TESTQPLANARTOB(I420Alpha, 2, 2, ABGR, 4, 4, 1)
TESTQPLANARTOB(J420Alpha, 2, 2, ARGB, 4, 4, 1)
TESTQPLANARTOB(J420Alpha, 2, 2, ABGR, 4, 4, 1)
TESTQPLANARTOB(H420Alpha, 2, 2, ARGB, 4, 4, 1)
TESTQPLANARTOB(H420Alpha, 2, 2, ABGR, 4, 4, 1)
TESTQPLANARTOB(F420Alpha, 2, 2, ARGB, 4, 4, 1)
TESTQPLANARTOB(F420Alpha, 2, 2, ABGR, 4, 4, 1)
TESTQPLANARTOB(U420Alpha, 2, 2, ARGB, 4, 4, 1)
TESTQPLANARTOB(U420Alpha, 2, 2, ABGR, 4, 4, 1)
TESTQPLANARTOB(V420Alpha, 2, 2, ARGB, 4, 4, 1)
TESTQPLANARTOB(V420Alpha, 2, 2, ABGR, 4, 4, 1)
TESTQPLANARTOB(I422Alpha, 2, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(I422Alpha, 2, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(J422Alpha, 2, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(J422Alpha, 2, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(H422Alpha, 2, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(H422Alpha, 2, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(F422Alpha, 2, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(F422Alpha, 2, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(U422Alpha, 2, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(U422Alpha, 2, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(V422Alpha, 2, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(V422Alpha, 2, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(I444Alpha, 1, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(I444Alpha, 1, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(J444Alpha, 1, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(J444Alpha, 1, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(H444Alpha, 1, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(H444Alpha, 1, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(F444Alpha, 1, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(F444Alpha, 1, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(U444Alpha, 1, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(U444Alpha, 1, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(V444Alpha, 1, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(V444Alpha, 1, 1, ABGR, 4, 4, 1)
TESTQPLANARTOB(I420Alpha, 2, 2, ARGBFilter, 4, 4, 1)
TESTQPLANARTOB(I422Alpha, 2, 1, ARGBFilter, 4, 4, 1)
#else
TESTQPLANARTOB(I420Alpha, 2, 2, ARGB, 4, 4, 1)
TESTQPLANARTOB(I422Alpha, 2, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(I444Alpha, 1, 1, ARGB, 4, 4, 1)
TESTQPLANARTOB(I420Alpha, 2, 2, ARGBFilter, 4, 4, 1)
TESTQPLANARTOB(I422Alpha, 2, 1, ARGBFilter, 4, 4, 1)
#endif
1154
// Converts a 32-entry luma ramp to ARGB and checks the B channel of each
// pixel against the expected luma expansion (y - 16) * 1.164, rounded.
TEST_F(LibYUVConvertTest, TestYToARGB) {
  enum { kNumPixels = 32 };
  uint8_t luma[kNumPixels];
  uint8_t expected_gray[kNumPixels];
  int i = 0;
  while (i < kNumPixels) {
    luma[i] = i * 5 + 17;
    expected_gray[i] = static_cast<int>((luma[i] - 16) * 1.164f + 0.5f);
    ++i;
  }
  uint8_t pixels[kNumPixels * 4];
  YToARGB(luma, 0, pixels, 0, kNumPixels, 1);

  for (int j = 0; j < kNumPixels; ++j) {
    printf("%2d %d: %d <-> %d,%d,%d,%d\n", j, luma[j], expected_gray[j],
           pixels[j * 4 + 0], pixels[j * 4 + 1], pixels[j * 4 + 2],
           pixels[j * 4 + 3]);
  }
  for (int j = 0; j < kNumPixels; ++j) {
    EXPECT_EQ(expected_gray[j], pixels[j * 4 + 0]);
  }
}
1173
// All-zero 4x4 dither table: with this table ARGBToRGB565Dither must
// reduce to plain ARGBToRGB565.
static const uint8_t kNoDither4x4[16] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
1177
// ARGBToRGB565Dither with an all-zero dither table must produce the exact
// same bytes as plain ARGBToRGB565.
TEST_F(LibYUVConvertTest, TestNoDither) {
  const int kArgbBytes = benchmark_width_ * benchmark_height_ * 4;
  const int kRgb565Bytes = benchmark_width_ * benchmark_height_ * 2;
  align_buffer_page_end(src_argb, kArgbBytes);
  align_buffer_page_end(dst_rgb565, kRgb565Bytes);
  align_buffer_page_end(dst_rgb565dither, kRgb565Bytes);
  MemRandomize(src_argb, kArgbBytes);
  MemRandomize(dst_rgb565, kRgb565Bytes);
  MemRandomize(dst_rgb565dither, kRgb565Bytes);
  ARGBToRGB565(src_argb, benchmark_width_ * 4, dst_rgb565, benchmark_width_ * 2,
               benchmark_width_, benchmark_height_);
  ARGBToRGB565Dither(src_argb, benchmark_width_ * 4, dst_rgb565dither,
                     benchmark_width_ * 2, kNoDither4x4, benchmark_width_,
                     benchmark_height_);
  for (int i = 0; i < kRgb565Bytes; ++i) {
    EXPECT_EQ(dst_rgb565[i], dst_rgb565dither[i]);
  }

  free_aligned_buffer_page_end(src_argb);
  free_aligned_buffer_page_end(dst_rgb565);
  free_aligned_buffer_page_end(dst_rgb565dither);
}
1199
// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
// Added to the 8-bit channels before truncation, spreading quantization
// error spatially.
static const uint8_t kDither565_4x4[16] = {
    0, 4, 1, 5, 6, 2, 7, 3, 1, 5, 0, 4, 7, 3, 6, 2,
};
1204
// Dithered 565 output, expanded back to ARGB, must stay within the dither
// amplitude (<= 9 per channel after 565 round-trip) of the undithered path.
TEST_F(LibYUVConvertTest, TestDither) {
  const int kArgbBytes = benchmark_width_ * benchmark_height_ * 4;
  const int kRgb565Bytes = benchmark_width_ * benchmark_height_ * 2;
  align_buffer_page_end(src_argb, kArgbBytes);
  align_buffer_page_end(dst_rgb565, kRgb565Bytes);
  align_buffer_page_end(dst_rgb565dither, kRgb565Bytes);
  align_buffer_page_end(dst_argb, kArgbBytes);
  align_buffer_page_end(dst_argbdither, kArgbBytes);
  MemRandomize(src_argb, kArgbBytes);
  MemRandomize(dst_rgb565, kRgb565Bytes);
  MemRandomize(dst_rgb565dither, kRgb565Bytes);
  MemRandomize(dst_argb, kArgbBytes);
  MemRandomize(dst_argbdither, kArgbBytes);
  ARGBToRGB565(src_argb, benchmark_width_ * 4, dst_rgb565, benchmark_width_ * 2,
               benchmark_width_, benchmark_height_);
  ARGBToRGB565Dither(src_argb, benchmark_width_ * 4, dst_rgb565dither,
                     benchmark_width_ * 2, kDither565_4x4, benchmark_width_,
                     benchmark_height_);
  // Expand both 565 results back to ARGB so channels can be compared.
  RGB565ToARGB(dst_rgb565, benchmark_width_ * 2, dst_argb, benchmark_width_ * 4,
               benchmark_width_, benchmark_height_);
  RGB565ToARGB(dst_rgb565dither, benchmark_width_ * 2, dst_argbdither,
               benchmark_width_ * 4, benchmark_width_, benchmark_height_);

  for (int i = 0; i < kArgbBytes; ++i) {
    EXPECT_NEAR(dst_argb[i], dst_argbdither[i], 9);
  }
  free_aligned_buffer_page_end(src_argb);
  free_aligned_buffer_page_end(dst_rgb565);
  free_aligned_buffer_page_end(dst_rgb565dither);
  free_aligned_buffer_page_end(dst_argb);
  free_aligned_buffer_page_end(dst_argbdither);
}
1237
// Planar YUV to dithered packed-RGB test. Because 565 packs bits, both
// results are expanded to FMT_C (ARGB) before the byte-wise comparison.
#define TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                        YALIGN, W1280, N, NEG, OFF, FMT_C, BPP_C)              \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##Dither##N) {                \
    const int kWidth = W1280;                                                  \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                   \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                      \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                        \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);             \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(src_u, kSizeUV + OFF);                               \
    align_buffer_page_end(src_v, kSizeUV + OFF);                               \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF);              \
    for (int i = 0; i < kWidth * kHeight; ++i) {                               \
      src_y[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < kSizeUV; ++i) {                                        \
      src_u[i + OFF] = (fastrand() & 0xff);                                    \
      src_v[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    memset(dst_argb_c + OFF, 1, kStrideB * kHeight);                           \
    memset(dst_argb_opt + OFF, 101, kStrideB * kHeight);                       \
    MaskCpuFlags(disable_cpu_flags_);                                          \
    FMT_PLANAR##To##FMT_B##Dither(src_y + OFF, kWidth, src_u + OFF, kStrideUV, \
                                  src_v + OFF, kStrideUV, dst_argb_c + OFF,    \
                                  kStrideB, NULL, kWidth, NEG kHeight);        \
    MaskCpuFlags(benchmark_cpu_info_);                                         \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_PLANAR##To##FMT_B##Dither(                                           \
          src_y + OFF, kWidth, src_u + OFF, kStrideUV, src_v + OFF, kStrideUV, \
          dst_argb_opt + OFF, kStrideB, NULL, kWidth, NEG kHeight);            \
    }                                                                          \
    /* Convert to ARGB so 565 is expanded to bytes that can be compared. */    \
    align_buffer_page_end(dst_argb32_c, kWidth* BPP_C* kHeight);               \
    align_buffer_page_end(dst_argb32_opt, kWidth* BPP_C* kHeight);             \
    memset(dst_argb32_c, 2, kWidth* BPP_C* kHeight);                           \
    memset(dst_argb32_opt, 102, kWidth* BPP_C* kHeight);                       \
    FMT_B##To##FMT_C(dst_argb_c + OFF, kStrideB, dst_argb32_c, kWidth * BPP_C, \
                     kWidth, kHeight);                                         \
    FMT_B##To##FMT_C(dst_argb_opt + OFF, kStrideB, dst_argb32_opt,             \
                     kWidth * BPP_C, kWidth, kHeight);                         \
    for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) {                       \
      EXPECT_EQ(dst_argb32_c[i], dst_argb32_opt[i]);                           \
    }                                                                          \
    free_aligned_buffer_page_end(src_y);                                       \
    free_aligned_buffer_page_end(src_u);                                       \
    free_aligned_buffer_page_end(src_v);                                       \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_opt);                                \
    free_aligned_buffer_page_end(dst_argb32_c);                                \
    free_aligned_buffer_page_end(dst_argb32_opt);                              \
  }

// Variant bundle for the dithered planar tests.
#if defined(ENABLE_FULL_TESTS)
#define TESTPLANARTOBD(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                       YALIGN, FMT_C, BPP_C)                                   \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C)      \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Unaligned, +, 2, FMT_C, BPP_C)    \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Invert, -, 0, FMT_C, BPP_C)       \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)
#else
#define TESTPLANARTOBD(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                       YALIGN, FMT_C, BPP_C)                                   \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,       \
                  YALIGN, benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)
#endif
1308
// RGB565 layout is endian-dependent; test on little-endian targets only.
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTPLANARTOBD(I420, 2, 2, RGB565, 2, 2, 1, ARGB, 4)
#endif
1312
// Transitive test. A to B to C is same as A to C.
// Benchmarks A To B to C for comparison to 1 step, benchmarked elsewhere.
#define TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,  \
                       W1280, N, NEG, OFF, FMT_C, BPP_C)                       \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##To##FMT_C##N) {             \
    const int kWidth = W1280;                                                  \
    const int kHeight = benchmark_height_;                                     \
    const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B;                     \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                        \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);             \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(src_u, kSizeUV + OFF);                               \
    align_buffer_page_end(src_v, kSizeUV + OFF);                               \
    align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF);                \
    for (int i = 0; i < kWidth * kHeight; ++i) {                               \
      src_y[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < kSizeUV; ++i) {                                        \
      src_u[i + OFF] = (fastrand() & 0xff);                                    \
      src_v[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    memset(dst_argb_b + OFF, 1, kStrideB * kHeight);                           \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, src_u + OFF, kStrideUV,         \
                          src_v + OFF, kStrideUV, dst_argb_b + OFF, kStrideB,  \
                          kWidth, NEG kHeight);                                \
    /* Convert to a 3rd format in 1 step and 2 steps and compare  */           \
    const int kStrideC = kWidth * BPP_C;                                       \
    align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF);               \
    memset(dst_argb_c + OFF, 2, kStrideC * kHeight);                           \
    memset(dst_argb_bc + OFF, 3, kStrideC * kHeight);                          \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_PLANAR##To##FMT_C(src_y + OFF, kWidth, src_u + OFF, kStrideUV,       \
                            src_v + OFF, kStrideUV, dst_argb_c + OFF,          \
                            kStrideC, kWidth, NEG kHeight);                    \
      /* Convert B to C */                                                     \
      FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF,          \
                       kStrideC, kWidth, kHeight);                             \
    }                                                                          \
    for (int i = 0; i < kStrideC * kHeight; ++i) {                             \
      EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]);                    \
    }                                                                          \
    free_aligned_buffer_page_end(src_y);                                       \
    free_aligned_buffer_page_end(src_u);                                       \
    free_aligned_buffer_page_end(src_v);                                       \
    free_aligned_buffer_page_end(dst_argb_b);                                  \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_bc);                                 \
  }
1362
#if defined(ENABLE_FULL_TESTS)
// Expand the transitive test into the four standard variants
// (_Any / _Unaligned / _Invert / _Opt); see TESTPLANARTOBD for their meaning.
#define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                      FMT_C, BPP_C)                                          \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C)             \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_, _Unaligned, +, 2, FMT_C, BPP_C)           \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_, _Invert, -, 0, FMT_C, BPP_C)              \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)
#else
// Reduced build: only the optimized flavor is instantiated.
#define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                      FMT_C, BPP_C)                                          \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                 benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)
#endif
1380
// Instantiations. Each line generates FMT_PLANAR##To##FMT_B##To##FMT_C tests
// for one planar source format, intermediate format B and final format C.
#if defined(ENABLE_FULL_TESTS)
TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB24, 3)
TESTPLANARTOE(I420, 2, 2, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, RGB24, 3)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, RAW, 3)
TESTPLANARTOE(I420, 2, 2, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTPLANARTOE(H420, 2, 2, ARGB, 1, 4, RAW, 3)
TESTPLANARTOE(H420, 2, 2, ARGB, 1, 4, RGB24, 3)
TESTPLANARTOE(H420, 2, 2, RAW, 1, 3, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, RAW, 1, 3, RGB24, 3)
TESTPLANARTOE(H420, 2, 2, RGB24, 1, 3, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, RGB24, 1, 3, RAW, 3)
TESTPLANARTOE(J420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(J420, 2, 2, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(U420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(U420, 2, 2, ARGB, 1, 4, ARGB, 4)
// 16-bit packed destinations are endian sensitive.
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB565, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB1555, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB4444, 2)
TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, RGB565, 2)
#endif
TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, ABGR, 4)
TESTPLANARTOE(I422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(J422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(H422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(H422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(U422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(U422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(V422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(V422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(I444, 1, 1, ARGB, 1, 4, ABGR, 4)
TESTPLANARTOE(I444, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(J444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J444, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(H444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(H444, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(U444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(U444, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(V444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(V444, 1, 1, ABGR, 1, 4, ARGB, 4)
// Packed YUV intermediates (SUB_B == 2, BPP_B == 4 means 2 bytes/pixel).
TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4)
#else
// Reduced build: a smaller representative subset.
TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB1555, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB4444, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB24, 3)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB565, 2)
TESTPLANARTOE(I420, 2, 2, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, RGB24, 3)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, RAW, 3)
TESTPLANARTOE(I420, 2, 2, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, RGB565, 2)
TESTPLANARTOE(I422, 2, 1, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I444, 1, 1, ABGR, 1, 4, ARGB, 4)
#endif
1459
// Transitive test: Compare 1 step vs 2 step conversion for YUVA to ARGB.
// Benchmark 2 step conversion for comparison to 1 step conversion.
// Like TESTPLANARTOEI but for 4-plane (YUV + Alpha) sources; ATTEN is passed
// through to the converter as its attenuate (premultiply alpha) flag.
#define TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                        W1280, N, NEG, OFF, FMT_C, BPP_C, ATTEN)               \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##To##FMT_C##N) {             \
    const int kWidth = W1280;                                                  \
    const int kHeight = benchmark_height_;                                     \
    const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B;                     \
    const int kSizeUV =                                                        \
        SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y);          \
    align_buffer_page_end(src_y, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(src_u, kSizeUV + OFF);                               \
    align_buffer_page_end(src_v, kSizeUV + OFF);                               \
    align_buffer_page_end(src_a, kWidth* kHeight + OFF);                       \
    align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF);                \
    const int kStrideC = kWidth * BPP_C;                                       \
    align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF);               \
    memset(dst_argb_c + OFF, 2, kStrideC * kHeight);                           \
    memset(dst_argb_b + OFF, 1, kStrideB * kHeight);                           \
    memset(dst_argb_bc + OFF, 3, kStrideC * kHeight);                          \
    for (int i = 0; i < kWidth * kHeight; ++i) {                               \
      src_y[i + OFF] = (fastrand() & 0xff);                                    \
      src_a[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < kSizeUV; ++i) {                                        \
      src_u[i + OFF] = (fastrand() & 0xff);                                    \
      src_v[i + OFF] = (fastrand() & 0xff);                                    \
    }                                                                          \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      /* Convert A to B */                                                     \
      FMT_PLANAR##To##FMT_B(                                                   \
          src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X),      \
          src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth,      \
          dst_argb_b + OFF, kStrideB, kWidth, NEG kHeight, ATTEN);             \
      /* Convert B to C */                                                     \
      FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF,          \
                       kStrideC, kWidth, kHeight);                             \
    }                                                                          \
    /* Convert A to C */                                                       \
    FMT_PLANAR##To##FMT_C(                                                     \
        src_y + OFF, kWidth, src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X),        \
        src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), src_a + OFF, kWidth,        \
        dst_argb_c + OFF, kStrideC, kWidth, NEG kHeight, ATTEN);               \
    for (int i = 0; i < kStrideC * kHeight; ++i) {                             \
      EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]);                    \
    }                                                                          \
    free_aligned_buffer_page_end(src_y);                                       \
    free_aligned_buffer_page_end(src_u);                                       \
    free_aligned_buffer_page_end(src_v);                                       \
    free_aligned_buffer_page_end(src_a);                                       \
    free_aligned_buffer_page_end(dst_argb_b);                                  \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_bc);                                 \
  }
1515
#if defined(ENABLE_FULL_TESTS)
// Full build adds a fifth variant, _Premult, which sets ATTEN=1 so the
// converter premultiplies RGB by alpha.
#define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                       FMT_C, BPP_C)                                          \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C, 0)          \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Unaligned, +, 2, FMT_C, BPP_C, 0)        \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Invert, -, 0, FMT_C, BPP_C, 0)           \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0)              \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Premult, +, 0, FMT_C, BPP_C, 1)
#else
// Reduced build: only the optimized, non-premultiplied flavor.
#define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                       FMT_C, BPP_C)                                          \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B,      \
                  benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0)
#endif
1535
// Instantiations for every supported Alpha-planar source / colorspace.
#if defined(ENABLE_FULL_TESTS)
TESTQPLANARTOE(I420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(J420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(J420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(H420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(H420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(F420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(F420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(U420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(U420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(V420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(V420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(I422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(I422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(J422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(J422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(F422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(F422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(H422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(H422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(U422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(U422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(V422Alpha, 2, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(V422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(I444Alpha, 1, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(I444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(J444Alpha, 1, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(J444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(H444Alpha, 1, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(H444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(U444Alpha, 1, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(U444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(V444Alpha, 1, 1, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(V444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4)
#else
// Reduced build: one representative subsampling per family.
TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(I422Alpha, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTQPLANARTOE(I444Alpha, 1, 1, ABGR, 1, 4, ARGB, 4)
#endif
1576
// Transitive test for packed (non-planar) formats: A->C in one step must
// match A->B then B->C. The comparison walks 4-byte pixels; bytes 0-2
// (color channels) must match exactly, while byte 3 is compared with
// EXPECT_NEAR tolerance 64 — presumably to allow for alpha precision loss
// through 2-bit-alpha intermediates such as AR30/AB30 (see instantiations
// below); TODO confirm the exact rationale for the 64 tolerance.
#define TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, W1280, N, NEG, \
                      OFF, FMT_C, BPP_C)                                       \
  TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##To##FMT_C##N) {                  \
    const int kWidth = W1280;                                                  \
    const int kHeight = benchmark_height_;                                     \
    const int kStrideA = SUBSAMPLE(kWidth, SUB_A) * BPP_A;                     \
    const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B;                     \
    align_buffer_page_end(src_argb_a, kStrideA* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_b, kStrideB* kHeight + OFF);                \
    MemRandomize(src_argb_a + OFF, kStrideA * kHeight);                        \
    memset(dst_argb_b + OFF, 1, kStrideB * kHeight);                           \
    FMT_A##To##FMT_B(src_argb_a + OFF, kStrideA, dst_argb_b + OFF, kStrideB,   \
                     kWidth, NEG kHeight);                                     \
    /* Convert to a 3rd format in 1 step and 2 steps and compare */            \
    const int kStrideC = kWidth * BPP_C;                                       \
    align_buffer_page_end(dst_argb_c, kStrideC* kHeight + OFF);                \
    align_buffer_page_end(dst_argb_bc, kStrideC* kHeight + OFF);               \
    memset(dst_argb_c + OFF, 2, kStrideC * kHeight);                           \
    memset(dst_argb_bc + OFF, 3, kStrideC * kHeight);                          \
    for (int i = 0; i < benchmark_iterations_; ++i) {                          \
      FMT_A##To##FMT_C(src_argb_a + OFF, kStrideA, dst_argb_c + OFF, kStrideC, \
                       kWidth, NEG kHeight);                                   \
      /* Convert B to C */                                                     \
      FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, dst_argb_bc + OFF,          \
                       kStrideC, kWidth, kHeight);                             \
    }                                                                          \
    for (int i = 0; i < kStrideC * kHeight; i += 4) {                          \
      EXPECT_EQ(dst_argb_c[i + OFF + 0], dst_argb_bc[i + OFF + 0]);            \
      EXPECT_EQ(dst_argb_c[i + OFF + 1], dst_argb_bc[i + OFF + 1]);            \
      EXPECT_EQ(dst_argb_c[i + OFF + 2], dst_argb_bc[i + OFF + 2]);            \
      EXPECT_NEAR(dst_argb_c[i + OFF + 3], dst_argb_bc[i + OFF + 3], 64);      \
    }                                                                          \
    free_aligned_buffer_page_end(src_argb_a);                                  \
    free_aligned_buffer_page_end(dst_argb_b);                                  \
    free_aligned_buffer_page_end(dst_argb_c);                                  \
    free_aligned_buffer_page_end(dst_argb_bc);                                 \
  }
1614
#if defined(ENABLE_FULL_TESTS)
// Standard four variants. Note _Unaligned offsets by 4 bytes here (one full
// 32-bit pixel) rather than the 2 used by the planar macros.
#define TESTPLANETOE(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, FMT_C, BPP_C) \
  TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B,                    \
                benchmark_width_ + 1, _Any, +, 0, FMT_C, BPP_C)              \
  TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, benchmark_width_,  \
                _Unaligned, +, 4, FMT_C, BPP_C)                              \
  TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, benchmark_width_,  \
                _Invert, -, 0, FMT_C, BPP_C)                                 \
  TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, benchmark_width_,  \
                _Opt, +, 0, FMT_C, BPP_C)
#else
// Reduced build: only the optimized flavor.
#define TESTPLANETOE(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, FMT_C, BPP_C) \
  TESTPLANETOEI(FMT_A, SUB_A, BPP_A, FMT_B, SUB_B, BPP_B, benchmark_width_,  \
                _Opt, +, 0, FMT_C, BPP_C)
#endif
1630
// Caveat: Destination needs to be 4 bytes
// AR30/AB30 are 2:10:10:10 packed formats, hence endian sensitive.
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTPLANETOE(ARGB, 1, 4, AR30, 1, 4, ARGB, 4)
TESTPLANETOE(ABGR, 1, 4, AR30, 1, 4, ABGR, 4)
TESTPLANETOE(AR30, 1, 4, ARGB, 1, 4, ABGR, 4)
TESTPLANETOE(AR30, 1, 4, ABGR, 1, 4, ARGB, 4)
TESTPLANETOE(ARGB, 1, 4, AB30, 1, 4, ARGB, 4)
TESTPLANETOE(ABGR, 1, 4, AB30, 1, 4, ABGR, 4)
TESTPLANETOE(AB30, 1, 4, ARGB, 1, 4, ABGR, 4)
TESTPLANETOE(AB30, 1, 4, ABGR, 1, 4, ARGB, 4)
#endif
1642
TEST_F(LibYUVConvertTest, RotateWithARGBSource) {
  // Two 2x2 ARGB frames (four 32-bit pixels each). The source pixels are
  // arbitrary but distinct so the rotation result can be verified; the
  // destination starts zeroed.
  uint32_t src[4] = {0x11000000, 0x00450000, 0x00009f00, 0x000000ff};
  uint32_t dst[4] = {0x00000000, 0x00000000, 0x00000000, 0x00000000};

  const int r = ConvertToARGB(reinterpret_cast<uint8_t*>(src),
                              16,  // input size
                              reinterpret_cast<uint8_t*>(dst),
                              8,  // destination stride
                              0,  // crop_x
                              0,  // crop_y
                              2,  // width
                              2,  // height
                              2,  // crop width
                              2,  // crop height
                              kRotate90, FOURCC_ARGB);

  EXPECT_EQ(r, 0);
  // Source and destination fourcc are both ARGB, so this is a pure
  // 90 degree rotation with no color conversion: the left column of the
  // source becomes the top row of the destination.
  EXPECT_EQ(dst[0], src[2]);
  EXPECT_EQ(dst[1], src[0]);
  EXPECT_EQ(dst[2], src[3]);
  EXPECT_EQ(dst[3], src[1]);
}
1677
1678 #ifdef HAS_ARGBTOAR30ROW_AVX2
TEST_F(LibYUVConvertTest, ARGBToAR30Row_Opt) {
  // ARGBToAR30Row_AVX2 expects a multiple of 8 pixels.
  const int kPixels = (benchmark_width_ * benchmark_height_ + 7) & ~7;
  align_buffer_page_end(src, kPixels * 4);
  align_buffer_page_end(dst_opt, kPixels * 4);
  align_buffer_page_end(dst_c, kPixels * 4);
  MemRandomize(src, kPixels * 4);
  memset(dst_opt, 0, kPixels * 4);
  memset(dst_c, 1, kPixels * 4);

  // Reference result from the unoptimized C row function.
  ARGBToAR30Row_C(src, dst_c, kPixels);

  // Select the best available row implementation once, then benchmark it.
  void (*row_fn)(const uint8_t*, uint8_t*, int) = ARGBToAR30Row_C;
  if (TestCpuFlag(kCpuHasAVX2)) {
    row_fn = ARGBToAR30Row_AVX2;
  } else if (TestCpuFlag(kCpuHasSSSE3)) {
    row_fn = ARGBToAR30Row_SSSE3;
  }
  for (int n = 0; n < benchmark_iterations_; ++n) {
    row_fn(src, dst_opt, kPixels);
  }

  // Optimized output must match the C reference byte for byte.
  for (int i = 0; i < kPixels * 4; ++i) {
    EXPECT_EQ(dst_opt[i], dst_c[i]);
  }

  free_aligned_buffer_page_end(src);
  free_aligned_buffer_page_end(dst_opt);
  free_aligned_buffer_page_end(dst_c);
}
1710 #endif // HAS_ARGBTOAR30ROW_AVX2
1711
1712 #ifdef HAS_ABGRTOAR30ROW_AVX2
TEST_F(LibYUVConvertTest, ABGRToAR30Row_Opt) {
  // ABGRToAR30Row_AVX2 expects a multiple of 8 pixels.
  const int kPixels = (benchmark_width_ * benchmark_height_ + 7) & ~7;
  align_buffer_page_end(src, kPixels * 4);
  align_buffer_page_end(dst_opt, kPixels * 4);
  align_buffer_page_end(dst_c, kPixels * 4);
  MemRandomize(src, kPixels * 4);
  memset(dst_opt, 0, kPixels * 4);
  memset(dst_c, 1, kPixels * 4);

  // Reference result from the unoptimized C row function.
  ABGRToAR30Row_C(src, dst_c, kPixels);

  // Select the best available row implementation once, then benchmark it.
  void (*row_fn)(const uint8_t*, uint8_t*, int) = ABGRToAR30Row_C;
  if (TestCpuFlag(kCpuHasAVX2)) {
    row_fn = ABGRToAR30Row_AVX2;
  } else if (TestCpuFlag(kCpuHasSSSE3)) {
    row_fn = ABGRToAR30Row_SSSE3;
  }
  for (int n = 0; n < benchmark_iterations_; ++n) {
    row_fn(src, dst_opt, kPixels);
  }

  // Optimized output must match the C reference byte for byte.
  for (int i = 0; i < kPixels * 4; ++i) {
    EXPECT_EQ(dst_opt[i], dst_c[i]);
  }

  free_aligned_buffer_page_end(src);
  free_aligned_buffer_page_end(dst_opt);
  free_aligned_buffer_page_end(dst_c);
}
1744 #endif // HAS_ABGRTOAR30ROW_AVX2
1745
#if !defined(LEAN_TESTS)

// Provide matrix wrappers for 12 bit YUV
// Each wrapper binds a specific colorspace matrix (I601/H709/2020) so the
// test macros can call them with the plain 10-argument converter signature.
#define I012ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I012ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j)
#define I012ToAR30(a, b, c, d, e, f, g, h, i, j) \
  I012ToAR30Matrix(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j)
#define I012ToAB30(a, b, c, d, e, f, g, h, i, j) \
  I012ToAB30Matrix(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j)

// 10 bit 4:4:4 wrappers: I = BT.601, H = BT.709, U = BT.2020.
#define I410ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I410ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j)
#define I410ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I410ToABGRMatrix(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j)
#define H410ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I410ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuvH709Constants, i, j)
#define H410ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I410ToABGRMatrix(a, b, c, d, e, f, g, h, &kYuvH709Constants, i, j)
#define U410ToARGB(a, b, c, d, e, f, g, h, i, j) \
  I410ToARGBMatrix(a, b, c, d, e, f, g, h, &kYuv2020Constants, i, j)
#define U410ToABGR(a, b, c, d, e, f, g, h, i, j) \
  I410ToABGRMatrix(a, b, c, d, e, f, g, h, &kYuv2020Constants, i, j)
#define I410ToAR30(a, b, c, d, e, f, g, h, i, j) \
  I410ToAR30Matrix(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j)
#define I410ToAB30(a, b, c, d, e, f, g, h, i, j) \
  I410ToAB30Matrix(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j)
#define H410ToAR30(a, b, c, d, e, f, g, h, i, j) \
  I410ToAR30Matrix(a, b, c, d, e, f, g, h, &kYuvH709Constants, i, j)
#define H410ToAB30(a, b, c, d, e, f, g, h, i, j) \
  I410ToAB30Matrix(a, b, c, d, e, f, g, h, &kYuvH709Constants, i, j)
#define U410ToAR30(a, b, c, d, e, f, g, h, i, j) \
  I410ToAR30Matrix(a, b, c, d, e, f, g, h, &kYuv2020Constants, i, j)
#define U410ToAB30(a, b, c, d, e, f, g, h, i, j) \
  I410ToAB30Matrix(a, b, c, d, e, f, g, h, &kYuv2020Constants, i, j)

// Filter variants: same conversions but with bilinear chroma upsampling.
#define I010ToARGBFilter(a, b, c, d, e, f, g, h, i, j)                     \
  I010ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                         kFilterBilinear)
#define I010ToAR30Filter(a, b, c, d, e, f, g, h, i, j)                     \
  I010ToAR30MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                         kFilterBilinear)
#define I210ToARGBFilter(a, b, c, d, e, f, g, h, i, j)                     \
  I210ToARGBMatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                         kFilterBilinear)
#define I210ToAR30Filter(a, b, c, d, e, f, g, h, i, j)                     \
  I210ToAR30MatrixFilter(a, b, c, d, e, f, g, h, &kYuvI601Constants, i, j, \
                         kFilterBilinear)
1793
// TODO(fbarchard): Fix clamping issue affected by U channel.
// 16-bit planar source test: compares the optimized path (benchmark CPU
// flags) against the C path (flags disabled). Source samples are 2 bytes
// each (kBpc) and masked with FMT_MASK to the format's bit depth (0x3ff for
// 10 bit, 0xfff for 12 bit). SOFF/DOFF independently offset the source and
// destination pointers for the unaligned variant.
#define TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, \
                         BPP_B, ALIGN, YALIGN, W1280, N, NEG, SOFF, DOFF)   \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                     \
    const int kWidth = W1280;                                               \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                   \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                     \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);          \
    const int kBpc = 2;                                                     \
    align_buffer_page_end(src_y, kWidth* kHeight* kBpc + SOFF);             \
    align_buffer_page_end(src_u, kSizeUV* kBpc + SOFF);                     \
    align_buffer_page_end(src_v, kSizeUV* kBpc + SOFF);                     \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + DOFF);            \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + DOFF);          \
    for (int i = 0; i < kWidth * kHeight; ++i) {                            \
      reinterpret_cast<uint16_t*>(src_y + SOFF)[i] = (fastrand() & FMT_MASK); \
    }                                                                       \
    for (int i = 0; i < kSizeUV; ++i) {                                     \
      reinterpret_cast<uint16_t*>(src_u + SOFF)[i] = (fastrand() & FMT_MASK); \
      reinterpret_cast<uint16_t*>(src_v + SOFF)[i] = (fastrand() & FMT_MASK); \
    }                                                                       \
    memset(dst_argb_c + DOFF, 1, kStrideB * kHeight);                       \
    memset(dst_argb_opt + DOFF, 101, kStrideB * kHeight);                   \
    MaskCpuFlags(disable_cpu_flags_);                                       \
    FMT_PLANAR##To##FMT_B(                                                  \
        reinterpret_cast<uint16_t*>(src_y + SOFF), kWidth,                  \
        reinterpret_cast<uint16_t*>(src_u + SOFF), kStrideUV,               \
        reinterpret_cast<uint16_t*>(src_v + SOFF), kStrideUV,               \
        dst_argb_c + DOFF, kStrideB, kWidth, NEG kHeight);                  \
    MaskCpuFlags(benchmark_cpu_info_);                                      \
    for (int i = 0; i < benchmark_iterations_; ++i) {                       \
      FMT_PLANAR##To##FMT_B(                                                \
          reinterpret_cast<uint16_t*>(src_y + SOFF), kWidth,                \
          reinterpret_cast<uint16_t*>(src_u + SOFF), kStrideUV,             \
          reinterpret_cast<uint16_t*>(src_v + SOFF), kStrideUV,             \
          dst_argb_opt + DOFF, kStrideB, kWidth, NEG kHeight);              \
    }                                                                       \
    for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) {                    \
      EXPECT_EQ(dst_argb_c[i + DOFF], dst_argb_opt[i + DOFF]);              \
    }                                                                       \
    free_aligned_buffer_page_end(src_y);                                    \
    free_aligned_buffer_page_end(src_u);                                    \
    free_aligned_buffer_page_end(src_v);                                    \
    free_aligned_buffer_page_end(dst_argb_c);                               \
    free_aligned_buffer_page_end(dst_argb_opt);                             \
  }
1841
#if defined(ENABLE_FULL_TESTS)
// Standard four variants; _Unaligned offsets both source and destination by
// 4 bytes (SOFF and DOFF).
#define TESTPLANAR16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B,  \
                        BPP_B, ALIGN, YALIGN)                               \
  TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \
                   ALIGN, YALIGN, benchmark_width_ + 1, _Any, +, 0, 0)      \
  TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \
                   ALIGN, YALIGN, benchmark_width_, _Unaligned, +, 4, 4)    \
  TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \
                   ALIGN, YALIGN, benchmark_width_, _Invert, -, 0, 0)       \
  TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \
                   ALIGN, YALIGN, benchmark_width_, _Opt, +, 0, 0)
#else
// Reduced build: only the optimized flavor.
#define TESTPLANAR16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B,  \
                        BPP_B, ALIGN, YALIGN)                               \
  TESTPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_MASK, FMT_B, BPP_B, \
                   ALIGN, YALIGN, benchmark_width_, _Opt, +, 0, 0)
#endif
1859
// These conversions are only optimized for x86
#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__)
// 10-bit sources use mask 0x3ff; 12-bit (I012) uses 0xfff.
TESTPLANAR16TOB(I010, 2, 2, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(I010, 2, 2, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(H010, 2, 2, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(H010, 2, 2, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(U010, 2, 2, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(U010, 2, 2, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(I210, 2, 1, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(I210, 2, 1, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(H210, 2, 1, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(H210, 2, 1, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(U210, 2, 1, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(U210, 2, 1, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(I410, 1, 1, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(I410, 1, 1, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(H410, 1, 1, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(H410, 1, 1, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(U410, 1, 1, 0x3ff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(U410, 1, 1, 0x3ff, ABGR, 4, 4, 1)
TESTPLANAR16TOB(I012, 2, 2, 0xfff, ARGB, 4, 4, 1)
TESTPLANAR16TOB(I010, 2, 2, 0x3ff, ARGBFilter, 4, 4, 1)
TESTPLANAR16TOB(I210, 2, 1, 0x3ff, ARGBFilter, 4, 4, 1)

// AR30/AB30 destinations are 2:10:10:10 packed, hence endian sensitive.
#ifdef LITTLE_ENDIAN_ONLY_TEST
TESTPLANAR16TOB(I010, 2, 2, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(I010, 2, 2, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(H010, 2, 2, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(H010, 2, 2, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(U010, 2, 2, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(U010, 2, 2, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(I210, 2, 1, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(I210, 2, 1, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(H210, 2, 1, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(H210, 2, 1, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(U210, 2, 1, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(U210, 2, 1, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(I410, 1, 1, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(I410, 1, 1, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(H410, 1, 1, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(H410, 1, 1, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(U410, 1, 1, 0x3ff, AR30, 4, 4, 1)
TESTPLANAR16TOB(U410, 1, 1, 0x3ff, AB30, 4, 4, 1)
TESTPLANAR16TOB(I012, 2, 2, 0xfff, AR30, 4, 4, 1)
TESTPLANAR16TOB(I012, 2, 2, 0xfff, AB30, 4, 4, 1)
TESTPLANAR16TOB(I010, 2, 2, 0x3ff, AR30Filter, 4, 4, 1)
TESTPLANAR16TOB(I210, 2, 1, 0x3ff, AR30Filter, 4, 4, 1)
#endif  // LITTLE_ENDIAN_ONLY_TEST
#endif  // DISABLE_SLOW_TESTS
1909
// 16-bit 4-plane (YUV + Alpha) source test: optimized path vs C path.
// S_DEPTH is the source bit depth; random samples are masked to
// (1 << S_DEPTH) - 1. ATTEN requests premultiplied alpha output.
#define TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,   \
                          ALIGN, YALIGN, W1280, N, NEG, OFF, ATTEN, S_DEPTH) \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                     \
    const int kWidth = W1280;                                               \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                   \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);                     \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y);          \
    const int kBpc = 2;                                                     \
    align_buffer_page_end(src_y, kWidth* kHeight* kBpc + OFF);              \
    align_buffer_page_end(src_u, kSizeUV* kBpc + OFF);                      \
    align_buffer_page_end(src_v, kSizeUV* kBpc + OFF);                      \
    align_buffer_page_end(src_a, kWidth* kHeight* kBpc + OFF);              \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + OFF);             \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + OFF);           \
    for (int i = 0; i < kWidth * kHeight; ++i) {                            \
      reinterpret_cast<uint16_t*>(src_y + OFF)[i] =                         \
          (fastrand() & ((1 << S_DEPTH) - 1));                              \
      reinterpret_cast<uint16_t*>(src_a + OFF)[i] =                         \
          (fastrand() & ((1 << S_DEPTH) - 1));                              \
    }                                                                       \
    for (int i = 0; i < kSizeUV; ++i) {                                     \
      reinterpret_cast<uint16_t*>(src_u + OFF)[i] =                         \
          (fastrand() & ((1 << S_DEPTH) - 1));                              \
      reinterpret_cast<uint16_t*>(src_v + OFF)[i] =                         \
          (fastrand() & ((1 << S_DEPTH) - 1));                              \
    }                                                                       \
    memset(dst_argb_c + OFF, 1, kStrideB * kHeight);                        \
    memset(dst_argb_opt + OFF, 101, kStrideB * kHeight);                    \
    MaskCpuFlags(disable_cpu_flags_);                                       \
    FMT_PLANAR##To##FMT_B(reinterpret_cast<uint16_t*>(src_y + OFF), kWidth, \
                          reinterpret_cast<uint16_t*>(src_u + OFF), kStrideUV, \
                          reinterpret_cast<uint16_t*>(src_v + OFF), kStrideUV, \
                          reinterpret_cast<uint16_t*>(src_a + OFF), kWidth, \
                          dst_argb_c + OFF, kStrideB, kWidth, NEG kHeight,  \
                          ATTEN);                                           \
    MaskCpuFlags(benchmark_cpu_info_);                                      \
    for (int i = 0; i < benchmark_iterations_; ++i) {                       \
      FMT_PLANAR##To##FMT_B(                                                \
          reinterpret_cast<uint16_t*>(src_y + OFF), kWidth,                 \
          reinterpret_cast<uint16_t*>(src_u + OFF), kStrideUV,              \
          reinterpret_cast<uint16_t*>(src_v + OFF), kStrideUV,              \
          reinterpret_cast<uint16_t*>(src_a + OFF), kWidth,                 \
          dst_argb_opt + OFF, kStrideB, kWidth, NEG kHeight, ATTEN);        \
    }                                                                       \
    for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) {                    \
      EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_opt[i + OFF]);                \
    }                                                                       \
    free_aligned_buffer_page_end(src_y);                                    \
    free_aligned_buffer_page_end(src_u);                                    \
    free_aligned_buffer_page_end(src_v);                                    \
    free_aligned_buffer_page_end(src_a);                                    \
    free_aligned_buffer_page_end(dst_argb_c);                               \
    free_aligned_buffer_page_end(dst_argb_opt);                             \
  }
1965
#if defined(ENABLE_FULL_TESTS)
// Standard four variants plus _Premult (ATTEN=1, premultiplied alpha).
#define TESTQPLANAR16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,    \
                         ALIGN, YALIGN, S_DEPTH)                            \
  TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                    YALIGN, benchmark_width_ + 1, _Any, +, 0, 0, S_DEPTH)   \
  TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                    YALIGN, benchmark_width_, _Unaligned, +, 2, 0, S_DEPTH) \
  TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                    YALIGN, benchmark_width_, _Invert, -, 0, 0, S_DEPTH)    \
  TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                    YALIGN, benchmark_width_, _Opt, +, 0, 0, S_DEPTH)       \
  TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                    YALIGN, benchmark_width_, _Premult, +, 0, 1, S_DEPTH)
#else
// Reduced build: only the optimized, non-premultiplied flavor.
#define TESTQPLANAR16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B,    \
                         ALIGN, YALIGN, S_DEPTH)                            \
  TESTQPLANAR16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                    YALIGN, benchmark_width_, _Opt, +, 0, 0, S_DEPTH)
#endif
1985
// Matrix wrappers for 10 bit YUV + Alpha converters. Each binds a specific
// colorspace matrix: I = BT.601, J = JPEG, F/H = BT.709 (full/limited),
// U/V = BT.2020 variants. Arguments a..m match the I010AlphaTo*Matrix
// signature minus the matrix pointer.
#define I010AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvI601Constants, k, \
                        l, m)
#define I010AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvI601Constants, k, \
                        l, m)
#define J010AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
                        l, m)
#define J010AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
                        l, m)
#define F010AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
                        l, m)
#define F010AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
                        l, m)
#define H010AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
                        l, m)
#define H010AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
                        l, m)
#define U010AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k, \
                        l, m)
#define U010AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k, \
                        l, m)
#define V010AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \
                        l, m)
#define V010AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I010AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \
                        l, m)
#define I210AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I210AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvI601Constants, k, \
                        l, m)
#define I210AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m)             \
  I210AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvI601Constants, k, \
                        l, m)
2028 #define J210AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2029 I210AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
2030 l, m)
2031 #define J210AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2032 I210AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
2033 l, m)
2034 #define F210AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2035 I210AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
2036 l, m)
2037 #define F210AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2038 I210AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
2039 l, m)
2040 #define H210AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2041 I210AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
2042 l, m)
2043 #define H210AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2044 I210AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
2045 l, m)
2046 #define U210AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2047 I210AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k, \
2048 l, m)
2049 #define U210AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2050 I210AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k, \
2051 l, m)
2052 #define V210AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2053 I210AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \
2054 l, m)
2055 #define V210AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2056 I210AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \
2057 l, m)
2058 #define I410AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2059 I410AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvI601Constants, k, \
2060 l, m)
2061 #define I410AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2062 I410AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvI601Constants, k, \
2063 l, m)
2064 #define J410AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2065 I410AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
2066 l, m)
2067 #define J410AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2068 I410AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvJPEGConstants, k, \
2069 l, m)
2070 #define F410AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2071 I410AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
2072 l, m)
2073 #define F410AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2074 I410AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvF709Constants, k, \
2075 l, m)
2076 #define H410AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2077 I410AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
2078 l, m)
2079 #define H410AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2080 I410AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvH709Constants, k, \
2081 l, m)
2082 #define U410AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2083 I410AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k, \
2084 l, m)
2085 #define U410AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2086 I410AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuv2020Constants, k, \
2087 l, m)
2088 #define V410AlphaToARGB(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2089 I410AlphaToARGBMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \
2090 l, m)
2091 #define V410AlphaToABGR(a, b, c, d, e, f, g, h, i, j, k, l, m) \
2092 I410AlphaToABGRMatrix(a, b, c, d, e, f, g, h, i, j, &kYuvV2020Constants, k, \
2093 l, m)
// Filtered (bilinear chroma upsampling) variants, bound to the BT.601 matrix.
#define I010AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m) \
  I010AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j,          \
                              &kYuvI601Constants, k, l, m, kFilterBilinear)
// Fix: route the I210 alias to the 4:2:2 entry point
// I210AlphaToARGBMatrixFilter.  It previously expanded to the 4:2:0
// I010AlphaToARGBMatrixFilter, so the TESTQPLANAR16TOB(I210Alpha, 2, 1,
// ARGBFilter, ...) instantiation below never exercised the I210 filter path.
#define I210AlphaToARGBFilter(a, b, c, d, e, f, g, h, i, j, k, l, m) \
  I210AlphaToARGBMatrixFilter(a, b, c, d, e, f, g, h, i, j,          \
                              &kYuvI601Constants, k, l, m, kFilterBilinear)
2100
// These conversions are only optimized for x86
#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__)
// Instantiate the alpha-plane test matrix: (2, 2) = 4:2:0, (2, 1) = 4:2:2,
// (1, 1) = 4:4:4 subsampling, each converted to ARGB and ABGR at 10 bit
// source depth.
TESTQPLANAR16TOB(I010Alpha, 2, 2, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(I010Alpha, 2, 2, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(J010Alpha, 2, 2, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(J010Alpha, 2, 2, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(H010Alpha, 2, 2, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(H010Alpha, 2, 2, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(F010Alpha, 2, 2, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(F010Alpha, 2, 2, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(U010Alpha, 2, 2, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(U010Alpha, 2, 2, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(V010Alpha, 2, 2, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(V010Alpha, 2, 2, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(I210Alpha, 2, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(I210Alpha, 2, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(J210Alpha, 2, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(J210Alpha, 2, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(H210Alpha, 2, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(H210Alpha, 2, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(F210Alpha, 2, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(F210Alpha, 2, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(U210Alpha, 2, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(U210Alpha, 2, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(V210Alpha, 2, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(V210Alpha, 2, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(I410Alpha, 1, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(I410Alpha, 1, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(J410Alpha, 1, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(J410Alpha, 1, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(H410Alpha, 1, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(H410Alpha, 1, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(F410Alpha, 1, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(F410Alpha, 1, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(U410Alpha, 1, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(U410Alpha, 1, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(V410Alpha, 1, 1, ARGB, 4, 4, 1, 10)
TESTQPLANAR16TOB(V410Alpha, 1, 1, ABGR, 4, 4, 1, 10)
TESTQPLANAR16TOB(I010Alpha, 2, 2, ARGBFilter, 4, 4, 1, 10)
TESTQPLANAR16TOB(I210Alpha, 2, 1, ARGBFilter, 4, 4, 1, 10)
#endif  // DISABLE_SLOW_TESTS
2142
// Generates a test comparing the C reference path against the optimized path
// of a biplanar (Y plane + interleaved UV plane) 16 bit to RGB converter.
// W1280 selects the test width, N names the variant, NEG applies a sign to
// the height, SOFF/DOFF offset the source/destination buffers to test
// unaligned access, and S_DEPTH is the source bit depth (random samples are
// masked so only the top S_DEPTH bits of each 16 bit value are set).
#define TESTBP16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,  \
                     YALIGN, W1280, N, NEG, SOFF, DOFF, S_DEPTH)             \
  TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) {                      \
    const int kWidth = W1280;                                                \
    const int kHeight = ALIGNINT(benchmark_height_, YALIGN);                 \
    const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN);                    \
    const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X) * 2;                  \
    const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y) * 2;       \
    const int kBpc = 2;                                                      \
    align_buffer_page_end(src_y, kWidth* kHeight* kBpc + SOFF);              \
    align_buffer_page_end(src_uv, kSizeUV* kBpc + SOFF);                     \
    align_buffer_page_end(dst_argb_c, kStrideB* kHeight + DOFF);             \
    align_buffer_page_end(dst_argb_opt, kStrideB* kHeight + DOFF);           \
    for (int i = 0; i < kWidth * kHeight; ++i) {                             \
      reinterpret_cast<uint16_t*>(src_y + SOFF)[i] =                         \
          (fastrand() & (((uint16_t)(-1)) << (16 - S_DEPTH)));               \
    }                                                                        \
    for (int i = 0; i < kSizeUV; ++i) {                                      \
      reinterpret_cast<uint16_t*>(src_uv + SOFF)[i] =                        \
          (fastrand() & (((uint16_t)(-1)) << (16 - S_DEPTH)));               \
    }                                                                        \
    memset(dst_argb_c + DOFF, 1, kStrideB * kHeight);                        \
    memset(dst_argb_opt + DOFF, 101, kStrideB * kHeight);                    \
    MaskCpuFlags(disable_cpu_flags_);                                        \
    FMT_PLANAR##To##FMT_B(reinterpret_cast<uint16_t*>(src_y + SOFF), kWidth, \
                          reinterpret_cast<uint16_t*>(src_uv + SOFF),        \
                          kStrideUV, dst_argb_c + DOFF, kStrideB, kWidth,    \
                          NEG kHeight);                                      \
    MaskCpuFlags(benchmark_cpu_info_);                                       \
    for (int i = 0; i < benchmark_iterations_; ++i) {                        \
      FMT_PLANAR##To##FMT_B(reinterpret_cast<uint16_t*>(src_y + SOFF),       \
                            kWidth,                                          \
                            reinterpret_cast<uint16_t*>(src_uv + SOFF),      \
                            kStrideUV, dst_argb_opt + DOFF, kStrideB,        \
                            kWidth, NEG kHeight);                            \
    }                                                                        \
    for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) {                     \
      EXPECT_EQ(dst_argb_c[i + DOFF], dst_argb_opt[i + DOFF]);               \
    }                                                                        \
    free_aligned_buffer_page_end(src_y);                                     \
    free_aligned_buffer_page_end(src_uv);                                    \
    free_aligned_buffer_page_end(dst_argb_c);                                \
    free_aligned_buffer_page_end(dst_argb_opt);                              \
  }
2186
#if defined(ENABLE_FULL_TESTS)
// Full build: _Any (odd width), _Unaligned (4 byte offsets), _Invert
// (negative height) and _Opt (aligned optimized path) variants.
#define TESTBP16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,   \
                    YALIGN, S_DEPTH)                                         \
  TESTBP16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
               YALIGN, benchmark_width_ + 1, _Any, +, 0, 0, S_DEPTH)         \
  TESTBP16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
               YALIGN, benchmark_width_, _Unaligned, +, 4, 4, S_DEPTH)       \
  TESTBP16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
               YALIGN, benchmark_width_, _Invert, -, 0, 0, S_DEPTH)          \
  TESTBP16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
               YALIGN, benchmark_width_, _Opt, +, 0, 0, S_DEPTH)
#else
// Reduced build: only the optimized aligned variant is generated.
#define TESTBP16TOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,   \
                    YALIGN, S_DEPTH)                                         \
  TESTBP16TOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN,        \
               YALIGN, benchmark_width_, _Opt, +, 0, 0, S_DEPTH)
#endif
2204
// Aliases binding the biplanar P010/P012/P016 (4:2:0) and P210/P212/P216
// (4:2:2) formats to the generic matrix converters with the H709 constants.
// The Filter variants additionally request bilinear chroma upsampling.
#define P010ToARGB(a, b, c, d, e, f, g, h) \
  P010ToARGBMatrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P210ToARGB(a, b, c, d, e, f, g, h) \
  P210ToARGBMatrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P010ToAR30(a, b, c, d, e, f, g, h) \
  P010ToAR30Matrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P210ToAR30(a, b, c, d, e, f, g, h) \
  P210ToAR30Matrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)

#define P012ToARGB(a, b, c, d, e, f, g, h) \
  P012ToARGBMatrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P212ToARGB(a, b, c, d, e, f, g, h) \
  P212ToARGBMatrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P012ToAR30(a, b, c, d, e, f, g, h) \
  P012ToAR30Matrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P212ToAR30(a, b, c, d, e, f, g, h) \
  P212ToAR30Matrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)

#define P016ToARGB(a, b, c, d, e, f, g, h) \
  P016ToARGBMatrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P216ToARGB(a, b, c, d, e, f, g, h) \
  P216ToARGBMatrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P016ToAR30(a, b, c, d, e, f, g, h) \
  P016ToAR30Matrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)
#define P216ToAR30(a, b, c, d, e, f, g, h) \
  P216ToAR30Matrix(a, b, c, d, e, f, &kYuvH709Constants, g, h)

#define P010ToARGBFilter(a, b, c, d, e, f, g, h)                     \
  P010ToARGBMatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \
                         kFilterBilinear)
#define P210ToARGBFilter(a, b, c, d, e, f, g, h)                     \
  P210ToARGBMatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \
                         kFilterBilinear)
#define P010ToAR30Filter(a, b, c, d, e, f, g, h)                     \
  P010ToAR30MatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \
                         kFilterBilinear)
#define P210ToAR30Filter(a, b, c, d, e, f, g, h)                     \
  P210ToAR30MatrixFilter(a, b, c, d, e, f, &kYuvH709Constants, g, h, \
                         kFilterBilinear)
2244
#if !defined(DISABLE_SLOW_TESTS) || defined(__x86_64__) || defined(__i386__)
// Biplanar 16 bit to ARGB at 10/12/16 bit source depths, 4:2:0 and 4:2:2.
TESTBP16TOB(P010, 2, 2, ARGB, 4, 4, 1, 10)
TESTBP16TOB(P210, 2, 1, ARGB, 4, 4, 1, 10)
TESTBP16TOB(P012, 2, 2, ARGB, 4, 4, 1, 12)
TESTBP16TOB(P212, 2, 1, ARGB, 4, 4, 1, 12)
TESTBP16TOB(P016, 2, 2, ARGB, 4, 4, 1, 16)
TESTBP16TOB(P216, 2, 1, ARGB, 4, 4, 1, 16)
TESTBP16TOB(P010, 2, 2, ARGBFilter, 4, 4, 1, 10)
TESTBP16TOB(P210, 2, 1, ARGBFilter, 4, 4, 1, 10)
#ifdef LITTLE_ENDIAN_ONLY_TEST
// AR30 output packs 10 bit channels into a 32 bit word, so these tests
// depend on little-endian byte order.
TESTBP16TOB(P010, 2, 2, AR30, 4, 4, 1, 10)
TESTBP16TOB(P210, 2, 1, AR30, 4, 4, 1, 10)
TESTBP16TOB(P012, 2, 2, AR30, 4, 4, 1, 12)
TESTBP16TOB(P212, 2, 1, AR30, 4, 4, 1, 12)
TESTBP16TOB(P016, 2, 2, AR30, 4, 4, 1, 16)
TESTBP16TOB(P216, 2, 1, AR30, 4, 4, 1, 16)
TESTBP16TOB(P010, 2, 2, AR30Filter, 4, 4, 1, 10)
TESTBP16TOB(P210, 2, 1, AR30Filter, 4, 4, 1, 10)
#endif  // LITTLE_ENDIAN_ONLY_TEST
#endif  // DISABLE_SLOW_TESTS
2265
// Saturate a value to the 8 bit range [0, 255].
static int Clamp(int y) {
  if (y < 0) {
    return 0;
  }
  if (y > 255) {
    return 255;
  }
  return y;
}
2275
// Saturate a value to the 10 bit range [0, 1023].
static int Clamp10(int y) {
  if (y < 0) {
    return 0;
  }
  if (y > 1023) {
    return 1023;
  }
  return y;
}
2285
2286 // Test 8 bit YUV to 8 bit RGB
// Test 8 bit YUV to 8 bit RGB
TEST_F(LibYUVConvertTest, TestH420ToARGB) {
  const int kSize = 256;
  // One bucket per possible 8 bit channel value, used to count how many
  // distinct output levels the conversion produces.
  int histogram_b[256];
  int histogram_g[256];
  int histogram_r[256];
  memset(histogram_b, 0, sizeof(histogram_b));
  memset(histogram_g, 0, sizeof(histogram_g));
  memset(histogram_r, 0, sizeof(histogram_r));
  align_buffer_page_end(orig_yuv, kSize + kSize / 2 * 2);
  align_buffer_page_end(argb_pixels, kSize * 4);
  uint8_t* orig_y = orig_yuv;
  uint8_t* orig_u = orig_y + kSize;
  uint8_t* orig_v = orig_u + kSize / 2;

  // Test grey scale: a Y ramp with neutral chroma.
  for (int x = 0; x < kSize; ++x) {
    orig_y[x] = x;
  }
  for (int x = 0; x < kSize / 2; ++x) {
    orig_u[x] = 128;  // 128 is 0.
    orig_v[x] = 128;
  }

  H420ToARGB(orig_y, 0, orig_u, 0, orig_v, 0, argb_pixels, 0, kSize, 1);

  for (int x = 0; x < kSize; ++x) {
    const uint8_t* px = argb_pixels + x * 4;
    int b = px[0];
    int g = px[1];
    int r = px[2];
    int a = px[3];
    ++histogram_b[b];
    ++histogram_g[g];
    ++histogram_r[r];
    // Reference formula for Y channel contribution in YUV to RGB conversions:
    int expected_y = Clamp(static_cast<int>((x - 16) * 1.164f + 0.5f));
    EXPECT_EQ(b, expected_y);
    EXPECT_EQ(g, expected_y);
    EXPECT_EQ(r, expected_y);
    EXPECT_EQ(a, 255);
  }

  // Report how many unique output levels each channel hit.
  int count_b = 0;
  int count_g = 0;
  int count_r = 0;
  for (int x = 0; x < kSize; ++x) {
    count_b += histogram_b[x] ? 1 : 0;
    count_g += histogram_g[x] ? 1 : 0;
    count_r += histogram_r[x] ? 1 : 0;
  }
  printf("uniques: B %d, G, %d, R %d\n", count_b, count_g, count_r);

  free_aligned_buffer_page_end(orig_yuv);
  free_aligned_buffer_page_end(argb_pixels);
}
2347
2348 // Test 10 bit YUV to 8 bit RGB
// Test 10 bit YUV to 8 bit RGB
TEST_F(LibYUVConvertTest, TestH010ToARGB) {
  const int kSize = 1024;
  // One bucket per possible 10 bit level; only 0..255 can actually appear in
  // the 8 bit output.
  int histogram_b[1024];
  int histogram_g[1024];
  int histogram_r[1024];
  memset(histogram_b, 0, sizeof(histogram_b));
  memset(histogram_g, 0, sizeof(histogram_g));
  memset(histogram_r, 0, sizeof(histogram_r));
  // 2 bytes per Y sample, plus half-size 2-byte U and V planes.
  align_buffer_page_end(orig_yuv, kSize * 2 + kSize / 2 * 2 * 2);
  align_buffer_page_end(argb_pixels, kSize * 4);
  uint16_t* orig_y = reinterpret_cast<uint16_t*>(orig_yuv);
  uint16_t* orig_u = orig_y + kSize;
  uint16_t* orig_v = orig_u + kSize / 2;

  // Test grey scale
  for (int i = 0; i < kSize; ++i) {
    orig_y[i] = i;
  }
  for (int i = 0; i < kSize / 2; ++i) {
    orig_u[i] = 512;  // 512 is 0.
    orig_v[i] = 512;
  }

  H010ToARGB(orig_y, 0, orig_u, 0, orig_v, 0, argb_pixels, 0, kSize, 1);

  for (int i = 0; i < kSize; ++i) {
    int b = argb_pixels[i * 4 + 0];
    int g = argb_pixels[i * 4 + 1];
    int r = argb_pixels[i * 4 + 2];
    int a = argb_pixels[i * 4 + 3];
    ++histogram_b[b];
    ++histogram_g[g];
    ++histogram_r[r];
    // (i - 64) * 1.164 expands limited-range 10 bit Y; / 4 scales the
    // 10 bit value down to 8 bits.  Within 1 due to float rounding.
    int expected_y = Clamp(static_cast<int>((i - 64) * 1.164f / 4));
    EXPECT_NEAR(b, expected_y, 1);
    EXPECT_NEAR(g, expected_y, 1);
    EXPECT_NEAR(r, expected_y, 1);
    EXPECT_EQ(a, 255);
  }

  // Report how many unique output levels each channel hit.
  int count_b = 0;
  int count_g = 0;
  int count_r = 0;
  for (int i = 0; i < kSize; ++i) {
    if (histogram_b[i]) {
      ++count_b;
    }
    if (histogram_g[i]) {
      ++count_g;
    }
    if (histogram_r[i]) {
      ++count_r;
    }
  }
  printf("uniques: B %d, G, %d, R %d\n", count_b, count_g, count_r);

  free_aligned_buffer_page_end(orig_yuv);
  free_aligned_buffer_page_end(argb_pixels);
}
2408
2409 // Test 10 bit YUV to 10 bit RGB
2410 // Caveat: Result is near due to float rounding in expected
2411 // result.
// Test 10 bit YUV to 10 bit RGB
// Caveat: Result is near due to float rounding in expected
// result.
TEST_F(LibYUVConvertTest, TestH010ToAR30) {
  const int kSize = 1024;
  int histogram_b[1024];
  int histogram_g[1024];
  int histogram_r[1024];
  memset(histogram_b, 0, sizeof(histogram_b));
  memset(histogram_g, 0, sizeof(histogram_g));
  memset(histogram_r, 0, sizeof(histogram_r));

  // 2 bytes per Y sample, plus half-size 2-byte U and V planes.
  align_buffer_page_end(orig_yuv, kSize * 2 + kSize / 2 * 2 * 2);
  align_buffer_page_end(ar30_pixels, kSize * 4);
  uint16_t* orig_y = reinterpret_cast<uint16_t*>(orig_yuv);
  uint16_t* orig_u = orig_y + kSize;
  uint16_t* orig_v = orig_u + kSize / 2;

  // Test grey scale
  for (int i = 0; i < kSize; ++i) {
    orig_y[i] = i;
  }
  for (int i = 0; i < kSize / 2; ++i) {
    orig_u[i] = 512;  // 512 is 0.
    orig_v[i] = 512;
  }

  H010ToAR30(orig_y, 0, orig_u, 0, orig_v, 0, ar30_pixels, 0, kSize, 1);

  for (int i = 0; i < kSize; ++i) {
    // AR30 word layout (little endian): B in bits 0..9, G in 10..19,
    // R in 20..29, 2 bit A in 30..31.
    int b10 = reinterpret_cast<uint32_t*>(ar30_pixels)[i] & 1023;
    int g10 = (reinterpret_cast<uint32_t*>(ar30_pixels)[i] >> 10) & 1023;
    int r10 = (reinterpret_cast<uint32_t*>(ar30_pixels)[i] >> 20) & 1023;
    int a2 = (reinterpret_cast<uint32_t*>(ar30_pixels)[i] >> 30) & 3;
    ++histogram_b[b10];
    ++histogram_g[g10];
    ++histogram_r[r10];
    // Limited-range expansion of the 10 bit Y ramp; kept at 10 bits.
    int expected_y = Clamp10(static_cast<int>((i - 64) * 1.164f + 0.5));
    EXPECT_NEAR(b10, expected_y, 4);
    EXPECT_NEAR(g10, expected_y, 4);
    EXPECT_NEAR(r10, expected_y, 4);
    EXPECT_EQ(a2, 3);
  }

  // Report how many unique output levels each channel hit.
  int count_b = 0;
  int count_g = 0;
  int count_r = 0;
  for (int i = 0; i < kSize; ++i) {
    if (histogram_b[i]) {
      ++count_b;
    }
    if (histogram_g[i]) {
      ++count_g;
    }
    if (histogram_r[i]) {
      ++count_r;
    }
  }
  printf("uniques: B %d, G, %d, R %d\n", count_b, count_g, count_r);

  free_aligned_buffer_page_end(orig_yuv);
  free_aligned_buffer_page_end(ar30_pixels);
}
2472
2473 // Test 10 bit YUV to 10 bit RGB
2474 // Caveat: Result is near due to float rounding in expected
2475 // result.
// Test 10 bit YUV to 10 bit RGB
// Caveat: Result is near due to float rounding in expected
// result.
TEST_F(LibYUVConvertTest, TestH010ToAB30) {
  const int kSize = 1024;
  int histogram_b[1024];
  int histogram_g[1024];
  int histogram_r[1024];
  memset(histogram_b, 0, sizeof(histogram_b));
  memset(histogram_g, 0, sizeof(histogram_g));
  memset(histogram_r, 0, sizeof(histogram_r));

  // 2 bytes per Y sample, plus half-size 2-byte U and V planes.
  align_buffer_page_end(orig_yuv, kSize * 2 + kSize / 2 * 2 * 2);
  align_buffer_page_end(ab30_pixels, kSize * 4);
  uint16_t* orig_y = reinterpret_cast<uint16_t*>(orig_yuv);
  uint16_t* orig_u = orig_y + kSize;
  uint16_t* orig_v = orig_u + kSize / 2;

  // Test grey scale
  for (int i = 0; i < kSize; ++i) {
    orig_y[i] = i;
  }
  for (int i = 0; i < kSize / 2; ++i) {
    orig_u[i] = 512;  // 512 is 0.
    orig_v[i] = 512;
  }

  H010ToAB30(orig_y, 0, orig_u, 0, orig_v, 0, ab30_pixels, 0, kSize, 1);

  for (int i = 0; i < kSize; ++i) {
    // AB30 swaps R and B relative to AR30: R in bits 0..9, G in 10..19,
    // B in 20..29, 2 bit A in 30..31.
    int r10 = reinterpret_cast<uint32_t*>(ab30_pixels)[i] & 1023;
    int g10 = (reinterpret_cast<uint32_t*>(ab30_pixels)[i] >> 10) & 1023;
    int b10 = (reinterpret_cast<uint32_t*>(ab30_pixels)[i] >> 20) & 1023;
    int a2 = (reinterpret_cast<uint32_t*>(ab30_pixels)[i] >> 30) & 3;
    ++histogram_b[b10];
    ++histogram_g[g10];
    ++histogram_r[r10];
    // Limited-range expansion of the 10 bit Y ramp; kept at 10 bits.
    int expected_y = Clamp10(static_cast<int>((i - 64) * 1.164f));
    EXPECT_NEAR(b10, expected_y, 4);
    EXPECT_NEAR(g10, expected_y, 4);
    EXPECT_NEAR(r10, expected_y, 4);
    EXPECT_EQ(a2, 3);
  }

  // Report how many unique output levels each channel hit.
  int count_b = 0;
  int count_g = 0;
  int count_r = 0;
  for (int i = 0; i < kSize; ++i) {
    if (histogram_b[i]) {
      ++count_b;
    }
    if (histogram_g[i]) {
      ++count_g;
    }
    if (histogram_r[i]) {
      ++count_r;
    }
  }
  printf("uniques: B %d, G, %d, R %d\n", count_b, count_g, count_r);

  free_aligned_buffer_page_end(orig_yuv);
  free_aligned_buffer_page_end(ab30_pixels);
}
2536
2537 // Test 8 bit YUV to 10 bit RGB
// Test 8 bit YUV to 10 bit RGB
TEST_F(LibYUVConvertTest, TestH420ToAR30) {
  const int kSize = 256;
  // Output levels are 10 bit, so histograms span 1024 buckets even though
  // the input ramp only has 256 entries.
  const int kHistSize = 1024;
  int histogram_b[kHistSize];
  int histogram_g[kHistSize];
  int histogram_r[kHistSize];
  memset(histogram_b, 0, sizeof(histogram_b));
  memset(histogram_g, 0, sizeof(histogram_g));
  memset(histogram_r, 0, sizeof(histogram_r));
  align_buffer_page_end(orig_yuv, kSize + kSize / 2 * 2);
  align_buffer_page_end(ar30_pixels, kSize * 4);
  uint8_t* orig_y = orig_yuv;
  uint8_t* orig_u = orig_y + kSize;
  uint8_t* orig_v = orig_u + kSize / 2;

  // Test grey scale
  for (int i = 0; i < kSize; ++i) {
    orig_y[i] = i;
  }
  for (int i = 0; i < kSize / 2; ++i) {
    orig_u[i] = 128;  // 128 is 0.
    orig_v[i] = 128;
  }

  H420ToAR30(orig_y, 0, orig_u, 0, orig_v, 0, ar30_pixels, 0, kSize, 1);

  for (int i = 0; i < kSize; ++i) {
    // AR30 word layout (little endian): B in bits 0..9, G in 10..19,
    // R in 20..29, 2 bit A in 30..31.
    int b10 = reinterpret_cast<uint32_t*>(ar30_pixels)[i] & 1023;
    int g10 = (reinterpret_cast<uint32_t*>(ar30_pixels)[i] >> 10) & 1023;
    int r10 = (reinterpret_cast<uint32_t*>(ar30_pixels)[i] >> 20) & 1023;
    int a2 = (reinterpret_cast<uint32_t*>(ar30_pixels)[i] >> 30) & 3;
    ++histogram_b[b10];
    ++histogram_g[g10];
    ++histogram_r[r10];
    // Limited-range expansion of 8 bit Y, scaled up by 4 to 10 bits.
    int expected_y = Clamp10(static_cast<int>((i - 16) * 1.164f * 4.f));
    EXPECT_NEAR(b10, expected_y, 4);
    EXPECT_NEAR(g10, expected_y, 4);
    EXPECT_NEAR(r10, expected_y, 4);
    EXPECT_EQ(a2, 3);
  }

  // Report how many unique output levels each channel hit.
  int count_b = 0;
  int count_g = 0;
  int count_r = 0;
  for (int i = 0; i < kHistSize; ++i) {
    if (histogram_b[i]) {
      ++count_b;
    }
    if (histogram_g[i]) {
      ++count_g;
    }
    if (histogram_r[i]) {
      ++count_r;
    }
  }
  printf("uniques: B %d, G, %d, R %d\n", count_b, count_g, count_r);

  free_aligned_buffer_page_end(orig_yuv);
  free_aligned_buffer_page_end(ar30_pixels);
}
2598
2599 // Test I400 with jpeg matrix is same as J400
// Test I400 with jpeg matrix is same as J400
TEST_F(LibYUVConvertTest, TestI400) {
  const int kSize = 256;
  align_buffer_page_end(orig_i400, kSize);
  align_buffer_page_end(argb_pixels_i400, kSize * 4);
  align_buffer_page_end(argb_pixels_j400, kSize * 4);
  align_buffer_page_end(argb_pixels_jpeg_i400, kSize * 4);
  align_buffer_page_end(argb_pixels_h709_i400, kSize * 4);
  align_buffer_page_end(argb_pixels_2020_i400, kSize * 4);

  // Test grey scale
  for (int i = 0; i < kSize; ++i) {
    orig_i400[i] = i;
  }

  // Convert the same Y ramp through J400 and through I400 with several
  // matrices; the JPEG-matrix I400 path must match J400 exactly.
  J400ToARGB(orig_i400, 0, argb_pixels_j400, 0, kSize, 1);
  I400ToARGB(orig_i400, 0, argb_pixels_i400, 0, kSize, 1);
  I400ToARGBMatrix(orig_i400, 0, argb_pixels_jpeg_i400, 0, &kYuvJPEGConstants,
                   kSize, 1);
  I400ToARGBMatrix(orig_i400, 0, argb_pixels_h709_i400, 0, &kYuvH709Constants,
                   kSize, 1);
  I400ToARGBMatrix(orig_i400, 0, argb_pixels_2020_i400, 0, &kYuv2020Constants,
                   kSize, 1);

  // Spot check the blue channel at Y = 0, 16, 128 and 255.  Full-range paths
  // (J400, JPEG matrix) pass the value through; limited-range paths (I400
  // default, H709, 2020) clip 16 to 0 and expand 128 to 130.
  EXPECT_EQ(0, argb_pixels_i400[0]);
  EXPECT_EQ(0, argb_pixels_j400[0]);
  EXPECT_EQ(0, argb_pixels_jpeg_i400[0]);
  EXPECT_EQ(0, argb_pixels_h709_i400[0]);
  EXPECT_EQ(0, argb_pixels_2020_i400[0]);
  EXPECT_EQ(0, argb_pixels_i400[16 * 4]);
  EXPECT_EQ(16, argb_pixels_j400[16 * 4]);
  EXPECT_EQ(16, argb_pixels_jpeg_i400[16 * 4]);
  EXPECT_EQ(0, argb_pixels_h709_i400[16 * 4]);
  EXPECT_EQ(0, argb_pixels_2020_i400[16 * 4]);
  EXPECT_EQ(130, argb_pixels_i400[128 * 4]);
  EXPECT_EQ(128, argb_pixels_j400[128 * 4]);
  EXPECT_EQ(128, argb_pixels_jpeg_i400[128 * 4]);
  EXPECT_EQ(130, argb_pixels_h709_i400[128 * 4]);
  EXPECT_EQ(130, argb_pixels_2020_i400[128 * 4]);
  EXPECT_EQ(255, argb_pixels_i400[255 * 4]);
  EXPECT_EQ(255, argb_pixels_j400[255 * 4]);
  EXPECT_EQ(255, argb_pixels_jpeg_i400[255 * 4]);
  EXPECT_EQ(255, argb_pixels_h709_i400[255 * 4]);
  EXPECT_EQ(255, argb_pixels_2020_i400[255 * 4]);

  // J400 output is a pure grey ramp with opaque alpha; JPEG-matrix I400
  // matches it byte for byte.
  for (int i = 0; i < kSize * 4; ++i) {
    if ((i & 3) == 3) {
      EXPECT_EQ(255, argb_pixels_j400[i]);
    } else {
      EXPECT_EQ(i / 4, argb_pixels_j400[i]);
    }
    EXPECT_EQ(argb_pixels_jpeg_i400[i], argb_pixels_j400[i]);
  }

  free_aligned_buffer_page_end(orig_i400);
  free_aligned_buffer_page_end(argb_pixels_i400);
  free_aligned_buffer_page_end(argb_pixels_j400);
  free_aligned_buffer_page_end(argb_pixels_jpeg_i400);
  free_aligned_buffer_page_end(argb_pixels_h709_i400);
  free_aligned_buffer_page_end(argb_pixels_2020_i400);
}
2660
2661 // Test RGB24 to ARGB and back to RGB24
// Test RGB24 to ARGB and back to RGB24
TEST_F(LibYUVConvertTest, TestARGBToRGB24) {
  const int kSize = 256;
  align_buffer_page_end(orig_rgb24, kSize * 3);
  align_buffer_page_end(argb_pixels, kSize * 4);
  align_buffer_page_end(dest_rgb24, kSize * 3);

  // Fill the source with a repeating byte ramp.
  for (int n = 0; n < kSize * 3; ++n) {
    orig_rgb24[n] = n;
  }

  // Round trip: RGB24 -> ARGB -> RGB24 must be lossless.
  RGB24ToARGB(orig_rgb24, 0, argb_pixels, 0, kSize, 1);
  ARGBToRGB24(argb_pixels, 0, dest_rgb24, 0, kSize, 1);

  for (int n = 0; n < kSize * 3; ++n) {
    EXPECT_EQ(orig_rgb24[n], dest_rgb24[n]);
  }

  free_aligned_buffer_page_end(orig_rgb24);
  free_aligned_buffer_page_end(argb_pixels);
  free_aligned_buffer_page_end(dest_rgb24);
}
2684
// Convert a grey ARGB ramp to RGB565 and verify against a golden checksum.
TEST_F(LibYUVConvertTest, Test565) {
  SIMD_ALIGNED(uint8_t orig_pixels[256][4]);
  SIMD_ALIGNED(uint8_t pixels565[256][2]);

  // Grey ramp: every channel of pixel i is i.
  for (int i = 0; i < 256; ++i) {
    memset(orig_pixels[i], i, 4);
  }
  ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1);
  // Hash the packed 565 output and compare to the known-good value.
  uint32_t checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381);
  EXPECT_EQ(610919429u, checksum);
}
2698 #endif // !defined(LEAN_TESTS)
2699
2700 } // namespace libyuv
2701