1*b2055c35SXin Li // Copyright 2011 Google Inc. All Rights Reserved.
2*b2055c35SXin Li //
3*b2055c35SXin Li // Use of this source code is governed by a BSD-style license
4*b2055c35SXin Li // that can be found in the COPYING file in the root of the source
5*b2055c35SXin Li // tree. An additional intellectual property rights grant can be found
6*b2055c35SXin Li // in the file PATENTS. All contributing project authors may
7*b2055c35SXin Li // be found in the AUTHORS file in the root of the source tree.
8*b2055c35SXin Li // -----------------------------------------------------------------------------
9*b2055c35SXin Li //
10*b2055c35SXin Li // SSE41 version of YUV to RGB upsampling functions.
11*b2055c35SXin Li //
12*b2055c35SXin Li // Author: [email protected] (Somnath Banerjee)
13*b2055c35SXin Li
14*b2055c35SXin Li #include "src/dsp/dsp.h"
15*b2055c35SXin Li
16*b2055c35SXin Li #if defined(WEBP_USE_SSE41)
17*b2055c35SXin Li
18*b2055c35SXin Li #include <assert.h>
19*b2055c35SXin Li #include <smmintrin.h>
20*b2055c35SXin Li #include <string.h>
21*b2055c35SXin Li #include "src/dsp/yuv.h"
22*b2055c35SXin Li
23*b2055c35SXin Li #ifdef FANCY_UPSAMPLING
24*b2055c35SXin Li
25*b2055c35SXin Li #if !defined(WEBP_REDUCE_CSP)
26*b2055c35SXin Li
27*b2055c35SXin Li // We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
28*b2055c35SXin Li // u = (9*a + 3*b + 3*c + d + 8) / 16
29*b2055c35SXin Li // = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
30*b2055c35SXin Li // = (a + m + 1) / 2
31*b2055c35SXin Li // where m = (a + 3*b + 3*c + d) / 8
32*b2055c35SXin Li // = ((a + b + c + d) / 2 + b + c) / 4
33*b2055c35SXin Li //
34*b2055c35SXin Li // Let's say k = (a + b + c + d) / 4.
35*b2055c35SXin Li // We can compute k as
36*b2055c35SXin Li // k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1
37*b2055c35SXin Li // where s = (a + d + 1) / 2 and t = (b + c + 1) / 2
38*b2055c35SXin Li //
39*b2055c35SXin Li // Then m can be written as
40*b2055c35SXin Li // m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1
41*b2055c35SXin Li
// Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1
// NOTE: relies on the locals 'k', 'st' and 'one' being in scope at the
// expansion site — they are defined inside UPSAMPLE_32PIXELS below.
#define GET_M(ij, in, out) do { \
  const __m128i tmp0 = _mm_avg_epu8(k, (in)); /* (k + in + 1) / 2 */ \
  const __m128i tmp1 = _mm_and_si128((ij), st); /* (ij) & (s^t) */ \
  const __m128i tmp2 = _mm_xor_si128(k, (in)); /* (k^in) */ \
  const __m128i tmp3 = _mm_or_si128(tmp1, tmp2); /* ((ij) & (s^t)) | (k^in) */\
  const __m128i tmp4 = _mm_and_si128(tmp3, one); /* & 1 -> lsb_correction */ \
  (out) = _mm_sub_epi8(tmp0, tmp4); /* (k + in + 1) / 2 - lsb_correction */ \
} while (0)
51*b2055c35SXin Li
// pack and store two alternating pixel rows
// Interleaves the two 16-byte diagonal averages into 32 output bytes
// (a/da pairs feed even pixels, b/db pairs feed odd pixels).
// 'out' must be 16-byte aligned (_mm_store_si128 is used).
#define PACK_AND_STORE(a, b, da, db, out) do { \
  const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
  const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
  const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
  const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
  _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
  _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
} while (0)
61*b2055c35SXin Li
// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
// Output layout: 32 upsampled top-row pixels at (out) + 0 and 32 bottom-row
// pixels at (out) + 64 (the 32-byte gap is used by the caller to interleave
// the u and v planes).
#define UPSAMPLE_32PIXELS(r1, r2, out) do { \
  const __m128i one = _mm_set1_epi8(1); \
  /* b/d are the same rows shifted by one byte, hence 17 bytes are read */ \
  /* from each row in total (unaligned loads). */ \
  const __m128i a = _mm_loadu_si128((const __m128i*)&(r1)[0]); \
  const __m128i b = _mm_loadu_si128((const __m128i*)&(r1)[1]); \
  const __m128i c = _mm_loadu_si128((const __m128i*)&(r2)[0]); \
  const __m128i d = _mm_loadu_si128((const __m128i*)&(r2)[1]); \
  \
  const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \
  const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \
  const __m128i st = _mm_xor_si128(s, t); /* st = s^t */ \
  \
  const __m128i ad = _mm_xor_si128(a, d); /* ad = a^d */ \
  const __m128i bc = _mm_xor_si128(b, c); /* bc = b^c */ \
  \
  const __m128i t1 = _mm_or_si128(ad, bc); /* (a^d) | (b^c) */ \
  const __m128i t2 = _mm_or_si128(t1, st); /* (a^d) | (b^c) | (s^t) */ \
  const __m128i t3 = _mm_and_si128(t2, one); /* (a^d) | (b^c) | (s^t) & 1 */ \
  const __m128i t4 = _mm_avg_epu8(s, t); \
  const __m128i k = _mm_sub_epi8(t4, t3); /* k = (a + b + c + d) / 4 */ \
  __m128i diag1, diag2; \
  \
  /* GET_M uses 'k', 'st' and 'one' declared above. */ \
  GET_M(bc, t, diag1); /* diag1 = (a + 3b + 3c + d) / 8 */ \
  GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \
  \
  /* pack the alternate pixels */ \
  PACK_AND_STORE(a, b, diag1, diag2, (out) + 0); /* store top */ \
  PACK_AND_STORE(c, d, diag2, diag1, (out) + 2 * 32); /* store bottom */ \
} while (0)
91*b2055c35SXin Li
// Turn the macro into a function for reducing code-size when non-critical
// Reads 17 bytes from each of r1/r2 and writes 32 upsampled pixels at
// out[0..31] (top) and out[64..95] (bottom) — see UPSAMPLE_32PIXELS.
static void Upsample32Pixels_SSE41(const uint8_t r1[], const uint8_t r2[],
                                   uint8_t* const out) {
  UPSAMPLE_32PIXELS(r1, r2, out);
}
97*b2055c35SXin Li
// Upsamples the last (partial) block of a row: copies the remaining
// 'num_pixels' (must be in [1, 17]) bytes from rows 'tb' and 'bb' into
// 17-byte stack buffers, pads them by replicating the last byte, then runs
// the shared 32-pixel upsampler on the padded buffers.
// Wrapped in do/while(0) so the macro behaves as a single statement
// (consistent with the other multi-statement macros in this file).
#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) do { \
  uint8_t r1[17], r2[17]; \
  assert((num_pixels) > 0 && (num_pixels) <= 17); /* guard stack buffers */ \
  memcpy(r1, (tb), (num_pixels)); \
  memcpy(r2, (bb), (num_pixels)); \
  /* replicate last byte */ \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels)); \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels)); \
  /* using the shared function instead of the macro saves ~3k code size */ \
  Upsample32Pixels_SSE41(r1, r2, out); \
} while (0)
108*b2055c35SXin Li
// Converts 32 luma pixels (top row and, if present, bottom row) to RGB/BGR
// using the previously upsampled chroma in r_u/r_v (assumed in scope at the
// expansion site). FUNC##32_SSE41 resolves to e.g. VP8YuvToRgb32_SSE41.
#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \
                       top_dst, bottom_dst, cur_x) do { \
  FUNC##32_SSE41((top_y) + (cur_x), r_u, r_v, (top_dst) + (cur_x) * (XSTEP)); \
  if ((bottom_y) != NULL) { \
    /* bottom-row chroma lives 64 bytes past the top-row chroma (see the */ \
    /* UPSAMPLE_32PIXELS output layout). */ \
    FUNC##32_SSE41((bottom_y) + (cur_x), r_u + 64, r_v + 64, \
                   (bottom_dst) + (cur_x) * (XSTEP)); \
  } \
} while (0)
117*b2055c35SXin Li
// Generates a full fancy-upsampling line-pair converter:
//  - handles the first pixel with scalar code,
//  - processes the bulk of the row 32 luma pixels at a time with SIMD,
//  - handles the trailing partial block via padded temporary buffers.
// 'bottom_y' may be NULL (last row); chroma rows hold (len + 1) / 2 samples.
#define SSE4_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
                      const uint8_t* top_u, const uint8_t* top_v, \
                      const uint8_t* cur_u, const uint8_t* cur_v, \
                      uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
  int uv_pos, pos; \
  /* 16byte-aligned array to cache reconstructed u and v */ \
  uint8_t uv_buf[14 * 32 + 15] = { 0 }; \
  uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
  uint8_t* const r_v = r_u + 32; \
  \
  assert(top_y != NULL); \
  { /* Treat the first pixel in regular way */ \
    const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
    const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
    const int u0_t = (top_u[0] + u_diag) >> 1; \
    const int v0_t = (top_v[0] + v_diag) >> 1; \
    FUNC(top_y[0], u0_t, v0_t, top_dst); \
    if (bottom_y != NULL) { \
      const int u0_b = (cur_u[0] + u_diag) >> 1; \
      const int v0_b = (cur_v[0] + v_diag) >> 1; \
      FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \
    } \
  } \
  /* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */ \
  for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \
    UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u); \
    UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \
    CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \
  } \
  if (len > 1) { \
    /* Trailing partial block: upsample into padded scratch buffers, */ \
    /* convert there, then copy only the valid bytes back to the dsts. */ \
    const int left_over = ((len + 1) >> 1) - (pos >> 1); \
    uint8_t* const tmp_top_dst = r_u + 4 * 32; \
    uint8_t* const tmp_bottom_dst = tmp_top_dst + 4 * 32; \
    uint8_t* const tmp_top = tmp_bottom_dst + 4 * 32; \
    uint8_t* const tmp_bottom = (bottom_y == NULL) ? NULL : tmp_top + 32; \
    assert(left_over > 0); \
    UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \
    UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \
    memcpy(tmp_top, top_y + pos, len - pos); \
    if (bottom_y != NULL) memcpy(tmp_bottom, bottom_y + pos, len - pos); \
    CONVERT2RGB_32(FUNC, XSTEP, tmp_top, tmp_bottom, tmp_top_dst, \
                   tmp_bottom_dst, 0); \
    memcpy(top_dst + pos * (XSTEP), tmp_top_dst, (len - pos) * (XSTEP)); \
    if (bottom_y != NULL) { \
      memcpy(bottom_dst + pos * (XSTEP), tmp_bottom_dst, \
             (len - pos) * (XSTEP)); \
    } \
  } \
}
168*b2055c35SXin Li
// SSE4 variants of the fancy upsampler.
// XSTEP == 3: packed 3-byte-per-pixel RGB/BGR output.
SSE4_UPSAMPLE_FUNC(UpsampleRgbLinePair_SSE41, VP8YuvToRgb, 3)
SSE4_UPSAMPLE_FUNC(UpsampleBgrLinePair_SSE41, VP8YuvToBgr, 3)
172*b2055c35SXin Li
173*b2055c35SXin Li #undef GET_M
174*b2055c35SXin Li #undef PACK_AND_STORE
175*b2055c35SXin Li #undef UPSAMPLE_32PIXELS
176*b2055c35SXin Li #undef UPSAMPLE_LAST_BLOCK
177*b2055c35SXin Li #undef CONVERT2RGB
178*b2055c35SXin Li #undef CONVERT2RGB_32
179*b2055c35SXin Li #undef SSE4_UPSAMPLE_FUNC
180*b2055c35SXin Li
181*b2055c35SXin Li #endif // WEBP_REDUCE_CSP
182*b2055c35SXin Li
183*b2055c35SXin Li //------------------------------------------------------------------------------
184*b2055c35SXin Li // Entry point
185*b2055c35SXin Li
// Global dispatch table, defined in the generic upsampling module.
extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

extern void WebPInitUpsamplersSSE41(void);

// Installs the SSE4.1 fancy upsamplers into the dispatch table.
// No-op for the RGB/BGR entries when WEBP_REDUCE_CSP is defined.
WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersSSE41(void) {
#if !defined(WEBP_REDUCE_CSP)
  WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_SSE41;
  WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_SSE41;
#endif   // WEBP_REDUCE_CSP
}
196*b2055c35SXin Li
197*b2055c35SXin Li #endif // FANCY_UPSAMPLING
198*b2055c35SXin Li
199*b2055c35SXin Li //------------------------------------------------------------------------------
200*b2055c35SXin Li
201*b2055c35SXin Li extern WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */];
202*b2055c35SXin Li extern void WebPInitYUV444ConvertersSSE41(void);
203*b2055c35SXin Li
// Generates a YUV4:4:4 -> RGB/BGR row converter: SIMD path (CALL) for
// 32-pixel chunks, scalar C fallback (CALL_C) for the remaining < 32 pixels.
#define YUV444_FUNC(FUNC_NAME, CALL, CALL_C, XSTEP) \
extern void CALL_C(const uint8_t* y, const uint8_t* u, const uint8_t* v, \
                   uint8_t* dst, int len); \
static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \
                      uint8_t* dst, int len) { \
  int i; \
  const int max_len = len & ~31; /* round len down to a multiple of 32 */ \
  for (i = 0; i < max_len; i += 32) { \
    CALL(y + i, u + i, v + i, dst + i * (XSTEP)); \
  } \
  if (i < len) {  /* C-fallback */ \
    CALL_C(y + i, u + i, v + i, dst + i * (XSTEP), len - i); \
  } \
}
218*b2055c35SXin Li
#if !defined(WEBP_REDUCE_CSP)
// XSTEP == 3: packed 3-byte-per-pixel RGB/BGR output.
YUV444_FUNC(Yuv444ToRgb_SSE41, VP8YuvToRgb32_SSE41, WebPYuv444ToRgb_C, 3)
YUV444_FUNC(Yuv444ToBgr_SSE41, VP8YuvToBgr32_SSE41, WebPYuv444ToBgr_C, 3)
#endif   // WEBP_REDUCE_CSP
223*b2055c35SXin Li
WebPInitYUV444ConvertersSSE41(void)224*b2055c35SXin Li WEBP_TSAN_IGNORE_FUNCTION void WebPInitYUV444ConvertersSSE41(void) {
225*b2055c35SXin Li #if !defined(WEBP_REDUCE_CSP)
226*b2055c35SXin Li WebPYUV444Converters[MODE_RGB] = Yuv444ToRgb_SSE41;
227*b2055c35SXin Li WebPYUV444Converters[MODE_BGR] = Yuv444ToBgr_SSE41;
228*b2055c35SXin Li #endif // WEBP_REDUCE_CSP
229*b2055c35SXin Li }
230*b2055c35SXin Li
#else

// SSE4.1 support not compiled in: provide a no-op stub so the init symbol
// always exists for the dispatcher.
WEBP_DSP_INIT_STUB(WebPInitYUV444ConvertersSSE41)

#endif  // WEBP_USE_SSE41

// The upsampler init also needs a stub when either SSE4.1 or fancy
// upsampling is disabled.
#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_SSE41))
WEBP_DSP_INIT_STUB(WebPInitUpsamplersSSE41)
#endif
240