/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "vpx/vpx_image.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"

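/* Shared worker for vpx_img_alloc() and vpx_img_wrap(): fills in the
 * vpx_image_t for the requested format and size, allocating the descriptor
 * and/or the pixel buffer as needed (a caller-supplied img_data buffer is
 * wrapped rather than allocated). Returns the descriptor, or NULL on failure
 * after releasing anything allocated here. */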
static vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt,
                                     unsigned int d_w, unsigned int d_h,
                                     unsigned int buf_align,
                                     unsigned int stride_align,
                                     unsigned char *img_data) {
  unsigned int h, w, xcs, ycs, bps;
  uint64_t s;
  int stride_in_bytes;
  unsigned int align;

  if (img != NULL) memset(img, 0, sizeof(vpx_image_t));

  if (fmt == VPX_IMG_FMT_NONE) goto fail;

  /* Impose maximum values on input parameters so that this function can
   * perform arithmetic operations without worrying about overflows.
   */
  if (d_w > 0x08000000 || d_h > 0x08000000 || buf_align > 65536 ||
      stride_align > 65536) {
    goto fail;
  }

  /* Treat align==0 like align==1 */
  if (!buf_align) buf_align = 1;

  /* Validate alignment (must be power of 2) */
  if (buf_align & (buf_align - 1)) goto fail;

  /* Treat align==0 like align==1 */
  if (!stride_align) stride_align = 1;

  /* Validate alignment (must be power of 2) */
  if (stride_align & (stride_align - 1)) goto fail;

  /* Get sample size for this format */
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_NV12: bps = 12; break;
    case VPX_IMG_FMT_I422:
    case VPX_IMG_FMT_I440: bps = 16; break;
    case VPX_IMG_FMT_I444: bps = 24; break;
    case VPX_IMG_FMT_I42016: bps = 24; break;
    case VPX_IMG_FMT_I42216:
    case VPX_IMG_FMT_I44016: bps = 32; break;
    case VPX_IMG_FMT_I44416: bps = 48; break;
    default: bps = 16; break;
  }
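
  /* Note: bps is the average number of bits per pixel summed over all planes;
   * e.g. I420 stores 8 bits of Y plus 2 + 2 bits of quarter-resolution U/V
   * per pixel, giving 12. */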

  /* Get chroma shift values for this format */
  // For VPX_IMG_FMT_NV12, xcs needs to be 0 such that UV data is all read at
  // once.
  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_I422:
    case VPX_IMG_FMT_I42016:
    case VPX_IMG_FMT_I42216: xcs = 1; break;
    default: xcs = 0; break;
  }

  switch (fmt) {
    case VPX_IMG_FMT_I420:
    case VPX_IMG_FMT_NV12:
    case VPX_IMG_FMT_I440:
    case VPX_IMG_FMT_YV12:
    case VPX_IMG_FMT_I42016:
    case VPX_IMG_FMT_I44016: ycs = 1; break;
    default: ycs = 0; break;
  }
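
  /* xcs/ycs are the base-2 logarithms of the horizontal/vertical subsampling
   * of the chroma planes relative to the luma plane (1 means half
   * resolution). */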

  /* Calculate storage sizes. */
  if (img_data) {
    /* If the buffer was allocated externally, the width and height shouldn't
     * be adjusted. */
    w = d_w;
    h = d_h;
  } else {
    /* Calculate storage sizes given the chroma subsampling */
    align = (1 << xcs) - 1;
    w = (d_w + align) & ~align;
    assert(d_w <= w);
    align = (1 << ycs) - 1;
    h = (d_h + align) & ~align;
    assert(d_h <= h);
  }

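  /* Byte stride of the Y (or packed) plane: samples per row, doubled for high
   * bit depth formats, then rounded up to the requested stride alignment. */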
  s = (fmt & VPX_IMG_FMT_PLANAR) ? w : (uint64_t)bps * w / 8;
  s = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? s * 2 : s;
  s = (s + stride_align - 1) & ~((uint64_t)stride_align - 1);
  if (s > INT_MAX) goto fail;
  stride_in_bytes = (int)s;
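  /* For high bit depth formats, fold s back to a per-sample count so that the
   * bps-based size computation below (bps already reflects 16-bit storage)
   * does not count the extra byte twice. */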
  s = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? s / 2 : s;

  /* Allocate the new image */
  if (!img) {
    img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t));

    if (!img) goto fail;

    img->self_allocd = 1;
  }

  img->img_data = img_data;

  if (!img_data) {
    uint64_t alloc_size;
    alloc_size = (fmt & VPX_IMG_FMT_PLANAR) ? (uint64_t)h * s * bps / 8
                                            : (uint64_t)h * s;

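    /* Reject sizes that do not fit in a size_t (possible when size_t is
     * 32 bits wide). */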
    if (alloc_size != (size_t)alloc_size) goto fail;

    img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size);
    img->img_data_owner = 1;
  }

  if (!img->img_data) goto fail;

  img->fmt = fmt;
  img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 16 : 8;
  img->w = w;
  img->h = h;
  img->x_chroma_shift = xcs;
  img->y_chroma_shift = ycs;
  img->bps = bps;

  /* Calculate strides */
  img->stride[VPX_PLANE_Y] = img->stride[VPX_PLANE_ALPHA] = stride_in_bytes;
  img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = stride_in_bytes >> xcs;

  /* Default viewport to entire image. (This vpx_img_set_rect call always
   * succeeds.) */
  int ret = vpx_img_set_rect(img, 0, 0, d_w, d_h);
  assert(ret == 0);
  (void)ret;
  return img;

fail:
  vpx_img_free(img);
  return NULL;
}

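/* Public allocator: the same alignment is applied to the buffer start and to
 * each row (stride). Illustrative usage (a sketch, not part of the library):
 *
 *   vpx_image_t img;
 *   if (vpx_img_alloc(&img, VPX_IMG_FMT_I420, 640, 480, 32)) {
 *     // ... write pixels via img.planes[] using img.stride[] ...
 *     vpx_img_free(&img);
 *   }
 */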
vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt,
                           unsigned int d_w, unsigned int d_h,
                           unsigned int align) {
  return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL);
}

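/* Wrap a caller-owned pixel buffer: no pixel storage is allocated here and
 * vpx_img_free() will not release img_data. */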
vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w,
                          unsigned int d_h, unsigned int stride_align,
                          unsigned char *img_data) {
  /* Set buf_align = 1. It is ignored by img_alloc_helper because img_data is
   * not NULL. */
  return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data);
}

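/* Point the plane pointers at the (x, y) .. (x + w, y + h) viewport within
 * the allocated image. Returns 0 on success, -1 if the rectangle does not
 * fit. */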
int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y,
                     unsigned int w, unsigned int h) {
  if (x <= UINT_MAX - w && x + w <= img->w && y <= UINT_MAX - h &&
      y + h <= img->h) {
    img->d_w = w;
    img->d_h = h;

    /* Calculate plane pointers */
    if (!(img->fmt & VPX_IMG_FMT_PLANAR)) {
      img->planes[VPX_PLANE_PACKED] =
          img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED];
    } else {
      const int bytes_per_sample =
          (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
      unsigned char *data = img->img_data;

      if (img->fmt & VPX_IMG_FMT_HAS_ALPHA) {
        img->planes[VPX_PLANE_ALPHA] =
            data + x * bytes_per_sample + y * img->stride[VPX_PLANE_ALPHA];
        data += (size_t)img->h * img->stride[VPX_PLANE_ALPHA];
      }

      img->planes[VPX_PLANE_Y] =
          data + x * bytes_per_sample + y * img->stride[VPX_PLANE_Y];
      data += (size_t)img->h * img->stride[VPX_PLANE_Y];

      unsigned int uv_x = x >> img->x_chroma_shift;
      unsigned int uv_y = y >> img->y_chroma_shift;
      if (img->fmt == VPX_IMG_FMT_NV12) {
        img->planes[VPX_PLANE_U] =
            data + uv_x + uv_y * img->stride[VPX_PLANE_U];
        img->planes[VPX_PLANE_V] = img->planes[VPX_PLANE_U] + 1;
      } else if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) {
        img->planes[VPX_PLANE_U] =
            data + uv_x * bytes_per_sample + uv_y * img->stride[VPX_PLANE_U];
        data +=
            (size_t)(img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_U];
        img->planes[VPX_PLANE_V] =
            data + uv_x * bytes_per_sample + uv_y * img->stride[VPX_PLANE_V];
      } else {
        img->planes[VPX_PLANE_V] =
            data + uv_x * bytes_per_sample + uv_y * img->stride[VPX_PLANE_V];
        data +=
            (size_t)(img->h >> img->y_chroma_shift) * img->stride[VPX_PLANE_V];
        img->planes[VPX_PLANE_U] =
            data + uv_x * bytes_per_sample + uv_y * img->stride[VPX_PLANE_U];
      }
    }
    return 0;
  }
  return -1;
}

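/* Flip the image vertically in place: point each plane at its last row and
 * negate its stride. */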
void vpx_img_flip(vpx_image_t *img) {
  /* Note: In the pointer adjustment calculation below, we want the rhs to be
   * promoted to a signed type. Section 6.3.1.8 of the ISO C99 standard
   * indicates that if the adjustment parameter is unsigned, the stride
   * parameter will be promoted to unsigned, causing errors when the lhs is a
   * larger type than the rhs.
   */
  img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y];
  img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y];

  img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
                              img->stride[VPX_PLANE_U];
  img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U];

  img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1) *
                              img->stride[VPX_PLANE_V];
  img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V];

  img->planes[VPX_PLANE_ALPHA] +=
      (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA];
  img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA];
}

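/* Release whatever the descriptor owns: the pixel buffer if it was allocated
 * by the library, and the vpx_image_t itself if it was self-allocated. */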
void vpx_img_free(vpx_image_t *img) {
  if (img) {
    if (img->img_data && img->img_data_owner) vpx_free(img->img_data);

    if (img->self_allocd) free(img);
  }
}