/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/core/SkRasterPipeline.h"

#include "include/core/SkColorType.h"
#include "include/core/SkImageInfo.h"
#include "include/core/SkMatrix.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkTemplates.h"
#include "modules/skcms/skcms.h"
#include "src/base/SkVx.h"
#include "src/core/SkImageInfoPriv.h"
#include "src/core/SkOpts.h"
#include "src/core/SkRasterPipelineOpContexts.h"
#include "src/core/SkRasterPipelineOpList.h"

#include <algorithm>
#include <cstring>
#include <vector>

using namespace skia_private;
using Op = SkRasterPipelineOp;

bool gForceHighPrecisionRasterPipeline;

SkRasterPipeline::SkRasterPipeline(SkArenaAlloc* alloc) : fAlloc(alloc) {
    this->reset();
}

void SkRasterPipeline::reset() {
    // We intentionally leave the alloc alone here; we don't own it.
    fRewindCtx   = nullptr;
    fStages      = nullptr;
    fTailPointer = nullptr;
    fNumStages   = 0;
    fMemoryCtxInfos.clear();
}

void SkRasterPipeline::append(SkRasterPipelineOp op, void* ctx) {
    SkASSERT(op != Op::uniform_color);            // Please use appendConstantColor().
    SkASSERT(op != Op::unbounded_uniform_color);  // Please use appendConstantColor().
    SkASSERT(op != Op::set_rgb);                  // Please use appendSetRGB().
    SkASSERT(op != Op::unbounded_set_rgb);        // Please use appendSetRGB().
    SkASSERT(op != Op::parametric);               // Please use appendTransferFunction().
    SkASSERT(op != Op::gamma_);                   // Please use appendTransferFunction().
    SkASSERT(op != Op::PQish);                    // Please use appendTransferFunction().
    SkASSERT(op != Op::HLGish);                   // Please use appendTransferFunction().
    SkASSERT(op != Op::HLGinvish);                // Please use appendTransferFunction().
    SkASSERT(op != Op::stack_checkpoint);         // Please use appendStackRewind().
    SkASSERT(op != Op::stack_rewind);             // Please use appendStackRewind().
    this->uncheckedAppend(op, ctx);
}

uint8_t* SkRasterPipeline::tailPointer() {
    if (!fTailPointer) {
        // All ops in the pipeline that use the tail value share the same value.
        fTailPointer = fAlloc->make<uint8_t>(0xFF);
    }
    return fTailPointer;
}

void SkRasterPipeline::uncheckedAppend(SkRasterPipelineOp op, void* ctx) {
    bool isLoad = false, isStore = false;
    SkColorType ct = kUnknown_SkColorType;

#define COLOR_TYPE_CASE(stage_ct, sk_ct)  \
    case Op::load_##stage_ct:             \
    case Op::load_##stage_ct##_dst:       \
        ct = sk_ct;                       \
        isLoad = true;                    \
        break;                            \
    case Op::store_##stage_ct:            \
        ct = sk_ct;                       \
        isStore = true;                   \
        break;
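    // Each COLOR_TYPE_CASE expansion records the op's SkColorType and whether it loads and/or
    // stores, so the op's MemoryCtx can be registered via addMemoryContext() after the switch.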

    switch (op) {
        COLOR_TYPE_CASE(a8,          kAlpha_8_SkColorType)
        COLOR_TYPE_CASE(565,         kRGB_565_SkColorType)
        COLOR_TYPE_CASE(4444,        kARGB_4444_SkColorType)
        COLOR_TYPE_CASE(8888,        kRGBA_8888_SkColorType)
        COLOR_TYPE_CASE(rg88,        kR8G8_unorm_SkColorType)
        COLOR_TYPE_CASE(16161616,    kR16G16B16A16_unorm_SkColorType)
        COLOR_TYPE_CASE(a16,         kA16_unorm_SkColorType)
        COLOR_TYPE_CASE(rg1616,      kR16G16_unorm_SkColorType)
        COLOR_TYPE_CASE(f16,         kRGBA_F16_SkColorType)
        COLOR_TYPE_CASE(af16,        kA16_float_SkColorType)
        COLOR_TYPE_CASE(rgf16,       kR16G16_float_SkColorType)
        COLOR_TYPE_CASE(f32,         kRGBA_F32_SkColorType)
        COLOR_TYPE_CASE(1010102,     kRGBA_1010102_SkColorType)
        COLOR_TYPE_CASE(1010102_xr,  kBGR_101010x_XR_SkColorType)
        COLOR_TYPE_CASE(10101010_xr, kBGRA_10101010_XR_SkColorType)
        COLOR_TYPE_CASE(10x6,        kRGBA_10x6_SkColorType)

#undef COLOR_TYPE_CASE

        // Odd stage that doesn't have a load variant (appendLoad uses load_a8 + alpha_to_red)
        case Op::store_r8: {
            ct = kR8_unorm_SkColorType;
            isStore = true;
            break;
        }
        case Op::srcover_rgba_8888: {
            ct = kRGBA_8888_SkColorType;
            isLoad = true;
            isStore = true;
            break;
        }
        case Op::scale_u8:
        case Op::lerp_u8: {
            ct = kAlpha_8_SkColorType;
            isLoad = true;
            break;
        }
        case Op::scale_565:
        case Op::lerp_565: {
            ct = kRGB_565_SkColorType;
            isLoad = true;
            break;
        }
        case Op::emboss: {
            // Special-case, this op uses a context that holds *two* MemoryCtxs
            SkRasterPipeline_EmbossCtx* embossCtx = (SkRasterPipeline_EmbossCtx*)ctx;
            this->addMemoryContext(&embossCtx->add,
                                   SkColorTypeBytesPerPixel(kAlpha_8_SkColorType),
                                   /*load=*/true, /*store=*/false);
            this->addMemoryContext(&embossCtx->mul,
                                   SkColorTypeBytesPerPixel(kAlpha_8_SkColorType),
                                   /*load=*/true, /*store=*/false);
            break;
        }
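        // These ops read the shared tail value, so point their contexts at it.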
        case Op::init_lane_masks: {
            auto* initCtx = (SkRasterPipeline_InitLaneMasksCtx*)ctx;
            initCtx->tail = this->tailPointer();
            break;
        }
        case Op::branch_if_all_lanes_active: {
            auto* branchCtx = (SkRasterPipeline_BranchIfAllLanesActiveCtx*)ctx;
            branchCtx->tail = this->tailPointer();
            break;
        }
        default:
            break;
    }

    fStages = fAlloc->make<StageList>(StageList{fStages, op, ctx});
    fNumStages += 1;

    if (isLoad || isStore) {
        SkASSERT(ct != kUnknown_SkColorType);
        this->addMemoryContext(
                (SkRasterPipeline_MemoryCtx*)ctx, SkColorTypeBytesPerPixel(ct), isLoad, isStore);
    }
}

void SkRasterPipeline::append(SkRasterPipelineOp op, uintptr_t ctx) {
    void* ptrCtx;
    memcpy(&ptrCtx, &ctx, sizeof(ctx));
    this->append(op, ptrCtx);
}

void SkRasterPipeline::extend(const SkRasterPipeline& src) {
    if (src.empty()) {
        return;
    }
    // Create a rewind context if `src` has one already, but we don't. If we _do_ already have one,
    // we need to keep it, since we already have rewind ops that reference it. Either way, we need
    // to rewrite all the rewind ops to point to _our_ rewind context; we only get that checkpoint.
    if (src.fRewindCtx && !fRewindCtx) {
        fRewindCtx = fAlloc->make<SkRasterPipeline_RewindCtx>();
    }
    auto stages = fAlloc->makeArrayDefault<StageList>(src.fNumStages);

    int n = src.fNumStages;
    const StageList* st = src.fStages;
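    // fStages is a singly linked list whose head is the most recently appended stage, so we walk
    // `src` from newest to oldest, copying each stage into the array and re-linking the copies'
    // prev pointers; stages[0] (the oldest copied stage) then chains onto our existing list.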
    while (n --> 1) {
        stages[n]      = *st;
        stages[n].prev = &stages[n-1];

        // We make sure that all ops use _our_ stack context and tail pointer.
        switch (stages[n].stage) {
            case Op::stack_rewind: {
                stages[n].ctx = fRewindCtx;
                break;
            }
            case Op::init_lane_masks: {
                auto* ctx = (SkRasterPipeline_InitLaneMasksCtx*)stages[n].ctx;
                ctx->tail = this->tailPointer();
                break;
            }
            case Op::branch_if_all_lanes_active: {
                auto* ctx = (SkRasterPipeline_BranchIfAllLanesActiveCtx*)stages[n].ctx;
                ctx->tail = this->tailPointer();
                break;
            }
            default:
                break;
        }

        st = st->prev;
    }
    stages[0]      = *st;
    stages[0].prev = fStages;

    fStages = &stages[src.fNumStages - 1];
    fNumStages += src.fNumStages;
    for (const SkRasterPipeline_MemoryCtxInfo& info : src.fMemoryCtxInfos) {
        this->addMemoryContext(info.context, info.bytesPerPixel, info.load, info.store);
    }
}

const char* SkRasterPipeline::GetOpName(SkRasterPipelineOp op) {
    const char* name = "";
    switch (op) {
#define M(x) case Op::x: name = #x; break;
        SK_RASTER_PIPELINE_OPS_ALL(M)
#undef M
    }
    return name;
}

void SkRasterPipeline::dump() const {
    SkDebugf("SkRasterPipeline, %d stages\n", fNumStages);
    std::vector<const char*> stages;
    for (auto st = fStages; st; st = st->prev) {
        stages.push_back(GetOpName(st->stage));
    }
    std::reverse(stages.begin(), stages.end());
    for (const char* name : stages) {
        SkDebugf("\t%s\n", name);
    }
    SkDebugf("\n");
}

void SkRasterPipeline::appendSetRGB(SkArenaAlloc* alloc, const float rgb[3]) {
    auto arg = alloc->makeArrayDefault<float>(3);
    arg[0] = rgb[0];
    arg[1] = rgb[1];
    arg[2] = rgb[2];

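    // Use the bounded set_rgb op only when every channel is within [0,1];
    // otherwise unbounded_set_rgb preserves the out-of-range values.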
    auto op = Op::unbounded_set_rgb;
    if (0 <= rgb[0] && rgb[0] <= 1 &&
        0 <= rgb[1] && rgb[1] <= 1 &&
        0 <= rgb[2] && rgb[2] <= 1)
    {
        op = Op::set_rgb;
    }

    this->uncheckedAppend(op, arg);
}

void SkRasterPipeline::appendConstantColor(SkArenaAlloc* alloc, const float rgba[4]) {
    // r,g,b might be outside [0,1], but alpha should probably always be in [0,1].
    SkASSERT(0 <= rgba[3] && rgba[3] <= 1);

    if (rgba[0] == 0 && rgba[1] == 0 && rgba[2] == 0 && rgba[3] == 1) {
        this->append(Op::black_color);
    } else if (rgba[0] == 1 && rgba[1] == 1 && rgba[2] == 1 && rgba[3] == 1) {
        this->append(Op::white_color);
    } else {
        auto ctx = alloc->make<SkRasterPipeline_UniformColorCtx>();
        skvx::float4 color = skvx::float4::Load(rgba);
        color.store(&ctx->r);

        // uniform_color requires colors in range and can go lowp,
        // while unbounded_uniform_color supports out-of-range colors too but not lowp.
        if (0 <= rgba[0] && rgba[0] <= rgba[3] &&
            0 <= rgba[1] && rgba[1] <= rgba[3] &&
            0 <= rgba[2] && rgba[2] <= rgba[3]) {
            // To make loads more direct, we store 8-bit values in 16-bit slots.
            color = color * 255.0f + 0.5f;
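            // (The 0.5 bias rounds to nearest before the truncating casts below.)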
            ctx->rgba[0] = (uint16_t)color[0];
            ctx->rgba[1] = (uint16_t)color[1];
            ctx->rgba[2] = (uint16_t)color[2];
            ctx->rgba[3] = (uint16_t)color[3];
            this->uncheckedAppend(Op::uniform_color, ctx);
        } else {
            this->uncheckedAppend(Op::unbounded_uniform_color, ctx);
        }
    }
}

void SkRasterPipeline::appendMatrix(SkArenaAlloc* alloc, const SkMatrix& matrix) {
    SkMatrix::TypeMask mt = matrix.getType();

    if (mt == SkMatrix::kIdentity_Mask) {
        return;
    }
    if (mt == SkMatrix::kTranslate_Mask) {
        float* trans = alloc->makeArrayDefault<float>(2);
        trans[0] = matrix.getTranslateX();
        trans[1] = matrix.getTranslateY();
        this->append(Op::matrix_translate, trans);
    } else if ((mt | (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) ==
               (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
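        // The mask test above passes only when the matrix has no bits outside scale|translate,
        // i.e. it is at most a scale-and-translate matrix.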
        float* scaleTrans = alloc->makeArrayDefault<float>(4);
        scaleTrans[0] = matrix.getScaleX();
        scaleTrans[1] = matrix.getScaleY();
        scaleTrans[2] = matrix.getTranslateX();
        scaleTrans[3] = matrix.getTranslateY();
        this->append(Op::matrix_scale_translate, scaleTrans);
    } else {
        float* storage = alloc->makeArrayDefault<float>(9);
        matrix.get9(storage);
        if (!matrix.hasPerspective()) {
            // note: asAffine and the 2x3 stage really only need 6 entries
            this->append(Op::matrix_2x3, storage);
        } else {
            this->append(Op::matrix_perspective, storage);
        }
    }
}

void SkRasterPipeline::appendLoad(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) {
    switch (ct) {
        case kUnknown_SkColorType: SkASSERT(false); break;

        case kAlpha_8_SkColorType:            this->append(Op::load_a8,        ctx); break;
        case kA16_unorm_SkColorType:          this->append(Op::load_a16,       ctx); break;
        case kA16_float_SkColorType:          this->append(Op::load_af16,      ctx); break;
        case kRGB_565_SkColorType:            this->append(Op::load_565,       ctx); break;
        case kARGB_4444_SkColorType:          this->append(Op::load_4444,      ctx); break;
        case kR8G8_unorm_SkColorType:         this->append(Op::load_rg88,      ctx); break;
        case kR16G16_unorm_SkColorType:       this->append(Op::load_rg1616,    ctx); break;
        case kR16G16_float_SkColorType:       this->append(Op::load_rgf16,     ctx); break;
        case kRGBA_8888_SkColorType:          this->append(Op::load_8888,      ctx); break;
        case kRGBA_1010102_SkColorType:       this->append(Op::load_1010102,   ctx); break;
        case kR16G16B16A16_unorm_SkColorType: this->append(Op::load_16161616,  ctx); break;
        case kRGBA_F16Norm_SkColorType:
        case kRGBA_F16_SkColorType:           this->append(Op::load_f16,       ctx); break;
        case kRGBA_F32_SkColorType:           this->append(Op::load_f32,       ctx); break;
        case kRGBA_10x6_SkColorType:          this->append(Op::load_10x6,      ctx); break;

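        // Color types without a dedicated load stage are synthesized from the closest matching
        // load plus fix-up stages (channel swizzles, forced opacity, etc.).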
        case kGray_8_SkColorType:             this->append(Op::load_a8, ctx);
                                              this->append(Op::alpha_to_gray);
                                              break;

        case kR8_unorm_SkColorType:           this->append(Op::load_a8, ctx);
                                              this->append(Op::alpha_to_red);
                                              break;

        case kRGB_888x_SkColorType:           this->append(Op::load_8888, ctx);
                                              this->append(Op::force_opaque);
                                              break;

        case kBGRA_1010102_SkColorType:       this->append(Op::load_1010102, ctx);
                                              this->append(Op::swap_rb);
                                              break;

        case kRGB_101010x_SkColorType:        this->append(Op::load_1010102, ctx);
                                              this->append(Op::force_opaque);
                                              break;

        case kBGR_101010x_SkColorType:        this->append(Op::load_1010102, ctx);
                                              this->append(Op::force_opaque);
                                              this->append(Op::swap_rb);
                                              break;

        case kBGRA_10101010_XR_SkColorType:   this->append(Op::load_10101010_xr, ctx);
                                              this->append(Op::swap_rb);
                                              break;

        case kBGR_101010x_XR_SkColorType:     this->append(Op::load_1010102_xr, ctx);
                                              this->append(Op::force_opaque);
                                              this->append(Op::swap_rb);
                                              break;

        case kRGB_F16F16F16x_SkColorType:     this->append(Op::load_f16, ctx);
                                              this->append(Op::force_opaque);
                                              break;

        case kBGRA_8888_SkColorType:          this->append(Op::load_8888, ctx);
                                              this->append(Op::swap_rb);
                                              break;

        case kSRGBA_8888_SkColorType:
            this->append(Op::load_8888, ctx);
            this->appendTransferFunction(*skcms_sRGB_TransferFunction());
            break;
    }
}

void SkRasterPipeline::appendLoadDst(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) {
    switch (ct) {
        case kUnknown_SkColorType: SkASSERT(false); break;

        case kAlpha_8_SkColorType:            this->append(Op::load_a8_dst,       ctx); break;
        case kA16_unorm_SkColorType:          this->append(Op::load_a16_dst,      ctx); break;
        case kA16_float_SkColorType:          this->append(Op::load_af16_dst,     ctx); break;
        case kRGB_565_SkColorType:            this->append(Op::load_565_dst,      ctx); break;
        case kARGB_4444_SkColorType:          this->append(Op::load_4444_dst,     ctx); break;
        case kR8G8_unorm_SkColorType:         this->append(Op::load_rg88_dst,     ctx); break;
        case kR16G16_unorm_SkColorType:       this->append(Op::load_rg1616_dst,   ctx); break;
        case kR16G16_float_SkColorType:       this->append(Op::load_rgf16_dst,    ctx); break;
        case kRGBA_8888_SkColorType:          this->append(Op::load_8888_dst,     ctx); break;
        case kRGBA_1010102_SkColorType:       this->append(Op::load_1010102_dst,  ctx); break;
        case kR16G16B16A16_unorm_SkColorType: this->append(Op::load_16161616_dst, ctx); break;
        case kRGBA_F16Norm_SkColorType:
        case kRGBA_F16_SkColorType:           this->append(Op::load_f16_dst,      ctx); break;
        case kRGBA_F32_SkColorType:           this->append(Op::load_f32_dst,      ctx); break;
        case kRGBA_10x6_SkColorType:          this->append(Op::load_10x6_dst,     ctx); break;

        case kGray_8_SkColorType:             this->append(Op::load_a8_dst, ctx);
                                              this->append(Op::alpha_to_gray_dst);
                                              break;

        case kR8_unorm_SkColorType:           this->append(Op::load_a8_dst, ctx);
                                              this->append(Op::alpha_to_red_dst);
                                              break;

        case kRGB_888x_SkColorType:           this->append(Op::load_8888_dst, ctx);
                                              this->append(Op::force_opaque_dst);
                                              break;

        case kBGRA_1010102_SkColorType:       this->append(Op::load_1010102_dst, ctx);
                                              this->append(Op::swap_rb_dst);
                                              break;

        case kRGB_101010x_SkColorType:        this->append(Op::load_1010102_dst, ctx);
                                              this->append(Op::force_opaque_dst);
                                              break;

        case kBGR_101010x_SkColorType:        this->append(Op::load_1010102_dst, ctx);
                                              this->append(Op::force_opaque_dst);
                                              this->append(Op::swap_rb_dst);
                                              break;

        case kBGR_101010x_XR_SkColorType:     this->append(Op::load_1010102_xr_dst, ctx);
                                              this->append(Op::force_opaque_dst);
                                              this->append(Op::swap_rb_dst);
                                              break;

        case kBGRA_10101010_XR_SkColorType:   this->append(Op::load_10101010_xr_dst, ctx);
                                              this->append(Op::swap_rb_dst);
                                              break;

        case kRGB_F16F16F16x_SkColorType:     this->append(Op::load_f16_dst, ctx);
                                              this->append(Op::force_opaque_dst);
                                              break;

        case kBGRA_8888_SkColorType:          this->append(Op::load_8888_dst, ctx);
                                              this->append(Op::swap_rb_dst);
                                              break;

        case kSRGBA_8888_SkColorType:
            // TODO: We could remove the double-swap if we had _dst versions of all the TF stages
            this->append(Op::load_8888_dst, ctx);
            this->append(Op::swap_src_dst);
            this->appendTransferFunction(*skcms_sRGB_TransferFunction());
            this->append(Op::swap_src_dst);
            break;
    }
}

void SkRasterPipeline::appendStore(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) {
    switch (ct) {
        case kUnknown_SkColorType: SkASSERT(false); break;

        case kAlpha_8_SkColorType:            this->append(Op::store_a8,       ctx); break;
        case kR8_unorm_SkColorType:           this->append(Op::store_r8,       ctx); break;
        case kA16_unorm_SkColorType:          this->append(Op::store_a16,      ctx); break;
        case kA16_float_SkColorType:          this->append(Op::store_af16,     ctx); break;
        case kRGB_565_SkColorType:            this->append(Op::store_565,      ctx); break;
        case kARGB_4444_SkColorType:          this->append(Op::store_4444,     ctx); break;
        case kR8G8_unorm_SkColorType:         this->append(Op::store_rg88,     ctx); break;
        case kR16G16_unorm_SkColorType:       this->append(Op::store_rg1616,   ctx); break;
        case kR16G16_float_SkColorType:       this->append(Op::store_rgf16,    ctx); break;
        case kRGBA_8888_SkColorType:          this->append(Op::store_8888,     ctx); break;
        case kRGBA_1010102_SkColorType:       this->append(Op::store_1010102,  ctx); break;
        case kR16G16B16A16_unorm_SkColorType: this->append(Op::store_16161616, ctx); break;
        case kRGBA_F16Norm_SkColorType:
        case kRGBA_F16_SkColorType:           this->append(Op::store_f16,      ctx); break;
        case kRGBA_F32_SkColorType:           this->append(Op::store_f32,      ctx); break;
        case kRGBA_10x6_SkColorType:          this->append(Op::store_10x6,     ctx); break;

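        // As in appendLoad(), color types without a dedicated store stage are handled by
        // appending fix-up stages before the closest matching store.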
        case kRGB_888x_SkColorType:           this->append(Op::force_opaque);
                                              this->append(Op::store_8888, ctx);
                                              break;

        case kBGRA_1010102_SkColorType:       this->append(Op::swap_rb);
                                              this->append(Op::store_1010102, ctx);
                                              break;

        case kRGB_101010x_SkColorType:        this->append(Op::force_opaque);
                                              this->append(Op::store_1010102, ctx);
                                              break;

        case kBGR_101010x_SkColorType:        this->append(Op::force_opaque);
                                              this->append(Op::swap_rb);
                                              this->append(Op::store_1010102, ctx);
                                              break;

        case kBGR_101010x_XR_SkColorType:     this->append(Op::force_opaque);
                                              this->append(Op::swap_rb);
                                              this->append(Op::store_1010102_xr, ctx);
                                              break;

        case kRGB_F16F16F16x_SkColorType:     this->append(Op::force_opaque);
                                              this->append(Op::store_f16, ctx);
                                              break;

        case kBGRA_10101010_XR_SkColorType:   this->append(Op::swap_rb);
                                              this->append(Op::store_10101010_xr, ctx);
                                              break;

        case kGray_8_SkColorType:             this->append(Op::bt709_luminance_or_luma_to_alpha);
                                              this->append(Op::store_a8, ctx);
                                              break;

        case kBGRA_8888_SkColorType:          this->append(Op::swap_rb);
                                              this->append(Op::store_8888, ctx);
                                              break;

        case kSRGBA_8888_SkColorType:
            this->appendTransferFunction(*skcms_sRGB_Inverse_TransferFunction());
            this->append(Op::store_8888, ctx);
            break;
    }
}

void SkRasterPipeline::appendTransferFunction(const skcms_TransferFunction& tf) {
    void* ctx = const_cast<void*>(static_cast<const void*>(&tf));
    switch (skcms_TransferFunction_getType(&tf)) {
        case skcms_TFType_Invalid: SkASSERT(false); break;

        case skcms_TFType_sRGBish:
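            // With a == 1 and b,c,d,e,f all zero, the sRGB-ish parametric curve reduces to a
            // pure power law y = x^g, which the simpler gamma_ stage handles directly.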
            if (tf.a == 1 && tf.b == 0 && tf.c == 0 && tf.d == 0 && tf.e == 0 && tf.f == 0) {
                this->uncheckedAppend(Op::gamma_, ctx);
            } else {
                this->uncheckedAppend(Op::parametric, ctx);
            }
            break;
        case skcms_TFType_PQish:     this->uncheckedAppend(Op::PQish,     ctx); break;
        case skcms_TFType_HLGish:    this->uncheckedAppend(Op::HLGish,    ctx); break;
        case skcms_TFType_HLGinvish: this->uncheckedAppend(Op::HLGinvish, ctx); break;
    }
}

// GPUs clamp all color channels to the limits of the format just before the blend step. To match
// that auto-clamp, the RP blitter uses this helper immediately before appending blending stages.
void SkRasterPipeline::appendClampIfNormalized(const SkImageInfo& info) {
    if (SkColorTypeIsNormalized(info.colorType())) {
        this->uncheckedAppend(Op::clamp_01, nullptr);
    }
}

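// All stack_rewind ops in a pipeline share a single RewindCtx; the matching stack_checkpoint
// stage is injected at the front of the program in buildHighpPipeline().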
void SkRasterPipeline::appendStackRewind() {
    if (!fRewindCtx) {
        fRewindCtx = fAlloc->make<SkRasterPipeline_RewindCtx>();
    }
    this->uncheckedAppend(Op::stack_rewind, fRewindCtx);
}

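// `ip` is a back-to-front cursor: callers pass a pointer one past the end of the program buffer,
// and each call writes the new stage into the slot just before it.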
static void prepend_to_pipeline(SkRasterPipelineStage*& ip, SkOpts::StageFn stageFn, void* ctx) {
    --ip;
    ip->fn = stageFn;
    ip->ctx = ctx;
}

bool SkRasterPipeline::buildLowpPipeline(SkRasterPipelineStage* ip) const {
    if (gForceHighPrecisionRasterPipeline || fRewindCtx) {
        return false;
    }
    // Stages are stored backwards in fStages; to compensate, we assemble the pipeline in reverse
    // here, back to front.
    prepend_to_pipeline(ip, SkOpts::just_return_lowp, /*ctx=*/nullptr);
    for (const StageList* st = fStages; st; st = st->prev) {
        int opIndex = (int)st->stage;
        if (opIndex >= kNumRasterPipelineLowpOps || !SkOpts::ops_lowp[opIndex]) {
            // This program contains a stage that doesn't exist in lowp.
            return false;
        }
        prepend_to_pipeline(ip, SkOpts::ops_lowp[opIndex], st->ctx);
    }
    return true;
}

void SkRasterPipeline::buildHighpPipeline(SkRasterPipelineStage* ip) const {
    // We assemble the pipeline in reverse, since the stage list is stored backwards.
    prepend_to_pipeline(ip, SkOpts::just_return_highp, /*ctx=*/nullptr);
    for (const StageList* st = fStages; st; st = st->prev) {
        int opIndex = (int)st->stage;
        prepend_to_pipeline(ip, SkOpts::ops_highp[opIndex], st->ctx);
    }

    // stack_checkpoint and stack_rewind are only implemented in highp. We only need these stages
    // when generating long (or looping) pipelines from SkSL. The other stages used by the SkSL
    // Raster Pipeline generator will only have highp implementations, because we can't execute
    // SkSL code without floating point.
    if (fRewindCtx) {
        const int rewindIndex = (int)Op::stack_checkpoint;
        prepend_to_pipeline(ip, SkOpts::ops_highp[rewindIndex], fRewindCtx);
    }
}

SkRasterPipeline::StartPipelineFn SkRasterPipeline::buildPipeline(SkRasterPipelineStage* ip) const {
    // We try to build a lowp pipeline first; if that fails, we fall back to a highp float pipeline.
    if (this->buildLowpPipeline(ip)) {
        return SkOpts::start_pipeline_lowp;
    }

    this->buildHighpPipeline(ip);
    return SkOpts::start_pipeline_highp;
}

int SkRasterPipeline::stagesNeeded() const {
    // Add 1 to budget for a `just_return` stage at the end.
    int stages = fNumStages + 1;

    // If we have any stack_rewind stages, we will need to inject a stack_checkpoint stage.
    if (fRewindCtx) {
        stages += 1;
    }
    return stages;
}

void SkRasterPipeline::run(size_t x, size_t y, size_t w, size_t h) const {
    if (this->empty()) {
        return;
    }

    int stagesNeeded = this->stagesNeeded();

    // Best to not use fAlloc here... we can't bound how often run() will be called.
    AutoSTMalloc<32, SkRasterPipelineStage> program(stagesNeeded);

    int numMemoryCtxs = fMemoryCtxInfos.size();
    AutoSTMalloc<2, SkRasterPipeline_MemoryCtxPatch> patches(numMemoryCtxs);
    for (int i = 0; i < numMemoryCtxs; ++i) {
        patches[i].info = fMemoryCtxInfos[i];
        patches[i].backup = nullptr;
        memset(patches[i].scratch, 0, sizeof(patches[i].scratch));
    }

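    // buildPipeline() is handed a pointer one past the end of `program` and fills it back to
    // front, so program.get() ends up pointing at the first stage to execute.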
    auto start_pipeline = this->buildPipeline(program.get() + stagesNeeded);
    start_pipeline(x, y, x + w, y + h, program.get(),
                   SkSpan{patches.data(), numMemoryCtxs},
                   fTailPointer);
}

std::function<void(size_t, size_t, size_t, size_t)> SkRasterPipeline::compile() const {
    if (this->empty()) {
        return [](size_t, size_t, size_t, size_t) {};
    }

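    // Unlike run(), the program and patch table are allocated from fAlloc, so the returned
    // closure (which captures those pointers by value) remains valid as long as the arena does.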
    int stagesNeeded = this->stagesNeeded();

    SkRasterPipelineStage* program = fAlloc->makeArray<SkRasterPipelineStage>(stagesNeeded);

    int numMemoryCtxs = fMemoryCtxInfos.size();
    SkRasterPipeline_MemoryCtxPatch* patches =
            fAlloc->makeArray<SkRasterPipeline_MemoryCtxPatch>(numMemoryCtxs);
    for (int i = 0; i < numMemoryCtxs; ++i) {
        patches[i].info = fMemoryCtxInfos[i];
        patches[i].backup = nullptr;
        memset(patches[i].scratch, 0, sizeof(patches[i].scratch));
    }
    uint8_t* tailPointer = fTailPointer;

    auto start_pipeline = this->buildPipeline(program + stagesNeeded);
    return [=](size_t x, size_t y, size_t w, size_t h) {
        start_pipeline(x, y, x + w, y + h, program,
                       SkSpan{patches, numMemoryCtxs},
                       tailPointer);
    };
}

void SkRasterPipeline::addMemoryContext(SkRasterPipeline_MemoryCtx* ctx,
                                        int bytesPerPixel,
                                        bool load,
                                        bool store) {
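    // Deduplicate MemoryCtx registrations: if this context is already tracked, just merge the
    // load/store flags (the bytes-per-pixel must match).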
    SkRasterPipeline_MemoryCtxInfo* info =
            std::find_if(fMemoryCtxInfos.begin(), fMemoryCtxInfos.end(),
                         [=](const SkRasterPipeline_MemoryCtxInfo& i) { return i.context == ctx; });
    if (info != fMemoryCtxInfos.end()) {
        SkASSERT(bytesPerPixel == info->bytesPerPixel);
        info->load = info->load || load;
        info->store = info->store || store;
    } else {
        fMemoryCtxInfos.push_back(SkRasterPipeline_MemoryCtxInfo{ctx, bytesPerPixel, load, store});
    }
}