xref: /aosp_15_r20/external/skia/src/core/SkRasterPipeline.h (revision c8dee2aa9b3f27cf6c858bd81872bdeb2c07ed17)
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7 
8 #ifndef SkRasterPipeline_DEFINED
9 #define SkRasterPipeline_DEFINED
10 
11 #include "include/core/SkColor.h"
12 #include "include/core/SkTypes.h"
13 #include "include/private/base/SkMacros.h"
14 #include "include/private/base/SkSpan_impl.h"
15 #include "include/private/base/SkTArray.h"
16 #include "src/base/SkArenaAlloc.h"
17 #include "src/core/SkRasterPipelineOpContexts.h"
18 
19 #include <cstddef>
20 #include <cstdint>
21 #include <functional>
22 
23 class SkMatrix;
24 enum class SkRasterPipelineOp;
25 enum SkColorType : int;
26 struct SkImageInfo;
27 struct skcms_TransferFunction;
28 
// SK_HAS_MUSTTAIL is 1 when the [[clang::musttail]] attribute (which forces the compiler to emit
// a genuine tail call) is both available and believed safe on the current target, and 0 otherwise.
// The excluded targets either lack support or are known to miscompile/crash with it.
#if __has_cpp_attribute(clang::musttail) && !defined(__EMSCRIPTEN__) && !defined(SK_CPU_ARM32) && \
        !defined(SK_CPU_LOONGARCH) && !defined(SK_CPU_PPC) && \
        !(defined(_WIN32) && defined(SK_BUILD_FOR_ANDROID_FRAMEWORK))
    // [[clang::musttail]] is disabled for the Android version of Skia running on Windows as it
    // causes crashes (This is probably related to http://crbug.com/1505442).
    #define SK_HAS_MUSTTAIL 1
#else
    #define SK_HAS_MUSTTAIL 0
#endif
38 
/**
 * SkRasterPipeline provides a cheap way to chain together a pixel processing pipeline.
 *
 * It's particularly designed for situations where the potential pipeline is extremely
 * combinatoric: {N dst formats} x {M source formats} x {K mask formats} x {C transfer modes} ...
 * No one wants to write specialized routines for all those combinations, and if we did, we'd
 * end up bloating our code size dramatically.  SkRasterPipeline stages can be chained together
 * at runtime, so we can scale this problem linearly rather than combinatorially.
 *
 * Each stage is represented by a function conforming to a common interface and by an
 * arbitrary context pointer.  The stage function arguments and calling convention are
 * designed to maximize the amount of data we can pass along the pipeline cheaply, and
 * vary depending on CPU feature detection.
 */
53 
// Raster pipeline programs are stored as a contiguous array of SkRasterPipelineStages.
// The SK_BEGIN/END_REQUIRE_DENSE bracketing (SkMacros.h) presumably verifies a padding-free
// layout, since programs are serialized as tightly packed {fn, ctx} pairs — confirm in SkMacros.h.
SK_BEGIN_REQUIRE_DENSE
struct SkRasterPipelineStage {
    // `fn` holds a function pointer from `ops_lowp` or `ops_highp` in SkOpts.cpp. These functions
    // correspond to operations from the SkRasterPipelineOp enum in SkRasterPipelineOpList.h. The
    // exact function pointer type varies depending on architecture (specifically, look for `using
    // Stage =` in SkRasterPipeline_opts.h).
    void (*fn)();

    // `ctx` holds data used by the stage function.
    // Most context structures are declared in SkRasterPipelineOpContexts.h, and have names ending
    // in Ctx (e.g. "SkRasterPipeline_SamplerCtx"). Some Raster Pipeline stages pack non-pointer
    // data into this field using `SkRPCtxUtils::Pack`.
    void* ctx;
};
SK_END_REQUIRE_DENSE
70 
class SkRasterPipeline {
public:
    // Constructs an empty pipeline. Stage bookkeeping is allocated from the given arena,
    // which is retained by pointer and so must outlive this pipeline.
    explicit SkRasterPipeline(SkArenaAlloc*);

    // Move-only: pipelines reference arena-allocated state that should not be shared by copies.
    SkRasterPipeline(const SkRasterPipeline&) = delete;
    SkRasterPipeline(SkRasterPipeline&&)      = default;

    SkRasterPipeline& operator=(const SkRasterPipeline&) = delete;
    SkRasterPipeline& operator=(SkRasterPipeline&&)      = default;

    // Returns the pipeline to its empty state. (NOTE(review): exact semantics — e.g. whether
    // the arena itself is touched — live in SkRasterPipeline.cpp; confirm there.)
    void reset();

    // Appends a single stage, with an optional context pointer for it.
    void append(SkRasterPipelineOp, void* = nullptr);
    // Convenience overload accepting a const context; constness is cast away only to fit the
    // common void* storage slot.
    void append(SkRasterPipelineOp op, const void* ctx) { this->append(op,const_cast<void*>(ctx)); }
    // Overload for stages whose context is an integral value rather than a pointer.
    void append(SkRasterPipelineOp, uintptr_t ctx);

    // Append all stages to this pipeline.
    void extend(const SkRasterPipeline&);

    // Runs the pipeline in 2d from (x,y) inclusive to (x+w,y+h) exclusive.
    void run(size_t x, size_t y, size_t w, size_t h) const;

    // Allocates a thunk which amortizes run() setup cost in alloc.
    std::function<void(size_t, size_t, size_t, size_t)> compile() const;

    // Callers can inspect the stage list for debugging purposes.
    // Nodes are chained backwards through `prev` (see getStageList()/empty() below).
    struct StageList {
        StageList*          prev;
        SkRasterPipelineOp  stage;
        void*               ctx;
    };

    // Returns a human-readable name for `op`, for logging and dump().
    static const char* GetOpName(SkRasterPipelineOp op);
    // Head of the stage list (nullptr when the pipeline is empty).
    const StageList* getStageList() const { return fStages; }
    int getNumStages() const { return fNumStages; }

    // Prints the entire StageList using SkDebugf.
    void dump() const;

    // Appends a stage for the specified matrix.
    // Tries to optimize the stage by analyzing the type of matrix.
    void appendMatrix(SkArenaAlloc*, const SkMatrix&);

    // Appends a stage for a constant uniform color.
    // Tries to optimize the stage based on the color.
    void appendConstantColor(SkArenaAlloc*, const float rgba[4]);

    // Convenience overload forwarding an SkColor4f as its float[4] representation.
    void appendConstantColor(SkArenaAlloc* alloc, const SkColor4f& color) {
        this->appendConstantColor(alloc, color.vec());
    }

    // Like appendConstantColor() but only affecting r,g,b, ignoring the alpha channel.
    void appendSetRGB(SkArenaAlloc*, const float rgb[3]);

    // Convenience overload forwarding an SkColor4f (alpha channel is ignored, per above).
    void appendSetRGB(SkArenaAlloc* alloc, const SkColor4f& color) {
        this->appendSetRGB(alloc, color.vec());
    }

    // Append a load/load-dst/store stage appropriate for the given color type, with the
    // MemoryCtx describing the pixels it reads or writes.
    void appendLoad   (SkColorType, const SkRasterPipeline_MemoryCtx*);
    void appendLoadDst(SkColorType, const SkRasterPipeline_MemoryCtx*);
    void appendStore  (SkColorType, const SkRasterPipeline_MemoryCtx*);

    // Appends a clamp stage when the destination's format calls for it. (NOTE(review): the
    // "normalized" predicate is implemented in SkRasterPipeline.cpp — confirm details there.)
    void appendClampIfNormalized(const SkImageInfo&);

    // Appends the stage(s) applying the given transfer-function curve.
    void appendTransferFunction(const skcms_TransferFunction&);

    // Appends a stack-rewind stage (works together with fRewindCtx below).
    void appendStackRewind();

    bool empty() const { return fStages == nullptr; }

private:
    // Serialize the stage list into `ip`. The lowp builder's bool return presumably reports
    // whether every stage could be expressed in lowp — confirm in SkRasterPipeline.cpp.
    bool buildLowpPipeline(SkRasterPipelineStage* ip) const;
    void buildHighpPipeline(SkRasterPipelineStage* ip) const;

    using StartPipelineFn = void (*)(size_t, size_t, size_t, size_t,
                                     SkRasterPipelineStage* program,
                                     SkSpan<SkRasterPipeline_MemoryCtxPatch>,
                                     uint8_t*);
    StartPipelineFn buildPipeline(SkRasterPipelineStage*) const;

    // Appends a stage without the extra handling performed by the public append() overloads.
    void uncheckedAppend(SkRasterPipelineOp, void*);
    // Number of SkRasterPipelineStage slots needed to serialize this pipeline.
    int stagesNeeded() const;

    void addMemoryContext(SkRasterPipeline_MemoryCtx*, int bytesPerPixel, bool load, bool store);
    uint8_t* tailPointer();

    SkArenaAlloc*               fAlloc;         // backing arena; not owned
    SkRasterPipeline_RewindCtx* fRewindCtx;     // shared context for stack-rewind stages
    StageList*                  fStages;        // stage list head; nullptr when empty
    uint8_t*                    fTailPointer;   // see tailPointer()
    int                         fNumStages;

    // Only 1 in 2 million CPU-backend pipelines used more than two MemoryCtxs.
    // (See the comment in SkRasterPipelineOpContexts.h for how MemoryCtx patching works)
    skia_private::STArray<2, SkRasterPipeline_MemoryCtxInfo> fMemoryCtxInfos;
};
167 
// Convenience subclass of SkRasterPipeline that bundles its own `bytes`-byte
// SkSTArenaAlloc, so callers don't have to supply and manage a separate arena.
template <size_t bytes>
class SkRasterPipeline_ : public SkRasterPipeline {
public:
    SkRasterPipeline_()
        : SkRasterPipeline(&fBuiltinAlloc) {}

private:
    // Arena handed to the base class; it must be declared here so it outlives the base's
    // pointer to it for the lifetime of this object.
    SkSTArenaAlloc<bytes> fBuiltinAlloc;
};
177 
178 
179 #endif//SkRasterPipeline_DEFINED
180