/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkExecutor.h"
#include "include/gpu/ganesh/GrContextOptions.h"
#include "tools/flags/CommonFlagsGanesh.h"

DEFINE_int(gpuThreads,
           2,
           "Create this many extra threads to assist with GPU work, "
           "including software path rendering. Defaults to two.");

namespace CommonFlags {

static DEFINE_bool(cachePathMasks, true,
                   "Allows path mask textures to be cached in GPU configs.");
static DEFINE_bool(failFlushTimeCallbacks, false,
                   "Causes all flush-time callbacks to fail.");
static DEFINE_bool(allPathsVolatile, false,
                   "Causes all GPU paths to be processed as if 'setIsVolatile' had been called.");

static DEFINE_string(pr, "",
              "Set of enabled gpu path renderers. Defined as a list of: "
              "[~]none [~]dashline [~]aahairline [~]aaconvex [~]aalinearizing [~]small [~]tri "
              "[~]atlas [~]tess [~]default");

static DEFINE_int(internalSamples, -1,
        "Number of samples for internal draws that use MSAA, or default value if negative.");

static DEFINE_int(maxAtlasSize, -1,
        "Maximum width and height of internal texture atlases, or default value if negative.");

static DEFINE_bool(disableDriverCorrectnessWorkarounds, false,
                   "Disables all GPU driver correctness workarounds");

static DEFINE_bool(dontReduceOpsTaskSplitting, false,
                   "Don't reorder tasks to reduce render passes");

static DEFINE_int(gpuResourceCacheLimit, -1,
                  "Maximum number of bytes to use for budgeted GPU resources. "
                  "Default is -1, which means GrResourceCache::kDefaultMaxSize.");

static DEFINE_bool(allowMSAAOnNewIntel, false,
                   "Allows MSAA to be enabled on newer intel GPUs.");

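// Maps a single path-renderer name from --pr to its GpuPathRenderers bit.
// Unrecognized names abort so that typos fail loudly instead of being ignored.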
static GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
    if (!strcmp(name, "none")) {
        return GpuPathRenderers::kNone;
    } else if (!strcmp(name, "dashline")) {
        return GpuPathRenderers::kDashLine;
    } else if (!strcmp(name, "aahairline")) {
        return GpuPathRenderers::kAAHairline;
    } else if (!strcmp(name, "aaconvex")) {
        return GpuPathRenderers::kAAConvex;
    } else if (!strcmp(name, "aalinearizing")) {
        return GpuPathRenderers::kAALinearizing;
    } else if (!strcmp(name, "small")) {
        return GpuPathRenderers::kSmall;
    } else if (!strcmp(name, "tri")) {
        return GpuPathRenderers::kTriangulating;
    } else if (!strcmp(name, "atlas")) {
        return GpuPathRenderers::kAtlas;
    } else if (!strcmp(name, "tess")) {
        return GpuPathRenderers::kTessellation;
    } else if (!strcmp(name, "default")) {
        return GpuPathRenderers::kDefault;
    }
    SK_ABORT("error: unknown named path renderer \"%s\"\n", name);
}

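// Builds the GpuPathRenderers mask from the --pr flag. An empty flag selects the
// default set. If the first entry starts with '~', we begin with the default set
// and subtract each '~'-prefixed renderer; otherwise we begin with none and add
// each named renderer.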
static GpuPathRenderers collect_gpu_path_renderers_from_flags() {
    if (FLAGS_pr.isEmpty()) {
        return GpuPathRenderers::kDefault;
    }

    GpuPathRenderers gpuPathRenderers = ('~' == FLAGS_pr[0][0])
            ? GpuPathRenderers::kDefault
            : GpuPathRenderers::kNone;

    for (int i = 0; i < FLAGS_pr.size(); ++i) {
        const char* name = FLAGS_pr[i];
        if (name[0] == '~') {
            gpuPathRenderers &= ~get_named_pathrenderers_flags(&name[1]);
        } else {
            gpuPathRenderers |= get_named_pathrenderers_flags(name);
        }
    }
    return gpuPathRenderers;
}

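// Applies the command-line flags above to a GrContextOptions. The executor is a
// function-local static, so a single FIFO thread pool (sized by --gpuThreads, or
// no pool at all when the flag is 0) is shared by every context configured here.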
void SetCtxOptions(GrContextOptions* ctxOptions) {
    static std::unique_ptr<SkExecutor> gGpuExecutor = (0 != FLAGS_gpuThreads)
        ? SkExecutor::MakeFIFOThreadPool(FLAGS_gpuThreads)
        : nullptr;

    ctxOptions->fExecutor                            = gGpuExecutor.get();
    ctxOptions->fAllowPathMaskCaching                = FLAGS_cachePathMasks;
    ctxOptions->fFailFlushTimeCallbacks              = FLAGS_failFlushTimeCallbacks;
    ctxOptions->fAllPathsVolatile                    = FLAGS_allPathsVolatile;
    ctxOptions->fGpuPathRenderers                    = collect_gpu_path_renderers_from_flags();
    ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;
    ctxOptions->fResourceCacheLimitOverride          = FLAGS_gpuResourceCacheLimit;

    if (FLAGS_internalSamples >= 0) {
        ctxOptions->fInternalMultisampleCount = FLAGS_internalSamples;
    }
    if (FLAGS_maxAtlasSize >= 0) {
        ctxOptions->fMaxTextureAtlasSize = FLAGS_maxAtlasSize;
    }

    if (FLAGS_dontReduceOpsTaskSplitting) {
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kNo;
    } else {
        ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kYes;
    }
    ctxOptions->fAllowMSAAOnNewIntel = FLAGS_allowMSAAOnNewIntel;
}

}  // namespace CommonFlags