Adds a bitfield to GrContextOptions that masks out path renderers.
Adds command line flag support for setting this bitfield in the tools apps.
Removes GrGLInterfaceRemoveNVPR since we can now accomplish the same
thing in the context options.
BUG=skia:
Change-Id: Icf2a4df36374b3ba2f69ebf0db56e8aedd6cf65f
Reviewed-on: https://skia-review.googlesource.com/8786
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
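For reference, a minimal sketch of the new client-facing option (the GL setup mirrors the native-interface path used elsewhere in this change; choosing kNone is just an illustrative value that forces software masks):

    sk_sp<const GrGLInterface> glInterface(GrGLCreateNativeInterface());

    GrContextOptions options;
    // Mask out every GPU path renderer; path draws fall back to software masks.
    options.fGpuPathRenderers = GrContextOptions::GpuPathRenderers::kNone;

    sk_sp<GrContext> context(GrContext::Create(kOpenGL_GrBackend,
                                               (GrBackendContext)glInterface.get(),
                                               options));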
#include "SkCodec.h"
#include "SkCommonFlags.h"
#include "SkCommonFlagsConfig.h"
+#include "SkCommonFlagsPathRenderer.h"
#include "SkData.h"
#include "SkGraphics.h"
#include "SkLeanWindows.h"
DEFINE_string(benchType, "",
"Apply usual --match rules to bench type: micro, recording, piping, playback, skcodec, etc.");
+#if SK_SUPPORT_GPU
+DEFINE_pathrenderer_flag;
+#endif
+
static double now_ms() { return SkTime::GetNSecs() * 1e-6; }
static SkString humanize(double ms) {
#if SK_SUPPORT_GPU
GrContextOptions grContextOpts;
+ grContextOpts.fGpuPathRenderers = CollectGpuPathRenderersFromFlags();
gGrFactory.reset(new GrContextFactory(grContextOpts));
#endif
#include "SkColorSpace.h"
#include "SkCommonFlags.h"
#include "SkCommonFlagsConfig.h"
+#include "SkCommonFlagsPathRenderer.h"
#include "SkData.h"
#include "SkFontMgr.h"
#include "SkGraphics.h"
DEFINE_string(mskps, "", "Directory to read mskps from, or a single mskp file.");
+#if SK_SUPPORT_GPU
+DEFINE_pathrenderer_flag;
+#endif
+
using namespace DM;
using sk_gpu_test::GrContextFactory;
using sk_gpu_test::GLTestContext;
#endif
}
-static Sink* create_sink(const SkCommandLineConfig* config) {
+static Sink* create_sink(const GrContextOptions& grCtxOptions, const SkCommandLineConfig* config) {
#if SK_SUPPORT_GPU
if (gpu_supported()) {
if (const SkCommandLineConfigGpu* gpuConfig = config->asConfigGpu()) {
GrContextFactory::ContextType contextType = gpuConfig->getContextType();
GrContextFactory::ContextOverrides contextOverrides = gpuConfig->getContextOverrides();
- GrContextFactory testFactory;
+ GrContextFactory testFactory(grCtxOptions);
if (!testFactory.get(contextType, contextOverrides)) {
info("WARNING: can not create GPU context for config '%s'. "
"GM tests will be skipped.\n", gpuConfig->getTag().c_str());
return nullptr;
}
-static bool gather_sinks() {
+static bool gather_sinks(const GrContextOptions& grCtxOptions) {
SkCommandLineConfigArray configs;
ParseConfigs(FLAGS_config, &configs);
for (int i = 0; i < configs.count(); i++) {
const SkCommandLineConfig& config = *configs[i];
- Sink* sink = create_sink(&config);
+ Sink* sink = create_sink(grCtxOptions, &config);
if (sink == nullptr) {
info("Skipping config %s: Don't understand '%s'.\n", config.getTag().c_str(),
config.getTag().c_str());
}
}
-static void run_test(skiatest::Test test) {
+static void run_test(skiatest::Test test, const GrContextOptions& grCtxOptions) {
struct : public skiatest::Reporter {
void reportFailed(const skiatest::Failure& failure) override {
fail(failure.toString());
if (!FLAGS_dryRun && !is_blacklisted("_", "tests", "_", test.name)) {
start("unit", "test", "", test.name);
- GrContextFactory factory;
+ GrContextFactory factory(grCtxOptions);
test.proc(&reporter, &factory);
}
done("unit", "test", "", test.name);
gVLog = fopen(SkOSPath::Join(FLAGS_writePath[0], "verbose.log").c_str(), "w");
}
+ GrContextOptions grCtxOptions;
+#if SK_SUPPORT_GPU
+ grCtxOptions.fGpuPathRenderers = CollectGpuPathRenderersFromFlags();
+#endif
+
JsonWriter::DumpJson(); // It's handy for the bots to assume this is ~never missing.
SkAutoGraphics ag;
SkTaskGroup::Enabler enabled(FLAGS_threads);
if (!gather_srcs()) {
return 1;
}
- if (!gather_sinks()) {
+ if (!gather_sinks(grCtxOptions)) {
return 1;
}
gather_tests();
}
}
for (auto test : gParallelTests) {
- parallel.add([test] { run_test(test); });
+ parallel.add([test, grCtxOptions] { run_test(test, grCtxOptions); });
}
// With the parallel work running, run serial tasks and tests here on main thread.
for (auto task : serial) { Task::Run(task); }
- for (auto test : gSerialTests) { run_test(test); }
+ for (auto test : gSerialTests) { run_test(test, grCtxOptions); }
// Wait for any remaining parallel work to complete (including any spun off of serial tasks).
parallel.wait();
// setup GrContext
sk_sp<const GrGLInterface> interface(GrGLCreateNativeInterface());
- // To use NVPR, comment this out
- interface.reset(GrGLInterfaceRemoveNVPR(interface));
- SkASSERT(interface);
-
// setup contexts
sk_sp<GrContext> grContext(GrContext::Create(kOpenGL_GrBackend,
(GrBackendContext)interface.get()));
}
void modifyGrContextOptions(GrContextOptions* options) override {
- options->fForceSWPathMasks = true;
+ options->fGpuPathRenderers = GrContextOptions::GpuPathRenderers::kNone;
options->fAllowPathMaskCaching = true;
}
#define GrContextOptions_DEFINED
#include "SkTypes.h"
+#include "GrTypes.h"
struct GrContextOptions {
GrContextOptions() {}
Instanced rendering is still experimental at this point and disabled by default. */
bool fEnableInstancedRendering = false;
- /** Disables distance field rendering for paths. Distance field computation can be expensive
- and yields no benefit if a path is not rendered multiple times with different transforms */
- bool fDisableDistanceFieldPaths = false;
-
/**
* If true this allows path mask textures to be cached. This is only really useful if paths
* are commonly rendered at the same scale and fractional translation.
bool fAllowPathMaskCaching = false;
/**
- * Force all path draws to go through through the sw-rasterize-to-texture code path (assuming
- * the path is not recognized as a simpler shape (e.g. a rrect). This is intended for testing
- * purposes.
- */
- bool fForceSWPathMasks = false;
-
- /**
* If true, sRGB support will not be enabled unless sRGB decoding can be disabled (via an
* extension). If mixed use of "legacy" mode and sRGB/color-correct mode is not required, this
* can be set to false, which will significantly expand the number of devices that qualify for
* textures from codec-backed images.
*/
bool fDisableGpuYUVConversion = false;
+
+ /**
+ * If true, the caps will never report driver support for path rendering.
+ */
+ bool fSuppressPathRendering = false;
+
+ /**
+ * Allows the client to include or exclude specific GPU path renderers.
+ */
+ enum class GpuPathRenderers {
+ kNone = 0, // Always use software masks.
+ kDashLine = 1 << 0,
+ kStencilAndCover = 1 << 1,
+ kMSAA = 1 << 2,
+ kAAHairline = 1 << 3,
+ kAAConvex = 1 << 4,
+ kAALinearizing = 1 << 5,
+ kPLS = 1 << 6,
+ kDistanceField = 1 << 7,
+ kTesselating = 1 << 8,
+ kDefault = 1 << 9,
+
+ kAll = kDefault | (kDefault - 1)
+ };
+
+ GpuPathRenderers fGpuPathRenderers = GpuPathRenderers::kAll;
};
+GR_MAKE_BITFIELD_CLASS_OPS(GrContextOptions::GpuPathRenderers)
+
#endif
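A quick note on the values above: kDefault is the highest single renderer bit, so kAll = kDefault | (kDefault - 1) sets every bit from kDashLine through kDefault. A small sketch of that arithmetic (assuming GrContextOptions.h is included; the hex literal just spells out the math):

    // kDefault == 1 << 9 == 0x200, so
    // kAll == 0x200 | 0x1ff == 0x3ff, i.e. bits 0..9 all set.
    static_assert(static_cast<int>(GrContextOptions::GpuPathRenderers::kAll) == 0x3ff, "");

The GR_MAKE_BITFIELD_CLASS_OPS line is what provides the bitwise operators (|=, &=, ~, &) that the path renderer chain and the flag parser rely on for this enum class.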
*/
const SK_API GrGLInterface* GrGLCreateNullInterface(bool enableNVPR = false);
-/** Function that returns a new interface identical to "interface" but without support for
- GL_NV_path_rendering. */
-const GrGLInterface* GrGLInterfaceRemoveNVPR(const GrGLInterface*);
-
/** Function that returns a new interface identical to "interface" but with support for
test version of GL_EXT_debug_marker. */
const GrGLInterface* GrGLInterfaceAddTestDebugMarker(const GrGLInterface*,
#include "SkCanvas.h"
#include "SkColorSpace_XYZ.h"
#include "SkCommandLineFlags.h"
+#include "SkCommonFlagsPathRenderer.h"
#include "SkData.h"
#include "SkDocument.h"
#include "SkGraphics.h"
#endif
}
- void setUpBackend(SampleWindow* win, int msaaSampleCount, bool deepColor) override {
+ void setUpBackend(SampleWindow* win, const BackendOptions& backendOptions) override {
SkASSERT(kNone_BackEndType == fBackend);
fBackend = kNone_BackEndType;
break;
}
AttachmentInfo attachmentInfo;
- bool result = win->attach(fBackend, msaaSampleCount, deepColor, &attachmentInfo);
+ bool result = win->attach(fBackend, backendOptions.fMSAASampleCount,
+ backendOptions.fDeepColor, &attachmentInfo);
if (!result) {
SkDebugf("Failed to initialize GL");
return;
}
- fMSAASampleCount = msaaSampleCount;
- fDeepColor = deepColor;
+ fMSAASampleCount = backendOptions.fMSAASampleCount;
+ fDeepColor = backendOptions.fDeepColor;
// Assume that we have at least 24-bit output, for backends that don't supply this data
fActualColorBits = SkTMax(attachmentInfo.fColorBits, 24);
SkASSERT(nullptr == fCurIntf);
- sk_sp<const GrGLInterface> glInterface;
switch (win->getDeviceType()) {
case kRaster_DeviceType: // fallthrough
case kGPU_DeviceType:
// all these guys use the native interface
- glInterface.reset(GrGLCreateNativeInterface());
+ fCurIntf = GrGLCreateNativeInterface();
break;
#if SK_ANGLE
case kANGLE_DeviceType:
- glInterface.reset(sk_gpu_test::CreateANGLEGLInterface());
+ fCurIntf = sk_gpu_test::CreateANGLEGLInterface();
break;
#endif // SK_ANGLE
default:
break;
}
- // Currently SampleApp does not use NVPR. TODO: Provide an NVPR device type that is skipped
- // when the driver doesn't support NVPR.
- fCurIntf = GrGLInterfaceRemoveNVPR(glInterface.get());
-
SkASSERT(nullptr == fCurContext);
- fCurContext = GrContext::Create(kOpenGL_GrBackend, (GrBackendContext) fCurIntf);
+ fCurContext = GrContext::Create(kOpenGL_GrBackend, (GrBackendContext) fCurIntf,
+ backendOptions.fGrContextOptions);
if (nullptr == fCurContext || nullptr == fCurIntf) {
// We need some context and interface to see results
}
DEFINE_string(slide, "", "Start on this sample.");
-DEFINE_int32(msaa, 0, "Request multisampling with this count.");
-DEFINE_bool(deepColor, false, "Request deep color (10-bit/channel or more) display buffer.");
DEFINE_string(pictureDir, "", "Read pictures from here.");
DEFINE_string(picture, "", "Path to single picture.");
DEFINE_string(svg, "", "Path to single SVG file.");
#ifdef SAMPLE_PDF_FILE_VIEWER
DEFINE_string(pdfPath, "", "Path to directory of pdf files.");
#endif
+#if SK_SUPPORT_GPU
+DEFINE_pathrenderer_flag;
+DEFINE_int32(msaa, 0, "Request multisampling with this count.");
+DEFINE_bool(deepColor, false, "Request deep color (10-bit/channel or more) display buffer.");
+#endif
#include "SkTaskGroup.h"
}
}
- fMSAASampleCount = FLAGS_msaa;
- fDeepColor = FLAGS_deepColor;
+#if SK_SUPPORT_GPU
+ fBackendOptions.fGrContextOptions.fGpuPathRenderers = CollectGpuPathRenderersFromFlags();
+ fBackendOptions.fMSAASampleCount = FLAGS_msaa;
+ fBackendOptions.fDeepColor = FLAGS_deepColor;
+#endif
fColorConfigIndex = 0;
if (FLAGS_list) {
devManager->ref();
fDevManager = devManager;
}
- fDevManager->setUpBackend(this, fMSAASampleCount, fDeepColor);
+ fDevManager->setUpBackend(this, fBackendOptions);
// If another constructor set our dimensions, ensure that our
// onSizeChange gets called.
fDevManager->tearDownBackend(this);
fDeviceType = type;
- fDevManager->setUpBackend(this, fMSAASampleCount, fDeepColor);
+ fDevManager->setUpBackend(this, fBackendOptions);
this->updateTitle();
this->inval(nullptr);
this->setColorType(ct, std::move(cs));
fDevManager->tearDownBackend(this);
- fDevManager->setUpBackend(this, fMSAASampleCount, fDeepColor);
+ fDevManager->setUpBackend(this, fBackendOptions);
this->updateTitle();
this->inval(nullptr);
void SampleWindow::toggleDistanceFieldFonts() {
// reset backend
fDevManager->tearDownBackend(this);
- fDevManager->setUpBackend(this, fMSAASampleCount, fDeepColor);
+ fDevManager->setUpBackend(this, fBackendOptions);
SkSurfaceProps props = this->getSurfaceProps();
uint32_t flags = props.flags() ^ SkSurfaceProps::kUseDeviceIndependentFonts_Flag;
void SampleWindow::setPixelGeometry(int pixelGeometryIndex) {
// reset backend
fDevManager->tearDownBackend(this);
- fDevManager->setUpBackend(this, fMSAASampleCount, fDeepColor);
+ fDevManager->setUpBackend(this, fBackendOptions);
const SkSurfaceProps& oldProps = this->getSurfaceProps();
SkSurfaceProps newProps(oldProps.flags(), SkSurfaceProps::kLegacyFontHost_InitType);
#include "SkPipe.h"
+#if SK_SUPPORT_GPU
+#include "GrContextOptions.h"
+#endif
+
class GrContext;
class GrRenderTarget;
*/
class DeviceManager : public SkRefCnt {
public:
+ struct BackendOptions {
+#if SK_SUPPORT_GPU
+ GrContextOptions fGrContextOptions;
+ int fMSAASampleCount;
+ bool fDeepColor;
+#endif
+ };
-
- virtual void setUpBackend(SampleWindow* win, int msaaSampleCount, bool deepColor) = 0;
+ virtual void setUpBackend(SampleWindow* win, const BackendOptions&) = 0;
virtual void tearDownBackend(SampleWindow* win) = 0;
int fFilterQualityIndex;
unsigned fFlipAxis;
- int fMSAASampleCount;
- bool fDeepColor;
+ DeviceManager::BackendOptions fBackendOptions;
+
int fColorConfigIndex;
SkScalar fZoomCenterX, fZoomCenterY;
rtOpListOptions.fMaxOpCombineLookback = options.fMaxOpCombineLookback;
rtOpListOptions.fMaxOpCombineLookahead = options.fMaxOpCombineLookahead;
GrPathRendererChain::Options prcOptions;
- prcOptions.fDisableDistanceFieldRenderer = options.fDisableDistanceFieldPaths;
prcOptions.fAllowPathMaskCaching = options.fAllowPathMaskCaching;
- prcOptions.fDisableAllPathRenderers = options.fForceSWPathMasks;
+ prcOptions.fGpuPathRenderers = options.fGpuPathRenderers;
fDrawingManager.reset(new GrDrawingManager(this, rtOpListOptions, prcOptions,
options.fImmediateMode, &fSingleOwner));
#include "ops/GrTessellatingPathRenderer.h"
GrPathRendererChain::GrPathRendererChain(GrContext* context, const Options& options) {
- if (!options.fDisableAllPathRenderers) {
- const GrCaps& caps = *context->caps();
- this->addPathRenderer(new GrDashLinePathRenderer)->unref();
-
- if (GrPathRenderer* pr = GrStencilAndCoverPathRenderer::Create(context->resourceProvider(),
- caps)) {
- this->addPathRenderer(pr)->unref();
+ using GpuPathRenderers = GrContextOptions::GpuPathRenderers;
+ const GrCaps& caps = *context->caps();
+ if (options.fGpuPathRenderers & GpuPathRenderers::kDashLine) {
+ fChain.push_back(sk_make_sp<GrDashLinePathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kStencilAndCover) {
+ sk_sp<GrPathRenderer> pr(
+ GrStencilAndCoverPathRenderer::Create(context->resourceProvider(), caps));
+ if (pr) {
+ fChain.push_back(std::move(pr));
}
- #ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ }
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ if (options.fGpuPathRenderers & GpuPathRenderers::kMSAA) {
if (caps.sampleShadingSupport()) {
- this->addPathRenderer(new GrMSAAPathRenderer)->unref();
+ fChain.push_back(sk_make_sp<GrMSAAPathRenderer>());
}
- #endif
- this->addPathRenderer(new GrAAHairLinePathRenderer)->unref();
- this->addPathRenderer(new GrAAConvexPathRenderer)->unref();
- this->addPathRenderer(new GrAALinearizingConvexPathRenderer)->unref();
+ }
+#endif
+ if (options.fGpuPathRenderers & GpuPathRenderers::kAAHairline) {
+ fChain.push_back(sk_make_sp<GrAAHairLinePathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kAAConvex) {
+ fChain.push_back(sk_make_sp<GrAAConvexPathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kAALinearizing) {
+ fChain.push_back(sk_make_sp<GrAALinearizingConvexPathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kPLS) {
if (caps.shaderCaps()->plsPathRenderingSupport()) {
- this->addPathRenderer(new GrPLSPathRenderer)->unref();
- }
- if (!options.fDisableDistanceFieldRenderer) {
- this->addPathRenderer(new GrAADistanceFieldPathRenderer)->unref();
+ fChain.push_back(sk_make_sp<GrPLSPathRenderer>());
}
- this->addPathRenderer(new GrTessellatingPathRenderer)->unref();
- this->addPathRenderer(new GrDefaultPathRenderer(caps.twoSidedStencilSupport(),
- caps.stencilWrapOpsSupport()))->unref();
}
-}
-
-GrPathRendererChain::~GrPathRendererChain() {
- for (int i = 0; i < fChain.count(); ++i) {
- fChain[i]->unref();
+ if (options.fGpuPathRenderers & GpuPathRenderers::kDistanceField) {
+ fChain.push_back(sk_make_sp<GrAADistanceFieldPathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kTesselating) {
+ fChain.push_back(sk_make_sp<GrTessellatingPathRenderer>());
+ }
+ if (options.fGpuPathRenderers & GpuPathRenderers::kDefault) {
+ fChain.push_back(sk_make_sp<GrDefaultPathRenderer>(caps.twoSidedStencilSupport(),
+ caps.stencilWrapOpsSupport()));
}
-}
-
-GrPathRenderer* GrPathRendererChain::addPathRenderer(GrPathRenderer* pr) {
- fChain.push_back() = pr;
- pr->ref();
- return pr;
}
GrPathRenderer* GrPathRendererChain::getPathRenderer(
*stencilSupport = support;
}
}
- return fChain[i];
+ return fChain[i].get();
}
}
return nullptr;
#include "GrPathRenderer.h"
+#include "GrContextOptions.h"
#include "SkTypes.h"
#include "SkTArray.h"
class GrPathRendererChain : public SkNoncopyable {
public:
struct Options {
- bool fDisableDistanceFieldRenderer = false;
+ using GpuPathRenderers = GrContextOptions::GpuPathRenderers;
bool fAllowPathMaskCaching = false;
- bool fDisableAllPathRenderers = false;
+ GpuPathRenderers fGpuPathRenderers = GpuPathRenderers::kAll;
};
GrPathRendererChain(GrContext* context, const Options&);
- ~GrPathRendererChain();
-
/** Documents how the caller plans to use a GrPathRenderer to draw a path. It affects the PR
returned by getPathRenderer */
enum class DrawType {
GrPathRenderer::StencilSupport* stencilSupport);
private:
- // takes a ref and unrefs in destructor
- GrPathRenderer* addPathRenderer(GrPathRenderer* pr);
-
enum {
kPreAllocCount = 8,
};
- SkSTArray<kPreAllocCount, GrPathRenderer*, true> fChain;
+ SkSTArray<kPreAllocCount, sk_sp<GrPathRenderer>> fChain;
};
#endif
this->initGLSL(ctxInfo);
GrShaderCaps* shaderCaps = fShaderCaps.get();
- shaderCaps->fPathRenderingSupport = this->hasPathRenderingSupport(ctxInfo, gli);
+ if (!contextOptions.fSuppressPathRendering) {
+ shaderCaps->fPathRenderingSupport = this->hasPathRenderingSupport(ctxInfo, gli);
+ }
// For now these two are equivalent but we could have dst read in shader via some other method.
// Before setting this, initGLSL() must have been called.
return newInterface;
}
-const GrGLInterface* GrGLInterfaceRemoveNVPR(const GrGLInterface* interface) {
- GrGLInterface* newInterface = GrGLInterface::NewClone(interface);
-
- newInterface->fExtensions.remove("GL_NV_path_rendering");
- newInterface->fExtensions.remove("GL_CHROMIUM_path_rendering");
- newInterface->fFunctions.fMatrixLoadf = nullptr;
- newInterface->fFunctions.fMatrixLoadIdentity = nullptr;
- newInterface->fFunctions.fPathCommands = nullptr;
- newInterface->fFunctions.fPathParameteri = nullptr;
- newInterface->fFunctions.fPathParameterf = nullptr;
- newInterface->fFunctions.fGenPaths = nullptr;
- newInterface->fFunctions.fDeletePaths = nullptr;
- newInterface->fFunctions.fIsPath = nullptr;
- newInterface->fFunctions.fPathStencilFunc = nullptr;
- newInterface->fFunctions.fStencilFillPath = nullptr;
- newInterface->fFunctions.fStencilStrokePath = nullptr;
- newInterface->fFunctions.fStencilFillPathInstanced = nullptr;
- newInterface->fFunctions.fStencilStrokePathInstanced = nullptr;
- newInterface->fFunctions.fCoverFillPath = nullptr;
- newInterface->fFunctions.fCoverStrokePath = nullptr;
- newInterface->fFunctions.fCoverFillPathInstanced = nullptr;
- newInterface->fFunctions.fCoverStrokePathInstanced = nullptr;
- newInterface->fFunctions.fStencilThenCoverFillPath = nullptr;
- newInterface->fFunctions.fStencilThenCoverStrokePath = nullptr;
- newInterface->fFunctions.fStencilThenCoverFillPathInstanced = nullptr;
- newInterface->fFunctions.fStencilThenCoverStrokePathInstanced = nullptr;
- newInterface->fFunctions.fProgramPathFragmentInputGen = nullptr;
- newInterface->fFunctions.fBindFragmentInputLocation = nullptr;
- return newInterface;
-}
-
GrGLInterface::GrGLInterface() {
fStandard = kNone_GrGLStandard;
}
--- /dev/null
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SK_COMMON_FLAGS_PATH_RENDERER_H
+#define SK_COMMON_FLAGS_PATH_RENDERER_H
+
+#if SK_SUPPORT_GPU
+
+#include "GrContextFactory.h"
+#include "SkCommandLineFlags.h"
+#include "SkTypes.h"
+
+DECLARE_string(pr);
+
+#define DEFINE_pathrenderer_flag \
+ DEFINE_string(pr, "all", \
+ "Set of enabled gpu path renderers. Defined as a list of: " \
+ "[[~]all [~]dashline [~]nvpr [~]msaa [~]aahairline [~]aaconvex " \
+ "[~]aalinearizing [~]pls [~]sdf [~]tess [~]grdefault]")
+
+inline GrContextOptions::GpuPathRenderers get_named_pathrenderers_flags(const char* name) {
+ using GpuPathRenderers = GrContextOptions::GpuPathRenderers;
+ if (!strcmp(name, "all")) {
+ return GpuPathRenderers::kAll;
+ } else if (!strcmp(name, "dashline")) {
+ return GpuPathRenderers::kDashLine;
+ } else if (!strcmp(name, "nvpr")) {
+ return GpuPathRenderers::kStencilAndCover;
+ } else if (!strcmp(name, "msaa")) {
+ return GpuPathRenderers::kMSAA;
+ } else if (!strcmp(name, "aahairline")) {
+ return GpuPathRenderers::kAAHairline;
+ } else if (!strcmp(name, "aaconvex")) {
+ return GpuPathRenderers::kAAConvex;
+ } else if (!strcmp(name, "aalinearizing")) {
+ return GpuPathRenderers::kAALinearizing;
+ } else if (!strcmp(name, "pls")) {
+ return GpuPathRenderers::kPLS;
+ } else if (!strcmp(name, "sdf")) {
+ return GpuPathRenderers::kDistanceField;
+ } else if (!strcmp(name, "tess")) {
+ return GpuPathRenderers::kTesselating;
+ } else if (!strcmp(name, "grdefault")) {
+ return GpuPathRenderers::kDefault;
+ }
+ SK_ABORT(SkStringPrintf("error: unknown named path renderer \"%s\"\n", name).c_str());
+ return GpuPathRenderers::kNone;
+}
+
+inline GrContextOptions::GpuPathRenderers CollectGpuPathRenderersFromFlags() {
+ using GpuPathRenderers = GrContextOptions::GpuPathRenderers;
+ if (FLAGS_pr.isEmpty()) {
+ return GpuPathRenderers::kAll;
+ }
+ GpuPathRenderers gpuPathRenderers = '~' == FLAGS_pr[0][0] ?
+ GpuPathRenderers::kAll : GpuPathRenderers::kNone;
+ for (int i = 0; i < FLAGS_pr.count(); ++i) {
+ const char* name = FLAGS_pr[i];
+ if (name[0] == '~') {
+ gpuPathRenderers &= ~get_named_pathrenderers_flags(&name[1]);
+ } else {
+ gpuPathRenderers |= get_named_pathrenderers_flags(name);
+ }
+ }
+ return gpuPathRenderers;
+}
+
+#endif // SK_SUPPORT_GPU
+
+#endif
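To illustrate how CollectGpuPathRenderersFromFlags() interprets its list (the flag values below are hypothetical examples, not defaults): if the first token starts with '~', the mask starts at kAll and each "~name" clears a bit; otherwise it starts at kNone and each name sets a bit.

    // "--pr ~nvpr ~msaa" is equivalent to:
    GrContextOptions::GpuPathRenderers mask = GrContextOptions::GpuPathRenderers::kAll;
    mask &= ~GrContextOptions::GpuPathRenderers::kStencilAndCover;  // "~nvpr"
    mask &= ~GrContextOptions::GpuPathRenderers::kMSAA;             // "~msaa"

    // "--pr dashline tess" is equivalent to:
    GrContextOptions::GpuPathRenderers mask2 = GrContextOptions::GpuPathRenderers::kNone;
    mask2 |= GrContextOptions::GpuPathRenderers::kDashLine;         // "dashline"
    mask2 |= GrContextOptions::GpuPathRenderers::kTesselating;      // "tess"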
}
testCtx.reset(glCtx);
glInterface.reset(SkRef(glCtx->gl()));
- if (ContextOverrides::kDisableNVPR & overrides) {
- glInterface.reset(GrGLInterfaceRemoveNVPR(glInterface.get()));
- if (!glInterface) {
- return ContextInfo();
- }
- }
backendContext = reinterpret_cast<GrBackendContext>(glInterface.get());
break;
}
testCtx->makeCurrent();
SkASSERT(testCtx && testCtx->backend() == backend);
GrContextOptions grOptions = fGlobalOptions;
+ if (ContextOverrides::kDisableNVPR & overrides) {
+ grOptions.fSuppressPathRendering = true;
+ }
if (ContextOverrides::kUseInstanced & overrides) {
grOptions.fEnableInstancedRendering = true;
}
#include "GpuTimer.h"
#include "GrContextFactory.h"
#include "SkCanvas.h"
+#include "SkCommonFlagsPathRenderer.h"
#include "SkOSFile.h"
#include "SkOSPath.h"
#include "SkPerlinNoiseShader.h"
DEFINE_string(png, "", "if set, save a .png proof to disk at this file location");
DEFINE_int32(verbosity, 4, "level of verbosity (0=none to 5=debug)");
DEFINE_bool(suppressHeader, false, "don't print a header row before the results");
+DEFINE_pathrenderer_flag;
static const char* header =
" accum median max min stddev samples sample_ms clock metric config bench";
}
// Create a context.
- sk_gpu_test::GrContextFactory factory;
+ GrContextOptions ctxOptions;
+ ctxOptions.fGpuPathRenderers = CollectGpuPathRenderersFromFlags();
+ sk_gpu_test::GrContextFactory factory(ctxOptions);
sk_gpu_test::ContextInfo ctxInfo =
factory.getContextInfo(config->getContextType(), config->getContextOverrides());
GrContext* ctx = ctxInfo.grContext();
#include "SkATrace.h"
#include "SkCanvas.h"
#include "SkCommandLineFlags.h"
+#include "SkCommonFlagsPathRenderer.h"
#include "SkDashPathEffect.h"
#include "SkGraphics.h"
#include "SkImagePriv.h"
static DEFINE_bool(atrace, false, "Enable support for using ATrace. ATrace is only supported on Android.");
+DEFINE_pathrenderer_flag;
+
const char *kBackendTypeStrings[sk_app::Window::kBackendTypeCount] = {
" [OpenGL]",
#ifdef SK_VULKAN
*/
#include "GrContext.h"
+#include "SkCommonFlagsPathRenderer.h"
#include "SkSurface.h"
#include "GLWindowContext.h"
void GLWindowContext::initializeContext() {
this->onInitializeContext();
- sk_sp<const GrGLInterface> glInterface;
- glInterface.reset(GrGLCreateNativeInterface());
- fBackendContext.reset(GrGLInterfaceRemoveNVPR(glInterface.get()));
-
SkASSERT(nullptr == fContext);
- fContext = GrContext::Create(kOpenGL_GrBackend, (GrBackendContext)fBackendContext.get());
+
+ GrContextOptions ctxOptions;
+ ctxOptions.fGpuPathRenderers = CollectGpuPathRenderersFromFlags();
+ fBackendContext.reset(GrGLCreateNativeInterface());
+ fContext = GrContext::Create(kOpenGL_GrBackend, (GrBackendContext)fBackendContext.get(),
+ ctxOptions);
// We may not have real sRGB support (ANGLE, in particular), so check for
// that, and fall back to L32:
#include "GrContext.h"
#include "GrRenderTarget.h"
+#include "SkCommonFlagsPathRenderer.h"
#include "SkAutoMalloc.h"
#include "SkSurface.h"
#include "VulkanWindowContext.h"
GET_DEV_PROC(AcquireNextImageKHR);
GET_DEV_PROC(QueuePresentKHR);
- fContext = GrContext::Create(kVulkan_GrBackend, (GrBackendContext) fBackendContext.get());
+ GrContextOptions ctxOptions;
+ ctxOptions.fGpuPathRenderers = CollectGpuPathRenderersFromFlags();
+ fContext = GrContext::Create(kVulkan_GrBackend, (GrBackendContext) fBackendContext.get(),
+ ctxOptions);
fSurface = createVkSurface(instance);
if (VK_NULL_HANDLE == fSurface) {