2 * Copyright 2013 Google Inc.
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
8 #include "CrashHandler.h"
9 #include "DMJsonWriter.h"
10 #include "DMSrcSink.h"
11 #include "DMSrcSinkAndroid.h"
12 #include "OverwriteLine.h"
13 #include "ProcStats.h"
14 #include "SkBBHFactory.h"
15 #include "SkChecksum.h"
16 #include "SkCommonFlags.h"
17 #include "SkForceLinking.h"
18 #include "SkGraphics.h"
19 #include "SkInstCnt.h"
23 #include "SkTaskGroup.h"
24 #include "SkThreadUtils.h"
// ---- Command-line flags -----------------------------------------------------
// NOTE(review): every line in this listing carries a stray leading integer (the
// original file's line numbers, fused in by an extraction step), and the gaps in
// that numbering show that many lines are missing.  Code is annotated as-is;
// the stray numbers must be stripped before this can compile.
28 DEFINE_string(src, "tests gm skp image subset codec scanline", "Source types to test.");
// When set, outputs are content-addressed by MD5 instead of being nested under
// config/sourceType/name (consumed by Task::WriteToDisk below).
29 DEFINE_bool(nameByHash, false,
30 "If true, write to FLAGS_writePath[0]/<hash>.png instead of "
31 "to FLAGS_writePath[0]/<config>/<sourceType>/<name>.png");
32 DEFINE_bool2(pathOpsExtended, x, false, "Run extended pathOps tests.");
// Four scalars; parsed in create_via() into scaleX / skewX / skewY / scaleY.
33 DEFINE_string(matrix, "1 0 0 1",
34 "2x2 scale+skew matrix to apply or upright when using "
35 "'matrix' or 'upright' in config.");
36 DEFINE_bool(gpu_threading, false, "Allow GPU work to run on multiple threads?");
// Triples are matched in is_blacklisted(); '_' is a wildcard.
38 DEFINE_string(blacklist, "",
39 "Space-separated config/src/name triples to blacklist. '_' matches anything. E.g. \n"
40 "'--blacklist gpu skp _' will blacklist all SKPs drawn into the gpu config.\n"
41 "'--blacklist gpu skp _ 8888 gm aarects' will also blacklist the aarects GM on 8888.");
43 DEFINE_string2(readPath, r, "", "If set check for equality with golden results in this directory.");
// Force the linker to retain all registered image decoders.
45 __SK_FORCE_IMAGE_DECODER_LINKING;
48 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
// Accumulated failure messages, shared across worker threads.
50 SK_DECLARE_STATIC_MUTEX(gFailuresMutex);
51 static SkTArray<SkString> gFailures;  // Guarded by gFailuresMutex.
// Print a failure immediately and record it for the end-of-run summary.
// Safe to call from any thread (takes gFailuresMutex).
// (Closing brace not visible in this truncated listing.)
53 static void fail(ImplicitString err) {
54 SkAutoMutexAcquire lock(gFailuresMutex);
55 SkDebugf("\n\nFAILURE: %s\n\n", err.c_str());
56 gFailures.push_back(err);
// Shared progress state: gPending counts queued+running tasks (decremented
// atomically in done()); gRunning holds human-readable ids of in-flight tasks.
59 static int32_t gPending = 0; // Atomic. Total number of running and queued tasks.
61 SK_DECLARE_STATIC_MUTEX(gRunningMutex);
62 static SkTArray<SkString> gRunning;  // Guarded by gRunningMutex.
// Called as each task finishes: removes the task's "config src name" id from
// gRunning, decrements gPending, prints a one-line progress report, and
// periodically checkpoints dm.json so a crash doesn't lose all results.
// (Several interior lines, including closing braces and some SkDebugf
// arguments, are missing from this truncated listing.)
64 static void done(double ms,
65 ImplicitString config, ImplicitString src, ImplicitString name,
66 ImplicitString note, ImplicitString log) {
67 SkString id = SkStringPrintf("%s %s %s", config.c_str(), src.c_str(), name.c_str());
69 SkAutoMutexAcquire lock(gRunningMutex);
70 for (int i = 0; i < gRunning.count(); i++) {
71 if (gRunning[i] == id) {
// Order of gRunning doesn't matter, so removeShuffle (swap-with-last) is fine.
72 gRunning.removeShuffle(i);
// sk_atomic_dec returns the pre-decrement value; pending is the new count.
83 auto pending = sk_atomic_dec(&gPending)-1;
84 SkDebugf("%s(%4dMB %5d) %s\t%s%s%s", FLAGS_verbose ? "\n" : kSkOverwriteLine
85 , sk_tools::getBestResidentSetSizeMB()
87 , HumanizeMs(ms).c_str()
91 // We write our dm.json file every once in a while in case we crash.
92 // Notice this also handles the final dm.json when pending == 0.
93 if (pending % 500 == 0) {
94 JsonWriter::DumpJson();
// Called as each task begins: registers its "config src name" id in gRunning
// so the keepalive thread can report what's currently in flight.
98 static void start(ImplicitString config, ImplicitString src, ImplicitString name) {
99 SkString id = SkStringPrintf("%s %s %s", config.c_str(), src.c_str(), name.c_str());
100 SkAutoMutexAcquire lock(gRunningMutex);
101 gRunning.push_back(id);
104 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
// Lookup key for golden results: identified by its SkString contents and
// hashed as such, so gGold can be an SkTHashSet<Gold>.
// The four-argument constructor's body is missing from this listing —
// presumably it concatenates sink/src/name/md5 into the string; TODO confirm.
106 struct Gold : public SkString {
107 Gold() : SkString("") {}
108 Gold(ImplicitString sink, ImplicitString src, ImplicitString name, ImplicitString md5)
115 static uint32_t Hash(const Gold& g) { return SkGoodHash((const SkString&)g); }
// Set of known-good results, populated from --readPath's dm.json by gather_gold().
117 static SkTHashSet<Gold, Gold::Hash> gGold;
// JsonWriter::ReadJson callback: records one golden result.
119 static void add_gold(JsonWriter::BitmapResult r) {
120 gGold.add(Gold(r.config, r.sourceType, r.name, r.md5));
// If --readPath is set, load <readPath>/dm.json into gGold; a missing or
// unreadable file is reported as a failure (not silently ignored).
123 static void gather_gold() {
124 if (!FLAGS_readPath.isEmpty()) {
125 SkString path(FLAGS_readPath[0]);
126 path.append("/dm.json");
127 if (!JsonWriter::ReadJson(path.c_str(), add_gold)) {
128 fail(SkStringPrintf("Couldn't read %s for golden results.", path.c_str()));
133 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
// An owning pointer (via SkAutoTDelete) plus the config/src tag it was
// registered under.
135 template <typename T>
136 struct Tagged : public SkAutoTDelete<T> { const char* tag; };
// Passed as SkTArray's second template argument: per the constant's name, it
// tells the array the elements may be relocated with memcpy.
138 static const bool kMemcpyOK = true;
// Global registries of all sources and sinks assembled by gather_srcs()/gather_sinks().
140 static SkTArray<Tagged<Src>, kMemcpyOK> gSrcs;
141 static SkTArray<Tagged<Sink>, kMemcpyOK> gSinks;
// Takes ownership of s.  Registers it in gSrcs only if its tag is enabled by
// --src and its name passes --match; otherwise SkAutoTDelete frees it on exit.
// Note the inner `s` deliberately shadows the raw-pointer parameter.
143 static void push_src(const char* tag, Src* s) {
144 SkAutoTDelete<Src> src(s);
145 if (FLAGS_src.contains(tag) &&
146 !SkCommandLineFlags::ShouldSkip(FLAGS_match, src->name().c_str())) {
147 Tagged<Src>& s = gSrcs.push_back();
148 s.reset(src.detach());
// Whether SkCodec can currently decode a file with this extension
// (case-sensitive check against both lower- and upper-case forms).
153 static bool codec_supported(const char* ext) {
154 // FIXME: Once other versions of SkCodec are available, we can add them to this
155 // list (and eventually we can remove this check once they are all supported).
156 return strcmp(ext, "png") == 0 || strcmp(ext, "PNG") == 0 ||
157 strcmp(ext, "bmp") == 0 || strcmp(ext, "BMP") == 0 ||
158 strcmp(ext, "ico") == 0 || strcmp(ext, "ICO") == 0;
// Populate gSrcs: every registered GM, every .skp from --skps (file or
// directory), and every image from --images under four decode modes
// (image / subset / codec / scanline).  Several closing braces are missing
// from this truncated listing.
161 static void gather_srcs() {
162 for (const skiagm::GMRegistry* r = skiagm::GMRegistry::Head(); r; r = r->next()) {
163 push_src("gm", new GMSrc(r->factory()));
165 for (int i = 0; i < FLAGS_skps.count(); i++) {
166 const char* path = FLAGS_skps[i];
167 if (sk_isdir(path)) {
// A directory: enumerate every *.skp inside it.
168 SkOSFile::Iter it(path, "skp");
169 for (SkString file; it.next(&file); ) {
170 push_src("skp", new SKPSrc(SkOSPath::Join(path, file.c_str())));
// Otherwise treat the flag as a single .skp file.
173 push_src("skp", new SKPSrc(path));
// Image extensions scanned when --images names a directory.
176 static const char* const exts[] = {
177 "bmp", "gif", "jpg", "jpeg", "png", "webp", "ktx", "astc", "wbmp", "ico",
178 "BMP", "GIF", "JPG", "JPEG", "PNG", "WEBP", "KTX", "ASTC", "WBMP", "ICO",
180 for (int i = 0; i < FLAGS_images.count(); i++) {
181 const char* flag = FLAGS_images[i];
182 if (sk_isdir(flag)) {
183 for (size_t j = 0; j < SK_ARRAY_COUNT(exts); j++) {
184 SkOSFile::Iter it(flag, exts[j]);
185 for (SkString file; it.next(&file); ) {
186 SkString path = SkOSPath::Join(flag, file.c_str());
187 push_src("image", new ImageSrc(path)); // Decode entire image.
188 push_src("subset", new ImageSrc(path, 2)); // Decode into 2 x 2 subsets
189 if (codec_supported(exts[j])) {
190 push_src("codec", new CodecSrc(path, CodecSrc::kNormal_Mode));
191 push_src("scanline", new CodecSrc(path, CodecSrc::kScanline_Mode));
195 } else if (sk_exists(flag)) {
196 // assume that FLAGS_images[i] is a valid image if it is a file.
// Note: unlike the directory path above, this branch registers codec/scanline
// srcs without the codec_supported() extension check.
197 push_src("image", new ImageSrc(flag)); // Decode entire image.
198 push_src("subset", new ImageSrc(flag, 2)); // Decode into 2 x 2 subsets
199 push_src("codec", new CodecSrc(flag, CodecSrc::kNormal_Mode));
200 push_src("scanline", new CodecSrc(flag, CodecSrc::kScanline_Mode));
// Map --gpuAPI to a GrGLStandard; kNone lets the context factory pick.
// Note: contains("gl") also matches "gles", so the "gl" check must come first
// for "gles" to be reachable — it is, as written.
205 static GrGLStandard get_gpu_api() {
206 if (FLAGS_gpuAPI.contains("gl")) { return kGL_GrGLStandard; }
207 if (FLAGS_gpuAPI.contains("gles")) { return kGLES_GrGLStandard; }
208 return kNone_GrGLStandard;
// Takes ownership of s.  Registers the sink in gSinks only if its tag is
// enabled by --config AND it can successfully draw a trivial no-op Src
// (a canary that filters out sinks that can't run in this environment).
// Several lines (early return, bitmap/log declarations, braces) are missing
// from this truncated listing.
211 static void push_sink(const char* tag, Sink* s) {
212 SkAutoTDelete<Sink> sink(s);
213 if (!FLAGS_config.contains(tag)) {
216 // Try a noop Src as a canary. If it fails, skip this sink.
217 struct : public Src {
218 Error draw(SkCanvas*) const SK_OVERRIDE { return ""; }
219 SkISize size() const SK_OVERRIDE { return SkISize::Make(16, 16); }
220 Name name() const SK_OVERRIDE { return "noop"; }
224 SkDynamicMemoryWStream stream;
226 Error err = sink->draw(noop, &bitmap, &stream, &log);
228 SkDebugf("Skipping %s: %s\n", tag, err.c_str());
232 Tagged<Sink>& ts = gSinks.push_back();
233 ts.reset(sink.detach());
// Whether GPU configs/tests may run at all.  Body missing from this listing —
// presumably returns FLAGS_gpu (and/or a compile-time GPU check); TODO confirm
// against the full source.
237 static bool gpu_supported() {
// Factory mapping a config tag ("gpu", "8888", "pdf", ...) to a newly
// allocated Sink, or (per the macro pattern) falling through when no tag
// matches.  GPU sinks are only offered when gpu_supported().  Platform-guard
// #ifdef/#endif lines and the trailing return are missing from this listing.
245 static Sink* create_sink(const char* tag) {
246 #define SINK(t, sink, ...) if (0 == strcmp(t, tag)) { return new sink(__VA_ARGS__); }
247 if (gpu_supported()) {
248 typedef GrContextFactory Gr;
249 const GrGLStandard api = get_gpu_api();
// GPUSink args: GL context type, API, MSAA sample count, DFText, threading.
250 SINK("gpunull", GPUSink, Gr::kNull_GLContextType, api, 0, false, FLAGS_gpu_threading);
251 SINK("gpudebug", GPUSink, Gr::kDebug_GLContextType, api, 0, false, FLAGS_gpu_threading);
252 SINK("gpu", GPUSink, Gr::kNative_GLContextType, api, 0, false, FLAGS_gpu_threading);
253 SINK("gpudft", GPUSink, Gr::kNative_GLContextType, api, 0, true, FLAGS_gpu_threading);
254 SINK("msaa4", GPUSink, Gr::kNative_GLContextType, api, 4, false, FLAGS_gpu_threading);
255 SINK("msaa16", GPUSink, Gr::kNative_GLContextType, api, 16, false, FLAGS_gpu_threading);
256 SINK("nvprmsaa4", GPUSink, Gr::kNVPR_GLContextType, api, 4, false, FLAGS_gpu_threading);
257 SINK("nvprmsaa16", GPUSink, Gr::kNVPR_GLContextType, api, 16, false, FLAGS_gpu_threading);
// The ANGLE and MESA sinks are presumably inside platform #ifdefs that were
// dropped from this listing.
259 SINK("angle", GPUSink, Gr::kANGLE_GLContextType, api, 0, false, FLAGS_gpu_threading);
262 SINK("mesa", GPUSink, Gr::kMESA_GLContextType, api, 0, false, FLAGS_gpu_threading);
266 #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
267 SINK("hwui", HWUISink);
// CPU-side sinks, available regardless of GPU support.
271 SINK("565", RasterSink, kRGB_565_SkColorType);
272 SINK("8888", RasterSink, kN32_SkColorType);
273 SINK("pdf", PDFSink);
274 SINK("skp", SKPSink);
275 SINK("svg", SVGSink);
276 SINK("null", NullSink);
277 SINK("xps", XPSSink);
// Factory mapping a "via" tag to a wrapper Sink around `wrapped`
// (e.g. "tiles-8888" wraps a raster sink in ViaTiles).  The SkMatrix
// declaration and trailing return are missing from this listing.
283 static Sink* create_via(const char* tag, Sink* wrapped) {
284 #define VIA(t, via, ...) if (0 == strcmp(t, tag)) { return new via(__VA_ARGS__); }
285 VIA("pipe", ViaPipe, wrapped);
286 VIA("serialize", ViaSerialization, wrapped);
287 VIA("tiles", ViaTiles, 256, 256, NULL, wrapped);
288 VIA("tiles_rt", ViaTiles, 256, 256, new SkRTreeFactory, wrapped);
// "matrix"/"upright" vias are only offered when --matrix parses as 4 scalars.
290 if (FLAGS_matrix.count() == 4) {
293 m.setScaleX((SkScalar)atof(FLAGS_matrix[0]));
294 m.setSkewX ((SkScalar)atof(FLAGS_matrix[1]));
295 m.setSkewY ((SkScalar)atof(FLAGS_matrix[2]));
296 m.setScaleY((SkScalar)atof(FLAGS_matrix[3]));
297 VIA("matrix", ViaMatrix, m, wrapped);
298 VIA("upright", ViaUpright, m, wrapped);
301 #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
302 VIA("androidsdk", ViaAndroidSDK, wrapped);
// Populate gSinks from --config.  Each config is split on '-' and built
// right-to-left: the last part must be a terminal sink (create_sink) and each
// earlier part wraps it in a via (create_via).  E.g. "tiles-8888" is
// ViaTiles(RasterSink).  Error-cleanup lines and braces are missing from this
// truncated listing.
309 static void gather_sinks() {
310 for (int i = 0; i < FLAGS_config.count(); i++) {
311 const char* config = FLAGS_config[i];
312 SkTArray<SkString> parts;
313 SkStrSplit(config, "-", &parts);
// Iterate parts from last to first: innermost sink outward through the vias.
316 for (int i = parts.count(); i-- > 0;) {
317 const char* part = parts[i].c_str();
318 Sink* next = (sink == NULL) ? create_sink(part) : create_via(part, sink);
320 SkDebugf("Skipping %s: Don't understand '%s'.\n", config, part);
// Register the fully wrapped sink under the complete config string.
328 push_sink(config, sink);
// Blacklist matching: "_" matches anything; otherwise a substring match.
333 static bool match(const char* needle, const char* haystack) {
334 return 0 == strcmp("_", needle) || NULL != strstr(haystack, needle);
// Check (sink, src, name) against every --blacklist triple.  Returns the
// matching triple as a string for the log note, or (per the callers, which
// test isEmpty()) an empty string when not blacklisted.  The trailing return
// and closing braces are missing from this listing.
337 static ImplicitString is_blacklisted(const char* sink, const char* src, const char* name) {
// Step by 3; the "- 2" bound guards against a trailing partial triple.
338 for (int i = 0; i < FLAGS_blacklist.count() - 2; i += 3) {
339 if (match(FLAGS_blacklist[i+0], sink) &&
340 match(FLAGS_blacklist[i+1], src) &&
341 match(FLAGS_blacklist[i+2], name)) {
342 return SkStringPrintf("%s %s %s",
343 FLAGS_blacklist[i+0], FLAGS_blacklist[i+1], FLAGS_blacklist[i+2]);
349 // The finest-grained unit of work we can run: draw a single Src into a single Sink,
350 // report any errors, and perhaps write out the output: a .png of the bitmap, or a raw stream.
// NOTE(review): the `struct Task {` opening line is missing from this listing.
// Members are non-owning references into the global gSrcs/gSinks registries,
// which outlive all tasks.
352 Task(const Tagged<Src>& src, const Tagged<Sink>& sink) : src(src), sink(sink) {}
353 const Tagged<Src>& src;
354 const Tagged<Sink>& sink;
// Execute one Src-into-Sink draw: skip if blacklisted or --dryRun, otherwise
// draw, hash the output (stream bytes if any, else raw bitmap pixels), compare
// against golden results when --readPath is set, and write output to disk when
// --writePath is set.  Many interior lines (note/log/bitmap/timer declarations,
// error-path details, closing braces) are missing from this truncated listing.
356 static void Run(Task* task) {
357 SkString name = task->src->name();
359 SkString whyBlacklisted = is_blacklisted(task->sink.tag, task->src.tag, name.c_str());
360 if (!whyBlacklisted.isEmpty()) {
361 note.appendf(" (--blacklist %s)", whyBlacklisted.c_str());
366 if (!FLAGS_dryRun && whyBlacklisted.isEmpty()) {
368 SkDynamicMemoryWStream stream;
// Mark the task as in-flight for the keepalive reporter.
369 start(task->sink.tag, task->src.tag, name.c_str());
370 Error err = task->sink->draw(*task->src, &bitmap, &stream, &log);
371 if (!err.isEmpty()) {
// Errors split (in lines missing here) into hard failures vs. skips.
374 fail(SkStringPrintf("%s %s %s: %s",
380 note.appendf(" (skipped: %s)", err.c_str());
382 done(timer.fWall, task->sink.tag, task->src.tag, name, note, log);
385 SkAutoTDelete<SkStreamAsset> data(stream.detachAsStream());
// Only compute the MD5 when someone will consume it (gold check or writing).
388 if (!FLAGS_writePath.isEmpty() || !FLAGS_readPath.isEmpty()) {
// Sinks produce either a byte stream (pdf/skp/svg/...) or a bitmap — hash
// whichever the sink filled in.
390 if (data->getLength()) {
391 hash.writeStream(data, data->getLength());
394 hash.write(bitmap.getPixels(), bitmap.getSize());
396 SkMD5::Digest digest;
// Format the 16-byte digest as a 32-char lowercase hex string.
398 for (int i = 0; i < 16; i++) {
399 md5.appendf("%02x", digest.data[i]);
403 if (!FLAGS_readPath.isEmpty() &&
404 !gGold.contains(Gold(task->sink.tag, task->src.tag, name, md5))) {
405 fail(SkStringPrintf("%s not found for %s %s %s in %s",
413 if (!FLAGS_writePath.isEmpty()) {
414 const char* ext = task->sink->fileExtension();
415 if (data->getLength()) {
// Stream output: write raw bytes; a sink should never produce both.
416 WriteToDisk(*task, md5, ext, data, data->getLength(), NULL);
417 SkASSERT(bitmap.drawsNothing());
418 } else if (!bitmap.drawsNothing()) {
419 WriteToDisk(*task, md5, ext, NULL, 0, &bitmap);
424 done(timer.fWall, task->sink.tag, task->src.tag, name, note, log);
// Record the result in dm.json and write the output file: either a
// content-addressed <writePath>/<md5>.<ext> (--nameByHash) or a nested
// <writePath>/<config>/<sourceType>/<name>.<ext> path.  Bitmaps are encoded
// as PNG; streams are written verbatim.  Parameter lines (md5, ext), early
// returns, and closing braces are missing from this truncated listing.
427 static void WriteToDisk(const Task& task,
430 SkStream* data, size_t len,
431 const SkBitmap* bitmap) {
432 JsonWriter::BitmapResult result;
433 result.name = task.src->name();
434 result.config = task.sink.tag;
435 result.sourceType = task.src.tag;
438 JsonWriter::AddBitmapResult(result);
440 const char* dir = FLAGS_writePath[0];
441 if (0 == strcmp(dir, "@")) { // Needed for iOS.
442 dir = FLAGS_resourcePath[0];
447 if (FLAGS_nameByHash) {
448 path = SkOSPath::Join(dir, result.md5.c_str());
451 if (sk_exists(path.c_str())) {
452 return; // Content-addressed. If it exists already, we're done.
// Non-hashed layout: writePath/config/sourceType/name, creating each level.
455 path = SkOSPath::Join(dir, task.sink.tag);
456 sk_mkdir(path.c_str());
457 path = SkOSPath::Join(path.c_str(), task.src.tag);
458 sk_mkdir(path.c_str());
459 path = SkOSPath::Join(path.c_str(), task.src->name().c_str());
464 SkFILEWStream file(path.c_str());
465 if (!file.isValid()) {
466 fail(SkStringPrintf("Can't open %s for writing.\n", path.c_str()));
471 // We can't encode A8 bitmaps as PNGs. Convert them to 8888 first.
473 if (bitmap->info().colorType() == kAlpha_8_SkColorType) {
474 if (!bitmap->copyTo(&converted, kN32_SkColorType)) {
475 fail("Can't convert A8 to 8888.\n");
480 if (!SkImageEncoder::EncodeStream(&file, *bitmap, SkImageEncoder::kPNG_Type, 100)) {
481 fail(SkStringPrintf("Can't encode PNG to %s.\n", path.c_str()));
// Stream case: dump the sink's raw bytes straight to the file.
485 if (!file.writeStream(data, len)) {
486 fail(SkStringPrintf("Can't write to %s.\n", path.c_str()));
493 // Run all tasks in the same enclave serially on the same thread.
494 // They can't possibly run concurrently with each other.
495 static void run_enclave(SkTArray<Task>* tasks) {
496 for (int i = 0; i < tasks->count(); i++) {
497 Task::Run(tasks->begin() + i);
501 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
503 // Unit tests don't fit so well into the Src/Sink model, so we give them special treatment.
// gThreadedTests run on any worker thread; gGPUTests are serialized onto the
// GPU enclave thread unless --gpu_threading is set.
505 static SkTDArray<skiatest::Test> gThreadedTests, gGPUTests;
// Walk the test registry, filtering by --src "tests", --match, GPU support,
// and --cpu.  Early-return/continue lines and braces are missing from this
// truncated listing.
507 static void gather_tests() {
508 if (!FLAGS_src.contains("tests")) {
511 for (const skiatest::TestRegistry* r = skiatest::TestRegistry::Head(); r;
513 // Despite its name, factory() is returning a reference to
514 // link-time static const POD data.
515 const skiatest::Test& test = r->factory();
516 if (SkCommandLineFlags::ShouldSkip(FLAGS_match, test.name)) {
519 if (test.needsGpu && gpu_supported()) {
520 (FLAGS_gpu_threading ? gThreadedTests : gGPUTests).push(test);
521 } else if (!test.needsGpu && FLAGS_cpu) {
522 gThreadedTests.push(test);
// Run one unit test under a local anonymous Reporter that forwards failures to
// fail() and dm.json, and bracket it with start()/done() so it shows up in the
// progress/keepalive machinery like any other task.  The reporter instance
// declaration and timer lines are missing from this truncated listing.
527 static void run_test(skiatest::Test* test) {
528 struct : public skiatest::Reporter {
529 void reportFailed(const skiatest::Failure& failure) SK_OVERRIDE {
530 fail(failure.toString());
531 JsonWriter::AddTestFailure(failure);
533 bool allowExtendedTest() const SK_OVERRIDE {
534 return FLAGS_pathOpsExtended;
536 bool verbose() const SK_OVERRIDE { return FLAGS_veryVerbose; }
541 start("unit", "test", test->name);
// GPU tests pull contexts from this factory; CPU tests ignore it.
542 GrContextFactory factory;
543 test->proc(&reporter, &factory);
546 done(timer.fWall, "unit", "test", test->name, "", "");
549 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
551 // If we're isolating all GPU-bound work to one thread (the default), this function runs all that.
// First the GPU-enclave Src/Sink tasks (via run_enclave, per the name — the
// call line is missing from this listing), then every queued GPU unit test.
552 static void run_enclave_and_gpu_tests(SkTArray<Task>* tasks) {
554 for (int i = 0; i < gGPUTests.count(); i++) {
555 run_test(&gGPUTests[i]);
559 // Some runs (mostly, Valgrind) are so slow that the bot framework thinks we've hung.
560 // This prints something every once in a while so that it knows we're still working.
// Spawns a detached, intentionally leaked thread that periodically dumps the
// contents of gRunning.  The loop structure, sleep call, and the enclosing
// local struct (named Loop, per the usage below) are missing from this
// truncated listing.
561 static void start_keepalive() {
563 static void forever(void*) {
565 static const int kSec = 300;
566 #if defined(SK_BUILD_FOR_WIN)
// Snapshot the in-flight task list under the lock, then print it.
573 SkAutoMutexAcquire lock(gRunningMutex);
574 for (int i = 0; i < gRunning.count(); i++) {
575 running.appendf("\n\t%s", gRunning[i].c_str());
578 SkDebugf("\nCurrently running:%s\n", running.c_str());
// Leaked on purpose: the keepalive thread must outlive normal shutdown.
582 static SkThread* intentionallyLeaked = new SkThread(Loop::forever);
583 intentionallyLeaked->start();
// NOTE(review): this appears to be the interior of the main driver function
// (its signature, presumably dm_main(), fell in the lines missing from this
// listing).  It sizes the task count, partitions Src/Sink pairs into enclaves,
// fans the work out over an SkTaskGroup, then summarizes failures.
590 SkTaskGroup::Enabler enabled(FLAGS_threads);
592 SkInstCountPrintLeaksOnExit();
// Total expected task count; done() decrements this as work completes.
603 gPending = gSrcs.count() * gSinks.count() + gThreadedTests.count() + gGPUTests.count();
604 SkDebugf("%d srcs * %d sinks + %d tests == %d tasks\n",
605 gSrcs.count(), gSinks.count(), gThreadedTests.count() + gGPUTests.count(), gPending);
607 // We try to exploit as much parallelism as is safe. Most Src/Sink pairs run on any thread,
608 // but Sinks that identify as part of a particular enclave run serially on a single thread.
609 // CPU tests run on any thread. GPU tests depend on --gpu_threading.
610 SkTArray<Task> enclaves[kNumEnclaves];
611 for (int j = 0; j < gSinks.count(); j++) {
612 SkTArray<Task>& tasks = enclaves[gSinks[j]->enclave()];
613 for (int i = 0; i < gSrcs.count(); i++) {
614 tasks.push_back(Task(gSrcs[i], gSinks[j]));
// CPU-safe unit tests fan out across the task group's worker threads.
619 tg.batch(run_test, gThreadedTests.begin(), gThreadedTests.count());
// Dispatch each enclave: any-thread tasks are batched individually; other
// enclaves (the switch's case labels are missing here) run as one serial job.
620 for (int i = 0; i < kNumEnclaves; i++) {
622 case kAnyThread_Enclave:
623 tg.batch(Task::Run, enclaves[i].begin(), enclaves[i].count());
626 tg.add(run_enclave_and_gpu_tests, &enclaves[i]);
629 tg.add(run_enclave, &enclaves[i]);
634 // At this point we're back in single-threaded land.
637 if (gFailures.count() > 0) {
638 SkDebugf("Failures:\n");
639 for (int i = 0; i < gFailures.count(); i++) {
640 SkDebugf("\t%s\n", gFailures[i].c_str());
642 SkDebugf("%d failures\n", gFailures.count());
// Sanity check (condition line missing — presumably gPending != 0).
646 SkDebugf("Hrm, we didn't seem to run everything we intended to! Please file a bug.\n");
652 #if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL)
653 int main(int argc, char** argv) {
654 SkCommandLineFlags::Parse(argc, argv);