From: Rui Ueyama
Date: Fri, 25 Nov 2016 20:05:08 +0000 (+0000)
Subject: Parallelize uncompress() and splitIntoPieces().
X-Git-Tag: llvmorg-4.0.0-rc1~3651
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2555952ba86e464420d1a5e36e578a0c26d4abaf;p=platform%2Fupstream%2Fllvm.git

Parallelize uncompress() and splitIntoPieces().

Uncompressing section contents and splitting mergeable section contents
into smaller chunks are heavy tasks. They scan entire section contents
and do CPU-intensive work such as uncompressing zlib-compressed data or
computing a hash value for each section piece.

Luckily, these tasks are independent of each other, so we can run them
in parallel with parallel_for_each. The number of input sections is
large (as opposed to the number of output sections), so there is plenty
of parallelism here. In fact, the current design of calling uncompress()
and splitIntoPieces() in batch was chosen with this parallelization in
mind.

Basically, all we need to do here is to replace `for` with
`parallel_for_each`.

This patch improves latency significantly if linked programs contain
debug info (which in turn contains lots of mergeable strings). For
example, the time to link Clang (a debug build) improved by 20% on my
machine, as shown below. Note that ld.gold took 19.2 seconds to do the
same thing.

Before:

    30801.782712 task-clock (msec)         #    3.652 CPUs utilized            ( +-  2.59% )
         104,084 context-switches          #    0.003 M/sec                    ( +-  1.02% )
           5,063 cpu-migrations            #    0.164 K/sec                    ( +- 13.66% )
       2,528,130 page-faults               #    0.082 M/sec                    ( +-  0.47% )
  85,317,809,130 cycles                    #    2.770 GHz                      ( +-  2.62% )
  67,352,463,373 stalled-cycles-frontend   #   78.94% frontend cycles idle     ( +-  3.06% )
   <not supported> stalled-cycles-backend
  44,295,945,493 instructions              #    0.52  insns per cycle
                                           #    1.52  stalled cycles per insn  ( +-  0.44% )
   8,572,384,877 branches                  #  278.308 M/sec                    ( +-  0.66% )
     141,806,726 branch-misses             #    1.65% of all branches          ( +-  0.13% )

     8.433424003 seconds time elapsed                                          ( +-  1.20% )

After:

    35523.764575 task-clock (msec)         #    5.265 CPUs utilized            ( +-  2.67% )
         159,107 context-switches          #    0.004 M/sec                    ( +-  0.48% )
           8,123 cpu-migrations            #    0.229 K/sec                    ( +- 23.34% )
       2,372,483 page-faults               #    0.067 M/sec                    ( +-  0.36% )
  98,395,342,152 cycles                    #    2.770 GHz                      ( +-  2.62% )
  79,294,670,125 stalled-cycles-frontend   #   80.59% frontend cycles idle     ( +-  3.03% )
   <not supported> stalled-cycles-backend
  46,274,151,813 instructions              #    0.47  insns per cycle
                                           #    1.71  stalled cycles per insn  ( +-  0.47% )
   8,987,621,670 branches                  #  253.003 M/sec                    ( +-  0.60% )
     148,900,624 branch-misses             #    1.66% of all branches          ( +-  0.27% )

     6.747548004 seconds time elapsed                                          ( +-  0.40% )

llvm-svn: 287946
---

diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index ef1a727..9f1bb74 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -20,6 +20,7 @@
 #include "Target.h"
 #include "Writer.h"
 #include "lld/Config/Version.h"
+#include "lld/Core/Parallel.h"
 #include "lld/Driver/Driver.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringSwitch.h"
@@ -800,14 +801,15 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &Args) {
 
   // MergeInputSection::splitIntoPieces needs to be called before
   // any call of MergeInputSection::getOffset. Do that.
-  for (InputSectionBase<ELFT> *S : Symtab.Sections) {
-    if (!S->Live)
-      continue;
-    if (S->Compressed)
-      S->uncompress();
-    if (auto *MS = dyn_cast<MergeInputSection<ELFT>>(S))
-      MS->splitIntoPieces();
-  }
+  parallel_for_each(Symtab.Sections.begin(), Symtab.Sections.end(),
+                    [](InputSectionBase<ELFT> *S) {
+                      if (!S->Live)
+                        return;
+                      if (S->Compressed)
+                        S->uncompress();
+                      if (auto *MS = dyn_cast<MergeInputSection<ELFT>>(S))
+                        MS->splitIntoPieces();
+                    });
 
   // Write the result to the file.
   writeResult<ELFT>();
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index 3d8c236..9b2feaa 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -22,6 +22,7 @@
 #include "llvm/Support/Compression.h"
 #include "llvm/Support/Endian.h"
+#include <mutex>
 
 using namespace llvm;
 using namespace llvm::ELF;
@@ -160,6 +161,8 @@ InputSectionBase<ELFT>::getRawCompressedData(ArrayRef<uint8_t> Data) {
   return {Data.slice(sizeof(*Hdr)), read64be(Hdr->Size)};
 }
 
+// Uncompress section contents. Note that this function is called
+// from parallel_for_each, so it must be thread-safe.
 template <class ELFT> void InputSectionBase<ELFT>::uncompress() {
   if (!zlib::isAvailable())
     fatal(toString(this) +
@@ -179,7 +182,12 @@ template <class ELFT> void InputSectionBase<ELFT>::uncompress() {
   std::tie(Buf, Size) = getRawCompressedData(Data);
 
   // Uncompress Buf.
-  char *OutputBuf = BAlloc.Allocate<char>(Size);
+  char *OutputBuf;
+  {
+    static std::mutex Mu;
+    std::lock_guard<std::mutex> Lock(Mu);
+    OutputBuf = BAlloc.Allocate<char>(Size);
+  }
   if (zlib::uncompress(toStringRef(Buf), OutputBuf, Size) != zlib::StatusOK)
     fatal(toString(this) + ": error while uncompressing section");
   Data = ArrayRef<uint8_t>((uint8_t *)OutputBuf, Size);
@@ -746,6 +754,12 @@ MergeInputSection<ELFT>::MergeInputSection(elf::ObjectFile<ELFT> *F,
                                            StringRef Name)
     : InputSectionBase<ELFT>(F, Header, Name, InputSectionBase<ELFT>::Merge) {}
 
+// This function is called after we obtain a complete list of input sections
+// that need to be linked. This is responsible to split section contents
+// into small chunks for further processing.
+//
+// Note that this function is called from parallel_for_each. This must be
+// thread-safe (i.e. no memory allocation from the pools).
 template <class ELFT> void MergeInputSection<ELFT>::splitIntoPieces() {
   ArrayRef<uint8_t> Data = this->Data;
   uintX_t EntSize = this->Entsize;
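
As a companion to the diff above, here is a minimal, self-contained sketch of
the pattern this change applies; it is illustrative only and is not lld code.
A per-section loop whose iterations are independent becomes a parallel
for-each, and the one shared, non-thread-safe resource (a stand-in for the
BAlloc bump allocator) is serialized behind a mutex while the CPU-heavy work
stays outside the lock. The sketch uses standard C++17 std::for_each with
std::execution::par in place of lld's parallel_for_each, and the Section
struct, allocate(), uncompressSection(), and splitSection() names are invented
for illustration.

#include <algorithm>
#include <cstddef>
#include <execution>
#include <memory>
#include <mutex>
#include <vector>

// Hypothetical stand-in for InputSectionBase; not an lld type.
struct Section {
  bool Live = true;
  bool Compressed = false;
  bool Mergeable = false;
};

// Stand-in for the shared, non-thread-safe allocator (BAlloc in the patch).
static std::vector<std::unique_ptr<char[]>> Pool;
static std::mutex PoolMutex;

static char *allocate(std::size_t Size) {
  // Only the allocation itself is serialized; decompression and hashing
  // still run in parallel outside the lock.
  std::lock_guard<std::mutex> Lock(PoolMutex);
  Pool.push_back(std::make_unique<char[]>(Size));
  return Pool.back().get();
}

static void uncompressSection(Section &) { (void)allocate(4096); }
static void splitSection(Section &) { /* hash section pieces, etc. */ }

void processSections(std::vector<Section> &Sections) {
  // Parallel counterpart of the old sequential "for" loop over sections.
  std::for_each(std::execution::par, Sections.begin(), Sections.end(),
                [](Section &S) {
                  if (!S.Live)
                    return;               // skip dead sections
                  if (S.Compressed)
                    uncompressSection(S); // CPU-bound: zlib-style decompress
                  if (S.Mergeable)
                    splitSection(S);      // CPU-bound: split and hash pieces
                });
}

The point of the sketch is only that the loop body can stay unchanged when the
loop goes parallel; the shared allocation is the one spot that needs a lock.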