From: palfia@homejinni.com
Date: Fri, 19 Jul 2013 14:46:23 +0000 (+0000)
Subject: Fix unaligned accesses in back_edge tables.
X-Git-Tag: upstream/4.7.83~13283
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4c3269a43d8f515d6b378d3c22778b53d1b47675;p=platform%2Fupstream%2Fv8.git

Fix unaligned accesses in back_edge tables.

This patch fixes the step size of masm->pc_ in back_edge tables to whole
words (4 bytes) to ensure 4-byte alignment for read/write operations.

Reading and writing word-sized (4-byte) data at aligned addresses
(address % 4 == 0) is more efficient on all platforms, and especially on
MIPS, where without this alignment fix a kernel exception handler is
invoked for every unaligned access.

This patch increases the size of back_edge tables by 3 bytes per row.
Testing shows that back_edge tables are quite small in most cases (the
maximal observed length is 18, which amounts to only 54 additional bytes
with this patch).

BUG=

Patch from Douglas Leung

Review URL: https://codereview.chromium.org/19248002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15782 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index bfe2e18..fd7c282 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -2371,8 +2371,8 @@ void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
   uint32_t table_length = Memory::uint32_at(back_edge_cursor);
   back_edge_cursor += kIntSize;
   for (uint32_t i = 0; i < table_length; ++i) {
-    uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
-    if (loop_depth == loop_nesting_level) {
+    uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
+    if (static_cast<int>(loop_depth) == loop_nesting_level) {
       // Loop back edge has the loop depth that we want to patch.
       uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
       Address pc_after = unoptimized_code->instruction_start() + pc_offset;
@@ -2403,8 +2403,8 @@ void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
   uint32_t table_length = Memory::uint32_at(back_edge_cursor);
   back_edge_cursor += kIntSize;
   for (uint32_t i = 0; i < table_length; ++i) {
-    uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
-    if (loop_depth <= loop_nesting_level) {
+    uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
+    if (static_cast<int>(loop_depth) <= loop_nesting_level) {
       uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
       Address pc_after = unoptimized_code->instruction_start() + pc_offset;
       RevertInterruptCodeAt(unoptimized_code,
@@ -2435,13 +2435,13 @@ void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
   uint32_t table_length = Memory::uint32_at(back_edge_cursor);
   back_edge_cursor += kIntSize;
   for (uint32_t i = 0; i < table_length; ++i) {
-    uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
-    CHECK_LE(loop_depth, Code::kMaxLoopNestingMarker);
+    uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
+    CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
     // Assert that all back edges for shallower loops (and only those)
     // have already been patched.
     uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    CHECK_EQ((loop_depth <= loop_nesting_level),
+    CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
              InterruptCodeIsPatched(unoptimized_code,
                                     pc_after,
                                     interrupt_code,
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 76d3fff..6d802e9 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -379,7 +379,7 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
   for (unsigned i = 0; i < length; ++i) {
     __ dd(back_edges_[i].id.ToInt());
     __ dd(back_edges_[i].pc);
-    __ db(back_edges_[i].loop_depth);
+    __ dd(back_edges_[i].loop_depth);
   }
   return offset;
 }
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 7e64506..a9db54e 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -136,7 +136,7 @@ class FullCodeGenerator: public AstVisitor {
 #error Unsupported target architecture.
 #endif
 
-  static const int kBackEdgeEntrySize = 2 * kIntSize + kOneByteSize;
+  static const int kBackEdgeEntrySize = 3 * kIntSize;
 
  private:
   class Breakable;
@@ -648,7 +648,7 @@ class FullCodeGenerator: public AstVisitor {
   struct BackEdgeEntry {
     BailoutId id;
     unsigned pc;
-    uint8_t loop_depth;
+    uint32_t loop_depth;
   };
 
   struct TypeFeedbackCellEntry {
diff --git a/src/objects.cc b/src/objects.cc
index 6365667..c83a40e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -10799,7 +10799,8 @@ void Code::Disassemble(const char* name, FILE* out) {
     for (uint32_t i = 0; i < table_length; ++i) {
       uint32_t ast_id = Memory::uint32_at(back_edge_cursor);
       uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
-      uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
+      uint32_t loop_depth = Memory::uint32_at(back_edge_cursor +
+                                              2 * kIntSize);
       PrintF(out, "%6u %9u %10u\n", ast_id, pc_offset, loop_depth);
       back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
     }
diff --git a/src/runtime.cc b/src/runtime.cc
index 69cfe14..bad089b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8539,13 +8539,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   Address table_cursor = start + unoptimized->back_edge_table_offset();
   uint32_t table_length = Memory::uint32_at(table_cursor);
   table_cursor += kIntSize;
-  uint8_t loop_depth = 0;
+  uint32_t loop_depth = 0;
   for (unsigned i = 0; i < table_length; ++i) {
     // Table entries are (AST id, pc offset) pairs.
     uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
     if (pc_offset == target_pc_offset) {
       ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
-      loop_depth = Memory::uint8_at(table_cursor + 2 * kIntSize);
+      loop_depth = Memory::uint32_at(table_cursor + 2 * kIntSize);
       break;
     }
     table_cursor += FullCodeGenerator::kBackEdgeEntrySize;
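
For context, the sketch below is a minimal, self-contained illustration of the table format this patch produces; it is not part of the patch and uses plain standard C++ (std::memcpy, std::vector) rather than V8's Memory::uint32_at() helpers. Each back-edge entry becomes three 4-byte words (AST id, pc offset, loop depth), so the length prefix and every field sit on a 4-byte boundary and can be read with an aligned word load.

// Illustrative sketch only -- not part of the patch.  It mimics the new
// back-edge table layout (kBackEdgeEntrySize = 3 * kIntSize) with plain
// standard C++ instead of V8's Memory::uint32_at() helpers.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

namespace {

const int kIntSize = 4;                       // one table word
const int kBackEdgeEntrySize = 3 * kIntSize;  // ast id, pc offset, loop depth

// Reads a 4-byte word from the table.  With the 3-word layout every offset
// passed here is a multiple of 4, so the load is always aligned.
uint32_t ReadWord(const uint8_t* table, size_t offset) {
  uint32_t word;
  std::memcpy(&word, table + offset, sizeof(word));
  return word;
}

}  // namespace

int main() {
  // Build a tiny table: a 4-byte length prefix followed by two entries of
  // three 4-byte words each (ast id, pc offset, loop depth).
  const uint32_t kEntries[][3] = {{1, 0x40, 1}, {2, 0x7c, 2}};
  const uint32_t kLength = 2;
  std::vector<uint8_t> table(kIntSize + kLength * kBackEdgeEntrySize);
  std::memcpy(table.data(), &kLength, kIntSize);
  std::memcpy(table.data() + kIntSize, kEntries, sizeof(kEntries));

  // Walk the table the same way the patched readers do: one word per field.
  uint32_t table_length = ReadWord(table.data(), 0);
  size_t cursor = kIntSize;
  for (uint32_t i = 0; i < table_length; ++i) {
    uint32_t ast_id = ReadWord(table.data(), cursor);
    uint32_t pc_offset = ReadWord(table.data(), cursor + kIntSize);
    uint32_t loop_depth = ReadWord(table.data(), cursor + 2 * kIntSize);
    std::printf("%6u %9u %10u\n", static_cast<unsigned>(ast_id),
                static_cast<unsigned>(pc_offset),
                static_cast<unsigned>(loop_depth));
    cursor += kBackEdgeEntrySize;
  }
  return 0;
}

std::memcpy is used for the word loads so the sketch itself is well-defined C++; the point being illustrated is simply that with 3-word entries every field offset is a multiple of 4, which is the alignment property the commit message relies on.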