From: Ryan Dahl Date: Fri, 17 Sep 2010 04:33:32 +0000 (-0700) Subject: Upgrade V8 to 2.4.4 X-Git-Tag: v0.3.0~166 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=431e43009c1a90781bea57aa993797d0161bbf95;p=platform%2Fupstream%2Fnodejs.git Upgrade V8 to 2.4.4 --- diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 65b8965..2403fbb 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -9,6 +9,8 @@ ARM Ltd. Alexander Botero-Lowry Alexandre Vassalotti +Andreas Anyuru +Burcu Dogan Craig Schlenter Daniel Andersson Daniel James @@ -21,6 +23,7 @@ John Jozwiak Kun Zhang Matt Hanselman Martyn Capewell +Michael Smith Paolo Giarrusso Patrick Gansterer Rafal Krypa @@ -28,6 +31,4 @@ Rene Rebe Rodolph Perfetta Ryan Dahl Subrato K De -Burcu Dogan Vlad Burlik - diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 95a3640..c31f5fc 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,21 @@ +2010-09-15: Version 2.4.4 + + Fix bug with hangs on very large sparse arrays. + + Try harder to free up memory when running out of space. + + Add heap snapshots to JSON format to API. + + Recalibrate benchmarks. + + +2010-09-13: Version 2.4.3 + + Made Date.parse properly handle TZ offsets (issue 857). + + Performance improvements on all platforms. + + 2010-09-08: Version 2.4.2 Fixed GC crash bug. diff --git a/deps/v8/benchmarks/crypto.js b/deps/v8/benchmarks/crypto.js index 7e9829d..ffa69b5 100644 --- a/deps/v8/benchmarks/crypto.js +++ b/deps/v8/benchmarks/crypto.js @@ -31,7 +31,7 @@ // The code has been adapted for use as a benchmark by Google. -var Crypto = new BenchmarkSuite('Crypto', 110465, [ +var Crypto = new BenchmarkSuite('Crypto', 266181, [ new Benchmark("Encrypt", encrypt), new Benchmark("Decrypt", decrypt) ]); diff --git a/deps/v8/benchmarks/deltablue.js b/deps/v8/benchmarks/deltablue.js index 4af8387..548fd96 100644 --- a/deps/v8/benchmarks/deltablue.js +++ b/deps/v8/benchmarks/deltablue.js @@ -23,7 +23,7 @@ // more like a JavaScript program. -var DeltaBlue = new BenchmarkSuite('DeltaBlue', 30282, [ +var DeltaBlue = new BenchmarkSuite('DeltaBlue', 66118, [ new Benchmark('DeltaBlue', deltaBlue) ]); diff --git a/deps/v8/benchmarks/earley-boyer.js b/deps/v8/benchmarks/earley-boyer.js index b1efe4a..1be480e 100644 --- a/deps/v8/benchmarks/earley-boyer.js +++ b/deps/v8/benchmarks/earley-boyer.js @@ -1,7 +1,7 @@ // This file is automatically generated by scheme2js, except for the // benchmark harness code at the beginning and end of the file. -var EarleyBoyer = new BenchmarkSuite('EarleyBoyer', 280581, [ +var EarleyBoyer = new BenchmarkSuite('EarleyBoyer', 666463, [ new Benchmark("Earley", function () { BgL_earleyzd2benchmarkzd2(); }), new Benchmark("Boyer", function () { BgL_nboyerzd2benchmarkzd2(); }) ]); diff --git a/deps/v8/benchmarks/raytrace.js b/deps/v8/benchmarks/raytrace.js index eaf61a1..971ef72 100644 --- a/deps/v8/benchmarks/raytrace.js +++ b/deps/v8/benchmarks/raytrace.js @@ -8,7 +8,7 @@ // untouched. This file also contains a copy of parts of the Prototype // JavaScript framework which is used by the ray tracer. -var RayTrace = new BenchmarkSuite('RayTrace', 533115, [ +var RayTrace = new BenchmarkSuite('RayTrace', 739989, [ new Benchmark('RayTrace', renderScene) ]); diff --git a/deps/v8/benchmarks/regexp.js b/deps/v8/benchmarks/regexp.js index f9f816c..f760866 100644 --- a/deps/v8/benchmarks/regexp.js +++ b/deps/v8/benchmarks/regexp.js @@ -35,7 +35,7 @@ // letters in the data are encoded using ROT13 in a way that does not // affect how the regexps match their input. 
-var RegRxp = new BenchmarkSuite('RegExp', 601250, [ +var RegRxp = new BenchmarkSuite('RegExp', 910985, [ new Benchmark("RegExp", runRegExpBenchmark) ]); diff --git a/deps/v8/benchmarks/richards.js b/deps/v8/benchmarks/richards.js index b5736f7..054928d 100644 --- a/deps/v8/benchmarks/richards.js +++ b/deps/v8/benchmarks/richards.js @@ -35,7 +35,7 @@ // Martin Richards. -var Richards = new BenchmarkSuite('Richards', 20687, [ +var Richards = new BenchmarkSuite('Richards', 35302, [ new Benchmark("Richards", runRichards) ]); diff --git a/deps/v8/benchmarks/splay.js b/deps/v8/benchmarks/splay.js index d63ab8b..6b4f56d 100644 --- a/deps/v8/benchmarks/splay.js +++ b/deps/v8/benchmarks/splay.js @@ -33,7 +33,7 @@ // also has to deal with a lot of changes to the large tree object // graph. -var Splay = new BenchmarkSuite('Splay', 21915, [ +var Splay = new BenchmarkSuite('Splay', 81491, [ new Benchmark("Splay", SplayRun, SplaySetup, SplayTearDown) ]); diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index dd1b8ca..27da418 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -323,7 +323,10 @@ class V8EXPORT HeapSnapshot { enum Type { kFull = 0, // Heap snapshot with all instances and references. kAggregated = 1 // Snapshot doesn't contain individual heap entries, - //instead they are grouped by constructor name. + // instead they are grouped by constructor name. + }; + enum SerializationFormat { + kJSON = 0 // See format description near 'Serialize' method. }; /** Returns heap snapshot type. */ @@ -343,6 +346,30 @@ class V8EXPORT HeapSnapshot { * of the same type can be compared. */ const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const; + + /** + * Prepare a serialized representation of the snapshot. The result + * is written into the stream provided in chunks of specified size. + * The total length of the serialized snapshot is unknown in + * advance, it is can be roughly equal to JS heap size (that means, + * it can be really big - tens of megabytes). + * + * For the JSON format, heap contents are represented as an object + * with the following structure: + * + * { + * snapshot: {title: "...", uid: nnn}, + * nodes: [ + * meta-info (JSON string), + * nodes themselves + * ], + * strings: [strings] + * } + * + * Outgoing node links are stored after each node. Nodes reference strings + * and other nodes by their indexes in corresponding arrays. + */ + void Serialize(OutputStream* stream, SerializationFormat format) const; }; diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index b89c244..0613d58 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -3196,6 +3196,34 @@ class V8EXPORT Locker { }; +/** + * An interface for exporting data from V8, using "push" model. + */ +class V8EXPORT OutputStream { +public: + enum OutputEncoding { + kAscii = 0 // 7-bit ASCII. + }; + enum WriteResult { + kContinue = 0, + kAbort = 1 + }; + virtual ~OutputStream() {} + /** Notify about the end of stream. */ + virtual void EndOfStream() = 0; + /** Get preferred output chunk size. Called only once. */ + virtual int GetChunkSize() { return 1024; } + /** Get preferred output encoding. Called only once. */ + virtual OutputEncoding GetOutputEncoding() { return kAscii; } + /** + * Writes the next chunk of snapshot data into the stream. Writing + * can be stopped by returning kAbort as function result. EndOfStream + * will not be called in case writing was aborted. 
+ */ + virtual WriteResult WriteAsciiChunk(char* data, int size) = 0; +}; + + // --- I m p l e m e n t a t i o n --- diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 0d01fcc..e09d4c9 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -4739,6 +4739,23 @@ const HeapSnapshotsDiff* HeapSnapshot::CompareWith( } +void HeapSnapshot::Serialize(OutputStream* stream, + HeapSnapshot::SerializationFormat format) const { + IsDeadCheck("v8::HeapSnapshot::Serialize"); + ApiCheck(format == kJSON, + "v8::HeapSnapshot::Serialize", + "Unknown serialization format"); + ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii, + "v8::HeapSnapshot::Serialize", + "Unsupported output encoding"); + ApiCheck(stream->GetChunkSize() > 0, + "v8::HeapSnapshot::Serialize", + "Invalid stream chunk size"); + i::HeapSnapshotJSONSerializer serializer(ToInternal(this)); + serializer.Serialize(stream); +} + + int HeapProfiler::GetSnapshotsCount() { IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount"); return i::HeapProfiler::GetSnapshotsCount(); diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index fa93030..8f801cf 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -930,6 +930,24 @@ void CompareStub::Generate(MacroAssembler* masm) { Label slow; // Call builtin. Label not_smis, both_loaded_as_doubles, lhs_not_nan; + if (include_smi_compare_) { + Label not_two_smis, smi_done; + __ orr(r2, r1, r0); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, ¬_two_smis); + __ sub(r0, r1, r0); + __ b(vc, &smi_done); + // Correct the sign in case of overflow. + __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); + __ bind(&smi_done); + __ Ret(); + __ bind(¬_two_smis); + } else if (FLAG_debug_code) { + __ orr(r2, r1, r0); + __ tst(r2, Operand(kSmiTagMask)); + __ Assert(nz, "CompareStub: unexpected smi operands."); + } + // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. @@ -2288,7 +2306,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) { __ push(r0); __ TailCallRuntime(Runtime::kStackGuard, 1, 1); - __ StubReturn(1); + __ Ret(); } @@ -2299,32 +2317,37 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); if (op_ == Token::SUB) { - // Check whether the value is a smi. - Label try_float; - __ tst(r0, Operand(kSmiTagMask)); - __ b(ne, &try_float); - - // Go slow case if the value of the expression is zero - // to make sure that we switch between 0 and -0. - if (negative_zero_ == kStrictNegativeZero) { - // If we have to check for zero, then we can check for the max negative - // smi while we are at it. - __ bic(ip, r0, Operand(0x80000000), SetCC); - __ b(eq, &slow); - __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); - __ StubReturn(1); - } else { - // The value of the expression is a smi and 0 is OK for -0. Try - // optimistic subtraction '0 - value'. - __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC); - __ StubReturn(1, vc); - // We don't have to reverse the optimistic neg since the only case - // where we fall through is the minimum negative Smi, which is the case - // where the neg leaves the register unchanged. - __ jmp(&slow); // Go slow on max negative Smi. + if (include_smi_code_) { + // Check whether the value is a smi. + Label try_float; + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &try_float); + + // Go slow case if the value of the expression is zero + // to make sure that we switch between 0 and -0. 
+ if (negative_zero_ == kStrictNegativeZero) { + // If we have to check for zero, then we can check for the max negative + // smi while we are at it. + __ bic(ip, r0, Operand(0x80000000), SetCC); + __ b(eq, &slow); + __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); + __ Ret(); + } else { + // The value of the expression is a smi and 0 is OK for -0. Try + // optimistic subtraction '0 - value'. + __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC); + __ Ret(vc); + // We don't have to reverse the optimistic neg since the only case + // where we fall through is the minimum negative Smi, which is the case + // where the neg leaves the register unchanged. + __ jmp(&slow); // Go slow on max negative Smi. + } + __ bind(&try_float); + } else if (FLAG_debug_code) { + __ tst(r0, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected smi operand."); } - __ bind(&try_float); __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); __ cmp(r1, heap_number_map); @@ -2344,6 +2367,19 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ mov(r0, Operand(r1)); } } else if (op_ == Token::BIT_NOT) { + if (include_smi_code_) { + Label non_smi; + __ BranchOnNotSmi(r0, &non_smi); + __ mvn(r0, Operand(r0)); + // Bit-clear inverted smi-tag. + __ bic(r0, r0, Operand(kSmiTagMask)); + __ Ret(); + __ bind(&non_smi); + } else if (FLAG_debug_code) { + __ tst(r0, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected smi operand."); + } + // Check if the operand is a heap number. __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); @@ -2391,7 +2427,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { } __ bind(&done); - __ StubReturn(1); + __ Ret(); // Handle the slow case by jumping to the JavaScript builtin. __ bind(&slow); @@ -3499,6 +3535,11 @@ const char* CompareStub::GetName() { include_number_compare_name = "_NO_NUMBER"; } + const char* include_smi_compare_name = ""; + if (!include_smi_compare_) { + include_smi_compare_name = "_NO_SMI"; + } + OS::SNPrintF(Vector(name_, kMaxNameLength), "CompareStub_%s%s%s%s%s%s", cc_name, @@ -3506,7 +3547,8 @@ const char* CompareStub::GetName() { rhs_name, strict_name, never_nan_nan_name, - include_number_compare_name); + include_number_compare_name, + include_smi_compare_name); return name_; } @@ -3522,7 +3564,8 @@ int CompareStub::MinorKey() { | RegisterField::encode(lhs_.is(r0)) | StrictField::encode(strict_) | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_); + | IncludeNumberCompareField::encode(include_number_compare_) + | IncludeSmiCompareField::encode(include_smi_compare_); } @@ -4144,17 +4187,21 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Check bounds and smi-ness. - __ ldr(r7, MemOperand(sp, kToOffset)); - __ ldr(r6, MemOperand(sp, kFromOffset)); + Register to = r6; + Register from = r7; + __ Ldrd(to, from, MemOperand(sp, kToOffset)); + STATIC_ASSERT(kFromOffset == kToOffset + 4); STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); // I.e., arithmetic shift right by one un-smi-tags. - __ mov(r2, Operand(r7, ASR, 1), SetCC); - __ mov(r3, Operand(r6, ASR, 1), SetCC, cc); - // If either r2 or r6 had the smi tag bit set, then carry is set now. + __ mov(r2, Operand(to, ASR, 1), SetCC); + __ mov(r3, Operand(from, ASR, 1), SetCC, cc); + // If either to or from had the smi tag bit set, then carry is set now. 
__ b(cs, &runtime); // Either "from" or "to" is not a smi. __ b(mi, &runtime); // From is negative. + // Both to and from are smis. + __ sub(r2, r2, Operand(r3), SetCC); __ b(mi, &runtime); // Fail if from > to. // Special handling of sub-strings of length 1 and 2. One character strings @@ -4165,8 +4212,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r2: length // r3: from index (untaged smi) - // r6: from (smi) - // r7: to (smi) + // r6 (a.k.a. to): to (smi) + // r7 (a.k.a. from): from offset (smi) // Make sure first argument is a sequential (or flat) string. __ ldr(r5, MemOperand(sp, kStringOffset)); @@ -4178,10 +4225,10 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r1: instance type // r2: length - // r3: from index (untaged smi) + // r3: from index (untagged smi) // r5: string - // r6: from (smi) - // r7: to (smi) + // r6 (a.k.a. to): to (smi) + // r7 (a.k.a. from): from offset (smi) Label seq_string; __ and_(r4, r1, Operand(kStringRepresentationMask)); STATIC_ASSERT(kSeqStringTag < kConsStringTag); @@ -4207,17 +4254,18 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r2: length // r3: from index (untaged smi) // r5: string - // r6: from (smi) - // r7: to (smi) + // r6 (a.k.a. to): to (smi) + // r7 (a.k.a. from): from offset (smi) __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset)); - __ cmp(r4, Operand(r7)); + __ cmp(r4, Operand(to)); __ b(lt, &runtime); // Fail if to > length. + to = no_reg; // r1: instance type. // r2: result string length. // r3: from index (untaged smi) // r5: string. - // r6: from offset (smi) + // r7 (a.k.a. from): from offset (smi) // Check for flat ascii string. Label non_ascii_flat; __ tst(r1, Operand(kStringEncodingMask)); @@ -4259,12 +4307,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r0: result string. // r2: result string length. // r5: string. - // r6: from offset (smi) + // r7 (a.k.a. from): from offset (smi) // Locate first character of result. __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // Locate 'from' character of string. __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(r6, ASR, 1)); + __ add(r5, r5, Operand(from, ASR, 1)); // r0: result string. // r1: first character of result string. @@ -4280,7 +4328,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&non_ascii_flat); // r2: result string length. // r5: string. - // r6: from offset (smi) + // r7 (a.k.a. from): from offset (smi) // Check for flat two byte string. // Allocate the result. @@ -4292,18 +4340,19 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Locate first character of result. __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // Locate 'from' character of string. - __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // As "from" is a smi it is 2 times the value which matches the size of a two // byte character. - __ add(r5, r5, Operand(r6)); + __ add(r5, r5, Operand(from)); + from = no_reg; // r0: result string. // r1: first character of result. // r2: result length. // r5: first character of string to copy. 
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, - DEST_ALWAYS_ALIGNED); + StringHelper::GenerateCopyCharactersLong( + masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -4379,8 +4428,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { // Stack frame on entry. // sp[0]: right string // sp[4]: left string - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left - __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right + __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1. Label not_same; __ cmp(r0, r1); @@ -4395,12 +4443,12 @@ void StringCompareStub::Generate(MacroAssembler* masm) { __ bind(¬_same); // Check that both objects are sequential ascii strings. - __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime); + __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime); // Compare flat ascii strings natively. Remove arguments from stack first. __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); - GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5); + GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5); // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index f985fb4..6ba166f 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -1651,7 +1651,7 @@ void CodeGenerator::Comparison(Condition cc, // Perform non-smi comparison by stub. // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0. // We call with 0 args because there are 0 on the stack. - CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs); + CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs); frame_->CallStub(&stub, 0); __ cmp(r0, Operand(0, RelocInfo::NONE)); exit.Jump(); @@ -5985,6 +5985,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { GenericUnaryOpStub stub( Token::SUB, overwrite, + NO_UNARY_FLAGS, no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); frame_->CallStub(&stub, 0); frame_->EmitPush(r0); // r0 has result @@ -6009,7 +6010,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { not_smi_label.Bind(); frame_->SpillAll(); __ Move(r0, tos); - GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); + GenericUnaryOpStub stub(Token::BIT_NOT, + overwrite, + NO_UNARY_SMI_CODE_IN_STUB); frame_->CallStub(&stub, 0); frame_->EmitPush(r0); diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 162d97f..d4c3522 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -271,10 +271,6 @@ class CodeGenerator: public AstVisitor { void AddDeferred(DeferredCode* code) { deferred_.Add(code); } - // If the name is an inline runtime function call return the number of - // expected arguments. Otherwise return -1. - static int InlineRuntimeCallArgumentsCount(Handle name); - // Constants related to patching of inlined load/store. static int GetInlinedKeyedLoadInstructionsAfterPatch() { return FLAG_debug_code ? 32 : 13; @@ -290,6 +286,12 @@ class CodeGenerator: public AstVisitor { } private: + // Type of a member function that generates inline code for a native function. 
+ typedef void (CodeGenerator::*InlineFunctionGenerator) + (ZoneList*); + + static const InlineFunctionGenerator kInlineFunctionGenerators[]; + // Construction/Destruction explicit CodeGenerator(MacroAssembler* masm); @@ -447,13 +449,9 @@ class CodeGenerator: public AstVisitor { void Branch(bool if_true, JumpTarget* target); void CheckStack(); - struct InlineRuntimeLUT { - void (CodeGenerator::*method)(ZoneList*); - const char* name; - int nargs; - }; + static InlineFunctionGenerator FindInlineFunctionGenerator( + Runtime::FunctionId function_id); - static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle name); bool CheckForInlineRuntimeCall(CallRuntime* node); static Handle ComputeLazyCompile(int argc); @@ -599,8 +597,6 @@ class CodeGenerator: public AstVisitor { // Size of inlined write barriers generated by EmitNamedStore. static int inlined_write_barrier_size_; - static InlineRuntimeLUT kInlineRuntimeLUT[]; - friend class VirtualFrame; friend class JumpTarget; friend class Reference; diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index f32da6d..c776d67 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -493,7 +493,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) { int context_chain_length = scope()->ContextChainLength(slot->var()->scope()); __ LoadContext(scratch, context_chain_length); - return CodeGenerator::ContextOperand(scratch, slot->index()); + return ContextOperand(scratch, slot->index()); } case Slot::LOOKUP: UNREACHABLE(); @@ -557,19 +557,17 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { // Check if we have the correct context pointer. - __ ldr(r1, - CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX)); + __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); __ cmp(r1, cp); __ Check(eq, "Unexpected declaration in current context."); } if (mode == Variable::CONST) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ str(ip, CodeGenerator::ContextOperand(cp, slot->index())); + __ str(ip, ContextOperand(cp, slot->index())); // No write barrier since the_hole_value is in old space. } else if (function != NULL) { VisitForValue(function, kAccumulator); - __ str(result_register(), - CodeGenerator::ContextOperand(cp, slot->index())); + __ str(result_register(), ContextOperand(cp, slot->index())); int offset = Context::SlotOffset(slot->index()); // We know that we have written a function, which is not a smi. __ mov(r1, Operand(cp)); @@ -674,7 +672,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Perform the comparison as if via '==='. __ ldr(r1, MemOperand(sp, 0)); // Switch value. - if (ShouldInlineSmiCase(Token::EQ_STRICT)) { + bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT); + if (inline_smi_code) { Label slow_case; __ orr(r2, r1, r0); __ tst(r2, Operand(kSmiTagMask)); @@ -686,7 +685,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { __ bind(&slow_case); } - CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0); + CompareFlags flags = inline_smi_code + ? 
NO_SMI_COMPARE_IN_STUB + : NO_COMPARE_FLAGS; + CompareStub stub(eq, true, flags, r1, r0); __ CallStub(&stub); __ cmp(r0, Operand(0, RelocInfo::NONE)); __ b(ne, &next_test); @@ -746,11 +748,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(&done_convert); __ push(r0); - // TODO(kasperl): Check cache validity in generated code. This is a - // fast case for the JSObject::IsSimpleEnum cache validity - // checks. If we cannot guarantee cache validity, call the runtime - // system to check cache validity or get the property names in a - // fixed array. + // BUG(867): Check cache validity in generated code. This is a fast + // case for the JSObject::IsSimpleEnum cache validity checks. If we + // cannot guarantee cache validity, call the runtime system to check + // cache validity or get the property names in a fixed array. // Get the set of properties to enumerate. __ push(r0); // Duplicate the enumerable object on the stack. @@ -881,6 +882,150 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { } +MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( + Slot* slot, + Label* slow) { + ASSERT(slot->type() == Slot::CONTEXT); + Register current = cp; + Register next = r3; + Register temp = r4; + + for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ tst(temp, temp); + __ b(ne, slow); + } + __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX)); + __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); + // Walk the rest of the chain without clobbering cp. + current = next; + } + } + // Check that last extension is NULL. + __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ tst(temp, temp); + __ b(ne, slow); + __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX)); + return ContextOperand(temp, slot->index()); +} + + +void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( + Slot* slot, + TypeofState typeof_state, + Label* slow, + Label* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { + EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow); + __ jmp(done); + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { + Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); + Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); + if (potential_slot != NULL) { + // Generate fast case for locals that rewrite to slots. + __ ldr(r0, ContextSlotOperandCheckExtensions(potential_slot, slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r0, ip); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + } + __ jmp(done); + } else if (rewrite != NULL) { + // Generate fast case for calls of an argument function. 
+ Property* property = rewrite->AsProperty(); + if (property != NULL) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + Literal* key_literal = property->key()->AsLiteral(); + if (obj_proxy != NULL && + key_literal != NULL && + obj_proxy->IsArguments() && + key_literal->handle()->IsSmi()) { + // Load arguments object if there are no eval-introduced + // variables. Then load the argument from the arguments + // object using keyed load. + __ ldr(r1, + ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), + slow)); + __ mov(r0, Operand(key_literal->handle())); + Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + __ jmp(done); + } + } + } + } +} + + +void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions( + Slot* slot, + TypeofState typeof_state, + Label* slow) { + Register current = cp; + Register next = r1; + Register temp = r2; + + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ tst(temp, temp); + __ b(ne, slow); + } + // Load next context in chain. + __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX)); + __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); + // Walk the rest of the chain without clobbering cp. + current = next; + } + // If no outer scope calls eval, we do not need to check more + // context extensions. + if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s->is_eval_scope()) { + Label loop, fast; + if (!current.is(next)) { + __ Move(next, current); + } + __ bind(&loop); + // Terminate at global context. + __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex); + __ cmp(temp, ip); + __ b(eq, &fast); + // Check that extension is NULL. + __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX)); + __ tst(temp, temp); + __ b(ne, slow); + // Load next context in chain. + __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX)); + __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); + __ b(&loop); + __ bind(&fast); + } + + __ ldr(r0, CodeGenerator::GlobalObject()); + __ mov(r2, Operand(slot->var()->name())); + RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) + ? RelocInfo::CODE_TARGET + : RelocInfo::CODE_TARGET_CONTEXT; + Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + __ Call(ic, mode); +} + + void FullCodeGenerator::EmitVariableLoad(Variable* var, Expression::Context context) { // Four cases: non-this global variables, lookup slots, all other @@ -900,10 +1045,19 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, Apply(context, r0); } else if (slot != NULL && slot->type() == Slot::LOOKUP) { + Label done, slow; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done); + + __ bind(&slow); Comment cmnt(masm_, "Lookup slot"); __ mov(r1, Operand(var->name())); __ Push(cp, r1); // Context and name. __ CallRuntime(Runtime::kLoadContextSlot, 2); + __ bind(&done); + Apply(context, r0); } else if (slot != NULL) { @@ -913,14 +1067,11 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, if (var->mode() == Variable::CONST) { // Constants may be the hole value if they have not been initialized. // Unhole them. 
- Label done; MemOperand slot_operand = EmitSlotSearch(slot, r0); __ ldr(r0, slot_operand); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(r0, ip); - __ b(ne, &done); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - __ bind(&done); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); Apply(context, r0); } else { Apply(context, slot); @@ -1647,15 +1798,41 @@ void FullCodeGenerator::VisitCall(Call* expr) { EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT); } else if (var != NULL && var->slot() != NULL && var->slot()->type() == Slot::LOOKUP) { - // Call to a lookup slot (dynamically introduced variable). Call the - // runtime to find the function to call (returned in eax) and the object - // holding it (returned in edx). + // Call to a lookup slot (dynamically introduced variable). + Label slow, done; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLoadFromSlotFastCase(var->slot(), + NOT_INSIDE_TYPEOF, + &slow, + &done); + + __ bind(&slow); + // Call the runtime to find the function to call (returned in eax) + // and the object holding it (returned in edx). __ push(context_register()); __ mov(r2, Operand(var->name())); __ push(r2); __ CallRuntime(Runtime::kLoadContextSlot, 2); - __ push(r0); // Function. - __ push(r1); // Receiver. + __ Push(r0, r1); // Function, receiver. + + // If fast case code has been generated, emit code to push the + // function and receiver and have the slow path jump around this + // code. + if (done.is_linked()) { + Label call; + __ b(&call); + __ bind(&done); + // Push function. + __ push(r0); + // Push global receiver. + __ ldr(r1, CodeGenerator::GlobalObject()); + __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); + __ push(r1); + __ bind(&call); + } + EmitCallWithStub(expr); } else if (fun->AsProperty() != NULL) { // Call to an object property. @@ -1678,12 +1855,9 @@ void FullCodeGenerator::VisitCall(Call* expr) { Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - // Push result (function). - __ push(r0); - // Push Global receiver. __ ldr(r1, CodeGenerator::GlobalObject()); __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); - __ push(r1); + __ Push(r0, r1); // Function, receiver. EmitCallWithStub(expr); } else { EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET); @@ -2464,11 +2638,9 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList* args) { Register key = r0; Register cache = r1; - __ ldr(cache, CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX)); + __ ldr(cache, ContextOperand(cp, Context::GLOBAL_INDEX)); __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset)); - __ ldr(cache, - CodeGenerator::ContextOperand( - cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); + __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); __ ldr(cache, FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id))); @@ -2720,7 +2892,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::SUB, overwrite); + GenericUnaryOpStub stub(Token::SUB, + overwrite, + NO_UNARY_FLAGS); // GenericUnaryOpStub expects the argument to be in the // accumulator register r0. 
VisitForValue(expr->expression(), kAccumulator); @@ -2735,7 +2909,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { // in the accumulator register r0. VisitForValue(expr->expression(), kAccumulator); Label done; - if (ShouldInlineSmiCase(expr->op())) { + bool inline_smi_code = ShouldInlineSmiCase(expr->op()); + if (inline_smi_code) { Label call_stub; __ BranchOnNotSmi(r0, &call_stub); __ mvn(r0, Operand(r0)); @@ -2745,9 +2920,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { __ bind(&call_stub); } bool overwrite = expr->expression()->ResultOverwriteAllowed(); + UnaryOpFlags flags = inline_smi_code + ? NO_UNARY_SMI_CODE_IN_STUB + : NO_UNARY_FLAGS; UnaryOverwriteMode mode = overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::BIT_NOT, mode); + GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags); __ CallStub(&stub); __ bind(&done); Apply(context_, r0); @@ -2929,9 +3107,19 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) { } else if (proxy != NULL && proxy->var()->slot() != NULL && proxy->var()->slot()->type() == Slot::LOOKUP) { + Label done, slow; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + Slot* slot = proxy->var()->slot(); + EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done); + + __ bind(&slow); __ mov(r0, Operand(proxy->name())); __ Push(cp, r0); __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + __ bind(&done); + if (where == kStack) __ push(r0); } else { // This expression cannot throw a reference error at the top level. @@ -3114,7 +3302,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { UNREACHABLE(); } - if (ShouldInlineSmiCase(op)) { + bool inline_smi_code = ShouldInlineSmiCase(op); + if (inline_smi_code) { Label slow_case; __ orr(r2, r0, Operand(r1)); __ BranchOnNotSmi(r2, &slow_case); @@ -3122,8 +3311,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Split(cc, if_true, if_false, NULL); __ bind(&slow_case); } - - CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0); + CompareFlags flags = inline_smi_code + ? 
NO_SMI_COMPARE_IN_STUB + : NO_COMPARE_FLAGS; + CompareStub stub(cc, strict, flags, r1, r0); __ CallStub(&stub); __ cmp(r0, Operand(0, RelocInfo::NONE)); Split(cc, if_true, if_false, fall_through); @@ -3187,7 +3378,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { void FullCodeGenerator::LoadContextField(Register dst, int context_index) { - __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index)); + __ ldr(dst, ContextOperand(cp, context_index)); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 3554431..0e2c49e 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1242,15 +1242,6 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { } -void MacroAssembler::StubReturn(int argc, Condition cond) { - ASSERT(argc >= 1 && generating_stub()); - if (argc > 1) { - add(sp, sp, Operand((argc - 1) * kPointerSize), LeaveCC, cond); - } - Ret(cond); -} - - void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { add(sp, sp, Operand(num_arguments * kPointerSize)); diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index febd87e..48a8059 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -531,9 +531,6 @@ class MacroAssembler: public Assembler { // Call a code stub. void TailCallStub(CodeStub* stub, Condition cond = al); - // Return from a code stub after popping its arguments. - void StubReturn(int argc, Condition cond = al); - // Call a runtime routine. void CallRuntime(Runtime::Function* f, int num_arguments); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 344cb6f..0da5f64 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1220,6 +1220,62 @@ void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { } +void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object, + JSObject* holder, + String* name, + Label* miss) { + ASSERT(holder->IsGlobalObject()); + + // Get the number of arguments. + const int argc = arguments().immediate(); + + // Get the receiver from the stack. + __ ldr(r0, MemOperand(sp, argc * kPointerSize)); + + // If the object is the holder then we know that it's a global + // object which can only happen for contextual calls. In this case, + // the receiver cannot be a smi. + if (object != holder) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, miss); + } + + // Check that the maps haven't changed. + CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss); +} + + +void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell, + JSFunction* function, + Label* miss) { + // Get the value from the cell. + __ mov(r3, Operand(Handle(cell))); + __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); + + // Check that the cell contains the same function. + if (Heap::InNewSpace(function)) { + // We can't embed a pointer to a function in new space so we have + // to verify that the shared function info is unchanged. This has + // the nice side effect that multiple closures based on the same + // function can all use this call IC. Before we load through the + // function, we have to verify that it still is a function. + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, miss); + __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); + __ b(ne, miss); + + // Check the shared function info. Make sure it hasn't changed. 
+ __ Move(r3, Handle(function->shared())); + __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ cmp(r4, r3); + __ b(ne, miss); + } else { + __ cmp(r1, Operand(Handle(function))); + __ b(ne, miss); + } +} + + Object* CallStubCompiler::GenerateMissBranch() { Object* obj = StubCache::ComputeCallMiss(arguments().immediate(), kind_); if (obj->IsFailure()) return obj; @@ -1266,21 +1322,18 @@ Object* CallStubCompiler::CompileCallField(JSObject* object, Object* CallStubCompiler::CompileArrayPushCall(Object* object, JSObject* holder, + JSGlobalPropertyCell* cell, JSFunction* function, - String* name, - CheckType check) { + String* name) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address // ----------------------------------- - // If object is not an array, bail out to regular call. - if (!object->IsJSArray()) { - return Heap::undefined_value(); - } - // TODO(639): faster implementation. - ASSERT(check == RECEIVER_MAP_CHECK); + + // If object is not an array, bail out to regular call. + if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value(); Label miss; @@ -1313,21 +1366,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, Object* CallStubCompiler::CompileArrayPopCall(Object* object, JSObject* holder, + JSGlobalPropertyCell* cell, JSFunction* function, - String* name, - CheckType check) { + String* name) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address // ----------------------------------- - // If object is not an array, bail out to regular call. - if (!object->IsJSArray()) { - return Heap::undefined_value(); - } - // TODO(642): faster implementation. - ASSERT(check == RECEIVER_MAP_CHECK); + + // If object is not an array, bail out to regular call. + if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value(); Label miss; @@ -1358,11 +1408,12 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, } -Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, - JSObject* holder, - JSFunction* function, - String* name, - CheckType check) { +Object* CallStubCompiler::CompileStringCharCodeAtCall( + Object* object, + JSObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name) { // ----------- S t a t e ------------- // -- r2 : function name // -- lr : return address @@ -1372,7 +1423,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, // ----------------------------------- // If object is not a string, bail out to regular call. - if (!object->IsString()) return Heap::undefined_value(); + if (!object->IsString() || cell != NULL) return Heap::undefined_value(); const int argc = arguments().immediate(); @@ -1430,9 +1481,9 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, Object* CallStubCompiler::CompileStringCharAtCall(Object* object, JSObject* holder, + JSGlobalPropertyCell* cell, JSFunction* function, - String* name, - CheckType check) { + String* name) { // ----------- S t a t e ------------- // -- r2 : function name // -- lr : return address @@ -1442,7 +1493,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object, // ----------------------------------- // If object is not a string, bail out to regular call. 
- if (!object->IsString()) return Heap::undefined_value(); + if (!object->IsString() || cell != NULL) return Heap::undefined_value(); const int argc = arguments().immediate(); @@ -1501,6 +1552,80 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object, } +Object* CallStubCompiler::CompileStringFromCharCodeCall( + Object* object, + JSObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name) { + // ----------- S t a t e ------------- + // -- r2 : function name + // -- lr : return address + // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) + // -- ... + // -- sp[argc * 4] : receiver + // ----------------------------------- + + const int argc = arguments().immediate(); + + // If the object is not a JSObject or we got an unexpected number of + // arguments, bail out to the regular call. + if (!object->IsJSObject() || argc != 1) return Heap::undefined_value(); + + Label miss; + GenerateNameCheck(name, &miss); + + if (cell == NULL) { + __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); + + STATIC_ASSERT(kSmiTag == 0); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss); + + CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, + &miss); + } else { + ASSERT(cell->value() == function); + GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss); + GenerateLoadFunctionFromCell(cell, function, &miss); + } + + // Load the char code argument. + Register code = r1; + __ ldr(code, MemOperand(sp, 0 * kPointerSize)); + + // Check the code is a smi. + Label slow; + STATIC_ASSERT(kSmiTag == 0); + __ tst(code, Operand(kSmiTagMask)); + __ b(ne, &slow); + + // Convert the smi code to uint16. + __ and_(code, code, Operand(Smi::FromInt(0xffff))); + + StringCharFromCodeGenerator char_from_code_generator(code, r0); + char_from_code_generator.GenerateFast(masm()); + __ Drop(argc + 1); + __ Ret(); + + ICRuntimeCallHelper call_helper; + char_from_code_generator.GenerateSlow(masm(), call_helper); + + // Tail call the full function. We do not have to patch the receiver + // because the function makes no use of it. + __ bind(&slow); + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); + + __ bind(&miss); + // r2: function name. + Object* obj = GenerateMissBranch(); + if (obj->IsFailure()) return obj; + + // Return the generated code. + return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name); +} + + Object* CallStubCompiler::CompileCallConstant(Object* object, JSObject* holder, JSFunction* function, @@ -1513,8 +1638,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, SharedFunctionInfo* function_info = function->shared(); if (function_info->HasCustomCallGenerator()) { const int id = function_info->custom_call_generator_id(); - Object* result = - CompileCustomCall(id, object, holder, function, name, check); + Object* result = CompileCustomCall( + id, object, holder, NULL, function, name); // undefined means bail out to regular compiler. if (!result->IsUndefined()) { return result; @@ -1714,6 +1839,16 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // -- r2 : name // -- lr : return address // ----------------------------------- + + SharedFunctionInfo* function_info = function->shared(); + if (function_info->HasCustomCallGenerator()) { + const int id = function_info->custom_call_generator_id(); + Object* result = CompileCustomCall( + id, object, holder, cell, function, name); + // undefined means bail out to regular compiler. 
+ if (!result->IsUndefined()) return result; + } + Label miss; GenerateNameCheck(name, &miss); @@ -1721,45 +1856,9 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // Get the number of arguments. const int argc = arguments().immediate(); - // Get the receiver from the stack. - __ ldr(r0, MemOperand(sp, argc * kPointerSize)); - - // If the object is the holder then we know that it's a global - // object which can only happen for contextual calls. In this case, - // the receiver cannot be a smi. - if (object != holder) { - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &miss); - } - - // Check that the maps haven't changed. - CheckPrototypes(object, r0, holder, r3, r1, r4, name, &miss); - - // Get the value from the cell. - __ mov(r3, Operand(Handle(cell))); - __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); - - // Check that the cell contains the same function. - if (Heap::InNewSpace(function)) { - // We can't embed a pointer to a function in new space so we have - // to verify that the shared function info is unchanged. This has - // the nice side effect that multiple closures based on the same - // function can all use this call IC. Before we load through the - // function, we have to verify that it still is a function. - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &miss); - __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); - __ b(ne, &miss); + GenerateGlobalReceiverCheck(object, holder, name, &miss); - // Check the shared function info. Make sure it hasn't changed. - __ mov(r3, Operand(Handle(function->shared()))); - __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - __ cmp(r4, r3); - __ b(ne, &miss); - } else { - __ cmp(r1, Operand(Handle(function))); - __ b(ne, &miss); - } + GenerateLoadFunctionFromCell(cell, function, &miss); // Patch the receiver on the stack with the global proxy if // necessary. diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index e12df64..b2ebece 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -957,14 +957,41 @@ function ArrayIndexOf(element, index) { // If index is still negative, search the entire array. if (index < 0) index = 0; } + var min = index; + var max = length; + if (UseSparseVariant(this, length, true)) { + var intervals = %GetArrayKeys(this, length); + if (intervals.length == 2 && intervals[0] < 0) { + // A single interval. + var intervalMin = -(intervals[0] + 1); + var intervalMax = intervalMin + intervals[1]; + min = MAX(min, intervalMin); + max = intervalMax; // Capped by length already. + // Fall through to loop below. + } else { + if (intervals.length == 0) return -1; + // Get all the keys in sorted order. + var sortedKeys = GetSortedArrayKeys(this, intervals); + var n = sortedKeys.length; + var i = 0; + while (i < n && sortedKeys[i] < index) i++; + while (i < n) { + var key = sortedKeys[i]; + if (!IS_UNDEFINED(key) && this[key] === element) return key; + i++; + } + return -1; + } + } // Lookup through the array. if (!IS_UNDEFINED(element)) { - for (var i = index; i < length; i++) { + for (var i = min; i < max; i++) { if (this[i] === element) return i; } return -1; } - for (var i = index; i < length; i++) { + // Lookup through the array. + for (var i = min; i < max; i++) { if (IS_UNDEFINED(this[i]) && i in this) { return i; } @@ -981,19 +1008,43 @@ function ArrayLastIndexOf(element, index) { } else { index = TO_INTEGER(index); // If index is negative, index from end of the array. 
- if (index < 0) index = length + index; + if (index < 0) index += length; // If index is still negative, do not search the array. - if (index < 0) index = -1; + if (index < 0) return -1; else if (index >= length) index = length - 1; } + var min = 0; + var max = index; + if (UseSparseVariant(this, length, true)) { + var intervals = %GetArrayKeys(this, index + 1); + if (intervals.length == 2 && intervals[0] < 0) { + // A single interval. + var intervalMin = -(intervals[0] + 1); + var intervalMax = intervalMin + intervals[1]; + min = MAX(min, intervalMin); + max = intervalMax; // Capped by index already. + // Fall through to loop below. + } else { + if (intervals.length == 0) return -1; + // Get all the keys in sorted order. + var sortedKeys = GetSortedArrayKeys(this, intervals); + var i = sortedKeys.length - 1; + while (i >= 0) { + var key = sortedKeys[i]; + if (!IS_UNDEFINED(key) && this[key] === element) return key; + i--; + } + return -1; + } + } // Lookup through the array. if (!IS_UNDEFINED(element)) { - for (var i = index; i >= 0; i--) { + for (var i = max; i >= min; i--) { if (this[i] === element) return i; } return -1; } - for (var i = index; i >= 0; i--) { + for (var i = max; i >= min; i--) { if (IS_UNDEFINED(this[i]) && i in this) { return i; } diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index a82d1d6..6e6c2c6 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -1344,23 +1344,33 @@ bool Genesis::InstallNatives() { } -static void InstallCustomCallGenerator(Handle holder_function, - const char* function_name, - int id) { - Handle proto(JSObject::cast(holder_function->instance_prototype())); +static void InstallCustomCallGenerator( + Handle holder_function, + CallStubCompiler::CustomGeneratorOwner owner_flag, + const char* function_name, + int id) { + Handle owner; + if (owner_flag == CallStubCompiler::FUNCTION) { + owner = Handle::cast(holder_function); + } else { + ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE); + owner = Handle( + JSObject::cast(holder_function->instance_prototype())); + } Handle name = Factory::LookupAsciiSymbol(function_name); - Handle function(JSFunction::cast(proto->GetProperty(*name))); + Handle function(JSFunction::cast(owner->GetProperty(*name))); function->shared()->set_function_data(Smi::FromInt(id)); } void Genesis::InstallCustomCallGenerators() { HandleScope scope; -#define INSTALL_CALL_GENERATOR(holder_fun, fun_name, name) \ +#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name) \ { \ Handle holder(global_context()->holder_fun##_function()); \ const int id = CallStubCompiler::k##name##CallGenerator; \ - InstallCustomCallGenerator(holder, #fun_name, id); \ + InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag, \ + #fun_name, id); \ } CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR) #undef INSTALL_CALL_GENERATOR diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 98a5cf6..912d43d 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -340,27 +340,40 @@ enum NegativeZeroHandling { }; +enum UnaryOpFlags { + NO_UNARY_FLAGS = 0, + NO_UNARY_SMI_CODE_IN_STUB = 1 << 0 +}; + + class GenericUnaryOpStub : public CodeStub { public: GenericUnaryOpStub(Token::Value op, UnaryOverwriteMode overwrite, + UnaryOpFlags flags, NegativeZeroHandling negative_zero = kStrictNegativeZero) - : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { } + : op_(op), + overwrite_(overwrite), + include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) 
== 0), + negative_zero_(negative_zero) { } private: Token::Value op_; UnaryOverwriteMode overwrite_; + bool include_smi_code_; NegativeZeroHandling negative_zero_; class OverwriteField: public BitField {}; - class NegativeZeroField: public BitField {}; - class OpField: public BitField {}; + class IncludeSmiCodeField: public BitField {}; + class NegativeZeroField: public BitField {}; + class OpField: public BitField {}; Major MajorKey() { return GenericUnaryOp; } int MinorKey() { return OpField::encode(op_) | - OverwriteField::encode(overwrite_) | - NegativeZeroField::encode(negative_zero_); + OverwriteField::encode(overwrite_) | + IncludeSmiCodeField::encode(include_smi_code_) | + NegativeZeroField::encode(negative_zero_); } void Generate(MacroAssembler* masm); @@ -375,22 +388,43 @@ enum NaNInformation { }; +// Flags that control the compare stub code generation. +enum CompareFlags { + NO_COMPARE_FLAGS = 0, + NO_SMI_COMPARE_IN_STUB = 1 << 0, + NO_NUMBER_COMPARE_IN_STUB = 1 << 1, + CANT_BOTH_BE_NAN = 1 << 2 +}; + + class CompareStub: public CodeStub { public: CompareStub(Condition cc, bool strict, - NaNInformation nan_info = kBothCouldBeNaN, - bool include_number_compare = true, - Register lhs = no_reg, - Register rhs = no_reg) : + CompareFlags flags, + Register lhs, + Register rhs) : cc_(cc), strict_(strict), - never_nan_nan_(nan_info == kCantBothBeNaN), - include_number_compare_(include_number_compare), + never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0), + include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0), + include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0), lhs_(lhs), rhs_(rhs), name_(NULL) { } + CompareStub(Condition cc, + bool strict, + CompareFlags flags) : + cc_(cc), + strict_(strict), + never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0), + include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0), + include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0), + lhs_(no_reg), + rhs_(no_reg), + name_(NULL) { } + void Generate(MacroAssembler* masm); private: @@ -406,6 +440,10 @@ class CompareStub: public CodeStub { // comparison code is used when the number comparison has been inlined, and // the stub will be called if one of the operands is not a number. bool include_number_compare_; + + // Generate the comparison code for two smi operands in the stub. + bool include_smi_compare_; + // Register holding the left hand side of the comparison if the stub gives // a choice, no_reg otherwise. Register lhs_; @@ -413,12 +451,13 @@ class CompareStub: public CodeStub { // a choice, no_reg otherwise. Register rhs_; - // Encoding of the minor key CCCCCCCCCCCCRCNS. + // Encoding of the minor key in 16 bits. class StrictField: public BitField {}; class NeverNanNanField: public BitField {}; class IncludeNumberCompareField: public BitField {}; - class RegisterField: public BitField {}; - class ConditionField: public BitField {}; + class IncludeSmiCompareField: public BitField {}; + class RegisterField: public BitField {}; + class ConditionField: public BitField {}; Major MajorKey() { return Compare; } @@ -436,11 +475,13 @@ class CompareStub: public CodeStub { const char* GetName(); #ifdef DEBUG void Print() { - PrintF("CompareStub (cc %d), (strict %s), " - "(never_nan_nan %s), (number_compare %s) ", + PrintF("CompareStub (minor %d) (cc %d), (strict %s), " + "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ", + MinorKey(), static_cast(cc_), strict_ ? "true" : "false", never_nan_nan_ ? "true" : "false", + include_smi_compare_ ? 
"inluded" : "not included", include_number_compare_ ? "included" : "not included"); if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) { diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 148cefc..daf1c0d 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -344,40 +344,35 @@ void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) { } -// List of special runtime calls which are generated inline. For some of these -// functions the code will be generated inline, and for others a call to a code -// stub will be inlined. +// Lookup table for code generators for special runtime calls which are +// generated inline. +#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \ + &CodeGenerator::Generate##Name, -#define INLINE_RUNTIME_ENTRY(Name, argc, ressize) \ - {&CodeGenerator::Generate##Name, "_" #Name, argc}, \ - -CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = { - INLINE_RUNTIME_FUNCTION_LIST(INLINE_RUNTIME_ENTRY) +const CodeGenerator::InlineFunctionGenerator + CodeGenerator::kInlineFunctionGenerators[] = { + INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS) + INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS) }; +#undef INLINE_FUNCTION_GENERATOR_ADDRESS -#undef INLINE_RUNTIME_ENTRY -CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT( - Handle name) { - const int entries_count = - sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT); - for (int i = 0; i < entries_count; i++) { - InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i]; - if (name->IsEqualTo(CStrVector(entry->name))) { - return entry; - } - } - return NULL; +CodeGenerator::InlineFunctionGenerator + CodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) { + return kInlineFunctionGenerators[ + static_cast(id) - static_cast(Runtime::kFirstInlineFunction)]; } bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) { ZoneList* args = node->arguments(); Handle name = node->name(); - if (name->length() > 0 && name->Get(0) == '_') { - InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name); - if (entry != NULL) { - ((*this).*(entry->method))(args); + Runtime::Function* function = node->function(); + if (function != NULL && function->intrinsic_type == Runtime::INLINE) { + InlineFunctionGenerator generator = + FindInlineFunctionGenerator(function->function_id); + if (generator != NULL) { + ((*this).*(generator))(args); return true; } } @@ -385,14 +380,6 @@ bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) { } -int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle name) { - CodeGenerator::InlineRuntimeLUT* f = - CodeGenerator::FindInlineRuntimeLUT(name); - if (f != NULL) return f->nargs; - return -1; -} - - // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a // known result for the test expression, with no side effects. CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition( diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index aa2d442..2a4d9d4 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -71,48 +71,6 @@ // CodeForDoWhileConditionPosition // CodeForSourcePosition - -#define INLINE_RUNTIME_FUNCTION_LIST(F) \ - F(IsSmi, 1, 1) \ - F(IsNonNegativeSmi, 1, 1) \ - F(IsArray, 1, 1) \ - F(IsRegExp, 1, 1) \ - F(CallFunction, -1 /* receiver + n args + function */, 1) \ - F(IsConstructCall, 0, 1) \ - F(ArgumentsLength, 0, 1) \ - F(Arguments, 1, 1) \ - F(ClassOf, 1, 1) \ - F(ValueOf, 1, 1) \ - F(SetValueOf, 2, 1) \ - F(StringCharCodeAt, 2, 1) \ - F(StringCharFromCode, 1, 1) \ - F(StringCharAt, 2, 1) \ - F(ObjectEquals, 2, 1) \ - F(Log, 3, 1) \ - F(RandomHeapNumber, 0, 1) \ - F(IsObject, 1, 1) \ - F(IsFunction, 1, 1) \ - F(IsUndetectableObject, 1, 1) \ - F(IsSpecObject, 1, 1) \ - F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \ - F(StringAdd, 2, 1) \ - F(SubString, 3, 1) \ - F(StringCompare, 2, 1) \ - F(RegExpExec, 4, 1) \ - F(RegExpConstructResult, 3, 1) \ - F(RegExpCloneResult, 1, 1) \ - F(GetFromCache, 2, 1) \ - F(NumberToString, 1, 1) \ - F(SwapElements, 3, 1) \ - F(MathPow, 2, 1) \ - F(MathSin, 1, 1) \ - F(MathCos, 1, 1) \ - F(MathSqrt, 1, 1) \ - F(IsRegExpEquivalent, 2, 1) \ - F(HasCachedArrayIndex, 1, 1) \ - F(GetCachedArrayIndex, 1, 1) - - #if V8_TARGET_ARCH_IA32 #include "ia32/codegen-ia32.h" #elif V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index bf6d41d..f65f941 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -269,10 +269,19 @@ Handle Compiler::Compile(Handle source, } if (result.is_null()) { - // No cache entry found. Do pre-parsing and compile the script. + // No cache entry found. Do pre-parsing, if it makes sense, and compile + // the script. + // Building preparse data that is only used immediately after is only a + // saving if we might skip building the AST for lazily compiled functions. + // I.e., preparse data isn't relevant when the lazy flag is off, and + // for small sources, odds are that there aren't many functions + // that would be compiled lazily anyway, so we skip the preparse step + // in that case too. ScriptDataImpl* pre_data = input_pre_data; - if (pre_data == NULL && source_length >= FLAG_min_preparse_length) { - pre_data = PreParse(source, NULL, extension); + if (pre_data == NULL + && FLAG_lazy + && source_length >= FLAG_min_preparse_length) { + pre_data = PartialPreParse(source, NULL, extension); } // Create a script object describing the script to be compiled. diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h index be353a3..e52cc94 100644 --- a/deps/v8/src/dateparser-inl.h +++ b/deps/v8/src/dateparser-inl.h @@ -65,8 +65,10 @@ bool DateParser::Parse(Vector str, FixedArray* out) { tz.SetAbsoluteMinute(n); } else if (time.IsExpecting(n)) { time.AddFinal(n); - // Require end, white space or Z immediately after finalizing time. - if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z')) return false; + // Require end, white space, "Z", "+" or "-" immediately after + // finalizing time. + if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z') && + !in.IsAsciiSign()) return false; } else { if (!day.Add(n)) return false; in.Skip('-'); // Ignore suffix '-' for year, month, or day. 
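[Editor's note on the dateparser-inl.h hunk above] Per the updated comment, a '+' or '-' sign may now appear immediately after the finalized time component, where previously only end-of-input, white space, or 'Z' was accepted. A minimal JavaScript sanity check of the intended behaviour follows; the exact date string format is an assumption on my part and is not taken from the patch or its tests:

  // Hypothetical check (runnable in d8): an offset glued directly onto the
  // seconds field should now parse and agree with the UTC-relative string.
  var withOffset = Date.parse("Jul 1 2010 14:00:00+0200");
  var viaUtc     = Date.parse("Jul 1 2010 12:00:00 UTC");
  if (isNaN(withOffset) || withOffset !== viaUtc) {
    print("TZ offset directly after time is not handled");
  }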
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index a63088d..263a2a4 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -174,6 +174,10 @@ DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature") DEFINE_int(max_stack_trace_source_length, 300, "maximum length of function source code printed in a stack trace.") +// full-codegen.cc +DEFINE_bool(always_inline_smi_code, false, + "always inline smi code in non-opt code") + // heap.cc DEFINE_int(max_new_space_size, 0, "max size of the new generation") DEFINE_int(max_old_space_size, 0, "max size of the old generation") diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 5ffebfb..a1c5ec3 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -298,6 +298,11 @@ Handle FullCodeGenerator::MakeCode(CompilationInfo* info) { } +MemOperand FullCodeGenerator::ContextOperand(Register context, int index) { + return CodeGenerator::ContextOperand(context, index); +} + + int FullCodeGenerator::SlotOffset(Slot* slot) { ASSERT(slot != NULL); // Offset is negative because higher indexes are at lower addresses. @@ -319,15 +324,11 @@ int FullCodeGenerator::SlotOffset(Slot* slot) { bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) { - // TODO(kasperl): Once the compare stub allows leaving out the - // inlined smi case, we should get rid of this check. - if (Token::IsCompareOp(op)) return true; - // TODO(kasperl): Once the unary bit not stub allows leaving out - // the inlined smi case, we should get rid of this check. - if (op == Token::BIT_NOT) return true; // Inline smi case inside loops, but not division and modulo which // are too complicated and take up too much space. - return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0); + if (op == Token::DIV ||op == Token::MOD) return false; + if (FLAG_always_inline_smi_code) return true; + return loop_depth_ > 0; } @@ -500,18 +501,36 @@ void FullCodeGenerator::SetSourcePosition(int pos) { } -void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) { - Handle name = expr->name(); - SmartPointer cstring = name->ToCString(); +// Lookup table for code generators for special runtime calls which are +// generated inline. 
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \ + &FullCodeGenerator::Emit##Name, -#define CHECK_EMIT_INLINE_CALL(name, x, y) \ - if (strcmp("_"#name, *cstring) == 0) { \ - Emit##name(expr->arguments()); \ - return; \ - } - INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL) -#undef CHECK_EMIT_INLINE_CALL - UNREACHABLE(); +const FullCodeGenerator::InlineFunctionGenerator + FullCodeGenerator::kInlineFunctionGenerators[] = { + INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS) + INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS) + }; +#undef INLINE_FUNCTION_GENERATOR_ADDRESS + + +FullCodeGenerator::InlineFunctionGenerator + FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) { + return kInlineFunctionGenerators[ + static_cast(id) - static_cast(Runtime::kFirstInlineFunction)]; +} + + +void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) { + ZoneList* args = node->arguments(); + Handle name = node->name(); + Runtime::Function* function = node->function(); + ASSERT(function != NULL); + ASSERT(function->intrinsic_type == Runtime::INLINE); + InlineFunctionGenerator generator = + FindInlineFunctionGenerator(function->function_id); + ASSERT(generator != NULL); + ((*this).*(generator))(args); } diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index 840c825..9db233c 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -243,6 +243,12 @@ class FullCodeGenerator: public AstVisitor { kRightConstant }; + // Type of a member function that generates inline code for a native function. + typedef void (FullCodeGenerator::*InlineFunctionGenerator) + (ZoneList*); + + static const InlineFunctionGenerator kInlineFunctionGenerators[]; + // Compute the frame pointer relative offset for a given local or // parameter slot. int SlotOffset(Slot* slot); @@ -373,14 +379,25 @@ class FullCodeGenerator: public AstVisitor { void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode); // Platform-specific code for inline runtime calls. + InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id); + void EmitInlineRuntimeCall(CallRuntime* expr); #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \ void Emit##name(ZoneList* arguments); + INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL) INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL) #undef EMIT_INLINE_RUNTIME_CALL // Platform-specific code for loading variables. + void EmitLoadGlobalSlotCheckExtensions(Slot* slot, + TypeofState typeof_state, + Label* slow); + MemOperand ContextSlotOperandCheckExtensions(Slot* slot, Label* slow); + void EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + Label* slow, + Label* done); void EmitVariableLoad(Variable* expr, Expression::Context context); // Platform-specific support for allocating a new closure based on @@ -500,6 +517,9 @@ class FullCodeGenerator: public AstVisitor { // in v8::internal::Context. void LoadContextField(Register dst, int context_index); + // Create an operand for a context field. + MemOperand ContextOperand(Register context, int context_index); + // AST node visit functions. 
#define DECLARE_VISIT(type) virtual void Visit##type(type* node); AST_NODE_LIST(DECLARE_VISIT) diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc index f089b85..a909caf 100644 --- a/deps/v8/src/global-handles.cc +++ b/deps/v8/src/global-handles.cc @@ -372,13 +372,14 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) { int post_gc_processing_count = 0; -void GlobalHandles::PostGarbageCollectionProcessing() { +bool GlobalHandles::PostGarbageCollectionProcessing() { // Process weak global handle callbacks. This must be done after the // GC is completely done, because the callbacks may invoke arbitrary // API functions. // At the same time deallocate all DESTROYED nodes. ASSERT(Heap::gc_state() == Heap::NOT_IN_GC); const int initial_post_gc_processing_count = ++post_gc_processing_count; + bool weak_callback_invoked = false; Node** p = &head_; while (*p != NULL) { if ((*p)->PostGarbageCollectionProcessing()) { @@ -389,6 +390,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() { // restart the processing). break; } + weak_callback_invoked = true; } if ((*p)->state_ == Node::DESTROYED) { // Delete the link. @@ -407,6 +409,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() { if (first_deallocated()) { first_deallocated()->set_next(head()); } + return weak_callback_invoked; } diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h index 659f86e..c4c59fd 100644 --- a/deps/v8/src/global-handles.h +++ b/deps/v8/src/global-handles.h @@ -95,8 +95,9 @@ class GlobalHandles : public AllStatic { // Tells whether global handle is weak. static bool IsWeak(Object** location); - // Process pending weak handles. - static void PostGarbageCollectionProcessing(); + // Process pending weak handles. Returns true if any weak handle + // callback has been invoked. + static bool PostGarbageCollectionProcessing(); // Iterates over all strong handles. static void IterateStrongRoots(ObjectVisitor* v); diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 0146401..655254c 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -31,7 +31,6 @@ #include "api.h" #include "arguments.h" #include "bootstrapper.h" -#include "codegen.h" #include "compiler.h" #include "debug.h" #include "execution.h" diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 0d1ad5a..8f7dd3b 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -35,6 +35,16 @@ namespace v8 { namespace internal { +void Heap::UpdateOldSpaceLimits() { + int old_gen_size = PromotedSpaceSize(); + old_gen_promotion_limit_ = + old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); + old_gen_allocation_limit_ = + old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); + old_gen_exhausted_ = false; +} + + int Heap::MaxObjectSizeInPagedSpace() { return Page::kMaxHeapObjectSize; } @@ -403,7 +413,7 @@ void Heap::SetLastScriptId(Object* last_script_id) { } \ if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \ Counters::gc_last_resort_from_handles.Increment(); \ - Heap::CollectAllGarbage(false); \ + Heap::CollectAllAvailableGarbage(); \ { \ AlwaysAllocateScope __scope__; \ __object__ = FUNCTION_CALL; \ diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 443c926..650800f 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -55,7 +55,6 @@ namespace internal { String* Heap::hidden_symbol_; Object* Heap::roots_[Heap::kRootListLength]; - NewSpace Heap::new_space_; OldSpace* Heap::old_pointer_space_ = NULL; OldSpace* Heap::old_data_space_ = NULL; @@ -64,9 +63,6 @@ MapSpace* Heap::map_space_ = NULL; CellSpace* Heap::cell_space_ = NULL; LargeObjectSpace* Heap::lo_space_ = NULL; -static const int kMinimumPromotionLimit = 2*MB; -static const int kMinimumAllocationLimit = 8*MB; - int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit; int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit; @@ -405,17 +401,26 @@ void Heap::GarbageCollectionEpilogue() { } -void Heap::CollectAllGarbage(bool force_compaction) { +void Heap::CollectAllGarbage(bool force_compaction, + CollectionPolicy collectionPolicy) { // Since we are ignoring the return value, the exact choice of space does // not matter, so long as we do not specify NEW_SPACE, which would not // cause a full GC. MarkCompactCollector::SetForceCompaction(force_compaction); - CollectGarbage(0, OLD_POINTER_SPACE); + CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy); MarkCompactCollector::SetForceCompaction(false); } -bool Heap::CollectGarbage(int requested_size, AllocationSpace space) { +void Heap::CollectAllAvailableGarbage() { + CompilationCache::Clear(); + CollectAllGarbage(true, AGGRESSIVE); +} + + +bool Heap::CollectGarbage(int requested_size, + AllocationSpace space, + CollectionPolicy collectionPolicy) { // The VM is in the GC state until exiting this function. VMState state(GC); @@ -442,7 +447,7 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) { ? &Counters::gc_scavenger : &Counters::gc_compactor; rate->Start(); - PerformGarbageCollection(space, collector, &tracer); + PerformGarbageCollection(collector, &tracer, collectionPolicy); rate->Stop(); GarbageCollectionEpilogue(); @@ -475,7 +480,7 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) { void Heap::PerformScavenge() { GCTracer tracer; - PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer); + PerformGarbageCollection(SCAVENGER, &tracer, NORMAL); } @@ -664,9 +669,9 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) { survival_rate_ = survival_rate; } -void Heap::PerformGarbageCollection(AllocationSpace space, - GarbageCollector collector, - GCTracer* tracer) { +void Heap::PerformGarbageCollection(GarbageCollector collector, + GCTracer* tracer, + CollectionPolicy collectionPolicy) { VerifySymbolTable(); if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { ASSERT(!allocation_allowed_); @@ -696,25 +701,45 @@ void Heap::PerformGarbageCollection(AllocationSpace space, UpdateSurvivalRateTrend(start_new_space_size); - int old_gen_size = PromotedSpaceSize(); - old_gen_promotion_limit_ = - old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); - old_gen_allocation_limit_ = - old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); - - if (high_survival_rate_during_scavenges && - IsStableOrIncreasingSurvivalTrend()) { - // Stable high survival rates of young objects both during partial and - // full collection indicate that mutator is either building or modifying - // a structure with a long lifetime. 
- // In this case we aggressively raise old generation memory limits to - // postpone subsequent mark-sweep collection and thus trade memory - // space for the mutation speed. - old_gen_promotion_limit_ *= 2; - old_gen_allocation_limit_ *= 2; + UpdateOldSpaceLimits(); + + // Major GC would invoke weak handle callbacks on weakly reachable + // handles, but won't collect weakly reachable objects until next + // major GC. Therefore if we collect aggressively and weak handle callback + // has been invoked, we rerun major GC to release objects which become + // garbage. + if (collectionPolicy == AGGRESSIVE) { + // Note: as weak callbacks can execute arbitrary code, we cannot + // hope that eventually there will be no weak callbacks invocations. + // Therefore stop recollecting after several attempts. + const int kMaxNumberOfAttempts = 7; + for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { + { DisableAssertNoAllocation allow_allocation; + GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); + if (!GlobalHandles::PostGarbageCollectionProcessing()) break; + } + MarkCompact(tracer); + // Weak handle callbacks can allocate data, so keep limits correct. + UpdateOldSpaceLimits(); + } + } else { + if (high_survival_rate_during_scavenges && + IsStableOrIncreasingSurvivalTrend()) { + // Stable high survival rates of young objects both during partial and + // full collection indicate that mutator is either building or modifying + // a structure with a long lifetime. + // In this case we aggressively raise old generation memory limits to + // postpone subsequent mark-sweep collection and thus trade memory + // space for the mutation speed. + old_gen_promotion_limit_ *= 2; + old_gen_allocation_limit_ *= 2; + } } - old_gen_exhausted_ = false; + { DisableAssertNoAllocation allow_allocation; + GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); + GlobalHandles::PostGarbageCollectionProcessing(); + } } else { tracer_ = tracer; Scavenge(); @@ -725,12 +750,6 @@ void Heap::PerformGarbageCollection(AllocationSpace space, Counters::objs_since_last_young.Set(0); - if (collector == MARK_COMPACTOR) { - DisableAssertNoAllocation allow_allocation; - GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - GlobalHandles::PostGarbageCollectionProcessing(); - } - // Update relocatables. Relocatable::PostGarbageCollectionProcessing(); @@ -1834,6 +1853,13 @@ bool Heap::CreateInitialObjects() { CreateFixedStubs(); + // Allocate the dictionary of intrinsic function names. + obj = StringDictionary::Allocate(Runtime::kNumFunctions); + if (obj->IsFailure()) return false; + obj = Runtime::InitializeIntrinsicFunctionNames(obj); + if (obj->IsFailure()) return false; + set_intrinsic_function_names(StringDictionary::cast(obj)); + if (InitializeNumberStringCache()->IsFailure()) return false; // Allocate cache for single character ASCII strings. diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 484cd22..cfb3b6a 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -114,6 +114,7 @@ namespace internal { V(Object, last_script_id, LastScriptId) \ V(Script, empty_script, EmptyScript) \ V(Smi, real_stack_limit, RealStackLimit) \ + V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \ #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP #define STRONG_ROOT_LIST(V) \ @@ -686,13 +687,21 @@ class Heap : public AllStatic { static void GarbageCollectionPrologue(); static void GarbageCollectionEpilogue(); + enum CollectionPolicy { NORMAL, AGGRESSIVE }; + // Performs garbage collection operation. // Returns whether required_space bytes are available after the collection. - static bool CollectGarbage(int required_space, AllocationSpace space); + static bool CollectGarbage(int required_space, + AllocationSpace space, + CollectionPolicy collectionPolicy = NORMAL); // Performs a full garbage collection. Force compaction if the // parameter is true. - static void CollectAllGarbage(bool force_compaction); + static void CollectAllGarbage(bool force_compaction, + CollectionPolicy collectionPolicy = NORMAL); + + // Last hope GC, should try to squeeze as much as possible. + static void CollectAllAvailableGarbage(); // Notify the heap that a context has been disposed. static int NotifyContextDisposed() { return ++contexts_disposed_; } @@ -1213,9 +1222,14 @@ class Heap : public AllStatic { static GarbageCollector SelectGarbageCollector(AllocationSpace space); // Performs garbage collection - static void PerformGarbageCollection(AllocationSpace space, - GarbageCollector collector, - GCTracer* tracer); + static void PerformGarbageCollection(GarbageCollector collector, + GCTracer* tracer, + CollectionPolicy collectionPolicy); + + static const int kMinimumPromotionLimit = 2 * MB; + static const int kMinimumAllocationLimit = 8 * MB; + + inline static void UpdateOldSpaceLimits(); // Allocate an uninitialized object in map space. 
The behavior is identical // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 2565acb..eef307d 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -860,9 +860,14 @@ void Assembler::add(const Operand& dst, const Immediate& x) { void Assembler::and_(Register dst, int32_t imm32) { + and_(dst, Immediate(imm32)); +} + + +void Assembler::and_(Register dst, const Immediate& x) { EnsureSpace ensure_space(this); last_pc_ = pc_; - emit_arith(4, Operand(dst), Immediate(imm32)); + emit_arith(4, Operand(dst), x); } diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 8a5a4c5..928f172 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -577,6 +577,7 @@ class Assembler : public Malloced { void add(const Operand& dst, const Immediate& x); void and_(Register dst, int32_t imm32); + void and_(Register dst, const Immediate& x); void and_(Register dst, const Operand& src); void and_(const Operand& src, Register dst); void and_(const Operand& dst, const Immediate& x); diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 366b91e..dccf36b 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -1879,36 +1879,36 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, void GenericUnaryOpStub::Generate(MacroAssembler* masm) { - Label slow, done; + Label slow, done, undo; if (op_ == Token::SUB) { - // Check whether the value is a smi. - Label try_float; - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &try_float, not_taken); - - if (negative_zero_ == kStrictNegativeZero) { - // Go slow case if the value of the expression is zero - // to make sure that we switch between 0 and -0. - __ test(eax, Operand(eax)); - __ j(zero, &slow, not_taken); - } + if (include_smi_code_) { + // Check whether the value is a smi. + Label try_float; + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &try_float, not_taken); - // The value of the expression is a smi that is not zero. Try - // optimistic subtraction '0 - value'. - Label undo; - __ mov(edx, Operand(eax)); - __ Set(eax, Immediate(0)); - __ sub(eax, Operand(edx)); - __ j(no_overflow, &done, taken); + if (negative_zero_ == kStrictNegativeZero) { + // Go slow case if the value of the expression is zero + // to make sure that we switch between 0 and -0. + __ test(eax, Operand(eax)); + __ j(zero, &slow, not_taken); + } - // Restore eax and go slow case. - __ bind(&undo); - __ mov(eax, Operand(edx)); - __ jmp(&slow); + // The value of the expression is a smi that is not zero. Try + // optimistic subtraction '0 - value'. + __ mov(edx, Operand(eax)); + __ Set(eax, Immediate(0)); + __ sub(eax, Operand(edx)); + __ j(overflow, &undo, not_taken); + __ StubReturn(1); + + // Try floating point case. + __ bind(&try_float); + } else if (FLAG_debug_code) { + __ AbortIfSmi(eax); + } - // Try floating point case. 
- __ bind(&try_float); __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); __ cmp(edx, Factory::heap_number_map()); __ j(not_equal, &slow); @@ -1928,6 +1928,18 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx); } } else if (op_ == Token::BIT_NOT) { + if (include_smi_code_) { + Label non_smi; + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &non_smi); + __ not_(eax); + __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag. + __ ret(0); + __ bind(&non_smi); + } else if (FLAG_debug_code) { + __ AbortIfSmi(eax); + } + // Check if the operand is a heap number. __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); __ cmp(edx, Factory::heap_number_map()); @@ -1978,6 +1990,10 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ bind(&done); __ StubReturn(1); + // Restore eax and go slow case. + __ bind(&undo); + __ mov(eax, Operand(edx)); + // Handle the slow case by jumping to the JavaScript builtin. __ bind(&slow); __ pop(ecx); // pop return address. @@ -2613,6 +2629,27 @@ void CompareStub::Generate(MacroAssembler* masm) { Label check_unequal_objects, done; + // Compare two smis if required. + if (include_smi_compare_) { + Label non_smi, smi_done; + __ mov(ecx, Operand(edx)); + __ or_(ecx, Operand(eax)); + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &non_smi, not_taken); + __ sub(edx, Operand(eax)); // Return on the result of the subtraction. + __ j(no_overflow, &smi_done); + __ neg(edx); // Correct sign in case of overflow. + __ bind(&smi_done); + __ mov(eax, edx); + __ ret(0); + __ bind(&non_smi); + } else if (FLAG_debug_code) { + __ mov(ecx, Operand(edx)); + __ or_(ecx, Operand(eax)); + __ test(ecx, Immediate(kSmiTagMask)); + __ Assert(not_zero, "Unexpected smi operands."); + } + // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. @@ -3501,7 +3538,8 @@ int CompareStub::MinorKey() { | RegisterField::encode(false) // lhs_ and rhs_ are not used | StrictField::encode(strict_) | NeverNanNanField::encode(cc_ == equal ? 
never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_); + | IncludeNumberCompareField::encode(include_number_compare_) + | IncludeSmiCompareField::encode(include_smi_compare_); } @@ -3541,12 +3579,18 @@ const char* CompareStub::GetName() { include_number_compare_name = "_NO_NUMBER"; } + const char* include_smi_compare_name = ""; + if (!include_smi_compare_) { + include_smi_compare_name = "_NO_SMI"; + } + OS::SNPrintF(Vector(name_, kMaxNameLength), - "CompareStub_%s%s%s%s", + "CompareStub_%s%s%s%s%s", cc_name, strict_name, never_nan_nan_name, - include_number_compare_name); + include_number_compare_name, + include_smi_compare_name); return name_; } diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 854052a..86f3877 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -2646,6 +2646,19 @@ static Condition DoubleCondition(Condition cc) { } +static CompareFlags ComputeCompareFlags(NaNInformation nan_info, + bool inline_number_compare) { + CompareFlags flags = NO_SMI_COMPARE_IN_STUB; + if (nan_info == kCantBothBeNaN) { + flags = static_cast(flags | CANT_BOTH_BE_NAN); + } + if (inline_number_compare) { + flags = static_cast(flags | NO_NUMBER_COMPARE_IN_STUB); + } + return flags; +} + + void CodeGenerator::Comparison(AstNode* node, Condition cc, bool strict, @@ -2773,7 +2786,9 @@ void CodeGenerator::Comparison(AstNode* node, // Setup and call the compare stub. is_not_string.Bind(&left_side); - CompareStub stub(cc, strict, kCantBothBeNaN); + CompareFlags flags = + static_cast(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB); + CompareStub stub(cc, strict, flags); Result result = frame_->CallStub(&stub, &left_side, &right_side); result.ToRegister(); __ cmp(result.reg(), 0); @@ -2867,7 +2882,8 @@ void CodeGenerator::Comparison(AstNode* node, // End of in-line compare, call out to the compare stub. Don't include // number comparison in the stub if it was inlined. - CompareStub stub(cc, strict, nan_info, !inline_number_compare); + CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare); + CompareStub stub(cc, strict, flags); Result answer = frame_->CallStub(&stub, &left_side, &right_side); __ test(answer.reg(), Operand(answer.reg())); answer.Unuse(); @@ -2900,7 +2916,9 @@ void CodeGenerator::Comparison(AstNode* node, // End of in-line compare, call out to the compare stub. Don't include // number comparison in the stub if it was inlined. - CompareStub stub(cc, strict, nan_info, !inline_number_compare); + CompareFlags flags = + ComputeCompareFlags(nan_info, inline_number_compare); + CompareStub stub(cc, strict, flags); Result answer = frame_->CallStub(&stub, &left_side, &right_side); __ test(answer.reg(), Operand(answer.reg())); answer.Unuse(); @@ -2994,7 +3012,6 @@ void CodeGenerator::ConstantSmiComparison(Condition cc, dest->false_target()->Branch(zero); } else { // Do the smi check, then the comparison. - JumpTarget is_not_smi; __ test(left_reg, Immediate(kSmiTagMask)); is_smi.Branch(zero, left_side, right_side); } @@ -3031,7 +3048,9 @@ void CodeGenerator::ConstantSmiComparison(Condition cc, } // Setup and call the compare stub. 
- CompareStub stub(cc, strict, kCantBothBeNaN); + CompareFlags flags = + static_cast(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB); + CompareStub stub(cc, strict, flags); Result result = frame_->CallStub(&stub, left_side, right_side); result.ToRegister(); __ test(result.reg(), Operand(result.reg())); @@ -8146,6 +8165,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { GenericUnaryOpStub stub( Token::SUB, overwrite, + NO_UNARY_FLAGS, no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); Result operand = frame_->Pop(); Result answer = frame_->CallStub(&stub, &operand); @@ -8173,7 +8193,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { __ test(operand.reg(), Immediate(kSmiTagMask)); smi_label.Branch(zero, &operand, taken); - GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); + GenericUnaryOpStub stub(Token::BIT_NOT, + overwrite, + NO_UNARY_SMI_CODE_IN_STUB); Result answer = frame_->CallStub(&stub, &operand); continue_label.Jump(&answer); diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index adc0005..c4a03d1 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -345,10 +345,6 @@ class CodeGenerator: public AstVisitor { bool in_spilled_code() const { return in_spilled_code_; } void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; } - // If the name is an inline runtime function call return the number of - // expected arguments. Otherwise return -1. - static int InlineRuntimeCallArgumentsCount(Handle name); - // Return a position of the element at |index_as_smi| + |additional_offset| // in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi. static Operand FixedArrayElementOperand(Register array, @@ -363,6 +359,12 @@ class CodeGenerator: public AstVisitor { } private: + // Type of a member function that generates inline code for a native function. + typedef void (CodeGenerator::*InlineFunctionGenerator) + (ZoneList*); + + static const InlineFunctionGenerator kInlineFunctionGenerators[]; + // Construction/Destruction explicit CodeGenerator(MacroAssembler* masm); @@ -624,13 +626,9 @@ class CodeGenerator: public AstVisitor { void CheckStack(); - struct InlineRuntimeLUT { - void (CodeGenerator::*method)(ZoneList*); - const char* name; - int nargs; - }; + static InlineFunctionGenerator FindInlineFunctionGenerator( + Runtime::FunctionId function_id); - static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle name); bool CheckForInlineRuntimeCall(CallRuntime* node); void ProcessDeclarations(ZoneList* declarations); @@ -792,8 +790,6 @@ class CodeGenerator: public AstVisitor { // in a spilled state. 
bool in_spilled_code_; - static InlineRuntimeLUT kInlineRuntimeLUT[]; - friend class VirtualFrame; friend class JumpTarget; friend class Reference; diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 1631b04..1e65c4b 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -514,7 +514,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) { int context_chain_length = scope()->ContextChainLength(slot->var()->scope()); __ LoadContext(scratch, context_chain_length); - return CodeGenerator::ContextOperand(scratch, slot->index()); + return ContextOperand(scratch, slot->index()); } case Slot::LOOKUP: UNREACHABLE(); @@ -574,19 +574,17 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { // Check if we have the correct context pointer. - __ mov(ebx, - CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX)); + __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX)); __ cmp(ebx, Operand(esi)); __ Check(equal, "Unexpected declaration in current context."); } if (mode == Variable::CONST) { - __ mov(CodeGenerator::ContextOperand(esi, slot->index()), + __ mov(ContextOperand(esi, slot->index()), Immediate(Factory::the_hole_value())); // No write barrier since the hole value is in old space. } else if (function != NULL) { VisitForValue(function, kAccumulator); - __ mov(CodeGenerator::ContextOperand(esi, slot->index()), - result_register()); + __ mov(ContextOperand(esi, slot->index()), result_register()); int offset = Context::SlotOffset(slot->index()); __ mov(ebx, esi); __ RecordWrite(ebx, offset, result_register(), ecx); @@ -686,7 +684,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Perform the comparison as if via '==='. __ mov(edx, Operand(esp, 0)); // Switch value. - if (ShouldInlineSmiCase(Token::EQ_STRICT)) { + bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT); + if (inline_smi_code) { Label slow_case; __ mov(ecx, edx); __ or_(ecx, Operand(eax)); @@ -699,7 +698,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { __ bind(&slow_case); } - CompareStub stub(equal, true); + CompareFlags flags = inline_smi_code + ? NO_SMI_COMPARE_IN_STUB + : NO_COMPARE_FLAGS; + CompareStub stub(equal, true, flags); __ CallStub(&stub); __ test(eax, Operand(eax)); __ j(not_equal, &next_test); @@ -758,13 +760,57 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(&done_convert); __ push(eax); - // TODO(kasperl): Check cache validity in generated code. This is a - // fast case for the JSObject::IsSimpleEnum cache validity - // checks. If we cannot guarantee cache validity, call the runtime - // system to check cache validity or get the property names in a - // fixed array. + // Check cache validity in generated code. This is a fast case for + // the JSObject::IsSimpleEnum cache validity checks. If we cannot + // guarantee cache validity, call the runtime system to check cache + // validity or get the property names in a fixed array. + Label next, call_runtime; + __ mov(ecx, eax); + __ bind(&next); + + // Check that there are no elements. Register ecx contains the + // current JS object we've reached through the prototype chain. 
+ __ cmp(FieldOperand(ecx, JSObject::kElementsOffset), + Factory::empty_fixed_array()); + __ j(not_equal, &call_runtime); + + // Check that instance descriptors are not empty so that we can + // check for an enum cache. Leave the map in ebx for the subsequent + // prototype load. + __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset)); + __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset)); + __ cmp(edx, Factory::empty_descriptor_array()); + __ j(equal, &call_runtime); + + // Check that there in an enum cache in the non-empty instance + // descriptors (edx). This is the case if the next enumeration + // index field does not contain a smi. + __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset)); + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &call_runtime); + + // For all objects but the receiver, check that the cache is empty. + Label check_prototype; + __ cmp(ecx, Operand(eax)); + __ j(equal, &check_prototype); + __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset)); + __ cmp(edx, Factory::empty_fixed_array()); + __ j(not_equal, &call_runtime); + + // Load the prototype from the map and loop if non-null. + __ bind(&check_prototype); + __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset)); + __ cmp(ecx, Factory::null_value()); + __ j(not_equal, &next); + + // The enum cache is valid. Load the map of the object being + // iterated over and use the cache for the iteration. + Label use_cache; + __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); + __ jmp(&use_cache); // Get the set of properties to enumerate. + __ bind(&call_runtime); __ push(eax); // Duplicate the enumerable object on the stack. __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); @@ -776,6 +822,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ j(not_equal, &fixed_array); // We got a map in register eax. Get the enumeration cache from it. + __ bind(&use_cache); __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset)); __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset)); __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset)); @@ -885,6 +932,152 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { } +void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions( + Slot* slot, + TypeofState typeof_state, + Label* slow) { + Register context = esi; + Register temp = edx; + + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + __ j(not_equal, slow); + } + // Load next context in chain. + __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX)); + __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset)); + // Walk the rest of the chain without clobbering esi. + context = temp; + } + // If no outer scope calls eval, we do not need to check more + // context extensions. If we have reached an eval scope, we check + // all extensions from this point. + if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s != NULL && s->is_eval_scope()) { + // Loop up the context chain. There is no frame effect so it is + // safe to use raw labels here. + Label next, fast; + if (!context.is(temp)) { + __ mov(temp, context); + } + __ bind(&next); + // Terminate at global context. 
+ __ cmp(FieldOperand(temp, HeapObject::kMapOffset), + Immediate(Factory::global_context_map())); + __ j(equal, &fast); + // Check that extension is NULL. + __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0)); + __ j(not_equal, slow); + // Load next context in chain. + __ mov(temp, ContextOperand(temp, Context::CLOSURE_INDEX)); + __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset)); + __ jmp(&next); + __ bind(&fast); + } + + // All extension objects were empty and it is safe to use a global + // load IC call. + __ mov(eax, CodeGenerator::GlobalObject()); + __ mov(ecx, slot->var()->name()); + Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) + ? RelocInfo::CODE_TARGET + : RelocInfo::CODE_TARGET_CONTEXT; + __ call(ic, mode); + __ nop(); // Signal no inlined code. +} + + +MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( + Slot* slot, + Label* slow) { + ASSERT(slot->type() == Slot::CONTEXT); + Register context = esi; + Register temp = ebx; + + for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + __ j(not_equal, slow); + } + __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX)); + __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset)); + // Walk the rest of the chain without clobbering esi. + context = temp; + } + } + // Check that last extension is NULL. + __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); + __ j(not_equal, slow); + __ mov(temp, ContextOperand(context, Context::FCONTEXT_INDEX)); + return ContextOperand(temp, slot->index()); +} + + +void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( + Slot* slot, + TypeofState typeof_state, + Label* slow, + Label* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { + EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow); + __ jmp(done); + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { + Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); + Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); + if (potential_slot != NULL) { + // Generate fast case for locals that rewrite to slots. + __ mov(eax, + ContextSlotOperandCheckExtensions(potential_slot, slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ cmp(eax, Factory::the_hole_value()); + __ j(not_equal, done); + __ mov(eax, Factory::undefined_value()); + } + __ jmp(done); + } else if (rewrite != NULL) { + // Generate fast case for calls of an argument function. + Property* property = rewrite->AsProperty(); + if (property != NULL) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + Literal* key_literal = property->key()->AsLiteral(); + if (obj_proxy != NULL && + key_literal != NULL && + obj_proxy->IsArguments() && + key_literal->handle()->IsSmi()) { + // Load arguments object if there are no eval-introduced + // variables. Then load the argument from the arguments + // object using keyed load. 
+ __ mov(edx, + ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), + slow)); + __ mov(eax, Immediate(key_literal->handle())); + Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + __ jmp(done); + } + } + } + } +} + + void FullCodeGenerator::EmitVariableLoad(Variable* var, Expression::Context context) { // Four cases: non-this global variables, lookup slots, all other @@ -909,10 +1102,19 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, Apply(context, eax); } else if (slot != NULL && slot->type() == Slot::LOOKUP) { + Label done, slow; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done); + + __ bind(&slow); Comment cmnt(masm_, "Lookup slot"); __ push(esi); // Context. __ push(Immediate(var->name())); __ CallRuntime(Runtime::kLoadContextSlot, 2); + __ bind(&done); + Apply(context, eax); } else if (slot != NULL) { @@ -1953,14 +2155,40 @@ void FullCodeGenerator::VisitCall(Call* expr) { EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT); } else if (var != NULL && var->slot() != NULL && var->slot()->type() == Slot::LOOKUP) { - // Call to a lookup slot (dynamically introduced variable). Call the - // runtime to find the function to call (returned in eax) and the object - // holding it (returned in edx). + // Call to a lookup slot (dynamically introduced variable). + Label slow, done; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + EmitDynamicLoadFromSlotFastCase(var->slot(), + NOT_INSIDE_TYPEOF, + &slow, + &done); + + __ bind(&slow); + // Call the runtime to find the function to call (returned in eax) + // and the object holding it (returned in edx). __ push(context_register()); __ push(Immediate(var->name())); __ CallRuntime(Runtime::kLoadContextSlot, 2); __ push(eax); // Function. __ push(edx); // Receiver. + + // If fast case code has been generated, emit code to push the + // function and receiver and have the slow path jump around this + // code. + if (done.is_linked()) { + Label call; + __ jmp(&call); + __ bind(&done); + // Push function. + __ push(eax); + // Push global receiver. + __ mov(ebx, CodeGenerator::GlobalObject()); + __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); + __ bind(&call); + } + EmitCallWithStub(expr); } else if (fun->AsProperty() != NULL) { // Call to an object property. @@ -2781,12 +3009,10 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList* args) { Register key = eax; Register cache = ebx; Register tmp = ecx; - __ mov(cache, CodeGenerator::ContextOperand(esi, Context::GLOBAL_INDEX)); + __ mov(cache, ContextOperand(esi, Context::GLOBAL_INDEX)); __ mov(cache, FieldOperand(cache, GlobalObject::kGlobalContextOffset)); - __ mov(cache, - CodeGenerator::ContextOperand( - cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); + __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); __ mov(cache, FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id))); @@ -2917,7 +3143,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; Handle ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop); __ call(ic, RelocInfo::CODE_TARGET); - // Restore context register. + // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); } else { // Call the C runtime function. 
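[Editor's note on the EmitDynamicLoadFromSlotFastCase / EmitLoadGlobalSlotCheckExtensions hunks above] These helpers add a fast path for LOOKUP slots: when no scope on the chain carries an extension object, the variable is loaded directly instead of going through Runtime::kLoadContextSlot. A small JavaScript sketch of the situation being optimized; this is illustrative only and not taken from the patch:

  // 'x' inside inner() is resolved dynamically because of the direct eval:
  // the eval may or may not introduce a shadowing 'var x'.
  function outer(code) {
    var x = 1;                 // normally resolved through a context slot
    function inner() {
      eval(code);              // may introduce a shadowing 'x'
      return x;                // fast case: no extension object -> slot load
    }                          // slow case: extension found -> runtime lookup
    return inner();
  }
  // outer(";")          === 1   (fast path, outer's x)
  // outer("var x = 2;") === 2   (slow path, eval-introduced x)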
@@ -3036,7 +3262,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::SUB, overwrite); + GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS); // GenericUnaryOpStub expects the argument to be in the // accumulator register eax. VisitForValue(expr->expression(), kAccumulator); @@ -3051,7 +3277,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { // in the accumulator register eax. VisitForValue(expr->expression(), kAccumulator); Label done; - if (ShouldInlineSmiCase(expr->op())) { + bool inline_smi_case = ShouldInlineSmiCase(expr->op()); + if (inline_smi_case) { Label call_stub; __ test(eax, Immediate(kSmiTagMask)); __ j(not_zero, &call_stub); @@ -3063,7 +3290,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { bool overwrite = expr->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode mode = overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::BIT_NOT, mode); + UnaryOpFlags flags = inline_smi_case + ? NO_UNARY_SMI_CODE_IN_STUB + : NO_UNARY_FLAGS; + GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags); __ CallStub(&stub); __ bind(&done); Apply(context_, eax); @@ -3262,13 +3492,24 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) { // Use a regular load, not a contextual load, to avoid a reference // error. __ call(ic, RelocInfo::CODE_TARGET); + __ nop(); // Signal no inlined code. if (where == kStack) __ push(eax); } else if (proxy != NULL && proxy->var()->slot() != NULL && proxy->var()->slot()->type() == Slot::LOOKUP) { + Label done, slow; + + // Generate code for loading from variables potentially shadowed + // by eval-introduced variables. + Slot* slot = proxy->var()->slot(); + EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done); + + __ bind(&slow); __ push(esi); __ push(Immediate(proxy->name())); __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + __ bind(&done); + if (where == kStack) __ push(eax); } else { // This expression cannot throw a reference error at the top level. @@ -3441,7 +3682,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { UNREACHABLE(); } - if (ShouldInlineSmiCase(op)) { + bool inline_smi_code = ShouldInlineSmiCase(op); + if (inline_smi_code) { Label slow_case; __ mov(ecx, Operand(edx)); __ or_(ecx, Operand(eax)); @@ -3452,7 +3694,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { __ bind(&slow_case); } - CompareStub stub(cc, strict); + CompareFlags flags = inline_smi_code + ? 
NO_SMI_COMPARE_IN_STUB + : NO_COMPARE_FLAGS; + CompareStub stub(cc, strict, flags); __ CallStub(&stub); __ test(eax, Operand(eax)); Split(cc, if_true, if_false, fall_through); @@ -3512,7 +3757,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { void FullCodeGenerator::LoadContextField(Register dst, int context_index) { - __ mov(dst, CodeGenerator::ContextOperand(esi, context_index)); + __ mov(dst, ContextOperand(esi, context_index)); } diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 7fc3f81..828e71a 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -1255,6 +1255,61 @@ void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { } +void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object, + JSObject* holder, + String* name, + Label* miss) { + ASSERT(holder->IsGlobalObject()); + + // Get the number of arguments. + const int argc = arguments().immediate(); + + // Get the receiver from the stack. + __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); + + // If the object is the holder then we know that it's a global + // object which can only happen for contextual calls. In this case, + // the receiver cannot be a smi. + if (object != holder) { + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, miss, not_taken); + } + + // Check that the maps haven't changed. + CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss); +} + + +void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell, + JSFunction* function, + Label* miss) { + // Get the value from the cell. + __ mov(edi, Immediate(Handle(cell))); + __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset)); + + // Check that the cell contains the same function. + if (Heap::InNewSpace(function)) { + // We can't embed a pointer to a function in new space so we have + // to verify that the shared function info is unchanged. This has + // the nice side effect that multiple closures based on the same + // function can all use this call IC. Before we load through the + // function, we have to verify that it still is a function. + __ test(edi, Immediate(kSmiTagMask)); + __ j(zero, miss, not_taken); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx); + __ j(not_equal, miss, not_taken); + + // Check the shared function info. Make sure it hasn't changed. + __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset), + Immediate(Handle(function->shared()))); + __ j(not_equal, miss, not_taken); + } else { + __ cmp(Operand(edi), Immediate(Handle(function))); + __ j(not_equal, miss, not_taken); + } +} + + Object* CallStubCompiler::GenerateMissBranch() { Object* obj = StubCache::ComputeCallMiss(arguments().immediate(), kind_); if (obj->IsFailure()) return obj; @@ -1320,9 +1375,9 @@ Object* CallStubCompiler::CompileCallField(JSObject* object, Object* CallStubCompiler::CompileArrayPushCall(Object* object, JSObject* holder, + JSGlobalPropertyCell* cell, JSFunction* function, - String* name, - CheckType check) { + String* name) { // ----------- S t a t e ------------- // -- ecx : name // -- esp[0] : return address @@ -1330,12 +1385,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, // -- ... // -- esp[(argc + 1) * 4] : receiver // ----------------------------------- - ASSERT(check == RECEIVER_MAP_CHECK); // If object is not an array, bail out to regular call. 
- if (!object->IsJSArray()) { - return Heap::undefined_value(); - } + if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value(); Label miss; @@ -1469,9 +1521,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, Object* CallStubCompiler::CompileArrayPopCall(Object* object, JSObject* holder, + JSGlobalPropertyCell* cell, JSFunction* function, - String* name, - CheckType check) { + String* name) { // ----------- S t a t e ------------- // -- ecx : name // -- esp[0] : return address @@ -1479,12 +1531,9 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, // -- ... // -- esp[(argc + 1) * 4] : receiver // ----------------------------------- - ASSERT(check == RECEIVER_MAP_CHECK); // If object is not an array, bail out to regular call. - if (!object->IsJSArray()) { - return Heap::undefined_value(); - } + if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value(); Label miss, return_undefined, call_builtin; @@ -1551,11 +1600,12 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, } -Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, - JSObject* holder, - JSFunction* function, - String* name, - CheckType check) { +Object* CallStubCompiler::CompileStringCharCodeAtCall( + Object* object, + JSObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name) { // ----------- S t a t e ------------- // -- ecx : function name // -- esp[0] : return address @@ -1565,7 +1615,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, // ----------------------------------- // If object is not a string, bail out to regular call. - if (!object->IsString()) return Heap::undefined_value(); + if (!object->IsString() || cell != NULL) return Heap::undefined_value(); const int argc = arguments().immediate(); @@ -1621,9 +1671,9 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, Object* CallStubCompiler::CompileStringCharAtCall(Object* object, JSObject* holder, + JSGlobalPropertyCell* cell, JSFunction* function, - String* name, - CheckType check) { + String* name) { // ----------- S t a t e ------------- // -- ecx : function name // -- esp[0] : return address @@ -1633,7 +1683,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object, // ----------------------------------- // If object is not a string, bail out to regular call. - if (!object->IsString()) return Heap::undefined_value(); + if (!object->IsString() || cell != NULL) return Heap::undefined_value(); const int argc = arguments().immediate(); @@ -1690,6 +1740,79 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object, } +Object* CallStubCompiler::CompileStringFromCharCodeCall( + Object* object, + JSObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name) { + // ----------- S t a t e ------------- + // -- ecx : function name + // -- esp[0] : return address + // -- esp[(argc - n) * 4] : arg[n] (zero-based) + // -- ... + // -- esp[(argc + 1) * 4] : receiver + // ----------------------------------- + + const int argc = arguments().immediate(); + + // If the object is not a JSObject or we got an unexpected number of + // arguments, bail out to the regular call. 
+ if (!object->IsJSObject() || argc != 1) return Heap::undefined_value(); + + Label miss; + GenerateNameCheck(name, &miss); + + if (cell == NULL) { + __ mov(edx, Operand(esp, 2 * kPointerSize)); + + STATIC_ASSERT(kSmiTag == 0); + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &miss); + + CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name, + &miss); + } else { + ASSERT(cell->value() == function); + GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss); + GenerateLoadFunctionFromCell(cell, function, &miss); + } + + // Load the char code argument. + Register code = ebx; + __ mov(code, Operand(esp, 1 * kPointerSize)); + + // Check the code is a smi. + Label slow; + STATIC_ASSERT(kSmiTag == 0); + __ test(code, Immediate(kSmiTagMask)); + __ j(not_zero, &slow); + + // Convert the smi code to uint16. + __ and_(code, Immediate(Smi::FromInt(0xffff))); + + StringCharFromCodeGenerator char_from_code_generator(code, eax); + char_from_code_generator.GenerateFast(masm()); + __ ret(2 * kPointerSize); + + ICRuntimeCallHelper call_helper; + char_from_code_generator.GenerateSlow(masm(), call_helper); + + // Tail call the full function. We do not have to patch the receiver + // because the function makes no use of it. + __ bind(&slow); + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); + + __ bind(&miss); + // ecx: function name. + Object* obj = GenerateMissBranch(); + if (obj->IsFailure()) return obj; + + // Return the generated code. + return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name); +} + + Object* CallStubCompiler::CompileCallConstant(Object* object, JSObject* holder, JSFunction* function, @@ -1706,12 +1829,10 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, SharedFunctionInfo* function_info = function->shared(); if (function_info->HasCustomCallGenerator()) { const int id = function_info->custom_call_generator_id(); - Object* result = - CompileCustomCall(id, object, holder, function, name, check); + Object* result = CompileCustomCall( + id, object, holder, NULL, function, name); // undefined means bail out to regular compiler. - if (!result->IsUndefined()) { - return result; - } + if (!result->IsUndefined()) return result; } Label miss_in_smi_check; @@ -1922,6 +2043,16 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // -- ... // -- esp[(argc + 1) * 4] : receiver // ----------------------------------- + + SharedFunctionInfo* function_info = function->shared(); + if (function_info->HasCustomCallGenerator()) { + const int id = function_info->custom_call_generator_id(); + Object* result = CompileCustomCall( + id, object, holder, cell, function, name); + // undefined means bail out to regular compiler. + if (!result->IsUndefined()) return result; + } + Label miss; GenerateNameCheck(name, &miss); @@ -1929,44 +2060,9 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // Get the number of arguments. const int argc = arguments().immediate(); - // Get the receiver from the stack. - __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); + GenerateGlobalReceiverCheck(object, holder, name, &miss); - // If the object is the holder then we know that it's a global - // object which can only happen for contextual calls. In this case, - // the receiver cannot be a smi. - if (object != holder) { - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &miss, not_taken); - } - - // Check that the maps haven't changed. 
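// Illustrative sketch only (standalone C++, not part of this patch or of
// V8): CompileStringFromCharCodeCall above masks the smi argument with
// 0xffff before producing the one-character string, matching the ToUint16
// truncation that String.fromCharCode applies.  The helper below mirrors
// just that masking step; the function name is invented.
#include <cassert>
#include <cstdint>

uint16_t NormalizeCharCode(int32_t code) {
  return static_cast<uint16_t>(code & 0xffff);   // same effect as `and_(code, 0xffff)`
}

int main() {
  assert(NormalizeCharCode(0x41) == 0x41);       // 'A'
  assert(NormalizeCharCode(0x10041) == 0x41);    // bits above 16 are dropped, still 'A'
  return 0;
}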
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss); - - // Get the value from the cell. - __ mov(edi, Immediate(Handle(cell))); - __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset)); - - // Check that the cell contains the same function. - if (Heap::InNewSpace(function)) { - // We can't embed a pointer to a function in new space so we have - // to verify that the shared function info is unchanged. This has - // the nice side effect that multiple closures based on the same - // function can all use this call IC. Before we load through the - // function, we have to verify that it still is a function. - __ test(edi, Immediate(kSmiTagMask)); - __ j(zero, &miss, not_taken); - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx); - __ j(not_equal, &miss, not_taken); - - // Check the shared function info. Make sure it hasn't changed. - __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset), - Immediate(Handle(function->shared()))); - __ j(not_equal, &miss, not_taken); - } else { - __ cmp(Operand(edi), Immediate(Handle(function))); - __ j(not_equal, &miss, not_taken); - } + GenerateLoadFunctionFromCell(cell, function, &miss); // Patch the receiver on the stack with the global proxy. if (object->IsGlobalObject()) { diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc index 5a8749e..41523a8 100644 --- a/deps/v8/src/liveedit.cc +++ b/deps/v8/src/liveedit.cc @@ -617,9 +617,33 @@ class FunctionInfoListener { current_parent_index_ = info.GetParentIndex(); } -// TODO(LiveEdit): Move private method below. -// This private section was created here to avoid moving the function -// to keep already complex diff simpler. + public: + // Saves only function code, because for a script function we + // may never create a SharedFunctionInfo object. + void FunctionCode(Handle function_code) { + FunctionInfoWrapper info = + FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_)); + info.SetFunctionCode(function_code, Handle(Heap::null_value())); + } + + // Saves full information about a function: its code, its scope info + // and a SharedFunctionInfo object. + void FunctionInfo(Handle shared, Scope* scope) { + if (!shared->IsSharedFunctionInfo()) { + return; + } + FunctionInfoWrapper info = + FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_)); + info.SetFunctionCode(Handle(shared->code()), + Handle(shared->scope_info())); + info.SetSharedFunctionInfo(shared); + + Handle scope_info_list(SerializeFunctionScope(scope)); + info.SetOuterScopeInfo(scope_info_list); + } + + Handle GetResult() { return result_; } + private: Object* SerializeFunctionScope(Scope* scope) { HandleScope handle_scope; @@ -676,36 +700,6 @@ class FunctionInfoListener { return *scope_info_list; } - public: - // Saves only function code, because for a script function we - // may never create a SharedFunctionInfo object. - void FunctionCode(Handle function_code) { - FunctionInfoWrapper info = - FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_)); - info.SetFunctionCode(function_code, Handle(Heap::null_value())); - } - - // Saves full information about a function: its code, its scope info - // and a SharedFunctionInfo object. 
- void FunctionInfo(Handle shared, Scope* scope) { - if (!shared->IsSharedFunctionInfo()) { - return; - } - FunctionInfoWrapper info = - FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_)); - info.SetFunctionCode(Handle(shared->code()), - Handle(shared->scope_info())); - info.SetSharedFunctionInfo(shared); - - Handle scope_info_list(SerializeFunctionScope(scope)); - info.SetOuterScopeInfo(scope_info_list); - } - - Handle GetResult() { - return result_; - } - - private: Handle result_; int len_; int current_parent_index_; diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 7667e89..856c474 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -872,11 +872,14 @@ class ParserLog BASE_EMBEDDED { // Records the occurrence of a function. virtual FunctionEntry LogFunction(int start) { return FunctionEntry(); } virtual void LogSymbol(int start, Vector symbol) {} + virtual void LogError() { } // Return the current position in the function entry log. virtual int function_position() { return 0; } virtual int symbol_position() { return 0; } virtual int symbol_ids() { return 0; } - virtual void LogError() { } + virtual Vector ExtractData() { + return Vector(); + }; }; @@ -889,9 +892,14 @@ class AstBuildingParserFactory : public ParserFactory { virtual Handle LookupSymbol(int symbol_id, Vector string) { - // If there is no preparse data, we have no simpler way to identify similar - // symbols. - if (symbol_id < 0) return Factory::LookupSymbol(string); + // Length of symbol cache is the number of identified symbols. + // If we are larger than that, or negative, it's not a cached symbol. + // This might also happen if there is no preparser symbol data, even + // if there is some preparser data. + if (static_cast(symbol_id) + >= static_cast(symbol_cache_.length())) { + return Factory::LookupSymbol(string); + } return LookupCachedSymbol(symbol_id, string); } @@ -933,34 +941,78 @@ class AstBuildingParserFactory : public ParserFactory { }; -class ParserRecorder: public ParserLog { +// Record only functions. +class PartialParserRecorder: public ParserLog { public: - ParserRecorder(); + PartialParserRecorder(); virtual FunctionEntry LogFunction(int start); + + virtual int function_position() { return function_store_.size(); } + + virtual void LogError() { } + + virtual void LogMessage(Scanner::Location loc, + const char* message, + Vector args); + + virtual Vector ExtractData() { + int function_size = function_store_.size(); + int total_size = ScriptDataImpl::kHeaderSize + function_size; + Vector data = Vector::New(total_size); + preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size; + preamble_[ScriptDataImpl::kSymbolCountOffset] = 0; + memcpy(data.start(), preamble_, sizeof(preamble_)); + int symbol_start = ScriptDataImpl::kHeaderSize + function_size; + if (function_size > 0) { + function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize, + symbol_start)); + } + return data; + } + + protected: + bool has_error() { + return static_cast(preamble_[ScriptDataImpl::kHasErrorOffset]); + } + + void WriteString(Vector str); + + Collector function_store_; + unsigned preamble_[ScriptDataImpl::kHeaderSize]; +#ifdef DEBUG + int prev_start; +#endif +}; + + +// Record both functions and symbols. 
+class CompleteParserRecorder: public PartialParserRecorder { + public: + CompleteParserRecorder(); + virtual void LogSymbol(int start, Vector literal) { int hash = vector_hash(literal); HashMap::Entry* entry = symbol_table_.Lookup(&literal, hash, true); int id = static_cast(reinterpret_cast(entry->value)); if (id == 0) { // Put (symbol_id_ + 1) into entry and increment it. - symbol_id_++; - entry->value = reinterpret_cast(symbol_id_); + id = ++symbol_id_; + entry->value = reinterpret_cast(id); Vector > symbol = symbol_entries_.AddBlock(1, literal); entry->key = &symbol[0]; - } else { - // Log a reuse of an earlier seen symbol. - symbol_store_.Add(start); - symbol_store_.Add(id - 1); } + symbol_store_.Add(id - 1); } - virtual void LogError() { } - virtual void LogMessage(Scanner::Location loc, - const char* message, - Vector args); - Vector ExtractData() { + + virtual Vector ExtractData() { int function_size = function_store_.size(); + // Add terminator to symbols, then pad to unsigned size. int symbol_size = symbol_store_.size(); - int total_size = ScriptDataImpl::kHeaderSize + function_size + symbol_size; + int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned)); + symbol_store_.AddBlock(padding, ScriptDataImpl::kNumberTerminator); + symbol_size += padding; + int total_size = ScriptDataImpl::kHeaderSize + function_size + + (symbol_size / sizeof(unsigned)); Vector data = Vector::New(total_size); preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size; preamble_[ScriptDataImpl::kSymbolCountOffset] = symbol_id_; @@ -970,23 +1022,17 @@ class ParserRecorder: public ParserLog { function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize, symbol_start)); } - if (symbol_size > 0) { - symbol_store_.WriteTo(data.SubVector(symbol_start, total_size)); + if (!has_error()) { + symbol_store_.WriteTo( + Vector::cast(data.SubVector(symbol_start, total_size))); } return data; } - virtual int function_position() { return function_store_.size(); } virtual int symbol_position() { return symbol_store_.size(); } virtual int symbol_ids() { return symbol_id_; } private: - Collector function_store_; - Collector symbol_store_; - Collector > symbol_entries_; - HashMap symbol_table_; - int symbol_id_; - - static int vector_hash(Vector string) { + static int vector_hash(Vector string) { int hash = 0; for (int i = 0; i < string.length(); i++) { int c = string[i]; @@ -1005,15 +1051,13 @@ class ParserRecorder: public ParserLog { return memcmp(string1->start(), string2->start(), length) == 0; } - unsigned preamble_[ScriptDataImpl::kHeaderSize]; -#ifdef DEBUG - int prev_start; -#endif + // Write a non-negative number to the symbol store. + void WriteNumber(int number); - bool has_error() { - return static_cast(preamble_[ScriptDataImpl::kHasErrorOffset]); - } - void WriteString(Vector str); + Collector symbol_store_; + Collector > symbol_entries_; + HashMap symbol_table_; + int symbol_id_; }; @@ -1038,18 +1082,11 @@ FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) { } -int ScriptDataImpl::GetSymbolIdentifier(int start) { - int next = symbol_index_ + 2; - if (next <= store_.length() - && static_cast(store_[symbol_index_]) == start) { - symbol_index_ = next; - return store_[next - 1]; - } - return symbol_id_++; +int ScriptDataImpl::GetSymbolIdentifier() { + return ReadNumber(&symbol_data_); } - bool ScriptDataImpl::SanityCheck() { // Check that the header data is valid and doesn't specify // point to positions outside the store. 
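// Illustrative sketch only (standalone C++, not part of this patch or of
// V8).  It models two pieces of the preparse-data changes in this file:
// (1) CompleteParserRecorder::LogSymbol above interns each identifier in a
// hash table, hands out sequential ids on first sight, and records every
// occurrence as `id - 1`; (2) those ids travel as variable-length numbers
// -- 7 bits per byte, most significant group first, high bit set on every
// byte except the last -- the format emitted by the WriteNumber routine
// added a little further down and read back by GetSymbolIdentifier via
// ReadNumber.  ReadNumber's body is not shown in this hunk, so the decoder
// here is an inference from the writer's format, not a transcription of
// the V8 routines; all names are invented for the example.
#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

void WriteNumber(std::vector<uint8_t>* out, int number) {
  assert(number >= 0);
  uint8_t groups[5];
  int count = 0;
  for (int shift = 28; shift > 0; shift -= 7) {        // high 7-bit groups first
    uint8_t group = (number >> shift) & 0x7f;
    if (group != 0 || count != 0) groups[count++] = group;
  }
  groups[count++] = static_cast<uint8_t>(number & 0x7f);
  for (int i = 0; i < count; i++)
    out->push_back(i + 1 < count ? (groups[i] | 0x80) : groups[i]);
}

int ReadNumber(const uint8_t** p) {
  int result = 0;
  uint8_t byte;
  do {
    byte = *(*p)++;
    result = (result << 7) | (byte & 0x7f);            // accumulate base-128 digits
  } while (byte & 0x80);                               // high bit means "more digits"
  return result;
}

class SymbolRecorder {
 public:
  // Record one occurrence of `literal`; append its zero-based id to the stream.
  void LogSymbol(const std::string& literal) {
    int& id = table_[literal];             // value-initialized to 0 for a new key
    if (id == 0) id = ++next_id_;          // first occurrence: assign the next id
    WriteNumber(&symbol_stream_, id - 1);  // analogue of `symbol_store_.Add(id - 1)`
  }
  const std::vector<uint8_t>& stream() const { return symbol_stream_; }
  int distinct_symbols() const { return next_id_; }

 private:
  std::unordered_map<std::string, int> table_;
  std::vector<uint8_t> symbol_stream_;
  int next_id_ = 0;
};

int main() {
  SymbolRecorder r;
  for (const char* s : {"foo", "bar", "foo", "baz", "bar"}) r.LogSymbol(s);
  assert(r.distinct_symbols() == 3);
  const uint8_t* p = r.stream().data();
  int expected[] = {0, 1, 0, 2, 1};        // repeated symbols reuse their ids
  for (int id : expected) assert(ReadNumber(&p) == id);
  return 0;
}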
@@ -1080,7 +1117,7 @@ bool ScriptDataImpl::SanityCheck() { int symbol_count = static_cast(store_[ScriptDataImpl::kSymbolCountOffset]); if (symbol_count < 0) return false; - // Check that the total size has room both function entries. + // Check that the total size has room for header and function entries. int minimum_size = ScriptDataImpl::kHeaderSize + functions_size; if (store_.length() < minimum_size) return false; @@ -1088,15 +1125,8 @@ bool ScriptDataImpl::SanityCheck() { } -ParserRecorder::ParserRecorder() - : function_store_(0), - symbol_store_(0), - symbol_entries_(0), - symbol_table_(vector_compare), - symbol_id_(0) { -#ifdef DEBUG - prev_start = -1; -#endif + +PartialParserRecorder::PartialParserRecorder() : function_store_(0) { preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber; preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion; preamble_[ScriptDataImpl::kHasErrorOffset] = false; @@ -1104,10 +1134,22 @@ ParserRecorder::ParserRecorder() preamble_[ScriptDataImpl::kSymbolCountOffset] = 0; preamble_[ScriptDataImpl::kSizeOffset] = 0; ASSERT_EQ(6, ScriptDataImpl::kHeaderSize); +#ifdef DEBUG + prev_start = -1; +#endif } -void ParserRecorder::WriteString(Vector str) { +CompleteParserRecorder::CompleteParserRecorder() + : PartialParserRecorder(), + symbol_store_(0), + symbol_entries_(0), + symbol_table_(vector_compare), + symbol_id_(0) { +} + + +void PartialParserRecorder::WriteString(Vector str) { function_store_.Add(str.length()); for (int i = 0; i < str.length(); i++) { function_store_.Add(str[i]); @@ -1115,6 +1157,22 @@ void ParserRecorder::WriteString(Vector str) { } +void CompleteParserRecorder::WriteNumber(int number) { + ASSERT(number >= 0); + + int mask = (1 << 28) - 1; + for (int i = 28; i > 0; i -= 7) { + if (number > mask) { + symbol_store_.Add(static_cast(number >> i) | 0x80u); + number &= mask; + } + mask >>= 7; + } + symbol_store_.Add(static_cast(number)); +} + + + const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) { int length = start[0]; char* result = NewArray(length + 1); @@ -1127,8 +1185,9 @@ const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) { } -void ParserRecorder::LogMessage(Scanner::Location loc, const char* message, - Vector args) { +void PartialParserRecorder::LogMessage(Scanner::Location loc, + const char* message, + Vector args) { if (has_error()) return; preamble_[ScriptDataImpl::kHasErrorOffset] = true; function_store_.Reset(); @@ -1162,7 +1221,8 @@ const char* ScriptDataImpl::BuildMessage() { Vector ScriptDataImpl::BuildArgs() { int arg_count = Read(kMessageArgCountPos); const char** array = NewArray(arg_count); - // Position after the string starting at position 3. + // Position after text found by skipping past length field and + // length field content words. int pos = kMessageTextPos + 1 + Read(kMessageTextPos); for (int i = 0; i < arg_count; i++) { int count = 0; @@ -1183,7 +1243,7 @@ unsigned* ScriptDataImpl::ReadAddress(int position) { } -FunctionEntry ParserRecorder::LogFunction(int start) { +FunctionEntry PartialParserRecorder::LogFunction(int start) { #ifdef DEBUG ASSERT(start > prev_start); prev_start = start; @@ -1206,7 +1266,7 @@ class AstBuildingParser : public Parser { factory(), log(), pre_data), - factory_(pre_data ? pre_data->symbol_count() : 16) { } + factory_(pre_data ? 
pre_data->symbol_count() : 0) { }
   virtual void ReportMessageAt(Scanner::Location loc, const char* message,
                                Vector<const char*> args);
   virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
@@ -1223,23 +1283,46 @@ class AstBuildingParser : public Parser {
 class PreParser : public Parser {
  public:
   PreParser(Handle