From: Ryan Dahl
Date: Fri, 26 Mar 2010 16:09:40 +0000 (-0700)
Subject: Upgrade V8 to 2.1.10
X-Git-Tag: v0.1.92~61
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6192b8659a3dedb86393cfb78121a26f2a3e31e6;p=platform%2Fupstream%2Fnodejs.git

Upgrade V8 to 2.1.10
---

diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 5b1240d..0de3308 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,10 @@
+2010-03-26: Version 2.1.10
+
+        Fixed scons build issues.
+
+        Fixed a couple of minor bugs.
+
+
 2010-03-25: Version 2.1.9
 
         Added API support for reattaching a global object to a context.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index f950409..d33b567 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -275,6 +275,7 @@ V8_EXTRA_FLAGS = {
   'gcc': {
     'all': {
       'WARNINGFLAGS': ['-Wall',
+                       '-Werror',
                        '-W',
                        '-Wno-unused-parameter',
                        '-Wnon-virtual-dtor']
@@ -1008,7 +1009,6 @@ def BuildSpecific(env, mode, env_overrides):
 
   # Link the object files into a library.
   env.Replace(**context.flags['v8'])
-  env.Prepend(LIBS=[library_name])
   context.ApplyEnvOverrides(env)
 
   if context.options['library'] == 'static':
@@ -1043,7 +1043,9 @@ def BuildSpecific(env, mode, env_overrides):
     sample_env.Depends(sample_program, library)
     context.sample_targets.append(sample_program)
 
-  cctest_program = env.SConscript(
+  cctest_env = env.Copy()
+  cctest_env.Prepend(LIBS=[library_name])
+  cctest_program = cctest_env.SConscript(
       join('test', 'cctest', 'SConscript'),
       build_dir=join('obj', 'test', target_id),
       exports='context object_files',
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 7b42178..f64b386 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -3361,7 +3361,7 @@ External* External::Cast(v8::Value* value) {
 
 
 Local AccessorInfo::Data() const {
-  return Local(reinterpret_cast(&args_[-3]));
+  return Local(reinterpret_cast(&args_[-2]));
 }
 
 
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index e7f6efd..a1d4796 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -61,6 +61,7 @@ SOURCES = {
     execution.cc
     factory.cc
     flags.cc
+    flow-graph.cc
     frame-element.cc
    frames.cc
     full-codegen.cc
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 3fed223..c17f4cf 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -72,7 +72,7 @@ class Arguments BASE_EMBEDDED {
 };
 
 
-// Cursom arguments replicate a small segment of stack that can be
+// Custom arguments replicate a small segment of stack that can be
 // accessed through an Arguments object the same way the actual stack
 // can.
 class CustomArguments : public Relocatable {
@@ -80,15 +80,14 @@ class CustomArguments : public Relocatable {
   inline CustomArguments(Object* data,
                          JSObject* self,
                          JSObject* holder) {
-    values_[3] = self;
-    values_[2] = holder;
-    values_[1] = Smi::FromInt(0);
+    values_[2] = self;
+    values_[1] = holder;
     values_[0] = data;
   }
 
   void IterateInstance(ObjectVisitor* v);
-  Object** end() { return values_ + 3; }
+  Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
 
  private:
-  Object* values_[4];
+  Object* values_[3];
 };
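Note on the two API hunks above: CustomArguments shrinks from four slots to three by dropping the unused Smi::FromInt(0) slot, and AccessorInfo::Data() in include/v8.h moves from &args_[-3] to &args_[-2] to stay in step (the ApiGetterEntryStub::kStackSpace drop from 6 to 5 in codegen.h further down appears to be the same slot leaving the stub's frame layout). A minimal C++ sketch of the new layout, using stand-in types rather than the real V8 declarations:

    // Illustration only, not the V8 headers.
    struct Object;

    struct CustomArgumentsSketch {
      // values_[0] = data, values_[1] = holder, values_[2] = self;
      // the old fourth slot (Smi::FromInt(0)) is gone.
      Object* values_[3];

      // Mirrors `values_ + ARRAY_SIZE(values_) - 1`: end() points at the
      // last slot (self).
      Object** end() { return values_ + 3 - 1; }
    };

    // If the accessor callback's implicit argument pointer refers to the
    // last slot, the data value now sits two positions below it, which is
    // what the &args_[-3] to &args_[-2] change above reflects.
    inline Object* DataFromArgsEnd(Object** args) { return args[-2]; }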
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 5e00677..0ca4d35 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -3996,14 +3996,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
 }
 
 
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = node->op();
-
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
   // According to ECMA-262 section 11.11, page 58, the binary logical
   // operators must yield the result of one of the two expressions
   // before any ToBoolean() conversions. This means that the value
@@ -4015,8 +4008,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
   // after evaluating the left hand side (due to the shortcut
   // semantics), but the compiler must (statically) know if the result
   // of compiling the binary operation is materialized or not.
-
-  if (op == Token::AND) {
+  if (node->op() == Token::AND) {
     JumpTarget is_true;
     LoadConditionAndSpill(node->left(),
                           &is_true,
@@ -4062,7 +4054,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
       ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
     }
 
-  } else if (op == Token::OR) {
+  } else {
+    ASSERT(node->op() == Token::OR);
     JumpTarget is_false;
     LoadConditionAndSpill(node->left(),
                           true_target(),
@@ -4107,7 +4100,19 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
       // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
     }
 
+  }
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ BinaryOperation");
+  if (node->op() == Token::AND || node->op() == Token::OR) {
+    GenerateLogicalBooleanOperation(node);
   } else {
     // Optimize for the case where (at least) one of the expressions
     // is a literal small integer.
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 4bea341..0d1a385 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -306,6 +306,9 @@ class CodeGenerator: public AstVisitor {
 
   void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
 
+  // Generate code that computes a shortcutting logical operation.
+  void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
   void GenericBinaryOperation(Token::Value op,
                               OverwriteMode overwrite_mode,
                               int known_rhs = kUnknownIntValue);
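The codegen-arm.cc change above is a refactor rather than a behaviour change: the shortcutting && / || path moves out of VisitBinaryOperation into the new GenerateLogicalBooleanOperation helper declared in codegen-arm.h. The ECMA-262 section 11.11 rule cited in the comments is that a logical operator yields the value of one of its operands, not a boolean. A small illustrative C++ sketch of those semantics (Value and the helpers are stand-ins, not V8 types):

    #include <functional>

    struct Value { bool truthy; /* ...payload... */ };

    // `a && b`: if `a` is falsy the result is `a` itself and `b` is never
    // evaluated; otherwise the result is whatever `b` evaluates to.
    Value LogicalAnd(const Value& a, const std::function<Value()>& b) {
      return a.truthy ? b() : a;
    }

    // `a || b` mirrors this: a truthy `a` is returned as-is, otherwise the
    // result is `b`'s value.
    Value LogicalOr(const Value& a, const std::function<Value()>& b) {
      return a.truthy ? a : b();
    }

Only ToBoolean on the left operand decides the branch; the operand that is chosen is materialized unchanged, which is what the comment block in the generator spells out.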
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 2259aea..cc7cab7 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -65,11 +65,11 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
   // Check for the absence of an interceptor.
   // Load the map into t0.
   __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
-  // Test the has_named_interceptor bit in the map.
-  __ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
-  __ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
-  // Jump to miss if the interceptor bit is set.
-  __ b(ne, miss);
+
+  // Bail out if the receiver has a named interceptor.
+  __ ldrb(r3, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kHasNamedInterceptor));
+  __ b(nz, miss);
 
   // Bail out if we have a JS global proxy object.
   __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -144,6 +144,95 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
 }
 
 
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+                                         Label* miss,
+                                         Register elements,
+                                         Register key,
+                                         Register t0,
+                                         Register t1,
+                                         Register t2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver and is unchanged.
+  //
+  // key      - holds the smi key on entry and is unchanged if a branch is
+  //            performed to the miss label.
+  //
+  // Scratch registers:
+  //
+  // t0 - holds the untagged key on entry and holds the hash once computed.
+  //      Holds the result on exit if the load succeeded.
+  //
+  // t1 - used to hold the capacity mask of the dictionary
+  //
+  // t2 - used for the index into the dictionary.
+  Label done;
+
+  // Compute the hash code from the untagged key. This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  __ mvn(t1, Operand(t0));
+  __ add(t0, t1, Operand(t0, LSL, 15));
+  // hash = hash ^ (hash >> 12);
+  __ eor(t0, t0, Operand(t0, LSR, 12));
+  // hash = hash + (hash << 2);
+  __ add(t0, t0, Operand(t0, LSL, 2));
+  // hash = hash ^ (hash >> 4);
+  __ eor(t0, t0, Operand(t0, LSR, 4));
+  // hash = hash * 2057;
+  __ mov(t1, Operand(2057));
+  __ mul(t0, t0, t1);
+  // hash = hash ^ (hash >> 16);
+  __ eor(t0, t0, Operand(t0, LSR, 16));
+
+  // Compute the capacity mask.
+  __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+  __ mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
+  __ sub(t1, t1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use t2 for index calculations and keep the hash intact in t0.
+    __ mov(t2, t0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
+    }
+    __ and_(t2, t2, Operand(t1));
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    __ add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
+
+    // Check if the key is identical to the name.
+    __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
+    __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
+    __ cmp(key, Operand(ip));
+    if (i != kProbes - 1) {
+      __ b(eq, &done);
+    } else {
+      __ b(ne, miss);
+    }
+  }
+
+  __ bind(&done);
+  // Check that the value is a normal property.
+  // t2: elements + (index * kPointerSize)
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+  __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  __ b(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  __ ldr(t0, FieldMemOperand(t2, kValueOffset));
+}
+
+
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -530,7 +619,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
-  Label slow, fast, check_pixel_array;
+  Label slow, fast, check_pixel_array, check_number_dictionary;
 
   // Get the key and receiver object from the stack.
   __ ldm(ia, sp, r0.bit() | r1.bit());
@@ -554,6 +643,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
 
   // Check that the key is a smi.
   __ BranchOnNotSmi(r0, &slow);
+  // Save key in r2 in case we want it for the number dictionary case.
+  __ mov(r2, r0);
   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
 
   // Get the elements array of the object.
@@ -562,17 +653,26 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   __ cmp(r3, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_pixel_array);
   // Check that the key (index) is within bounds.
   __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
   __ cmp(r0, Operand(r3));
-  __ b(lo, &fast);
+  __ b(ge, &slow);
+  // Fast case: Do the load.
+  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r0, ip);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ b(eq, &slow);
+  __ Ret();
 
   // Check whether the elements is a pixel array.
   __ bind(&check_pixel_array);
   __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
   __ cmp(r3, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_number_dictionary);
   __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
   __ cmp(r0, ip);
   __ b(hs, &slow);
@@ -581,22 +681,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Tag result as smi.
   __ Ret();
 
+  __ bind(&check_number_dictionary);
+  // Check whether the elements is a number dictionary.
+  // r0: untagged index
+  // r1: elements
+  // r2: key
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+  GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
+  __ Ret();
+
   // Slow case: Push extra copies of the arguments (2).
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
   GenerateRuntimeGetProperty(masm);
-
-  // Fast case: Do the load.
-  __ bind(&fast);
-  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(r0, ip);
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ b(eq, &slow);
-
-  __ Ret();
 }
 
 
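The new GenerateNumberDictionaryLoad stub above hashes the untagged integer key and then makes up to four probes into the dictionary before branching to the miss label. Per its own comments the hash must stay in sync with ComputeIntegerHash in src/utils.h (not part of this diff); a C-level sketch of the same step sequence:

    #include <stdint.h>

    // Same steps as the mvn/add/eor/mul sequence emitted above; a sketch of
    // what utils.h is expected to define, not a copy of that file.
    static inline uint32_t IntegerHashSketch(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);  // mvn t1, t0 ; add t0, t1, t0, LSL #15
      hash = hash ^ (hash >> 12);   // eor t0, t0, t0, LSR #12
      hash = hash + (hash << 2);    // add t0, t0, t0, LSL #2
      hash = hash ^ (hash >> 4);    // eor t0, t0, t0, LSR #4
      hash = hash * 2057;           // mov t1, #2057 ; mul t0, t0, t1
      hash = hash ^ (hash >> 16);   // eor t0, t0, t0, LSR #16
      return hash;
    }

Each probe then masks the hash (plus NumberDictionary::GetProbeOffset(i) for the later probes) with capacity - 1 and multiplies the index by three, matching the ASSERT that NumberDictionary::kEntrySize == 3, before comparing the stored key and finally loading the details and value words.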
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index abf2f64..bbffef2 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -396,15 +396,14 @@ static void PushInterceptorArguments(MacroAssembler* masm,
                                      Register holder,
                                      Register name,
                                      JSObject* holder_obj) {
-  __ push(receiver);
-  __ push(holder);
   __ push(name);
   InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
   ASSERT(!Heap::InNewSpace(interceptor));
-
-  Register scratch = receiver;
+  Register scratch = name;
   __ mov(scratch, Operand(Handle(interceptor)));
   __ push(scratch);
+  __ push(receiver);
+  __ push(holder);
   __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
 }
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index e42f758..ea74898 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -61,13 +61,9 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
 
 
 void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
   code->set_major_key(MajorKey());
-#ifdef ENABLE_OPROFILE_AGENT
-  // Register the generated stub with the OPROFILE agent.
-  OProfileAgent::CreateNativeCodeRegion(GetName(),
-                                        code->instruction_start(),
-                                        code->instruction_size());
-#endif
-
+  OPROFILE(CreateNativeCodeRegion(GetName(),
+                                  code->instruction_start(),
+                                  code->instruction_size()));
   LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
   Counters::total_stubs_code_size.Increment(code->instruction_size());
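In this file and in compiler.cc below, explicit #ifdef ENABLE_OPROFILE_AGENT blocks are replaced by calls to an OPROFILE(...) macro, so the call sites no longer carry the conditional compilation themselves. The macro's real definition lives in the oprofile-agent header, which this diff does not show; the sketch below is an assumption about its shape, not the actual definition:

    // Hypothetical shape only; see src/oprofile-agent.h for the real macro.
    #ifdef ENABLE_OPROFILE_AGENT
    #define OPROFILE(call)                   \
      do {                                   \
        if (OProfileAgent::is_enabled()) {   \
          OProfileAgent::call;               \
        }                                    \
      } while (false)
    #else
    #define OPROFILE(call) ((void) 0)
    #endif

With that shape, OPROFILE(CreateNativeCodeRegion(GetName(), ...)) expands to a guarded OProfileAgent::CreateNativeCodeRegion(...) call in profiling builds and to nothing otherwise, which is what lets the removed #ifdef blocks at the call sites go away.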
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 4634f4c..0dfea8d 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -450,7 +450,7 @@ class ApiGetterEntryStub : public CodeStub {
   virtual bool GetCustomCache(Code** code_out);
   virtual void SetCustomCache(Code* value);
 
-  static const int kStackSpace = 6;
+  static const int kStackSpace = 5;
   static const int kArgc = 4;
  private:
   Handle info() { return info_; }
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index e2021fa..c9dd107 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -34,6 +34,7 @@
 #include "data-flow.h"
 #include "debug.h"
 #include "fast-codegen.h"
+#include "flow-graph.h"
 #include "full-codegen.h"
 #include "liveedit.h"
 #include "oprofile-agent.h"
@@ -235,27 +236,19 @@ static Handle MakeFunctionInfo(bool is_global,
     return Handle::null();
   }
 
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  // Log the code generation for the script. Check explicit whether logging is
-  // to avoid allocating when not required.
-  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
-    if (script->name()->IsString()) {
-      SmartPointer data =
-          String::cast(script->name())->ToCString(DISALLOW_NULLS);
-      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
-                          *code, *data));
-      OProfileAgent::CreateNativeCodeRegion(*data,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    } else {
-      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
-                          *code, ""));
-      OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    }
+  if (script->name()->IsString()) {
+    LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                        *code, String::cast(script->name())));
+    OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
+                                    code->instruction_start(),
+                                    code->instruction_size()));
+  } else {
+    LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                        *code, ""));
+    OPROFILE(CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
+                                    code->instruction_start(),
+                                    code->instruction_size()));
   }
-#endif
 
   // Allocate function.
   Handle result =
@@ -443,14 +436,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
     return false;
   }
 
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
-                     name,
-                     Handle(shared->inferred_name()),
-                     start_position,
-                     info->script(),
-                     code);
-#endif
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
+                            name,
+                            Handle(shared->inferred_name()),
+                            start_position,
+                            info->script(),
+                            code);
 
   // Update the shared function info with the compiled code.
   shared->set_code(*code);
@@ -578,15 +569,12 @@ Handle Compiler::BuildFunctionInfo(FunctionLiteral* literal,
     }
 
     // Function compilation complete.
-
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-    LogCodeCreateEvent(Logger::FUNCTION_TAG,
-                       literal->name(),
-                       literal->inferred_name(),
-                       literal->start_position(),
-                       script,
-                       code);
-#endif
+    RecordFunctionCompilation(Logger::FUNCTION_TAG,
+                              literal->name(),
+                              literal->inferred_name(),
+                              literal->start_position(),
+                              script,
+                              code);
   }
 
   // Create a boilerplate function.
@@ -628,13 +616,12 @@ void Compiler::SetFunctionInfo(Handle function_info,
 }
 
 
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Handle name,
-                                  Handle inferred_name,
-                                  int start_position,
-                                  Handle