Chromium Code Reviews

Unified Diff: src/arm/full-codegen-arm.cc

Issue 6614010: [Isolates] Merge 6700:7030 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 10 months ago
Index: src/arm/full-codegen-arm.cc
===================================================================
--- src/arm/full-codegen-arm.cc (revision 7031)
+++ src/arm/full-codegen-arm.cc (working copy)
@@ -45,6 +45,67 @@
#define __ ACCESS_MASM(masm_)
+
+// A patch site is a location in the code that can be patched. This class
+// has methods to emit the code that is patchable and a method,
+// EmitPatchInfo, to record a marker back to the patchable code. The marker
+// is a cmp rx, #yyy instruction, where x * 0x00000fff + yyy (the raw
+// 12-bit immediate value) is the delta from the pc to the first
+// instruction of the patchable code.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+ // When initially emitting this, ensure that a jump is always generated to
+ // skip the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ cmp(reg, Operand(reg));
+ // Don't use b(al, ...) as that might emit the constant pool right after
+ // the branch. After patching, when the branch is no longer unconditional,
+ // execution can continue into the constant pool.
+ __ b(eq, target); // Always taken before patched.
+ }
+
+ // When initially emitting this, ensure that a jump is never generated to
+ // skip the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ cmp(reg, Operand(reg));
+ __ b(ne, target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg;
+ reg.set_code(delta_to_patch_site / kOff12Mask);
+ __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
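
As a worked example of the marker encoding described above, here is a minimal stand-alone sketch (not part of the patch; it assumes kOff12Mask is 0xfff, the raw 12-bit immediate mask):

    #include <cassert>

    // Model of EmitPatchInfo's encoding: the delta back to the patch site
    // is split between the register code (x) and the 12-bit immediate (yyy)
    // of the 'cmp rx, #yyy' marker.
    const int kOff12Mask = 0xfff;

    struct Marker { int reg_code; int imm12; };

    Marker Encode(int delta) {
      Marker m;
      m.reg_code = delta / kOff12Mask;  // x
      m.imm12 = delta % kOff12Mask;     // yyy
      return m;
    }

    int Decode(const Marker& m) { return m.reg_code * kOff12Mask + m.imm12; }

    int main() {
      Marker m = Encode(5000);    // e.g. a 5000-byte delta
      assert(m.reg_code == 1 && m.imm12 == 905);
      assert(Decode(m) == 5000);  // x * 0xfff + yyy recovers the delta
      return 0;
    }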
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -158,46 +219,47 @@
Move(dot_arguments_slot, r3, r1, r2);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- // Check the stack for overflow or break request.
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
@@ -277,13 +339,6 @@
}
-FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
- Token::Value op, Expression* left, Expression* right) {
- ASSERT(ShouldInlineSmiCase(op));
- return kNoConstants;
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}
@@ -502,13 +557,38 @@
void FullCodeGenerator::DoTest(Label* if_true,
Label* if_false,
Label* fall_through) {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Emit the inlined tests assumed by the stub.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_false);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_true);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(result_register(), ip);
+ __ b(eq, if_false);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(result_register(), result_register());
+ __ b(eq, if_false);
+ __ JumpIfSmi(result_register(), if_true);
+
+ // Call the ToBoolean stub for all other cases.
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
+ __ tst(result_register(), result_register());
+ } else {
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ push(result_register());
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ }
+
+ // The stub returns nonzero for true.
+ Split(ne, if_true, if_false, fall_through);
}
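
The inlined checks above handle the common ToBoolean cases before deferring to the stub. Roughly, as a hedged C++ sketch (the Value type and its fields are invented for illustration):

    enum Kind { kUndefined, kTrue, kFalse, kSmi, kOther };
    struct Value { Kind kind; int smi_value; };

    // Returns the boolean value when it can be decided inline; otherwise
    // sets *decided to false, and the emitted code calls ToBooleanStub.
    bool FastToBoolean(const Value& v, bool* decided) {
      *decided = true;
      switch (v.kind) {
        case kUndefined: return false;             // b(eq, if_false)
        case kTrue:      return true;              // b(eq, if_true)
        case kFalse:     return false;             // b(eq, if_false)
        case kSmi:       return v.smi_value != 0;  // the zero smi is false
        default:         *decided = false;         // ToBooleanStub decides
                         return false;
      }
    }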
@@ -623,10 +703,11 @@
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
+ // Check that we're not inside a 'with'.
__ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
@@ -695,8 +776,9 @@
prop->key()->AsLiteral()->handle()->IsSmi());
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(isolate()->builtins()->builtin(is_strict()
+ ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements).
}
@@ -712,10 +794,11 @@
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- __ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ mov(r2, Operand(pairs));
+ __ mov(r1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, r2, r1, r0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}
@@ -724,9 +807,9 @@
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
+
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
-
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
@@ -754,24 +837,24 @@
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow_case);
+ patch_site.EmitJumpIfNotSmi(r2, &slow_case);
+
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
- __ bind(&slow_case);
+ __ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(eq, true, flags, r1, r0);
- __ CallStub(&stub);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ EmitCallIC(ic, &patch_site);
+ __ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
@@ -815,8 +898,9 @@
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
__ b(eq, &exit);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
+ Register null_value = r5;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmp(r0, null_value);
__ b(eq, &exit);
// Convert the object to a JS object.
@@ -830,12 +914,62 @@
__ bind(&done_convert);
__ push(r0);
- // BUG(867): Check cache validity in generated code. This is a fast
- // case for the JSObject::IsSimpleEnum cache validity checks. If we
- // cannot guarantee cache validity, call the runtime system to check
- // cache validity or get the property names in a fixed array.
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ Label next, call_runtime;
+ // Preload a couple of values used in the loop.
+ Register empty_fixed_array_value = r6;
+ __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = r7;
+ __ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ __ mov(r1, r0);
+ __ bind(&next);
+ // Check that there are no elements. Register r1 contains the
+ // current JS object we've reached through the prototype chain.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ cmp(r2, empty_fixed_array_value);
+ __ b(ne, &call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r2 for the subsequent
+ // prototype load.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset));
+ __ cmp(r3, empty_descriptor_array_value);
+ __ b(eq, &call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (r3). This is the case if the next enumeration
+ // index field does not contain a smi.
+ __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
+ __ JumpIfSmi(r3, &call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ __ cmp(r1, r0);
+ __ b(eq, &check_prototype);
+ __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(r3, empty_fixed_array_value);
+ __ b(ne, &call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ __ bind(&check_prototype);
+ __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
+ __ cmp(r1, null_value);
+ __ b(ne, &next);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
// Get the set of properties to enumerate.
+ __ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
@@ -850,6 +984,7 @@
__ b(ne, &fixed_array);
// We got a map in register r0. Get the enumeration cache from it.
+ __ bind(&use_cache);
__ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
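
In outline, the walk above visits every object on the prototype chain; an illustrative C++ model (the types and fields are simplified stand-ins, not V8's object layout):

    #include <cstddef>

    struct DescriptorArray { bool has_enum_cache; bool cache_is_empty; };
    struct JSObject {
      bool elements_empty;           // elements == empty_fixed_array
      DescriptorArray* descriptors;  // NULL models the empty descriptor array
      JSObject* prototype;           // NULL models the null prototype
    };

    // Mirrors the loop between the 'next' and 'check_prototype' labels.
    bool CanUseEnumCache(JSObject* receiver) {
      for (JSObject* o = receiver; o != NULL; o = o->prototype) {
        if (!o->elements_empty) return false;               // -> call_runtime
        DescriptorArray* d = o->descriptors;
        if (d == NULL || !d->has_enum_cache) return false;  // -> call_runtime
        if (o != receiver && !d->cache_is_empty) return false;
      }
      return true;  // cache is valid: jump to use_cache
    }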
@@ -938,8 +1073,14 @@
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ scope()->is_function_scope() &&
info->num_literals() == 0 &&
!pretenure) {
FastNewClosureStub stub;
@@ -967,7 +1108,7 @@
Slot* slot,
Label* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
- Register current = cp;
+ Register context = cp;
Register next = r3;
Register temp = r4;
@@ -975,22 +1116,25 @@
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
}
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
+ __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
__ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
// Walk the rest of the chain without clobbering cp.
- current = next;
+ context = next;
}
}
// Check that last extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
- __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, slot->index());
}
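
The chain walk above can be modeled as follows (a simplified sketch: the emitted code checks the extension only for scopes that call eval, while this model checks every level):

    #include <cstddef>

    struct Context {
      Context* closure_context;  // context of the enclosing closure
      void* extension;           // non-NULL if eval extended the scope
    };

    // Walks out to the context holding the slot, taking the slow path if
    // any extension object could shadow the slot along the way.
    Context* WalkToSlotContext(Context* current, int chain_length, bool* slow) {
      *slow = false;
      for (int i = 0; i <= chain_length; i++) {
        if (current->extension != NULL) {  // cf. b(ne, slow)
          *slow = true;
          return NULL;
        }
        if (i < chain_length) current = current->closure_context;
      }
      return current;  // loads only: a context-relative operand is safe
    }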
@@ -1194,18 +1338,19 @@
Comment cmnt(masm_, "[ RegExpLiteral");
Label materialized;
// Registers will be used as follows:
+ // r5 = materialized value (RegExp literal)
// r4 = JS function, literals array
// r3 = literal index
// r2 = RegExp pattern
// r1 = RegExp flags
- // r0 = temp + materialized value (RegExp literal)
+ // r0 = RegExp literal clone
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r0, FieldMemOperand(r4, literal_offset));
+ __ ldr(r5, FieldMemOperand(r4, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(r5, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function.
@@ -1215,20 +1360,27 @@
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(r5, r0);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- __ push(r0);
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(r5);
__ mov(r0, Operand(Smi::FromInt(size)));
__ push(r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ pop(r5);
+ __ bind(&allocated);
// After this, registers are used as follows:
// r0: Newly allocated regexp.
- // r1: Materialized regexp.
+ // r5: Materialized regexp.
// r2: temp.
- __ pop(r1);
- __ CopyFields(r0, r1, r2.bit(), size / kPointerSize);
+ __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
context()->Plug(r0);
}
@@ -1295,7 +1447,9 @@
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- __ CallRuntime(Runtime::kSetProperty, 3);
+ __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
+ __ push(r0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
}
@@ -1475,14 +1629,8 @@
}
Token::Value op = expr->binary_op();
- ConstantOperand constant = ShouldInlineSmiCase(op)
- ? GetConstantOperand(op, expr->target(), expr->value())
- : kNoConstants;
- ASSERT(constant == kRightConstant || constant == kNoConstants);
- if (constant == kNoConstants) {
- __ push(r0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
- }
+ __ push(r0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
@@ -1494,8 +1642,7 @@
op,
mode,
expr->target(),
- expr->value(),
- constant);
+ expr->value());
} else {
EmitBinaryOp(op, mode);
}
@@ -1550,31 +1697,107 @@
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
- Expression* left,
- Expression* right,
- ConstantOperand constant) {
- ASSERT(constant == kNoConstants); // Only handled case.
- EmitBinaryOp(op, mode);
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+
+ // Get the arguments.
+ Register left = r1;
+ Register right = r0;
+ __ pop(left);
+
+ // Perform combined smi check on both operands.
+ __ orr(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ // Smi case. This code works the same way as the smi-smi case in the type
+ // recording binary operation stub; see
+ // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
+ switch (op) {
+ case Token::SAR:
+ __ b(&stub_call);
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ mov(right, Operand(left, ASR, scratch1));
+ __ bic(right, right, Operand(kSmiTagMask));
+ break;
+ case Token::SHL: {
+ __ b(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSL, scratch2));
+ __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
+ __ b(mi, &stub_call);
+ __ SmiTag(right, scratch1);
+ break;
+ }
+ case Token::SHR: {
+ __ b(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSR, scratch2));
+ __ tst(scratch1, Operand(0xc0000000));
+ __ b(ne, &stub_call);
+ __ SmiTag(right, scratch1);
+ break;
+ }
+ case Token::ADD:
+ __ add(scratch1, left, Operand(right), SetCC);
+ __ b(vs, &stub_call);
+ __ mov(right, scratch1);
+ break;
+ case Token::SUB:
+ __ sub(scratch1, left, Operand(right), SetCC);
+ __ b(vs, &stub_call);
+ __ mov(right, scratch1);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(ip, right);
+ __ smull(scratch1, scratch2, left, ip);
+ __ mov(ip, Operand(scratch1, ASR, 31));
+ __ cmp(ip, Operand(scratch2));
+ __ b(ne, &stub_call);
+ __ tst(scratch1, Operand(scratch1));
+ __ mov(right, Operand(scratch1), LeaveCC, ne);
+ __ b(ne, &done);
+ __ add(scratch2, right, Operand(left), SetCC);
+ __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ b(mi, &stub_call);
+ break;
+ }
+ case Token::BIT_OR:
+ __ orr(right, left, Operand(right));
+ break;
+ case Token::BIT_AND:
+ __ and_(right, left, Operand(right));
+ break;
+ case Token::BIT_XOR:
+ __ eor(right, left, Operand(right));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(r0);
}
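
For the ADD case, the overflow check can be illustrated in isolation (a sketch assuming 32-bit tagged smis with tag bit 0, so overflow of the tagged add coincides with smi overflow; the GCC/Clang __builtin_add_overflow stands in for the 'add ..., SetCC' / 'b(vs, ...)' pair):

    #include <cstdint>

    // Returns false where the emitted code branches to stub_call on the
    // overflow (vs) condition; otherwise *result holds the tagged smi sum.
    bool SmiAdd(int32_t left_tagged, int32_t right_tagged, int32_t* result) {
      int32_t sum;
      if (__builtin_add_overflow(left_tagged, right_tagged, &sum)) {
        return false;  // b(vs, &stub_call)
      }
      *result = sum;   // mov(right, scratch1)
      return true;
    }

The MUL case similarly multiplies with smull, bails out unless the high word equals the sign extension of the low word, and, when the product is zero, falls back to the stub if the operand sum is negative so that a -0 result is never returned as a smi.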
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- if (op == Token::ADD ||
- op == Token::SUB ||
- op == Token::MUL ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::BIT_OR ||
- op == Token::BIT_AND ||
- op == Token::BIT_XOR) {
- TypeRecordingBinaryOpStub stub(op, mode);
- __ CallStub(&stub);
- } else {
- GenericBinaryOpStub stub(op, mode, r1, r0);
- __ CallStub(&stub);
- }
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), NULL);
context()->Plug(r0);
}
@@ -1612,19 +1835,31 @@
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::StoreIC_Initialize));
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
+ if (prop->is_synthetic()) {
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+ __ mov(r2, r0);
+ __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(r1, r0);
+ __ pop(r2);
+ }
__ pop(r0); // Restore value.
Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::KeyedStoreIC_Initialize));
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -1649,37 +1884,64 @@
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
Slot* slot = var->AsSlot();
+ Label skip;
switch (slot->type()) {
case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &done);
- }
+ // Detect const reinitialization by checking for the hole value.
+ __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &skip);
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+ case Slot::CONTEXT: {
+ __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ ldr(r2, ContextOperand(r1, slot->index()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, ip);
+ __ b(ne, &skip);
+ __ str(r0, ContextOperand(r1, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r3, r0); // Preserve the stored value in r0.
+ __ RecordWrite(r1, Operand(offset), r3, r2);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(r0);
+ __ mov(r0, Operand(slot->var()->name()));
+ __ Push(cp, r0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
+ Slot* slot = var->AsSlot();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
// Perform the assignment.
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, r1);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r2, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &done);
- }
// Perform the assignment and issue the write barrier.
__ str(result_register(), target);
// RecordWrite may destroy all its register arguments.
@@ -1690,20 +1952,14 @@
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(r0); // Value.
- __ mov(r0, Operand(slot->var()->name()));
- __ Push(cp, r0); // Context and name.
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ mov(r1, Operand(slot->var()->name()));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
break;
}
- __ bind(&done);
}
}
@@ -1737,7 +1993,8 @@
}
Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::StoreIC_Initialize));
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -1782,7 +2039,8 @@
}
Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::KeyedStoreIC_Initialize));
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -1899,6 +2157,29 @@
}
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+ int arg_count) {
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ } else {
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ }
+ __ push(r1);
+
+ // Push the receiver of the enclosing function and do runtime call.
+ __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(r1);
+ // Push the strict mode flag.
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
+
+ __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+ ? Runtime::kResolvePossiblyDirectEvalNoLookup
+ : Runtime::kResolvePossiblyDirectEval, 4);
+}
+
+
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@@ -1928,27 +2209,32 @@
VisitForStackValue(args->at(i));
}
- // Push copy of the function - found below the arguments.
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables, we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ Label done;
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ Label slow;
+ EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ // Push the function and resolve eval.
+ __ push(r0);
+ EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+ __ jmp(&done);
+ __ bind(&slow);
+ }
+
+ // Push copy of the function (found below the arguments) and
+ // resolve eval.
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(r1);
-
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- __ push(r1);
- } else {
- __ push(r2);
+ EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+ if (done.is_linked()) {
+ __ bind(&done);
}
- // Push the receiver of the enclosing function and do runtime call.
- __ ldr(r1,
- MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
- __ push(r1);
- // Push the strict mode flag.
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
- __ push(r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
// The runtime call returns a pair of values in r0 (function) and
// r1 (receiver). Touch up the stack with the right values.
__ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2764,37 +3050,43 @@
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime.
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sin, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime.
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_cos, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
- // Load the argument on the stack and call the runtime function.
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
+ __ CallStub(&stub);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the runtime function.
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_log, 1);
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
context()->Plug(r0);
}
@@ -2953,16 +3245,248 @@
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(r0);
+ }
+
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
+
context()->Plug(r0);
}
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = r0;
+ Register elements = no_reg; // Will be r0.
+ Register result = no_reg; // Will be r0.
+ Register separator = r1;
+ Register array_length = r2;
+ Register result_pos = no_reg; // Will be r2.
+ Register string_length = r3;
+ Register string = r4;
+ Register element = r5;
+ Register elements_end = r6;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ // Separator operand is on the stack.
+ __ pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ b(ne, &bailout);
+
+ // Check that the array has fast elements.
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ tst(scratch2, Operand(1 << Map::kHasFastElements));
+ __ b(eq, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length, SetCC);
+ __ b(ne, &non_trivial_array);
+ __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ b(&done);
+
+ __ bind(&non_trivial_array);
+
+ // Get the FixedArray containing array's elements.
+ elements = array;
+ __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ array = no_reg; // End of array's live range.
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ mov(string_length, Operand(0));
+ __ add(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ cmp(array_length, Operand(0));
+ __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
+ }
+ __ bind(&loop);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch1));
+ __ b(vs, &bailout);
+ __ cmp(element, elements_end);
+ __ b(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, Operand(1));
+ __ b(ne, &not_size_one_array);
+ __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ b(&done);
+
+ __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string. array_length is not
+ // a smi but the other values are, so the result is a smi.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch1));
+ __ smull(scratch2, ip, array_length, scratch1);
+ // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+ // zero.
+ __ cmp(ip, Operand(0));
+ __ b(ne, &bailout);
+ __ tst(scratch2, Operand(0x80000000));
+ __ b(ne, &bailout);
+ __ add(string_length, string_length, Operand(scratch2));
+ __ b(vs, &bailout);
+ __ SmiUntag(string_length);
+
+ // Get first element in the array to free up the elements register to be used
+ // for the result.
+ __ add(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateAsciiString(result,
+ string_length,
+ scratch1,
+ scratch2,
+ elements_end,
+ &bailout);
+ // Prepare for looping. Set up elements_end to the end of the array and
+ // result_pos to the position in the result where the first character
+ // will be written.
+ __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ add(result_pos,
+ result,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ b(eq, &one_char_separator);
+ __ b(gt, &long_separator);
+
+ // Empty separator case
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &empty_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ascii character value.
+ __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&one_char_separator_loop_entry);
+
+ __ bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ascii char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+ // Copy next array element to the result.
+ __ bind(&one_char_separator_loop_entry);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string,
+ separator,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+
+ __ bind(&long_separator);
+ __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmp(element, elements_end);
+ __ b(lt, &long_separator_loop); // End while (element < elements_end).
+ ASSERT(result.is(r0));
+ __ b(&done);
+
+ __ bind(&bailout);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
context()->Plug(r0);
- return;
}
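
For orientation, the fast path emitted above is a two-pass join: one pass to validate the elements and sum their lengths, a single allocation, then one copy loop per separator kind. A compact C++ sketch of the same algorithm (not the emitted code; where this sketch cannot proceed, the real code jumps to bailout and the runtime takes over):

    #include <string>
    #include <vector>

    std::string FastAsciiArrayJoin(const std::vector<std::string>& elements,
                                   const std::string& separator) {
      if (elements.empty()) return std::string();    // empty string root
      // Pass 1: sum lengths. The real code also bails out on non-flat or
      // non-ASCII elements and on smi overflow of the running sum.
      size_t length = separator.size() * (elements.size() - 1);
      for (size_t i = 0; i < elements.size(); i++) length += elements[i].size();
      if (elements.size() == 1) return elements[0];  // single-element shortcut
      std::string result;
      result.reserve(length);                        // AllocateAsciiString
      // Pass 2: copy. The empty/one-char/long separator loops collapse
      // into a single loop here.
      for (size_t i = 0; i < elements.size(); i++) {
        if (i != 0) result += separator;             // CopyBytes(separator)
        result += elements[i];                       // CopyBytes(element)
      }
      return result;
    }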
@@ -3012,39 +3536,50 @@
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (prop == NULL && var == NULL) {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- } else if (var != NULL &&
- !var->is_global() &&
- var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else {
- // Property or variable reference. Call the delete builtin with
- // object and property name as arguments.
- if (prop != NULL) {
+
+ if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ context()->Plug(false);
+ } else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
- } else if (var->is_global()) {
- __ ldr(r1, GlobalObjectOperand());
- __ mov(r0, Operand(var->name()));
- __ Push(r1, r0);
+ context()->Plug(r0);
+ }
+ } else if (var != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ if (var->is_global()) {
+ __ ldr(r2, GlobalObjectOperand());
+ __ mov(r1, Operand(var->name()));
+ __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ context()->Plug(r0);
+ } else if (var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(false);
} else {
- // Non-global variable. Call the runtime to delete from the
+ // Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
__ push(context_register());
__ mov(r2, Operand(var->name()));
__ push(r2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(r0);
}
- context()->Plug(r0);
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
}
break;
}
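
The restructured case analysis can be summarized as follows (a hedged sketch; the enum and flags are invented for illustration):

    enum DeleteResult { kPlugFalse, kPlugTrue, kCallDeleteBuiltin, kCallRuntime };

    // Summary of the control flow above for 'delete <expr>'.
    DeleteResult ClassifyDelete(bool is_property, bool is_synthetic,
                                bool is_variable, bool is_global,
                                bool is_lookup_slot) {
      if (is_property) {
        // Deleting a synthetic (rewritten parameter) access yields false.
        return is_synthetic ? kPlugFalse : kCallDeleteBuiltin;
      }
      if (is_variable) {
        if (is_global) return kCallDeleteBuiltin;  // kNonStrictMode is pushed
        if (!is_lookup_slot) return kPlugFalse;    // non-global, non-dynamic
        return kCallRuntime;                       // Runtime::kDeleteContextSlot
      }
      return kPlugTrue;  // non-property, non-variable: visit for effect only
    }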
@@ -3058,17 +3593,23 @@
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects, so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
break;
}
@@ -3100,9 +3641,7 @@
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB,
- overwrite,
- NO_UNARY_FLAGS);
+ GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
// GenericUnaryOpStub expects the argument to be in the
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
@@ -3235,13 +3774,16 @@
// Inline smi case if we are in a loop.
Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
__ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- __ JumpIfSmi(r0, &done);
+ patch_site.EmitJumpIfSmi(r0, &done);
+
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3251,8 +3793,8 @@
// Record position before stub call.
SetSourcePosition(expr->position());
- GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
- __ CallStub(&stub);
+ TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in r0.
@@ -3281,7 +3823,8 @@
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
__ pop(r1);
Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::StoreIC_Initialize));
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3297,7 +3840,8 @@
__ pop(r1); // Key.
__ pop(r2); // Receiver.
Handle<Code> ic(isolate()->builtins()->builtin(
- Builtins::KeyedStoreIC_Initialize));
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3534,21 +4078,22 @@
}
bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
- __ JumpIfNotSmi(r2, &slow_case);
+ patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(cond, strict, flags, r1, r0);
- __ CallStub(&stub);
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
}
@@ -3611,10 +4156,52 @@
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(COUNTERS->named_load_full(), 1, r1, r2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(COUNTERS->keyed_load_full(), 1, r1, r2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(COUNTERS->named_store_full(), 1, r1, r2);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(COUNTERS->keyed_store_full(), 1, r1, r2);
+ default:
+ break;
+ }
+
__ Call(ic, mode);
}
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(COUNTERS->named_load_full(), 1, r1, r2);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(COUNTERS->keyed_load_full(), 1, r1, r2);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(COUNTERS->named_store_full(), 1, r1, r2);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(COUNTERS->keyed_store_full(), 1, r1, r2);
+ default:
+ break;
+ }
+
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
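
This marker convention pairs with JumpPatchSite::EmitPatchInfo above: the instruction after the IC call is either the cmp marker or a nop. A sketch of the reader side (the nop encoding is an assumption; V8's ARM nop is emitted as mov r0, r0):

    #include <cstdint>

    const uint32_t kNopInstruction = 0xe1a00000;  // assumed: 'mov r0, r0'

    // Hypothetical patcher-side test on the word following an IC call: a
    // nop signals no inlined smi code; anything else is the 'cmp rx, #yyy'
    // marker whose register code and immediate encode the patch-site delta.
    bool HasInlinedSmiCode(uint32_t instr_after_call) {
      return instr_after_call != kNopInstruction;
    }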
+
+
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));