Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(544)

Unified Diff: src/x64/lithium-codegen-x64.cc

Issue 6614010: [Isolates] Merge 6700:7030 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/x64/lithium-codegen-x64.h ('k') | src/x64/lithium-x64.h » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/x64/lithium-codegen-x64.cc
===================================================================
--- src/x64/lithium-codegen-x64.cc (revision 7031)
+++ src/x64/lithium-codegen-x64.cc (working copy)
@@ -37,6 +37,37 @@
namespace internal {
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator : public PostCallGenerator {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ int deoptimization_index,
+ bool ensure_reloc_space = false)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deoptimization_index_(deoptimization_index),
+ ensure_reloc_space_(ensure_reloc_space) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void Generate() {
+ // Ensure that we have enough space in the reloc info to patch
+ // this with calls when doing deoptimization.
+ if (ensure_reloc_space_) {
+ codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
+ }
+ codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ int deoptimization_index_;
+ bool ensure_reloc_space_;
+};
+
+
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -46,6 +77,7 @@
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateJumpTable() &&
GenerateSafepointTable();
}
@@ -132,6 +164,45 @@
}
}
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both rax and rsi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ movq(Operand(rsi, context_offset), rax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
// Trace the call.
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -170,6 +241,16 @@
}
+bool LCodeGen::GenerateJumpTable() {
+ for (int i = 0; i < jump_table_.length(); i++) {
+ JumpTableEntry* info = jump_table_[i];
+ __ bind(&(info->label_));
+ __ Jump(info->address_, RelocInfo::RUNTIME_ENTRY);
+ }
+ return !is_aborted();
+}
+
+
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
@@ -252,8 +333,7 @@
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
- Representation r = chunk_->LookupLiteralRepresentation(op);
- ASSERT(r.IsTagged());
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
return literal;
}
@@ -443,10 +523,17 @@
if (cc == no_condition) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- NearLabel done;
- __ j(NegateCondition(cc), &done);
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
+ JumpTableEntry* jump_info = NULL;
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (jump_table_.length() > 0 &&
+ jump_table_[jump_table_.length() - 1]->address_ == entry) {
+ jump_info = jump_table_[jump_table_.length() - 1];
+ } else {
+ jump_info = new JumpTableEntry(entry);
+ jump_table_.Add(jump_info);
+ }
+ __ j(cc, &jump_info->label_);
}
}
@@ -458,7 +545,8 @@
Handle<DeoptimizationInputData> data =
FACTORY->NewDeoptimizationInputData(length, TENURED);
- data->SetTranslationByteArray(*translations_.CreateByteArray());
+ Handle<ByteArray> translations = translations_.CreateByteArray();
+ data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
@@ -539,6 +627,12 @@
}
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -611,13 +705,13 @@
break;
}
case CodeStub::StringCharAt: {
- // TODO(1116): Add StringCharAt stub to x64.
- Abort("Unimplemented: %s", "StringCharAt Stub");
+ StringCharAtStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::MathPow: {
- // TODO(1115): Add MathPow stub to x64.
- Abort("Unimplemented: %s", "MathPow Stub");
+ MathPowStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
@@ -636,7 +730,8 @@
break;
}
case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type());
+ TranscendentalCacheStub stub(instr->transcendental_type(),
+ TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
@@ -652,7 +747,42 @@
void LCodeGen::DoModI(LModI* instr) {
- Abort("Unimplemented: %s", "DoModI");
+ LOperand* right = instr->InputAt(1);
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+
+ Register right_reg = ToRegister(right);
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Sign extend eax to edx. (We are using only the low 32 bits of the values.)
+ __ cdq();
+
+ // Check for (0 % -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel positive_left;
+ NearLabel done;
+ __ testl(rax, rax);
+ __ j(not_sign, &positive_left);
+ __ idivl(right_reg);
+
+ // Test the remainder for 0, because then the result would be -0.
+ __ testl(rdx, rdx);
+ __ j(not_zero, &done);
+
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&positive_left);
+ __ idivl(right_reg);
+ __ bind(&done);
+ } else {
+ __ idivl(right_reg);
+ }
}
@@ -888,21 +1018,15 @@
ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
+ uint64_t int_val = BitCast<uint64_t, double>(v);
// Use xor to produce +0.0 in a fast and compact way, but avoid to
// do so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
+ if (int_val == 0) {
__ xorpd(res, res);
} else {
Register tmp = ToRegister(instr->TempAt(0));
- int32_t v_int32 = static_cast<int32_t>(v);
- if (static_cast<double>(v_int32) == v) {
- __ movl(tmp, Immediate(v_int32));
- __ cvtlsi2sd(res, tmp);
- } else {
- uint64_t int_val = BitCast<uint64_t, double>(v);
- __ Set(tmp, int_val);
- __ movd(res, tmp);
- }
+ __ Set(tmp, int_val);
+ __ movq(res, tmp);
}
}
@@ -927,8 +1051,27 @@
}
+void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, PixelArray::kLengthOffset));
+}
+
+
void LCodeGen::DoValueOf(LValueOf* instr) {
- Abort("Unimplemented: %s", "DoValueOf");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ ASSERT(input.is(result));
+ NearLabel done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(input, &done);
+
+ // If the object is not a value type, return the object.
+ __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
+ __ j(not_equal, &done);
+ __ movq(result, FieldOperand(input, JSValue::kValueOffset));
+
+ __ bind(&done);
}
@@ -971,7 +1114,36 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- Abort("Unimplemented: %s", "DoArithmeticD");
+ XMMRegister left = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // All operations except MOD are computed in-place.
+ ASSERT(instr->op() == Token::MOD || left.is(result));
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(left, right);
+ break;
+ case Token::SUB:
+ __ subsd(left, right);
+ break;
+ case Token::MUL:
+ __ mulsd(left, right);
+ break;
+ case Token::DIV:
+ __ divsd(left, right);
+ break;
+ case Token::MOD:
+ __ PrepareCallCFunction(2);
+ __ movsd(xmm0, left);
+ ASSERT(right.is(xmm1));
+ __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 2);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movsd(result, xmm0);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
@@ -1433,7 +1605,20 @@
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceType");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ __ testl(input, Immediate(kSmiTagMask));
+ NearLabel done, is_false;
+ __ j(zero, &is_false);
+ __ CmpObjectType(input, TestType(instr->hydrogen()), result);
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1453,7 +1638,17 @@
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ testl(FieldOperand(input, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ NearLabel done;
+ __ j(not_zero, &done);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1575,7 +1770,18 @@
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- Abort("Unimplemented: %s", "DoInstanceOf");
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->InputAt(1)));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ NearLabel true_value, done;
+ __ testq(rax, rax);
+ __ j(zero, &true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&done);
}
@@ -1583,7 +1789,9 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->InputAt(1)));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ testq(rax, rax);
EmitBranch(true_block, false_block, zero);
@@ -1591,13 +1799,63 @@
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfKnowGLobal");
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_);
+ }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label false_result;
+ Register object = ToRegister(instr->InputAt(0));
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+ // Null is not an instance of anything.
+ __ CompareRoot(object, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result);
+
+ // String values are not instances of anything.
+ __ JumpIfNotString(object, kScratchRegister, deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+
+ __ bind(deferred->exit());
}
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Abort("Unimplemented: %s", "DoDeferredLInstanceOfKnownGlobakl");
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ __ PushSafepointRegisters();
+
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+
+ __ push(ToRegister(instr->InputAt(0)));
+ __ Push(instr->function());
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ movq(kScratchRegister, rax);
+ __ PopSafepointRegisters();
+ __ testq(kScratchRegister, kScratchRegister);
+ Label load_false;
+ Label done;
+ __ j(not_zero, &load_false);
+ __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&load_false);
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1694,10 +1952,23 @@
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Abort("Unimplemented: %s", "DoLoadContextSlot");
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ movq(result, ContextOperand(context, instr->slot_index()));
}
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ movq(ContextOperand(context, instr->slot_index()), value);
+ if (instr->needs_write_barrier()) {
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, offset, value, kScratchRegister);
+ }
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1721,20 +1992,59 @@
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function.
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ // Check whether the function has an instance prototype.
+ NearLabel non_instance;
+ __ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ __ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ movq(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+
+ // If the function does not have an initial map, we're done.
+ NearLabel done;
+ __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
+ __ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in the function's map.
+ __ bind(&non_instance);
+ __ movq(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- ASSERT(instr->result()->Equals(instr->InputAt(0)));
- Register reg = ToRegister(instr->InputAt(0));
- __ movq(reg, FieldOperand(reg, JSObject::kElementsOffset));
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
NearLabel done;
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
FACTORY->fixed_array_map());
__ j(equal, &done);
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ FACTORY->pixel_array_map());
+ __ j(equal, &done);
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
FACTORY->fixed_cow_array_map());
__ Check(equal, "Check for fast elements failed.");
__ bind(&done);
@@ -1742,8 +2052,29 @@
}
+void LCodeGen::DoLoadPixelArrayExternalPointer(
+ LLoadPixelArrayExternalPointer* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
+}
+
+
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Abort("Unimplemented: %s", "DoAccessArgumentsAt");
+ Register arguments = ToRegister(instr->arguments());
+ Register length = ToRegister(instr->length());
+ Register result = ToRegister(instr->result());
+
+ if (instr->index()->IsRegister()) {
+ __ subl(length, ToRegister(instr->index()));
+ } else {
+ __ subl(length, ToOperand(instr->index()));
+ }
+ DeoptimizeIf(below_equal, instr->environment());
+
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
+ __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}
@@ -1765,41 +2096,148 @@
}
+void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
+ Register external_elements = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(external_elements));
+
+ // Load the result.
+ __ movzxbq(result, Operand(external_elements, key, times_1, 0));
+}
+
+
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->key()).is(rax));
+
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ Builtins::KeyedLoadIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Abort("Unimplemented: %s", "DoArgumentsElements");
+ Register result = ToRegister(instr->result());
+
+ // Check for arguments adapter frame.
+ NearLabel done, adapted;
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(result, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame.
+ __ movq(result, rbp);
+ __ jmp(&done);
+
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ bind(&done);
}
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Abort("Unimplemented: %s", "DoArgumentsLength");
+ Register result = ToRegister(instr->result());
+
+ NearLabel done;
+
+ // If no arguments adaptor frame the number of arguments is fixed.
+ if (instr->InputAt(0)->IsRegister()) {
+ __ cmpq(rbp, ToRegister(instr->InputAt(0)));
+ } else {
+ __ cmpq(rbp, ToOperand(instr->InputAt(0)));
+ }
+ __ movq(result, Immediate(scope()->num_parameters()));
+ __ j(equal, &done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(result, Operand(result,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger32(result, result);
+
+ // Argument length is in result register.
+ __ bind(&done);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Abort("Unimplemented: %s", "DoApplyArguments");
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(rax)); // Used for parameter count.
+ ASSERT(function.is(rdi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ // If the receiver is null or undefined, we have to pass the global object
+ // as a receiver.
+ NearLabel global_object, receiver_ok;
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, &global_object);
+ __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &global_object);
+
+ // The receiver should be a JS object.
+ Condition is_smi = __ CheckSmi(receiver);
+ DeoptimizeIf(is_smi, instr->environment());
+ __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ DeoptimizeIf(below, instr->environment());
+ __ jmp(&receiver_ok);
+
+ __ bind(&global_object);
+ // TODO(kmillikin): We have a hydrogen value for the global object. See
+ // if it's better to use it than to explicitly fetch it from the context
+ // here.
+ __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ bind(&receiver_ok);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ cmpq(length, Immediate(kArgumentsLimit));
+ DeoptimizeIf(above, instr->environment());
+
+ __ push(receiver);
+ __ movq(receiver, length);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ NearLabel invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ testl(length, length);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ __ decl(length);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index(),
+ true);
+ v8::internal::ParameterCount actual(rax);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(argument);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ push(Immediate(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else {
- ASSERT(r.IsTagged());
- __ Push(literal);
- }
+ EmitPushConstantOperand(argument);
} else if (argument->IsRegister()) {
__ push(ToRegister(argument));
} else {
@@ -1809,6 +2247,21 @@
}
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ movq(result,
+ Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
__ movq(result, GlobalObjectOperand());
@@ -1866,77 +2319,334 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber");
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label done;
+ Register tmp = input_reg.is(rax) ? rcx : rax;
+ Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+
+ // Preserve the value of all registers.
+ __ PushSafepointRegisters();
+
+ Label negative;
+ __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
+ __ testl(tmp, Immediate(HeapNumber::kSignMask));
+ __ j(not_zero, &negative);
+ __ jmp(&done);
+
+ __ bind(&negative);
+
+ Label allocated, slow;
+ __ AllocateHeapNumber(tmp, tmp2, &slow);
+ __ jmp(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp.is(rax)) {
+ __ movq(tmp, rax);
+ }
+
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+ __ bind(&allocated);
+ __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ shl(tmp2, Immediate(1));
+ __ shr(tmp2, Immediate(1));
+ __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+ __ bind(&done);
+ __ PopSafepointRegisters();
}
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ testl(input_reg, input_reg);
+ Label is_positive;
+ __ j(not_sign, &is_positive);
+ __ negl(input_reg); // Sets flags.
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
+}
+
+
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathAbs");
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LUnaryMathOperation* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ private:
+ LUnaryMathOperation* instr_;
+ };
+
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Representation r = instr->hydrogen()->value()->representation();
+
+ if (r.IsDouble()) {
+ XMMRegister scratch = xmm0;
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(scratch, scratch);
+ __ subsd(scratch, input_reg);
+ __ andpd(input_reg, scratch);
+ } else if (r.IsInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else { // Tagged case.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input_reg = ToRegister(instr->InputAt(0));
+ // Smi check.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathFloor");
+ XMMRegister xmm_scratch = xmm0;
+ Register output_reg = ToRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathRound");
+ const XMMRegister xmm_scratch = xmm0;
+ Register output_reg = ToRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+
+ // xmm_scratch = 0.5
+ __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
+
+ // input = input + 0.5
+ __ addsd(input_reg, xmm_scratch);
+
+ // We need to return -0 for the input range [-0.5, 0[, otherwise
+ // compute Math.floor(value + 0.5).
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+ // If we don't need to bailout on -0, we check only bailout
+ // on negative inputs.
+ __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ // Compute Math.floor(value + 0.5).
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
}
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathSqrt");
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+ __ sqrtsd(input_reg, input_reg);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathPowHalf");
+ XMMRegister xmm_scratch = xmm0;
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+ __ xorpd(xmm_scratch, xmm_scratch);
+ __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
+ __ sqrtsd(input_reg, input_reg);
}
void LCodeGen::DoPower(LPower* instr) {
- Abort("Unimplemented: %s", "DoPower");
+ // Math.pow: dispatch on the exponent's representation and call the
+ // matching C helper. The base is passed in xmm0; the exponent in xmm1
+ // (double) or in the first integer argument register (int32).
+ LOperand* left = instr->InputAt(0);
+ XMMRegister left_reg = ToDoubleRegister(left);
+ ASSERT(!left_reg.is(xmm1));
+ LOperand* right = instr->InputAt(1);
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ if (exponent_type.IsDouble()) {
+ __ PrepareCallCFunction(2);
+ // Move arguments to correct registers
+ __ movsd(xmm0, left_reg);
+ ASSERT(ToDoubleRegister(right).is(xmm1));
+ __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ } else if (exponent_type.IsInteger32()) {
+ __ PrepareCallCFunction(2);
+ // Move arguments to correct registers: xmm0 and edi (not rdi).
+ // On Windows, the registers are xmm0 and edx.
+ __ movsd(xmm0, left_reg);
+#ifdef _WIN64
+ ASSERT(ToRegister(right).is(rdx));
+#else
+ ASSERT(ToRegister(right).is(rdi));
+#endif
+ __ CallCFunction(ExternalReference::power_double_int_function(), 2);
+ } else {
+ // Tagged exponent: unbox a smi or heap number into xmm1; deoptimize
+ // on anything else.
+ ASSERT(exponent_type.IsTagged());
+ CpuFeatures::Scope scope(SSE2);
+ Register right_reg = ToRegister(right);
+
+ Label non_smi, call;
+ __ JumpIfNotSmi(right_reg, &non_smi);
+ __ SmiToInteger32(right_reg, right_reg);
+ __ cvtlsi2sd(xmm1, right_reg);
+ __ jmp(&call);
+
+ __ bind(&non_smi);
+ __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , kScratchRegister);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+ __ bind(&call);
+ __ PrepareCallCFunction(2);
+ // Move arguments to correct registers xmm0 and xmm1.
+ __ movsd(xmm0, left_reg);
+ // Right argument is already in xmm1.
+ __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ }
+ // Return value is in xmm0.
+ __ movsd(result_reg, xmm0);
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathLog");
+ // Math.log through the shared transcendental cache stub, untagged
+ // (double-in/double-out) variant; result expected in xmm1.
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathCos");
+ // Math.cos through the shared transcendental cache stub, untagged
+ // (double-in/double-out) variant; result expected in xmm1.
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Must use the COS cache entry: the LOG entry copy-pasted from
+ // DoMathLog would make Math.cos compute Math.log.
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathSin");
+ // Math.sin through the shared transcendental cache stub, untagged
+ // (double-in/double-out) variant; result expected in xmm1.
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Must use the SIN cache entry: the LOG entry copy-pasted from
+ // DoMathLog would make Math.sin compute Math.log.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoUnaryMathOperation");
+ // Dispatch a generic unary math instruction to its dedicated emitter;
+ // any opcode without an emitter is a compiler bug (UNREACHABLE).
+ switch (instr->op()) {
+ case kMathAbs:
+ DoMathAbs(instr);
+ break;
+ case kMathFloor:
+ DoMathFloor(instr);
+ break;
+ case kMathRound:
+ DoMathRound(instr);
+ break;
+ case kMathSqrt:
+ DoMathSqrt(instr);
+ break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
+ case kMathCos:
+ DoMathCos(instr);
+ break;
+ case kMathSin:
+ DoMathSin(instr);
+ break;
+ case kMathLog:
+ DoMathLog(instr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- Abort("Unimplemented: %s", "DoCallKeyed");
+ // Call through the keyed-call IC: key expected in rcx, result in rax.
+ ASSERT(ToRegister(instr->key()).is(rcx));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
+ arity, NOT_IN_LOOP);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Restore the context register after the call.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
- Abort("Unimplemented: %s", "DoCallNamed");
+ // Call through the call IC: property name expected in rcx, result in
+ // rax.
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arity, NOT_IN_LOOP);
+ __ Move(rcx, instr->name());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Restore the context register after the call.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
- Abort("Unimplemented: %s", "DoCallFunction");
+ // Call an arbitrary function value via CallFunctionStub; result in rax.
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Drop the function from the stack.
+ __ Drop(1);
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallGlobal");
+ // Call a function looked up on the global object; name in rcx, result
+ // in rax. CODE_TARGET_CONTEXT marks this as a contextual (global) call
+ // site for the IC.
+ ASSERT(ToRegister(instr->result()).is(rax));
+ int arity = instr->arity();
+ Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
+ arity, NOT_IN_LOOP);
+ __ Move(rcx, instr->name());
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -1959,7 +2669,7 @@
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- Abort("Unimplemented: %s", "DoCallRuntime");
+ // Forward to the shared LCodeGen::CallRuntime helper.
+ CallRuntime(instr->function(), instr->arity(), instr);
}
@@ -1994,10 +2704,35 @@
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedGeneric");
+ // Generic named store through the StoreIC: receiver in rdx, value in
+ // rax, property name in rcx. Selects the strict-mode stub when the
+ // function being compiled is strict.
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
+ // Store a byte into a pixel array, clamping the int32 value to [0, 255].
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register value = ToRegister(instr->value());
+
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ testl(value, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ // Branch-free clamp: testl left SF = sign of value, so setcc yields
+ // 1 for negative values and 0 for values > 255; decb then maps
+ // 1 -> 0 and 0 -> 255.
+ __ setcc(negative, value); // 1 if negative, 0 if positive.
+ __ decb(value); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+
+ __ movb(Operand(external_pointer, key, times_1, 0), value);
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->length()->IsRegister()) {
__ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
@@ -2040,16 +2775,174 @@
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
+ // Generic keyed store through the KeyedStoreIC: receiver in rdx, key in
+ // rcx, value in rax; strict-mode stub selected from the compile info.
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->key()).is(rcx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ Handle<Code> ic(isolate()->builtins()->builtin(
+ info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ // Inline fast paths for String.charCodeAt: sequential ASCII and
+ // two-byte strings, plus cons strings whose second part is empty.
+ // Everything else falls back to the runtime via deferred code.
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register string = ToRegister(instr->string());
+ Register index = no_reg;
+ int const_index = -1;
+ if (instr->index()->IsConstantOperand()) {
+ const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (!Smi::IsValid(const_index)) {
+ // Guaranteed to be out of bounds because of the assert above.
+ // So the bounds check that must dominate this instruction must
+ // have deoptimized already.
+ if (FLAG_debug_code) {
+ __ Abort("StringCharCodeAt: out of bounds index.");
+ }
+ // No code needs to be generated.
+ return;
+ }
+ } else {
+ index = ToRegister(instr->index());
+ }
+ Register result = ToRegister(instr->result());
+
+ // NOTE(review): presumably zone-allocated via LDeferredCode's operator
+ // new (it is never explicitly deleted) -- confirm.
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ NearLabel flat_string, ascii_string, done;
+
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for non-sequential strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle cons strings and go to deferred code for the rest.
+ __ testb(result, Immediate(kIsConsStringMask));
+ __ j(zero, deferred->entry());
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, deferred->entry());
+ // Get the first of the two strings and load its instance type.
+ __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(not_zero, deferred->entry());
+
+ // Check for ASCII or two-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzxwl(result,
+ FieldOperand(string,
+ SeqTwoByteString::kHeaderSize +
+ (kUC16Size * const_index)));
+ } else {
+ __ movzxwl(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ }
+ __ jmp(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzxbl(result, FieldOperand(string,
+ SeqAsciiString::kHeaderSize + const_index));
+ } else {
+ __ movzxbl(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ }
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ // Slow path for DoStringCharCodeAt: call %StringCharCodeAt with the
+ // string and a smi index, then untag the smi result.
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ __ PushSafepointRegisters();
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ Push(Smi::FromInt(const_index));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ Integer32ToSmi(index, index);
+ __ push(index);
+ }
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
+ // 2 == number of arguments pushed above.
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(rax);
+ }
+ __ SmiToInteger32(rax, rax);
+ __ StoreToSafepointRegisterSlot(result, rax);
+ __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ // Load the string's length field (a smi) into the result register.
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ __ movq(result, FieldOperand(string, String::kLengthOffset));
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
 LOperand* input = instr->InputAt(0);
 ASSERT(input->IsRegister() || input->IsStackSlot());
 LOperand* output = instr->result();
 ASSERT(output->IsDoubleRegister());
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ // Use the register form of cvtlsi2sd when the input is in a register
+ // instead of always going through a memory operand.
+ if (input->IsRegister()) {
+ __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+ } else {
+ __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ }
}
@@ -2152,7 +3045,7 @@
// Smi to XMM conversion
__ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
+ __ SmiToInteger32(kScratchRegister, input_reg);
__ cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
@@ -2229,12 +3122,55 @@
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- Abort("Unimplemented: %s", "DoNumberUntagD");
+ // Untag a tagged number (smi or heap number) into a double register;
+ // the shared EmitNumberUntagD helper deoptimizes on non-numbers.
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ XMMRegister result_reg = ToDoubleRegister(result);
+
+ EmitNumberUntagD(input_reg, result_reg, instr->environment());
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Abort("Unimplemented: %s", "DoDoubleToI");
+ // Convert a double to an int32, deoptimizing when the conversion is not
+ // exact (unless truncating, as for the JS bitwise operations).
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ XMMRegister input_reg = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+
+ if (instr->truncating()) {
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ __ cvttsd2siq(result_reg, input_reg);
+ __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
+ // Compare all 64 bits against the overflow marker: cmpl would only
+ // test the low 32 bits, which are zero both for the marker and for
+ // common valid results such as 0, causing spurious deoptimizations.
+ __ cmpq(result_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ // Convert, then convert back and compare to detect inexact inputs.
+ __ cvttsd2si(result_reg, input_reg);
+ __ cvtlsi2sd(xmm0, result_reg);
+ __ ucomisd(xmm0, input_reg);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ __ testl(result_reg, result_reg);
+ __ j(not_zero, &done);
+ __ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // deoptimize.
+ __ andl(result_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ bind(&done);
+ }
+ }
}
@@ -2383,7 +3319,54 @@
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Abort("Unimplemented: %s", "DoRegExpLiteral");
+ // Materialize the regexp literal's boilerplate (through the runtime on
+ // first execution) and return a shallow copy of it in rax.
+ NearLabel materialized;
+ // Registers will be used as follows:
+ // rdi = JS function.
+ // rcx = literals array.
+ // rbx = regexp literal.
+ // rax = regexp literal clone.
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ int literal_offset = FixedArray::kHeaderSize +
+ instr->hydrogen()->literal_index() * kPointerSize;
+ __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &materialized);
+
+ // Create regexp literal using runtime function
+ // Result will be in rax.
+ __ push(rcx);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->pattern());
+ __ Push(instr->hydrogen()->flags());
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ movq(rbx, rax);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(rbx);
+ __ Push(Smi::FromInt(size));
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ pop(rbx);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ movq(rdx, FieldOperand(rbx, i));
+ __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movq(FieldOperand(rax, i), rdx);
+ __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ }
+ // Copy the trailing word if the object size is an odd number of words.
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ }
}
@@ -2406,63 +3389,59 @@
void LCodeGen::DoTypeof(LTypeof* instr) {
- Abort("Unimplemented: %s", "DoTypeof");
+ // Push the operand (constant, register or stack slot) and have the
+ // runtime compute the typeof string.
+ LOperand* input = instr->InputAt(0);
+ if (input->IsConstantOperand()) {
+ __ Push(ToHandle(LConstantOperand::cast(input)));
+ } else if (input->IsRegister()) {
+ __ push(ToRegister(input));
+ } else {
+ ASSERT(input->IsStackSlot());
+ __ push(ToOperand(input));
+ }
+ CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Abort("Unimplemented: %s", "DoTypeofIs");
-}
-
-
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ // Materialize true/false depending on whether typeof(input) equals the
+ // instruction's type literal (test emitted by EmitTypeofIs).
+ Register input = ToRegister(instr->InputAt(0));
 Register result = ToRegister(instr->result());
- NearLabel true_label;
- NearLabel false_label;
+ Label true_label;
+ Label false_label;
 NearLabel done;
- EmitIsConstructCall(result);
- __ j(equal, &true_label);
-
+ Condition final_branch_condition = EmitTypeofIs(&true_label,
+ &false_label,
+ input,
+ instr->type_literal());
+ __ j(final_branch_condition, &true_label);
+ // Fall through here when the final typeof condition fails.
+ __ bind(&false_label);
 __ LoadRoot(result, Heap::kFalseValueRootIndex);
 __ jmp(&done);
 __ bind(&true_label);
 __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
 __ bind(&done);
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->TempAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
+void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
+ // Push a constant operand: int32 constants as (sign-extended)
+ // immediates, tagged constants as handles; double constants are not
+ // supported and abort.
+ ASSERT(operand->IsConstantOperand());
+ LConstantOperand* const_op = LConstantOperand::cast(operand);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ push(Immediate(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("unsupported double immediate");
+ } else {
+ ASSERT(r.IsTagged());
+ __ Push(literal);
+ }
}
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- NearLabel check_frame_marker;
- __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
-}
-
-
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2543,6 +3522,54 @@
}
+void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ // Materialize true/false depending on whether the current function was
+ // invoked as a constructor (flags set by EmitIsConstructCall).
+ Register result = ToRegister(instr->result());
+ NearLabel true_label;
+ NearLabel false_label;
+ NearLabel done;
+
+ EmitIsConstructCall(result);
+ __ j(equal, &true_label);
+
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&true_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ // Branch form of DoIsConstructCall: jump directly on the flags left by
+ // EmitIsConstructCall instead of materializing a boolean.
+ Register temp = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp);
+ EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+ // Sets the equal flag iff the calling frame is a construct frame,
+ // looking through an arguments adaptor frame if present.
+ // Get the frame pointer for the calling frame.
+ __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ NearLabel check_frame_marker;
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ // Load the caller's caller FP through temp (which holds the adaptor
+ // frame's FP) -- not rax, which holds an unrelated value here.
+ __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+}
+
+
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@@ -2555,7 +3582,36 @@
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Abort("Unimplemented: %s", "DoDeleteProperty");
+ // Implements 'delete obj[key]': push the object, the key and the
+ // strict-mode flag, then invoke the DELETE builtin.
+ LOperand* obj = instr->object();
+ LOperand* key = instr->key();
+ // Push object.
+ if (obj->IsRegister()) {
+ __ push(ToRegister(obj));
+ } else {
+ __ push(ToOperand(obj));
+ }
+ // Push key.
+ if (key->IsConstantOperand()) {
+ EmitPushConstantOperand(key);
+ } else if (key->IsRegister()) {
+ __ push(ToRegister(key));
+ } else {
+ __ push(ToOperand(key));
+ }
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ // Create safepoint generator that will also ensure enough space in the
+ // reloc info for patching in deoptimization (since this is invoking a
+ // builtin)
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index(),
+ true);
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}
@@ -2572,7 +3628,19 @@
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- Abort("Unimplemented: %s", "DoOsrEntry");
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+ environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+ instr->SpilledDoubleRegisterArray());
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment);
+ // Remember where the OSR entry code starts; the assert above implies
+ // there is at most one OSR entry per compiled function.
+ ASSERT(osr_pc_offset_ == -1);
+ osr_pc_offset_ = masm()->pc_offset();
}
#undef __
« no previous file with comments | « src/x64/lithium-codegen-x64.h ('k') | src/x64/lithium-x64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698