Index: src/x64/codegen-x64.cc
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 97702db71ff3680180cd457651f6d625c760be07..d99ba6843f432a53f2139b88e000ca2eced74d64 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1895,8 +1895,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
 
   frame_->EmitPush(rax);  // <- slot 3
   frame_->EmitPush(rdx);  // <- slot 2
-  __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
-  __ Integer32ToSmi(rax, rax);
+  __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
   frame_->EmitPush(rax);  // <- slot 1
   frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
   entry.Jump();
@@ -1907,8 +1906,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   frame_->EmitPush(rax);  // <- slot 2
 
   // Push the length of the array and the initial index onto the stack.
-  __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
-  __ Integer32ToSmi(rax, rax);
+  __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   frame_->EmitPush(rax);  // <- slot 1
   frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
 
@@ -4472,7 +4470,8 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
   __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
           Factory::fixed_array_map());
   // Set length.
-  __ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
+  __ Integer32ToSmi(rdx, rbx);
+  __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
   // Fill contents of fixed-array with the-hole.
   __ Move(rdx, Factory::the_hole_value());
   __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
@@ -4591,15 +4590,15 @@ void DeferredSearchCache::Generate() {
   // cache miss this optimization would hardly matter much.
 
   // Check if we could add new entry to cache.
-  __ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiToInteger32(r9, r9);
-  __ cmpq(rbx, r9);
+  __ SmiCompare(rbx, r9);
   __ j(greater, &add_new_entry);
 
   // Check if we could evict entry after finger.
   __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
   __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(rbx, rbx);
   __ addq(rdx, kEntrySizeImm);
   Label forward;
   __ cmpq(rbx, rdx);
@@ -4611,9 +4610,8 @@ void DeferredSearchCache::Generate() {
   __ jmp(&update_cache);
 
   __ bind(&add_new_entry);
-  // r9 holds cache size as int.
-  __ movq(rdx, r9);
-  __ Integer32ToSmi(r9, r9);
+  // r9 holds cache size as smi.
+  __ SmiToInteger32(rdx, r9);
   __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
   __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
 
@@ -7171,13 +7169,8 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
     Result elements = allocator()->Allocate();
     ASSERT(elements.is_valid());
 
-    // Use a fresh temporary for the index and later the loaded
-    // value.
-    Result index = allocator()->Allocate();
-    ASSERT(index.is_valid());
-
     DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(index.reg(),
+        new DeferredReferenceGetKeyedValue(elements.reg(),
                                            receiver.reg(),
                                            key.reg(),
                                            is_global);
@@ -7213,31 +7206,21 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
            Factory::fixed_array_map());
     deferred->Branch(not_equal);
 
-    // Shift the key to get the actual index value and check that
-    // it is within bounds.
-    __ SmiToInteger32(index.reg(), key.reg());
-    __ cmpl(index.reg(),
-            FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+    // Check that key is within bounds.
+    __ SmiCompare(key.reg(),
+                  FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
 
-    // The index register holds the un-smi-tagged key. It has been
-    // zero-extended to 64-bits, so it can be used directly as index in the
-    // operand below.
-    // Load and check that the result is not the hole. We could
-    // reuse the index or elements register for the value.
-    //
-    // TODO(206): Consider whether it makes sense to try some
-    // heuristic about which register to reuse. For example, if
-    // one is rax, the we can reuse that one because the value
-    // coming from the deferred code will be in rax.
-    Result value = index;
+    // The key register holds the smi-tagged key. Load the value and
+    // check that it is not the hole value.
+    Result value = elements;
+    SmiIndex index =
+        masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
     __ movq(value.reg(),
-            Operand(elements.reg(),
-                    index.reg(),
-                    times_pointer_size,
-                    FixedArray::kHeaderSize - kHeapObjectTag));
-    elements.Unuse();
-    index.Unuse();
+            FieldOperand(elements.reg(),
+                         index.reg,
+                         index.scale,
+                         FixedArray::kHeaderSize));
     __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
     deferred->Branch(equal);
     __ IncrementCounter(&Counters::keyed_load_inline, 1);
@@ -7536,7 +7519,7 @@ void Reference::SetValue(InitState init_state) {
 
       // Check whether it is possible to omit the write barrier. If the
       // elements array is in new space or the value written is a smi we can
-      // safely update the elements array without updating the remembered set.
+      // safely update the elements array without a write barrier.
      Label in_new_space;
      __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
      if (!value_is_constant) {
@@ -7561,10 +7544,10 @@ void Reference::SetValue(InitState init_state) {
      // Store the value.
      SmiIndex index =
          masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-      __ movq(Operand(tmp.reg(),
-                      index.reg,
-                      index.scale,
-                      FixedArray::kHeaderSize - kHeapObjectTag),
+      __ movq(FieldOperand(tmp.reg(),
+                           index.reg,
+                           index.scale,
+                           FixedArray::kHeaderSize),
              value.reg());
      __ IncrementCounter(&Counters::keyed_store_inline, 1);
 
@@ -7644,7 +7627,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Setup the object header.
  __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
-  __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
+  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
 
  // Setup the fixed slots.
  __ xor_(rbx, rbx);  // Set to NULL.
@@ -8319,7 +8302,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Check that the last match info has space for the capture registers and the
  // additional information. Ensure no overflow in add.
  ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(rax, rax);
  __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmpl(rdx, rax);
  __ j(greater, &runtime);
@@ -8597,9 +8581,10 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
 
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
-  __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  __ shrl(mask, Immediate(1));  // Divide length by two (length is not a smi).
-  __ subl(mask, Immediate(1));  // Make mask.
+  __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide smi tagged length by two.
+  __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+  __ subq(mask, Immediate(1));  // Make mask.
 
  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
@@ -9119,7 +9104,6 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
 
  // Get the parameters pointer from the stack and untag the length.
  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  __ SmiToInteger32(rcx, rcx);
 
  // Setup the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
@@ -9127,7 +9111,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-  __ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+  __ SmiToInteger32(rcx, rcx);  // Untag length for the loop below.
 
  // Copy the fixed array slots.
  Label loop;
@@ -10898,7 +10883,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
  __ bind(&allocated);
  // Fill the fields of the cons string.
  __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
-  __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
+  __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
          Immediate(String::kEmptyHashField));
  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
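Note on the representation change this patch makes: FixedArray::kLengthOffset now holds a smi-tagged length instead of a raw int32, so loads become movq, stores tag first (Integer32ToSmi), bounds checks compare smi against smi (SmiCompare works because tagging preserves order), and indexing goes through SmiToIndex rather than untagging into a scratch register. The standalone C++ sketch below illustrates the arithmetic these macro-assembler helpers perform. It is an illustration only, not V8's implementation, and it assumes the x64 smi layout of this era: a 32-bit payload stored in the upper half of a 64-bit word (kSmiShift == 32); SmiToScaledIndex is a hypothetical name for the shift that SmiToIndex arranges.

    #include <cassert>
    #include <cstdint>

    // Assumed x64 smi layout: 32-bit payload in the upper half of a 64-bit word.
    constexpr int kSmiShift = 32;
    constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers.

    // Tag an int32 as a smi (the arithmetic behind Integer32ToSmi).
    inline int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;
    }

    // Untag a smi back to an int32 (the arithmetic behind SmiToInteger32).
    inline int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift);
    }

    // Turn a smi-tagged key directly into a scaled byte offset without a
    // separate untag step, as SmiToIndex enables in the keyed load/store
    // hunks above: one right shift by (kSmiShift - shift) yields
    // key * 2^shift.
    inline int64_t SmiToScaledIndex(int64_t smi_key, int shift) {
      return smi_key >> (kSmiShift - shift);
    }

    // Untag and divide a non-negative smi by 2^power in a single shift
    // (the arithmetic behind PositiveSmiDivPowerOfTwoToInteger32, used for
    // the number-string-cache mask above).
    inline int32_t PositiveSmiDivPowerOfTwoToInteger32(int64_t smi, int power) {
      return static_cast<int32_t>(smi >> (kSmiShift + power));
    }

    int main() {
      int64_t length = Integer32ToSmi(64);  // What kLengthOffset now holds.
      assert(SmiToInteger32(length) == 64);
      assert(SmiToScaledIndex(Integer32ToSmi(5), kPointerSizeLog2) == 5 * 8);
      assert(PositiveSmiDivPowerOfTwoToInteger32(length, 1) == 32);
      return 0;
    }

Because tagging is a plain left shift, it is monotonic, which is why the patch can replace the untag-then-cmpl bounds check with a single SmiCompare of the tagged key against the tagged length.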