| OLD | NEW |
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 70 matching lines...) | |
| 81 Register addr, | 81 Register addr, |
| 82 Register scratch) { | 82 Register scratch) { |
| 83 if (FLAG_debug_code) { | 83 if (FLAG_debug_code) { |
| 84 // Check that the object is not in new space. | 84 // Check that the object is not in new space. |
| 85 Label not_in_new_space; | 85 Label not_in_new_space; |
| 86 InNewSpace(object, scratch, not_equal, &not_in_new_space); | 86 InNewSpace(object, scratch, not_equal, &not_in_new_space); |
| 87 Abort("new-space object passed to RecordWriteHelper"); | 87 Abort("new-space object passed to RecordWriteHelper"); |
| 88 bind(&not_in_new_space); | 88 bind(&not_in_new_space); |
| 89 } | 89 } |
| 90 | 90 |
| 91 Label fast; | |
| 92 | |
| 93 // Compute the page start address from the heap object pointer, and reuse | 91 // Compute the page start address from the heap object pointer, and reuse |
| 94 // the 'object' register for it. | 92 // the 'object' register for it. |
| 95 ASSERT(is_int32(~Page::kPageAlignmentMask)); | 93 and_(object, Immediate(~Page::kPageAlignmentMask)); |
| 96 and_(object, | |
| 97 Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask))); | |
| 98 Register page_start = object; | |
| 99 | 94 |
| 100 // Compute the bit addr in the remembered set/index of the pointer in the | 95 // Compute the number of the region covering addr. See the |
| 101 // page. Reuse 'addr' as pointer_offset. | 96 // Page::GetRegionNumberForAddress method for more details. |
| 102 subq(addr, page_start); | 97 and_(addr, Immediate(Page::kPageAlignmentMask)); |
| 103 shr(addr, Immediate(kPointerSizeLog2)); | 98 shrl(addr, Immediate(Page::kRegionSizeLog2)); |
| 104 Register pointer_offset = addr; | |
| 105 | 99 |
| 106 // If the bit offset lies beyond the normal remembered set range, it is in | 100 // Set dirty mark for region. |
| 107 // the extra remembered set area of a large object. | 101 bts(Operand(object, Page::kDirtyFlagOffset), addr); |
| 108 cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize)); | |
| 109 j(below, &fast); | |
| 110 | |
| 111 // We have a large object containing pointers. It must be a FixedArray. | |
| 112 | |
| 113 // Adjust 'page_start' so that addressing using 'pointer_offset' hits the | |
| 114 // extra remembered set after the large object. | |
| 115 | |
| 116 // Load the array length into 'scratch'. | |
| 117 movl(scratch, | |
| 118 Operand(page_start, | |
| 119 Page::kObjectStartOffset + FixedArray::kLengthOffset)); | |
| 120 Register array_length = scratch; | |
| 121 | |
| 122 // Extra remembered set starts right after the large object (a FixedArray), at | |
| 123 // page_start + kObjectStartOffset + objectSize | |
| 124 // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length. | |
| 125 // Add the delta between the end of the normal RSet and the start of the | |
| 126 // extra RSet to 'page_start', so that addressing the bit using | |
| 127 // 'pointer_offset' hits the extra RSet words. | |
| 128 lea(page_start, | |
| 129 Operand(page_start, array_length, times_pointer_size, | |
| 130 Page::kObjectStartOffset + FixedArray::kHeaderSize | |
| 131 - Page::kRSetEndOffset)); | |
| 132 | |
| 133 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
| 134 // to limit code size. We should probably evaluate this decision by | |
| 135 // measuring the performance of an equivalent implementation using | |
| 136 // "simpler" instructions | |
| 137 bind(&fast); | |
| 138 bts(Operand(page_start, Page::kRSetOffset), pointer_offset); | |
| 139 } | 102 } |
| 140 | 103 |
| 141 | 104 |
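An aside for readers of the hunk above: the rewritten RecordWriteHelper replaces the old remembered-set bit arithmetic (and its large-object slow path) with a single dirty mark per page region. A minimal C++ sketch of the same address math follows; the constants and the MarkRegionDirty helper are illustrative stand-ins under assumed values, not V8 API:

    #include <cstdint>

    // Illustrative stand-ins; the real values live on v8's Page class.
    const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // assumed 8KB pages
    const int kRegionSizeLog2 = 8;                       // assumed 256-byte regions
    const int kDirtyFlagOffset = 0;                      // assumed field offset

    // Mirrors the generated code: mask the object pointer down to its page
    // start, index the region covering 'addr' inside that page, then set
    // that region's bit (the bts instruction in the hunk above).
    inline void MarkRegionDirty(uintptr_t object, uintptr_t addr) {
      uintptr_t page = object & ~kPageAlignmentMask;  // and_(object, ...)
      uint32_t region = static_cast<uint32_t>(addr & kPageAlignmentMask)
                        >> kRegionSizeLog2;           // and_(addr, ...); shrl
      uint32_t* dirty = reinterpret_cast<uint32_t*>(page + kDirtyFlagOffset);
      dirty[region >> 5] |= 1u << (region & 31);      // bts(Operand(...), addr)
    }

As the hunk's comments note, the generated version reuses the 'object' and 'addr' registers for the page start and region number, which is why the helper clobbers all of its inputs.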
| 142 // Set the remembered set bit for [object+offset]. | 105 // For page containing |object| mark region covering [object+offset] dirty. |
| 143 // object is the object being stored into, value is the object being stored. | 106 // object is the object being stored into, value is the object being stored. |
| 144 // If offset is zero, then the smi_index register contains the array index into | 107 // If offset is zero, then the smi_index register contains the array index into |
| 145 // the elements array represented as a smi. Otherwise it can be used as a | 108 // the elements array represented as a smi. Otherwise it can be used as a |
| 146 // scratch register. | 109 // scratch register. |
| 147 // All registers are clobbered by the operation. | 110 // All registers are clobbered by the operation. |
| 148 void MacroAssembler::RecordWrite(Register object, | 111 void MacroAssembler::RecordWrite(Register object, |
| 149 int offset, | 112 int offset, |
| 150 Register value, | 113 Register value, |
| 151 Register smi_index) { | 114 Register smi_index) { |
| 152 // The compiled code assumes that record write doesn't change the | 115 // The compiled code assumes that record write doesn't change the |
| 153 // context register, so we check that none of the clobbered | 116 // context register, so we check that none of the clobbered |
| 154 // registers are rsi. | 117 // registers are rsi. |
| 155 ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); | 118 ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); |
| 156 | 119 |
| 157 // First, check if a remembered set write is even needed. The tests below | 120 // First, check if a write barrier is even needed. The tests below |
| 158 // catch stores of Smis and stores into young gen (which does not have space | 121 // catch stores of Smis and stores into young gen. |
| 159 // for the remembered set bits). | |
| 160 Label done; | 122 Label done; |
| 161 JumpIfSmi(value, &done); | 123 JumpIfSmi(value, &done); |
| 162 | 124 |
| 163 RecordWriteNonSmi(object, offset, value, smi_index); | 125 RecordWriteNonSmi(object, offset, value, smi_index); |
| 164 bind(&done); | 126 bind(&done); |
| 165 | 127 |
| 166 // Clobber all input registers when running with the debug-code flag | 128 // Clobber all input registers when running with the debug-code flag |
| 167 // turned on to provoke errors. This clobbering repeats the | 129 // turned on to provoke errors. This clobbering repeats the |
| 168 // clobbering done inside RecordWriteNonSmi but it's necessary to | 130 // clobbering done inside RecordWriteNonSmi but it's necessary to |
| 169 // avoid having the fast case for smis leave the registers | 131 // avoid having the fast case for smis leave the registers |
| (...skipping 12 matching lines...) | |
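One step worth spelling out from RecordWrite above: the barrier exits before any bookkeeping when the stored value is a smi, because smis are encoded directly in the tagged word and can never be heap pointers. A hedged sketch of that tag test (x64 tagging assumed; the predicate name is invented for illustration):

    #include <cstdint>

    // On x64, smis carry a low tag bit of 0 and heap objects a low tag bit
    // of 1 (kSmiTag == 0, kSmiTagMask == 1 in the sources above), so
    // JumpIfSmi(value, &done) is a single test of the low bit.
    inline bool NeedsWriteBarrier(uintptr_t tagged_value) {
      const uintptr_t kSmiTagMask = 1;
      return (tagged_value & kSmiTagMask) != 0;  // heap object => barrier work
    }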
| 182 Register smi_index) { | 144 Register smi_index) { |
| 183 Label done; | 145 Label done; |
| 184 | 146 |
| 185 if (FLAG_debug_code) { | 147 if (FLAG_debug_code) { |
| 186 Label okay; | 148 Label okay; |
| 187 JumpIfNotSmi(object, &okay); | 149 JumpIfNotSmi(object, &okay); |
| 188 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); | 150 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); |
| 189 bind(&okay); | 151 bind(&okay); |
| 190 } | 152 } |
| 191 | 153 |
| 192 // Test that the object address is not in the new space. We cannot | 154 // Test that the object address is not in the new space. We cannot |
| 193 // set remembered set bits in the new space. | 155 // update page dirty marks for new space pages. |
| 194 InNewSpace(object, scratch, equal, &done); | 156 InNewSpace(object, scratch, equal, &done); |
| 195 | 157 |
| 196 // The offset is relative to a tagged or untagged HeapObject pointer, | 158 // The offset is relative to a tagged or untagged HeapObject pointer, |
| 197 // so either offset or offset + kHeapObjectTag must be a | 159 // so either offset or offset + kHeapObjectTag must be a |
| 198 // multiple of kPointerSize. | 160 // multiple of kPointerSize. |
| 199 ASSERT(IsAligned(offset, kPointerSize) || | 161 ASSERT(IsAligned(offset, kPointerSize) || |
| 200 IsAligned(offset + kHeapObjectTag, kPointerSize)); | 162 IsAligned(offset + kHeapObjectTag, kPointerSize)); |
| 201 | 163 |
| 202 // We use optimized write barrier code if the word being written to is not in | 164 Register dst = smi_index; |
| 203 // a large object page, or is in the first "page" of a large object page. | 165 if (offset != 0) { |
| 204 // We make sure that an offset is inside the right limits whether it is | 166 lea(dst, Operand(object, offset)); |
| 205 // tagged or untagged. | |
| 206 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) { | |
| 207 // Compute the bit offset in the remembered set, leave it in 'scratch'. | |
| 208 lea(scratch, Operand(object, offset)); | |
| 209 ASSERT(is_int32(Page::kPageAlignmentMask)); | |
| 210 and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask))); | |
| 211 shr(scratch, Immediate(kPointerSizeLog2)); | |
| 212 | |
| 213 // Compute the page address from the heap object pointer, leave it in | |
| 214 // 'object' (immediate value is sign extended). | |
| 215 and_(object, Immediate(~Page::kPageAlignmentMask)); | |
| 216 | |
| 217 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
| 218 // to limit code size. We should probably evaluate this decision by | |
| 219 // measuring the performance of an equivalent implementation using | |
| 220 // "simpler" instructions | |
| 221 bts(Operand(object, Page::kRSetOffset), scratch); | |
| 222 } else { | 167 } else { |
| 223 Register dst = smi_index; | 168 // array access: calculate the destination address in the same manner as |
| 224 if (offset != 0) { | 169 // KeyedStoreIC::GenerateGeneric. |
| 225 lea(dst, Operand(object, offset)); | 170 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); |
| 226 } else { | 171 lea(dst, FieldOperand(object, |
| 227 // array access: calculate the destination address in the same manner as | 172 index.reg, |
| 228 // KeyedStoreIC::GenerateGeneric. | 173 index.scale, |
| 229 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); | 174 FixedArray::kHeaderSize)); |
| 230 lea(dst, FieldOperand(object, | |
| 231 index.reg, | |
| 232 index.scale, | |
| 233 FixedArray::kHeaderSize)); | |
| 234 } | |
| 235 // If we are already generating a shared stub, not inlining the | |
| 236 // record write code isn't going to save us any memory. | |
| 237 if (generating_stub()) { | |
| 238 RecordWriteHelper(object, dst, scratch); | |
| 239 } else { | |
| 240 RecordWriteStub stub(object, dst, scratch); | |
| 241 CallStub(&stub); | |
| 242 } | |
| 243 } | 175 } |
| 176 RecordWriteHelper(object, dst, scratch); |
| 244 | 177 |
| 245 bind(&done); | 178 bind(&done); |
| 246 | 179 |
| 247 // Clobber all input registers when running with the debug-code flag | 180 // Clobber all input registers when running with the debug-code flag |
| 248 // turned on to provoke errors. | 181 // turned on to provoke errors. |
| 249 if (FLAG_debug_code) { | 182 if (FLAG_debug_code) { |
| 250 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 183 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
| 251 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 184 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
| 252 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 185 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
| 253 } | 186 } |
| (...skipping 381 matching lines...) | |
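For the RecordWriteNonSmi hunk that ends above: the new code always computes the address of the updated slot into dst before calling RecordWriteHelper, either as object+offset for a field store or from the smi-tagged array index for an element store. A sketch of that second computation, with constants assumed for illustration (V8's SmiToIndex folds the untagging into the addressing mode):

    #include <cstdint>

    // Assumed x64 values for illustration.
    const int kHeapObjectTag = 1;
    const int kSmiShift = 32;              // a smi v is stored as v << 32
    const int kPointerSizeLog2 = 3;
    const int kFixedArrayHeaderSize = 16;  // stand-in for FixedArray::kHeaderSize

    inline uintptr_t SlotAddress(uintptr_t object, int offset,
                                 intptr_t smi_index) {
      if (offset != 0) return object + offset;  // lea(dst, Operand(object, offset))
      // SmiToIndex(..., kPointerSizeLog2): one arithmetic shift turns the
      // tagged index (v << 32) into a byte offset (v << 3).
      intptr_t byte_offset = smi_index >> (kSmiShift - kPointerSizeLog2);
      // FieldOperand subtracts the heap-object tag from the computed address.
      return object + byte_offset + kFixedArrayHeaderSize - kHeapObjectTag;
    }

The else branch's comment ties this to KeyedStoreIC::GenerateGeneric, which computes element addresses the same way.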
| 635 movq(dst, src); | 568 movq(dst, src); |
| 636 } | 569 } |
| 637 if (power < kSmiShift) { | 570 if (power < kSmiShift) { |
| 638 sar(dst, Immediate(kSmiShift - power)); | 571 sar(dst, Immediate(kSmiShift - power)); |
| 639 } else if (power > kSmiShift) { | 572 } else if (power > kSmiShift) { |
| 640 shl(dst, Immediate(power - kSmiShift)); | 573 shl(dst, Immediate(power - kSmiShift)); |
| 641 } | 574 } |
| 642 } | 575 } |
| 643 | 576 |
| 644 | 577 |
| 578 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, |
| 579 Register src, |
| 580 int power) { |
| 581 ASSERT((0 <= power) && (power < 32)); |
| 582 if (dst.is(src)) { |
| 583 shr(dst, Immediate(power + kSmiShift)); |
| 584 } else { |
| 585 UNIMPLEMENTED(); // Not used. |
| 586 } |
| 587 } |
| 588 |
| 589 |
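The new PositiveSmiDivPowerOfTwoToInteger32 above leans on the x64 smi encoding: a smi v is stored as v << kSmiShift (32), so untagging and dividing by 2^power collapse into one logical right shift by power + kSmiShift. The logical shift (shr) is what restricts it to positive smis. A worked check, with the encoding assumed as stated:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiShift = 32;                  // assumed x64 smi encoding
      int64_t value = 40;
      uint64_t smi = static_cast<uint64_t>(value) << kSmiShift;  // tagged smi
      int power = 3;                             // divide by 2^3 == 8
      // One shr both untags and divides; for a negative smi the logical
      // shift would pull in zero bits and give the wrong answer.
      uint64_t result = smi >> (power + kSmiShift);
      assert(result == 5);                       // 40 / 8
      return 0;
    }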
| 645 Condition MacroAssembler::CheckSmi(Register src) { | 590 Condition MacroAssembler::CheckSmi(Register src) { |
| 646 ASSERT_EQ(0, kSmiTag); | 591 ASSERT_EQ(0, kSmiTag); |
| 647 testb(src, Immediate(kSmiTagMask)); | 592 testb(src, Immediate(kSmiTagMask)); |
| 648 return zero; | 593 return zero; |
| 649 } | 594 } |
| 650 | 595 |
| 651 | 596 |
| 652 Condition MacroAssembler::CheckPositiveSmi(Register src) { | 597 Condition MacroAssembler::CheckPositiveSmi(Register src) { |
| 653 ASSERT_EQ(0, kSmiTag); | 598 ASSERT_EQ(0, kSmiTag); |
| 654 movq(kScratchRegister, src); | 599 movq(kScratchRegister, src); |
| (...skipping 256 matching lines...) | |
| 911 | 856 |
| 912 Move(kScratchRegister, constant); | 857 Move(kScratchRegister, constant); |
| 913 addq(dst, kScratchRegister); | 858 addq(dst, kScratchRegister); |
| 914 } else { | 859 } else { |
| 915 Move(dst, constant); | 860 Move(dst, constant); |
| 916 addq(dst, src); | 861 addq(dst, src); |
| 917 } | 862 } |
| 918 } | 863 } |
| 919 | 864 |
| 920 | 865 |
| 866 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
| 867 if (constant->value() != 0) { |
| 868 Move(kScratchRegister, constant); |
| 869 addq(dst, kScratchRegister); |
| 870 } |
| 871 } |
| 872 |
| 873 |
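The new Operand overload of SmiAddConstant above works because smi tagging is linear: tag(a) + tag(b) == tag(a + b), with no normalization step, so a single addq of the tagged constant against the memory slot suffices, and the add is skipped entirely for a zero constant. A small check of that identity, with the x64 encoding assumed:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiShift = 32;  // assumed x64 encoding
      auto tag = [](int64_t v) { return v << kSmiShift; };
      int64_t slot = tag(7);
      slot += tag(5);  // mirrors Move(kScratchRegister, constant); addq(dst, ...)
      assert(slot == tag(12));
      return 0;
    }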
| 921 void MacroAssembler::SmiAddConstant(Register dst, | 874 void MacroAssembler::SmiAddConstant(Register dst, |
| 922 Register src, | 875 Register src, |
| 923 Smi* constant, | 876 Smi* constant, |
| 924 Label* on_not_smi_result) { | 877 Label* on_not_smi_result) { |
| 925 if (constant->value() == 0) { | 878 if (constant->value() == 0) { |
| 926 if (!dst.is(src)) { | 879 if (!dst.is(src)) { |
| 927 movq(dst, src); | 880 movq(dst, src); |
| 928 } | 881 } |
| 929 } else if (dst.is(src)) { | 882 } else if (dst.is(src)) { |
| 930 ASSERT(!dst.is(kScratchRegister)); | 883 ASSERT(!dst.is(kScratchRegister)); |
| (...skipping 1661 matching lines...) | |
| 2592 scratch2, | 2545 scratch2, |
| 2593 scratch3, | 2546 scratch3, |
| 2594 gc_required, | 2547 gc_required, |
| 2595 TAG_OBJECT); | 2548 TAG_OBJECT); |
| 2596 | 2549 |
| 2597 // Set the map, length and hash field. | 2550 // Set the map, length and hash field. |
| 2598 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); | 2551 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); |
| 2599 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2552 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
| 2600 Integer32ToSmi(scratch1, length); | 2553 Integer32ToSmi(scratch1, length); |
| 2601 movq(FieldOperand(result, String::kLengthOffset), scratch1); | 2554 movq(FieldOperand(result, String::kLengthOffset), scratch1); |
| 2602 movl(FieldOperand(result, String::kHashFieldOffset), | 2555 movq(FieldOperand(result, String::kHashFieldOffset), |
| 2603 Immediate(String::kEmptyHashField)); | 2556 Immediate(String::kEmptyHashField)); |
| 2604 } | 2557 } |
| 2605 | 2558 |
| 2606 | 2559 |
| 2607 void MacroAssembler::AllocateAsciiString(Register result, | 2560 void MacroAssembler::AllocateAsciiString(Register result, |
| 2608 Register length, | 2561 Register length, |
| 2609 Register scratch1, | 2562 Register scratch1, |
| 2610 Register scratch2, | 2563 Register scratch2, |
| 2611 Register scratch3, | 2564 Register scratch3, |
| 2612 Label* gc_required) { | 2565 Label* gc_required) { |
| (...skipping 17 matching lines...) | |
| 2630 scratch2, | 2583 scratch2, |
| 2631 scratch3, | 2584 scratch3, |
| 2632 gc_required, | 2585 gc_required, |
| 2633 TAG_OBJECT); | 2586 TAG_OBJECT); |
| 2634 | 2587 |
| 2635 // Set the map, length and hash field. | 2588 // Set the map, length and hash field. |
| 2636 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); | 2589 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); |
| 2637 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2590 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
| 2638 Integer32ToSmi(scratch1, length); | 2591 Integer32ToSmi(scratch1, length); |
| 2639 movq(FieldOperand(result, String::kLengthOffset), scratch1); | 2592 movq(FieldOperand(result, String::kLengthOffset), scratch1); |
| 2640 movl(FieldOperand(result, String::kHashFieldOffset), | 2593 movq(FieldOperand(result, String::kHashFieldOffset), |
| 2641 Immediate(String::kEmptyHashField)); | 2594 Immediate(String::kEmptyHashField)); |
| 2642 } | 2595 } |
| 2643 | 2596 |
| 2644 | 2597 |
| 2645 void MacroAssembler::AllocateConsString(Register result, | 2598 void MacroAssembler::AllocateConsString(Register result, |
| 2646 Register scratch1, | 2599 Register scratch1, |
| 2647 Register scratch2, | 2600 Register scratch2, |
| 2648 Label* gc_required) { | 2601 Label* gc_required) { |
| 2649 // Allocate heap number in new space. | 2602 // Allocate heap number in new space. |
| 2650 AllocateInNewSpace(ConsString::kSize, | 2603 AllocateInNewSpace(ConsString::kSize, |
| (...skipping 108 matching lines...) | |
| 2759 CodePatcher::~CodePatcher() { | 2712 CodePatcher::~CodePatcher() { |
| 2760 // Indicate that code has changed. | 2713 // Indicate that code has changed. |
| 2761 CPU::FlushICache(address_, size_); | 2714 CPU::FlushICache(address_, size_); |
| 2762 | 2715 |
| 2763 // Check that the code was patched as expected. | 2716 // Check that the code was patched as expected. |
| 2764 ASSERT(masm_.pc_ == address_ + size_); | 2717 ASSERT(masm_.pc_ == address_ + size_); |
| 2765 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2718 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 2766 } | 2719 } |
| 2767 | 2720 |
| 2768 } } // namespace v8::internal | 2721 } } // namespace v8::internal |
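Finally, the CodePatcher destructor shown above enforces two things after in-place patching: the patch wrote byte-for-byte exactly the region it reserved, and the instruction cache is flushed before the patched code can run. A hedged sketch of that invariant (the PatchSession type is invented for illustration):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct PatchSession {
      uint8_t* address;  // start of the region being patched
      size_t size;       // bytes reserved for the patch
      uint8_t* pc;       // advances as instructions are emitted

      ~PatchSession() {
        // Mirrors ASSERT(masm_.pc_ == address_ + size_): over- or
        // under-writing the reserved region is a bug, not a warning.
        assert(pc == address + size);
        // CPU::FlushICache(address_, size_) would run here so that
        // instruction fetch sees the new bytes.
      }
    };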