Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 13 matching lines...) | |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "bootstrapper.h" | 30 #include "bootstrapper.h" |
| 31 #include "codegen-inl.h" | 31 #include "codegen-inl.h" |
| 32 #include "assembler-x64.h" | 32 #include "assembler-x64.h" |
| 33 #include "macro-assembler-x64.h" | 33 #include "macro-assembler-x64.h" |
| 34 #include "serialize.h" | |
| 34 #include "debug.h" | 35 #include "debug.h" |
| 35 | 36 |
| 36 namespace v8 { | 37 namespace v8 { |
| 37 namespace internal { | 38 namespace internal { |
| 38 | 39 |
| 39 MacroAssembler::MacroAssembler(void* buffer, int size) | 40 MacroAssembler::MacroAssembler(void* buffer, int size) |
| 40 : Assembler(buffer, size), | 41 : Assembler(buffer, size), |
| 41 unresolved_(0), | 42 unresolved_(0), |
| 42 generating_stub_(false), | 43 generating_stub_(false), |
| 43 allow_stub_calls_(true), | 44 allow_stub_calls_(true), |
| 44 code_object_(Heap::undefined_value()) { | 45 code_object_(Heap::undefined_value()) { |
| 45 } | 46 } |
| 46 | 47 |
| 47 | 48 |
| 48 // TODO(x64): For now, the write barrier is disabled on x64 and we | 49 |
| 49 // therefore generate no code. This should be fixed when the write | 50 static void RecordWriteHelper(MacroAssembler* masm, |
| 50 // barrier is enabled. | 51 Register object, |
| 51 void MacroAssembler::RecordWrite(Register object, int offset, | 52 Register addr, |
| 52 Register value, Register scratch) { | 53 Register scratch) { |
| 54 Label fast; | |
| 55 | |
| 56 // Compute the page address from the heap object pointer, leave it | |
| 57 // in 'object'. | |
| 58 ASSERT(is_int32(~Page::kPageAlignmentMask)); | |
| 59 masm->and_(object, | |
| 60 Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask))); | |
| 61 | |
| 62 // Compute the bit address in the remembered set, leave it in 'addr'. | |
| 63 masm->subq(addr, object); | |
| 64 masm->shr(addr, Immediate(kPointerSizeLog2)); | |
| 65 | |
| 66 // If the bit offset lies beyond the normal remembered set range, it is in | |
| 67 // the extra remembered set area of a large object. | |
| 68 masm->cmpq(addr, Immediate(Page::kPageSize / kPointerSize)); | |
| 69 masm->j(less, &fast); | |
| 70 | |
| 71 // Adjust 'addr' to be relative to the start of the extra remembered set | |
| 72 // and the page address in 'object' to be the address of the extra | |
| 73 // remembered set. | |
| 74 masm->subq(addr, Immediate(Page::kPageSize / kPointerSize)); | |
| 75 // Load the array length into 'scratch'. | |
| 76 masm->movl(scratch, | |
| 77 Operand(object, | |
| 78 Page::kObjectStartOffset + FixedArray::kLengthOffset)); | |
| 79 // Extra remembered set starts right after FixedArray. | |
| 80 // Add the page header, array header, and array body size | |
| 81 // (length * pointer size) to the page address to find the extra remembered | |
| 82 // set start. | |
| 83 masm->lea(object, | |
| 84 Operand(object, scratch, times_pointer_size, | |
| 85 Page::kObjectStartOffset + FixedArray::kHeaderSize)); | |
| 86 | |
| 87 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
| 88 // to limit code size. We should probably evaluate this decision by | |
| 89 // measuring the performance of an equivalent implementation using | |
| 90 // "simpler" instructions | |
| 91 masm->bind(&fast); | |
| 92 masm->bts(Operand(object, Page::kRSetOffset), addr); | |
| 53 } | 93 } |
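A plain-C++ sketch of the address arithmetic RecordWriteHelper emits may help here. The constants are illustrative assumptions, not V8's actual values:

```cpp
#include <cstdint>

// Illustrative constants; the real values come from V8's Page class, so
// treat these as assumptions.
constexpr uintptr_t kPageSize = 8 * 1024;            // assumed page size
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr int kPointerSizeLog2 = 3;                  // 8-byte pointers

// Mirrors the and/sub/shr sequence above: mask the object pointer down to
// its page, then count pointer-sized slots from the page start to the slot.
// An index of kPageSize / kPointerSize or more means the slot belongs to a
// large object's extra remembered set (the non-'fast' path above).
uintptr_t RememberedSetBitIndex(uintptr_t object, uintptr_t slot) {
  uintptr_t page = object & ~kPageAlignmentMask;     // page start address
  return (slot - page) >> kPointerSizeLog2;          // one bit per slot
}
```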
| 54 | 94 |
| 55 | 95 |
| 96 class RecordWriteStub : public CodeStub { | |
| 97 public: | |
| 98 RecordWriteStub(Register object, Register addr, Register scratch) | |
| 99 : object_(object), addr_(addr), scratch_(scratch) { } | |
| 100 | |
| 101 void Generate(MacroAssembler* masm); | |
| 102 | |
| 103 private: | |
| 104 Register object_; | |
| 105 Register addr_; | |
| 106 Register scratch_; | |
| 107 | |
| 108 #ifdef DEBUG | |
| 109 void Print() { | |
| 110 PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n", | |
| 111 object_.code(), addr_.code(), scratch_.code()); | |
| 112 } | |
| 113 #endif | |
| 114 | |
| 115 // Minor key encoding in 12 bits of three registers (object, address and | |
| 116 // scratch) OOOOAAAASSSS. | |
| 117 class ScratchBits: public BitField<uint32_t, 0, 4> {}; | |
| 118 class AddressBits: public BitField<uint32_t, 4, 4> {}; | |
| 119 class ObjectBits: public BitField<uint32_t, 8, 4> {}; | |
| 120 | |
| 121 Major MajorKey() { return RecordWrite; } | |
| 122 | |
| 123 int MinorKey() { | |
| 124 // Encode the registers. | |
| 125 return ObjectBits::encode(object_.code()) | | |
| 126 AddressBits::encode(addr_.code()) | | |
| 127 ScratchBits::encode(scratch_.code()); | |
| 128 } | |
| 129 }; | |
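For reference, the OOOOAAAASSSS layout in MinorKey packs three 4-bit register codes into 12 bits. A minimal stand-alone sketch of the same encoding (the function name is hypothetical; the shifts mirror the BitField offsets above):

```cpp
#include <cassert>
#include <cstdint>

// Packs three 4-bit register codes into 12 bits: OOOOAAAASSSS.
// Scratch occupies bits 0-3, addr bits 4-7, object bits 8-11.
uint32_t EncodeMinorKey(uint32_t object, uint32_t addr, uint32_t scratch) {
  assert(object < 16 && addr < 16 && scratch < 16);  // 4 bits each
  return (object << 8) | (addr << 4) | scratch;
}
```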
| 130 | |
| 131 | |
| 132 void RecordWriteStub::Generate(MacroAssembler* masm) { | |
| 133 RecordWriteHelper(masm, object_, addr_, scratch_); | |
| 134 masm->ret(0); | |
| 135 } | |
| 136 | |
| 137 | |
| 138 // Set the remembered set bit for [object+offset]. | |
| 139 // object is the object being stored into, value is the object being stored. | |
| 140 // If offset is zero, then the scratch register contains the array index into | |
| 141 // the elements array represented as a Smi. | |
| 142 // All registers are clobbered by the operation. | |
| 143 void MacroAssembler::RecordWrite(Register object, | |
| 144 int offset, | |
| 145 Register value, | |
| 146 Register scratch) { | |
| 147 // First, check if a remembered set write is even needed. The tests below | |
| 148 // catch stores of Smis and stores into the young generation (which does not | |
| 149 // have space for the remembered set bits). | |
| 150 Label done; | |
| 151 | |
| 152 // This optimization cannot survive serialization and deserialization, | |
| 153 // so we disable it as long as serialization can take place. | |
| 154 intptr_t new_space_start = | |
| 155 reinterpret_cast<intptr_t>( | |
| 156 ExternalReference::new_space_start().address()); | |
| 157 if (Serializer::enabled() || new_space_start < 0) { | |
| 158 // Cannot do smart bit-twiddling. Need to do two consecutive checks. | |
| 159 // Check for Smi first. | |
| 160 testl(value, Immediate(kSmiTagMask)); | |
| 161 j(zero, &done); | |
|
William Hesse (2009/08/03 09:03:28): Remove this smi test - we think it isn't worth it.
Lasse Reichstein (2009/08/03 10:45:45): Done.
| 162 // Test that the object address is not in the new space. We cannot | |
| 163 // set remembered set bits in the new space. | |
| 164 movq(value, object); | |
| 165 ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask()))); | |
| 166 and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask()))); | |
| 167 movq(kScratchRegister, ExternalReference::new_space_start()); | |
| 168 cmpq(value, kScratchRegister); | |
| 169 j(equal, &done); | |
| 170 } else { | |
|
William Hesse (2009/08/03 09:03:28): This optimization is unsafe if object can be in th…
Lasse Reichstein (2009/08/03 10:45:45): Done.
| 171 // Move the value's smi tag into the sign bit. | |
| 172 ASSERT(kSmiTagSize == 1); | |
| 173 ASSERT(kSmiTag == 0); | |
| 174 shl(value, Immediate(63)); | |
| 175 | |
| 176 // Remove the uninteresting bits inside the page. | |
| 177 movq(kScratchRegister, object); | |
| 178 ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask()))); | |
| 179 and_(kScratchRegister, | |
| 180 Immediate(static_cast<int32_t>(Heap::NewSpaceMask()))); | |
| 181 // Combine the masked object bits with the value's smi tag bit. | |
| 182 or_(value, kScratchRegister); | |
| 183 // The xor has two effects: | |
| 184 // - if the value was a smi, the result will be negative, and | |
| 185 // - if the object points into the new space, the page bits will | |
| 186 // all be zero. | |
| 187 movq(kScratchRegister, new_space_start | (static_cast<int64_t>(1) << 63), | |
| 188 RelocInfo::NONE); | |
| 189 xor_(value, kScratchRegister); | |
| 190 // Check for both conditions in one branch. | |
| 191 j(less_equal, &done); // Jump if either zero or sign flag set. | |
| 192 } | |
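This combined check is compact but subtle. A C++ sketch of the same logic, assuming kSmiTag == 0 and kSmiTagSize == 1 as the ASSERTs state (the function name and parameters are illustrative, not V8 API):

```cpp
#include <cstdint>

// Returns true when the remembered-set update can be skipped: either the
// stored value is a smi, or the target object lives in new space.
bool CanSkipRememberedSet(uint64_t value, uint64_t object,
                          uint64_t new_space_mask,
                          uint64_t new_space_start) {
  uint64_t bits = value << 63;        // smi tag bit into the sign bit
  bits |= object & new_space_mask;    // keep only the space-selecting bits
  bits ^= new_space_start | (uint64_t{1} << 63);
  // Negative: the value's tag bit was 0, i.e. the value is a smi.
  // Zero: the value is a heap object and the object is in new space.
  // This matches j(less_equal) above, since xor clears the overflow flag.
  return static_cast<int64_t>(bits) <= 0;
}
```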
| 193 | |
| 194 if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) { | |
| 195 // Compute the bit offset in the remembered set, leave it in 'value'. | |
| 196 movq(value, object); | |
| 197 ASSERT(is_uint32(Page::kPageAlignmentMask)); | |
| 198 and_(value, Immediate(static_cast<uint32_t>(Page::kPageAlignmentMask))); | |
| 199 addq(value, Immediate(offset)); | |
| 200 shr(value, Immediate(kObjectAlignmentBits)); | |
| 201 | |
| 202 // Compute the page address from the heap object pointer, leave it in | |
| 203 // 'object' (immediate value is sign extended). | |
| 204 and_(object, Immediate(~Page::kPageAlignmentMask)); | |
| 205 | |
| 206 // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction | |
| 207 // to limit code size. We should probably evaluate this decision by | |
| 208 // measuring the performance of an equivalent implementation using | |
| 209 // "simpler" instructions | |
| 210 bts(Operand(object, Page::kRSetOffset), value); | |
| 211 } else { | |
| 212 Register dst = scratch; | |
| 213 if (offset != 0) { | |
| 214 lea(dst, Operand(object, offset)); | |
| 215 } else { | |
| 216 // Array access: calculate the destination address in the same manner as | |
| 217 // KeyedStoreIC::GenerateGeneric. Multiply a smi by 4 to get an offset | |
| 218 // into an array of words. | |
| 219 lea(dst, Operand(object, dst, times_half_pointer_size, | |
| 220 FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 221 } | |
| 222 // If we are already generating a shared stub, not inlining the | |
| 223 // record write code isn't going to save us any memory. | |
| 224 if (generating_stub()) { | |
| 225 RecordWriteHelper(this, object, dst, value); | |
| 226 } else { | |
| 227 RecordWriteStub stub(object, dst, value); | |
| 228 CallStub(&stub); | |
| 229 } | |
| 230 } | |
| 231 | |
| 232 bind(&done); | |
| 233 } | |
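In the offset == 0 branch above, the lea turns the smi-encoded index directly into an element address. A sketch of that arithmetic, assuming the 2009-era x64 smi encoding (value << 1) and illustrative header/tag constants:

```cpp
#include <cstdint>

constexpr uintptr_t kFixedArrayHeaderSize = 16;  // assumed: map + length
constexpr uintptr_t kHeapObjectTag = 1;          // assumed tag value

// 'smi_index' holds index << 1 (kSmiTagSize == 1, kSmiTag == 0), so scaling
// by half a pointer (times_half_pointer_size == 4) yields index * 8, i.e.
// index * kPointerSize, which is what the lea above encodes.
uintptr_t ElementSlot(uintptr_t object, uintptr_t smi_index) {
  return object + smi_index * 4 + kFixedArrayHeaderSize - kHeapObjectTag;
}
```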
| 234 | |
| 235 | |
| 56 void MacroAssembler::Assert(Condition cc, const char* msg) { | 236 void MacroAssembler::Assert(Condition cc, const char* msg) { |
| 57 if (FLAG_debug_code) Check(cc, msg); | 237 if (FLAG_debug_code) Check(cc, msg); |
| 58 } | 238 } |
| 59 | 239 |
| 60 | 240 |
| 61 void MacroAssembler::Check(Condition cc, const char* msg) { | 241 void MacroAssembler::Check(Condition cc, const char* msg) { |
| 62 Label L; | 242 Label L; |
| 63 j(cc, &L); | 243 j(cc, &L); |
| 64 Abort(msg); | 244 Abort(msg); |
| 65 // will not return here | 245 // will not return here |
| (...skipping 979 matching lines...) | |
| 1045 Context::SECURITY_TOKEN_INDEX * kPointerSize; | 1225 Context::SECURITY_TOKEN_INDEX * kPointerSize; |
| 1046 movq(scratch, FieldOperand(scratch, token_offset)); | 1226 movq(scratch, FieldOperand(scratch, token_offset)); |
| 1047 cmpq(scratch, FieldOperand(kScratchRegister, token_offset)); | 1227 cmpq(scratch, FieldOperand(kScratchRegister, token_offset)); |
| 1048 j(not_equal, miss); | 1228 j(not_equal, miss); |
| 1049 | 1229 |
| 1050 bind(&same_contexts); | 1230 bind(&same_contexts); |
| 1051 } | 1231 } |
| 1052 | 1232 |
| 1053 | 1233 |
| 1054 } } // namespace v8::internal | 1234 } } // namespace v8::internal |