| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_ARM | 5 #if V8_TARGET_ARCH_ARM |
| 6 | 6 |
| 7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
| 8 #include "src/ic/ic.h" | 8 #include "src/ic/ic.h" |
| 9 #include "src/ic/stub-cache.h" | 9 #include "src/ic/stub-cache.h" |
| 10 #include "src/interface-descriptors.h" | 10 #include "src/interface-descriptors.h" |
| 11 | 11 |
| 12 namespace v8 { | 12 namespace v8 { |
| 13 namespace internal { | 13 namespace internal { |
| 14 | 14 |
| 15 #define __ ACCESS_MASM(masm) | 15 #define __ ACCESS_MASM(masm) |
| 16 | 16 |
| 17 static void ProbeTable(Isolate* isolate, MacroAssembler* masm, | 17 static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm, |
| 18 Code::Flags flags, StubCache::Table table, | 18 Code::Flags flags, StubCache::Table table, |
| 19 Register receiver, Register name, | 19 Register receiver, Register name, |
| 20 // Number of the cache entry, not scaled. | 20 // Number of the cache entry, not scaled. |
| 21 Register offset, Register scratch, Register scratch2, | 21 Register offset, Register scratch, Register scratch2, |
| 22 Register offset_scratch) { | 22 Register offset_scratch) { |
| 23 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | 23 ExternalReference key_offset(stub_cache->key_reference(table)); |
| 24 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | 24 ExternalReference value_offset(stub_cache->value_reference(table)); |
| 25 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | 25 ExternalReference map_offset(stub_cache->map_reference(table)); |
| 26 | 26 |
| 27 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | 27 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); |
| 28 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | 28 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); |
| 29 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | 29 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); |
| 30 | 30 |
| 31 // Check the relative positions of the address fields. | 31 // Check the relative positions of the address fields. |
| 32 DCHECK(value_off_addr > key_off_addr); | 32 DCHECK(value_off_addr > key_off_addr); |
| 33 DCHECK((value_off_addr - key_off_addr) % 4 == 0); | 33 DCHECK((value_off_addr - key_off_addr) % 4 == 0); |
| 34 DCHECK((value_off_addr - key_off_addr) < (256 * 4)); | 34 DCHECK((value_off_addr - key_off_addr) < (256 * 4)); |
| 35 DCHECK(map_off_addr > key_off_addr); | 35 DCHECK(map_off_addr > key_off_addr); |
| (...skipping 47 matching lines...) |
| 83 } | 83 } |
| 84 #endif | 84 #endif |
| 85 | 85 |
| 86 // Jump to the first instruction in the code stub. | 86 // Jump to the first instruction in the code stub. |
| 87 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | 87 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 88 | 88 |
| 89 // Miss: fall through. | 89 // Miss: fall through. |
| 90 __ bind(&miss); | 90 __ bind(&miss); |
| 91 } | 91 } |
| 92 | 92 |
| 93 | 93 void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver, |
| 94 void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind, | |
| 95 Code::Flags flags, Register receiver, | |
| 96 Register name, Register scratch, Register extra, | 94 Register name, Register scratch, Register extra, |
| 97 Register extra2, Register extra3) { | 95 Register extra2, Register extra3) { |
| 98 Isolate* isolate = masm->isolate(); | 96 Code::Flags flags = |
| | 97 Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_)); |
| | 98 |
| 99 Label miss; | 99 Label miss; |
| 100 | 100 |
| 101 // Make sure that code is valid. The multiplying code relies on the | 101 // Make sure that code is valid. The multiplying code relies on the |
| 102 // entry size being 12. | 102 // entry size being 12. |
| 103 DCHECK(sizeof(Entry) == 12); | 103 DCHECK(sizeof(Entry) == 12); |
| 104 | 104 |
| 105 // Make sure that there are no register conflicts. | 105 // Make sure that there are no register conflicts. |
| 106 DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); | 106 DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); |
| 107 | 107 |
| 108 // Check scratch, extra and extra2 registers are valid. | 108 // Check scratch, extra and extra2 registers are valid. |
| 109 DCHECK(!scratch.is(no_reg)); | 109 DCHECK(!scratch.is(no_reg)); |
| 110 DCHECK(!extra.is(no_reg)); | 110 DCHECK(!extra.is(no_reg)); |
| 111 DCHECK(!extra2.is(no_reg)); | 111 DCHECK(!extra2.is(no_reg)); |
| 112 DCHECK(!extra3.is(no_reg)); | 112 DCHECK(!extra3.is(no_reg)); |
| 113 | 113 |
| 114 #ifdef DEBUG | 114 #ifdef DEBUG |
| 115 // If vector-based ics are in use, ensure that scratch, extra, extra2 and | 115 // If vector-based ics are in use, ensure that scratch, extra, extra2 and |
| 116 // extra3 don't conflict with the vector and slot registers, which need | 116 // extra3 don't conflict with the vector and slot registers, which need |
| 117 // to be preserved for a handler call or miss. | 117 // to be preserved for a handler call or miss. |
| 118 if (IC::ICUseVector(ic_kind)) { | 118 if (IC::ICUseVector(ic_kind_)) { |
| 119 Register vector, slot; | 119 Register vector, slot; |
| 120 if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) { | 120 if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) { |
| 121 vector = VectorStoreICDescriptor::VectorRegister(); | 121 vector = VectorStoreICDescriptor::VectorRegister(); |
| 122 slot = VectorStoreICDescriptor::SlotRegister(); | 122 slot = VectorStoreICDescriptor::SlotRegister(); |
| 123 } else { | 123 } else { |
| | 124 DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC); |
| 124 vector = LoadWithVectorDescriptor::VectorRegister(); | 125 vector = LoadWithVectorDescriptor::VectorRegister(); |
| 125 slot = LoadWithVectorDescriptor::SlotRegister(); | 126 slot = LoadWithVectorDescriptor::SlotRegister(); |
| 126 } | 127 } |
| 127 DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3)); | 128 DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3)); |
| 128 } | 129 } |
| 129 #endif | 130 #endif |
| 130 | 131 |
| 131 Counters* counters = masm->isolate()->counters(); | 132 Counters* counters = masm->isolate()->counters(); |
| 132 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2, | 133 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2, |
| 133 extra3); | 134 extra3); |
| 134 | 135 |
| 135 // Check that the receiver isn't a smi. | 136 // Check that the receiver isn't a smi. |
| 136 __ JumpIfSmi(receiver, &miss); | 137 __ JumpIfSmi(receiver, &miss); |
| 137 | 138 |
| 138 // Get the map of the receiver and compute the hash. | 139 // Get the map of the receiver and compute the hash. |
| 139 __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); | 140 __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 140 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 141 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 141 __ add(scratch, scratch, Operand(ip)); | 142 __ add(scratch, scratch, Operand(ip)); |
| 142 uint32_t mask = kPrimaryTableSize - 1; | 143 uint32_t mask = kPrimaryTableSize - 1; |
| 143 // We shift out the last two bits because they are not part of the hash and | 144 // We shift out the last two bits because they are not part of the hash and |
| 144 // they are always 01 for maps. | 145 // they are always 01 for maps. |
| 145 __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); | 146 __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); |
| 146 // Mask down the eor argument to the minimum to keep the immediate | 147 // Mask down the eor argument to the minimum to keep the immediate |
| 147 // ARM-encodable. | 148 // ARM-encodable. |
| 148 __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); | 149 __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); |
| 149 // Prefer and_ to ubfx here because ubfx takes 2 cycles. | 150 // Prefer and_ to ubfx here because ubfx takes 2 cycles. |
| 150 __ and_(scratch, scratch, Operand(mask)); | 151 __ and_(scratch, scratch, Operand(mask)); |
| 151 | 152 |
| 152 // Probe the primary table. | 153 // Probe the primary table. |
| 153 ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra, | 154 ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch, extra, |
| 154 extra2, extra3); | 155 extra2, extra3); |
| 155 | 156 |
| 156 // Primary miss: Compute hash for secondary probe. | 157 // Primary miss: Compute hash for secondary probe. |
| 157 __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); | 158 __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); |
| 158 uint32_t mask2 = kSecondaryTableSize - 1; | 159 uint32_t mask2 = kSecondaryTableSize - 1; |
| 159 __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); | 160 __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); |
| 160 __ and_(scratch, scratch, Operand(mask2)); | 161 __ and_(scratch, scratch, Operand(mask2)); |
| 161 | 162 |
| 162 // Probe the secondary table. | 163 // Probe the secondary table. |
| 163 ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra, | 164 ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch, extra, |
| 164 extra2, extra3); | 165 extra2, extra3); |
| 165 | 166 |
| 166 // Cache miss: Fall-through and let caller handle the miss by | 167 // Cache miss: Fall-through and let caller handle the miss by |
| 167 // entering the runtime system. | 168 // entering the runtime system. |
| 168 __ bind(&miss); | 169 __ bind(&miss); |
| 169 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2, | 170 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2, |
| 170 extra3); | 171 extra3); |
| 171 } | 172 } |
| 172 | 173 |
| 173 | 174 |
| 174 #undef __ | 175 #undef __ |
| 175 } // namespace internal | 176 } // namespace internal |
| 176 } // namespace v8 | 177 } // namespace v8 |
| 177 | 178 |
| 178 #endif // V8_TARGET_ARCH_ARM | 179 #endif // V8_TARGET_ARCH_ARM |
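
For reference, the probe-index arithmetic that the ARM sequence above performs with add/mov/eor/and (primary) and sub/add/and (secondary) can be mirrored in plain C++ as a sketch. This is only an illustration of the math, not V8's actual StubCache::PrimaryOffset/SecondaryOffset implementation; the table sizes and kCacheIndexShift value below are placeholder assumptions.

// Sketch of the probe-index math mirrored from the ARM code in the diff above.
// NOTE: the table sizes and shift are illustrative assumptions, not the values
// used by any particular V8 build.
#include <cstdint>
#include <cstdio>

namespace {

constexpr uint32_t kPrimaryTableSize = 2048;   // assumed power of two
constexpr uint32_t kSecondaryTableSize = 512;  // assumed power of two
constexpr uint32_t kCacheIndexShift = 2;       // assumed: drops the low tag bits

// Primary probe: (hash_field + map_word) >> shift, xor'ed with the shifted
// flags, masked to the table size -- matching the add/mov/eor/and sequence.
uint32_t PrimaryIndex(uint32_t hash_field, uint32_t map_word, uint32_t flags) {
  uint32_t mask = kPrimaryTableSize - 1;
  uint32_t scratch = (hash_field + map_word) >> kCacheIndexShift;
  scratch ^= (flags >> kCacheIndexShift) & mask;
  return scratch & mask;
}

// Secondary probe: subtract the shifted name word from the primary index,
// add the shifted flags, mask to the smaller secondary table -- matching
// the sub/add/and sequence.
uint32_t SecondaryIndex(uint32_t primary, uint32_t name_word, uint32_t flags) {
  uint32_t mask2 = kSecondaryTableSize - 1;
  uint32_t scratch = primary - (name_word >> kCacheIndexShift);
  scratch += (flags >> kCacheIndexShift) & mask2;
  return scratch & mask2;
}

}  // namespace

int main() {
  // Arbitrary example inputs (pretend heap addresses and flag bits).
  uint32_t hash_field = 0x1234ABCDu;
  uint32_t map_word = 0x0801F001u;
  uint32_t name_word = 0x0802C041u;
  uint32_t flags = 0x00000180u;

  uint32_t primary = PrimaryIndex(hash_field, map_word, flags);
  uint32_t secondary = SecondaryIndex(primary, name_word, flags);
  std::printf("primary=%u secondary=%u\n", primary, secondary);
  return 0;
}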