| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_PPC |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
| 10 #include "src/ic/stub-cache.h" | 10 #include "src/ic/stub-cache.h" |
| 11 | 11 |
| 12 namespace v8 { | 12 namespace v8 { |
| 13 namespace internal { | 13 namespace internal { |
| 14 | 14 |
| 15 #define __ ACCESS_MASM(masm) | 15 #define __ ACCESS_MASM(masm) |
| 16 | 16 |
| 17 | 17 |
| 18 static void ProbeTable(Isolate* isolate, MacroAssembler* masm, | 18 static void ProbeTable(Isolate* isolate, MacroAssembler* masm, |
| 19 Code::Flags flags, bool leave_frame, | 19 Code::Flags flags, bool leave_frame, |
| 20 StubCache::Table table, Register receiver, Register name, | 20 StubCache::Table table, Register receiver, Register name, |
| 21 // Number of the cache entry, not scaled. | 21 // Number of the cache entry, not scaled. |
| 22 Register offset, Register scratch, Register scratch2, | 22 Register offset, Register scratch, Register scratch2, |
| 23 Register offset_scratch) { | 23 Register offset_scratch) { |
| 24 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | 24 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
| 25 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | 25 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
| 26 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | 26 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); |
| 27 | 27 |
| 28 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | 28 uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address()); |
| 29 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | 29 uintptr_t value_off_addr = |
| 30 reinterpret_cast<uintptr_t>(value_offset.address()); |
| 30 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | 31 uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address()); |
| 31 | 32 |
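
The widening from uint32_t to uintptr_t matters because PPC supports 64-bit targets: a 32-bit cast would truncate the high half of the table addresses. A minimal standalone sketch of the difference, using a made-up address:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical 64-bit address, for illustration only.
      void* p = reinterpret_cast<void*>(0x123456789ABCULL);
      // uintptr_t is wide enough to round-trip a pointer on any target;
      // a uint32_t cast would keep only the low 32 bits on a 64-bit machine.
      uintptr_t a = reinterpret_cast<uintptr_t>(p);
      std::printf("%llx\n", static_cast<unsigned long long>(a));  // 123456789abc
      return 0;
    }
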
| 32 // Check the relative positions of the address fields. | 33 // Check the relative positions of the address fields. |
| 33 DCHECK(value_off_addr > key_off_addr); | 34 DCHECK(value_off_addr > key_off_addr); |
| 34 DCHECK((value_off_addr - key_off_addr) % 4 == 0); | 35 DCHECK((value_off_addr - key_off_addr) % 4 == 0); |
| 35 DCHECK((value_off_addr - key_off_addr) < (256 * 4)); | 36 DCHECK((value_off_addr - key_off_addr) < (256 * 4)); |
| 36 DCHECK(map_off_addr > key_off_addr); | 37 DCHECK(map_off_addr > key_off_addr); |
| 37 DCHECK((map_off_addr - key_off_addr) % 4 == 0); | 38 DCHECK((map_off_addr - key_off_addr) % 4 == 0); |
| 38 DCHECK((map_off_addr - key_off_addr) < (256 * 4)); | 39 DCHECK((map_off_addr - key_off_addr) < (256 * 4)); |
| 39 | 40 |
| 40 Label miss; | 41 Label miss; |
| 41 Register base_addr = scratch; | 42 Register base_addr = scratch; |
| 42 scratch = no_reg; | 43 scratch = no_reg; |
| 43 | 44 |
| 44 // Multiply by 3 because there are 3 fields per entry (name, code, map). | 45 // Multiply by 3 because there are 3 fields per entry (name, code, map). |
| 45 __ add(offset_scratch, offset, Operand(offset, LSL, 1)); | 46 __ ShiftLeftImm(offset_scratch, offset, Operand(1)); |
| 47 __ add(offset_scratch, offset, offset_scratch); |
| 46 | 48 |
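
ARM folds the shift into the add via a shifted operand (offset + (offset << 1)); PPC has no shifted-operand form, so the port spells the multiply-by-3 as an explicit shift followed by an add. The arithmetic is unchanged; a small self-contained check, with a hypothetical entry number:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t offset = 7;             // hypothetical cache entry number
      uint32_t scratch = offset << 1;  // ShiftLeftImm(offset_scratch, offset, 1)
      scratch = offset + scratch;      // add(offset_scratch, offset, offset_scratch)
      assert(scratch == offset * 3);   // 3 fields per entry: name, code, map
      return 0;
    }
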
| 47 // Calculate the base address of the entry. | 49 // Calculate the base address of the entry. |
| 48 __ mov(base_addr, Operand(key_offset)); | 50 __ mov(base_addr, Operand(key_offset)); |
| 49 __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); | 51 __ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2)); |
| 52 __ add(base_addr, base_addr, scratch2); |
| 50 | 53 |
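
The entry address is built the same way: shift the field index by kPointerSizeLog2 into scratch2, then add it to the table base. Worked through with hypothetical numbers (key table at 0x1000, entry 7, 32-bit pointers):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t key_offset = 0x1000;  // hypothetical address of the key table
      const int kPointerSizeLog2 = 2;       // assuming a 32-bit target here
      uintptr_t offset_scratch = 7 * 3;     // entry 7, three field slots each
      // ShiftLeftImm + add: base_addr = key_offset + offset_scratch * kPointerSize
      uintptr_t base_addr = key_offset + (offset_scratch << kPointerSizeLog2);
      assert(base_addr == 0x1000 + 84);
      return 0;
    }
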
| 51 // Check that the key in the entry matches the name. | 54 // Check that the key in the entry matches the name. |
| 52 __ ldr(ip, MemOperand(base_addr, 0)); | 55 __ LoadP(ip, MemOperand(base_addr, 0)); |
| 53 __ cmp(name, ip); | 56 __ cmp(name, ip); |
| 54 __ b(ne, &miss); | 57 __ bne(&miss); |
| 55 | 58 |
| 56 // Check the map matches. | 59 // Check the map matches. |
| 57 __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); | 60 __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); |
| 58 __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 61 __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 59 __ cmp(ip, scratch2); | 62 __ cmp(ip, scratch2); |
| 60 __ b(ne, &miss); | 63 __ bne(&miss); |
| 61 | 64 |
| 62 // Get the code entry from the cache. | 65 // Get the code entry from the cache. |
| 63 Register code = scratch2; | 66 Register code = scratch2; |
| 64 scratch2 = no_reg; | 67 scratch2 = no_reg; |
| 65 __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); | 68 __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr)); |
| 66 | 69 |
| 67 // Check that the flags match what we're looking for. | 70 // Check that the flags match what we're looking for. |
| 68 Register flags_reg = base_addr; | 71 Register flags_reg = base_addr; |
| 69 base_addr = no_reg; | 72 base_addr = no_reg; |
| 70 __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); | 73 __ lwz(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); |
| 71 // It's a nice optimization if this constant is encodable in the bic insn. | |
| 72 | 74 |
| 73 uint32_t mask = Code::kFlagsNotUsedInLookup; | 75 DCHECK(!r0.is(flags_reg)); |
| 74 DCHECK(__ ImmediateFitsAddrMode1Instruction(mask)); | 76 __ li(r0, Operand(Code::kFlagsNotUsedInLookup)); |
| 75 __ bic(flags_reg, flags_reg, Operand(mask)); | 77 __ andc(flags_reg, flags_reg, r0); |
| 76 __ cmp(flags_reg, Operand(flags)); | 78 __ mov(r0, Operand(flags)); |
| 77 __ b(ne, &miss); | 79 __ cmpl(flags_reg, r0); |
| 80 __ bne(&miss); |
| 78 | 81 |
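
ARM clears the unused flag bits with bic and an immediate; the PPC equivalent, andc, takes only a register operand, so the mask is first materialized in r0. The masked compare amounts to the following, with made-up flag values:

    #include <cassert>
    #include <cstdint>

    int main() {
      // All values hypothetical, for illustration only.
      const uint32_t kFlagsNotUsedInLookup = 0x3;  // assumed mask of ignored bits
      uint32_t code_flags = 0xA0000013;  // flags word read from the Code object
      uint32_t flags = 0xA0000010;       // the flags the probe is looking for
      // andc(flags_reg, flags_reg, r0) clears the bits not used in the lookup.
      uint32_t masked = code_flags & ~kFlagsNotUsedInLookup;
      assert(masked == flags);  // cmpl + bne(&miss) branches away on a mismatch
      return 0;
    }
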
| 79 #ifdef DEBUG | 82 #ifdef DEBUG |
| 80 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { | 83 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
| 81 __ jmp(&miss); | 84 __ b(&miss); |
| 82 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { | 85 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
| 83 __ jmp(&miss); | 86 __ b(&miss); |
| 84 } | 87 } |
| 85 #endif | 88 #endif |
| 86 | 89 |
| 87 if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL); | 90 if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL); |
| 88 | 91 |
| 89 // Jump to the first instruction in the code stub. | 92 // Jump to the first instruction in the code stub. |
| 90 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | 93 __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 94 __ mtctr(r0); |
| 95 __ bctr(); |
| 91 | 96 |
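
ARM can jump into the stub by adding directly into pc; on PPC the program counter is not a general register, so the port moves the computed entry point into the count register and branches through it (mtctr/bctr). Roughly, the address being jumped to is the one this hypothetical helper computes (kHeaderSize here is an assumed placeholder, not V8's real constant):

    #include <cstdint>

    // Hypothetical stand-ins; kHeaderSize is not V8's real value.
    using StubEntry = void (*)();
    const int kHeaderSize = 32;
    const int kHeapObjectTag = 1;  // V8 tags heap object pointers with 1

    StubEntry StubEntryPoint(uintptr_t code_object) {
      // addi r0, code, kHeaderSize - kHeapObjectTag; mtctr r0; bctr
      return reinterpret_cast<StubEntry>(code_object + kHeaderSize - kHeapObjectTag);
    }
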
| 92 // Miss: fall through. | 97 // Miss: fall through. |
| 93 __ bind(&miss); | 98 __ bind(&miss); |
| 94 } | 99 } |
| 95 | 100 |
| 96 | 101 |
| 97 void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, | 102 void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, |
| 98 bool leave_frame, Register receiver, | 103 bool leave_frame, Register receiver, |
| 99 Register name, Register scratch, Register extra, | 104 Register name, Register scratch, Register extra, |
| 100 Register extra2, Register extra3) { | 105 Register extra2, Register extra3) { |
| 101 Isolate* isolate = masm->isolate(); | 106 Isolate* isolate = masm->isolate(); |
| 102 Label miss; | 107 Label miss; |
| 103 | 108 |
| 109 #if V8_TARGET_ARCH_PPC64 |
| 110 // Make sure that code is valid. The multiplying code relies on the |
| 111 // entry size being 24. |
| 112 DCHECK(sizeof(Entry) == 24); |
| 113 #else |
| 104 // Make sure that code is valid. The multiplying code relies on the | 114 // Make sure that code is valid. The multiplying code relies on the |
| 105 // entry size being 12. | 115 // entry size being 12. |
| 106 DCHECK(sizeof(Entry) == 12); | 116 DCHECK(sizeof(Entry) == 12); |
| 117 #endif |
| 107 | 118 |
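
The two entry sizes follow from the three pointer-sized fields per entry: 3 x 4 = 12 bytes on 32-bit PPC and 3 x 8 = 24 on PPC64, hence the new 64-bit branch of the check. A sketch of the assumed layout:

    #include <cstddef>

    // Assumed shape of a stub cache entry: three pointer-sized fields.
    struct Entry {
      void* key;    // the name
      void* value;  // the code object
      void* map;    // the receiver map
    };

    static_assert(sizeof(Entry) == 3 * sizeof(void*),
                  "12 bytes on a 32-bit target, 24 on a 64-bit one");
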
| 108 // Make sure the flags does not name a specific type. | 119 // Make sure the flags do not name a specific type. |
| 109 DCHECK(Code::ExtractTypeFromFlags(flags) == 0); | 120 DCHECK(Code::ExtractTypeFromFlags(flags) == 0); |
| 110 | 121 |
| 111 // Make sure that there are no register conflicts. | 122 // Make sure that there are no register conflicts. |
| 112 DCHECK(!scratch.is(receiver)); | 123 DCHECK(!scratch.is(receiver)); |
| 113 DCHECK(!scratch.is(name)); | 124 DCHECK(!scratch.is(name)); |
| 114 DCHECK(!extra.is(receiver)); | 125 DCHECK(!extra.is(receiver)); |
| 115 DCHECK(!extra.is(name)); | 126 DCHECK(!extra.is(name)); |
| 116 DCHECK(!extra.is(scratch)); | 127 DCHECK(!extra.is(scratch)); |
| 117 DCHECK(!extra2.is(receiver)); | 128 DCHECK(!extra2.is(receiver)); |
| 118 DCHECK(!extra2.is(name)); | 129 DCHECK(!extra2.is(name)); |
| 119 DCHECK(!extra2.is(scratch)); | 130 DCHECK(!extra2.is(scratch)); |
| 120 DCHECK(!extra2.is(extra)); | 131 DCHECK(!extra2.is(extra)); |
| 121 | 132 |
| 122 // Check scratch, extra and extra2 registers are valid. | 133 // Check that the scratch, extra, extra2 and extra3 registers are valid. |
| 123 DCHECK(!scratch.is(no_reg)); | 134 DCHECK(!scratch.is(no_reg)); |
| 124 DCHECK(!extra.is(no_reg)); | 135 DCHECK(!extra.is(no_reg)); |
| 125 DCHECK(!extra2.is(no_reg)); | 136 DCHECK(!extra2.is(no_reg)); |
| 126 DCHECK(!extra3.is(no_reg)); | 137 DCHECK(!extra3.is(no_reg)); |
| 127 | 138 |
| 128 Counters* counters = masm->isolate()->counters(); | 139 Counters* counters = masm->isolate()->counters(); |
| 129 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2, | 140 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2, |
| 130 extra3); | 141 extra3); |
| 131 | 142 |
| 132 // Check that the receiver isn't a smi. | 143 // Check that the receiver isn't a smi. |
| 133 __ JumpIfSmi(receiver, &miss); | 144 __ JumpIfSmi(receiver, &miss); |
| 134 | 145 |
| 135 // Get the map of the receiver and compute the hash. | 146 // Get the map of the receiver and compute the hash. |
| 136 __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); | 147 __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 137 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 148 __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 138 __ add(scratch, scratch, Operand(ip)); | 149 __ add(scratch, scratch, ip); |
| 150 #if V8_TARGET_ARCH_PPC64 |
| 151 // Use only the low 32 bits of the map pointer. |
| 152 __ rldicl(scratch, scratch, 0, 32); |
| 153 #endif |
| 139 uint32_t mask = kPrimaryTableSize - 1; | 154 uint32_t mask = kPrimaryTableSize - 1; |
| 140 // We shift out the last two bits because they are not part of the hash and | 155 // We shift out the last two bits because they are not part of the hash and |
| 141 // they are always 01 for maps. | 156 // they are always 01 for maps. |
| 142 __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); | 157 __ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift)); |
| 143 // Mask down the eor argument to the minimum to keep the immediate | 158 // Mask down the eor argument to the minimum to keep the immediate |
| 144 // ARM-encodable. | 159 // encodable. |
| 145 __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); | 160 __ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); |
| 146 // Prefer and_ to ubfx here because ubfx takes 2 cycles. | 161 // Prefer and_ to ubfx here because ubfx takes 2 cycles. |
| 147 __ and_(scratch, scratch, Operand(mask)); | 162 __ andi(scratch, scratch, Operand(mask)); |
| 148 | 163 |
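
Taken together, the primary probe hashes the name's hash field plus the receiver's map pointer (low 32 bits only on PPC64, which is what the rldicl keeps), shifts out the two tag bits, xors in the masked flags, and masks to the table size. The same computation as a plain C++ sketch, with assumed constants:

    #include <cstdint>

    // Assumed constants, for illustration; the real values live in stub-cache.h.
    const uint32_t kPrimaryTableSize = 2048;
    const int kCacheIndexShift = 2;  // the two low bits are always 01 for maps

    uint32_t PrimaryIndex(uint32_t name_hash, uintptr_t map_ptr, uint32_t flags) {
      uint32_t mask = kPrimaryTableSize - 1;
      // add (+ rldicl on PPC64): uint32_t arithmetic keeps the low 32 bits.
      uint32_t scratch = name_hash + static_cast<uint32_t>(map_ptr);
      scratch >>= kCacheIndexShift;                   // ShiftRightImm
      scratch ^= (flags >> kCacheIndexShift) & mask;  // xori
      return scratch & mask;                          // andi
    }
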
| 149 // Probe the primary table. | 164 // Probe the primary table. |
| 150 ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name, | 165 ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name, |
| 151 scratch, extra, extra2, extra3); | 166 scratch, extra, extra2, extra3); |
| 152 | 167 |
| 153 // Primary miss: Compute hash for secondary probe. | 168 // Primary miss: Compute hash for secondary probe. |
| 154 __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); | 169 __ ShiftRightImm(extra, name, Operand(kCacheIndexShift)); |
| 170 __ sub(scratch, scratch, extra); |
| 155 uint32_t mask2 = kSecondaryTableSize - 1; | 171 uint32_t mask2 = kSecondaryTableSize - 1; |
| 156 __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); | 172 __ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); |
| 157 __ and_(scratch, scratch, Operand(mask2)); | 173 __ andi(scratch, scratch, Operand(mask2)); |
| 158 | 174 |
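
The secondary hash starts from the primary scratch value, subtracts the shifted Name pointer, adds the masked flags, and masks to the smaller secondary table, so a primary miss still probes a distinct, well-spread slot. A matching sketch, again with assumed constants:

    #include <cstdint>

    // Assumed constants, matching the primary-probe sketch above.
    const uint32_t kSecondaryTableSize = 512;
    const int kCacheIndexShift = 2;

    uint32_t SecondaryIndex(uint32_t primary_scratch, uintptr_t name_ptr,
                            uint32_t flags) {
      uint32_t mask2 = kSecondaryTableSize - 1;
      // sub: remove the shifted Name pointer bits from the primary hash.
      uint32_t scratch =
          primary_scratch - static_cast<uint32_t>(name_ptr >> kCacheIndexShift);
      scratch += (flags >> kCacheIndexShift) & mask2;  // addi
      return scratch & mask2;                          // andi
    }
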
| 159 // Probe the secondary table. | 175 // Probe the secondary table. |
| 160 ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name, | 176 ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name, |
| 161 scratch, extra, extra2, extra3); | 177 scratch, extra, extra2, extra3); |
| 162 | 178 |
| 163 // Cache miss: Fall-through and let caller handle the miss by | 179 // Cache miss: Fall-through and let caller handle the miss by |
| 164 // entering the runtime system. | 180 // entering the runtime system. |
| 165 __ bind(&miss); | 181 __ bind(&miss); |
| 166 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2, | 182 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2, |
| 167 extra3); | 183 extra3); |
| 168 } | 184 } |
| 169 | 185 |
| 170 | 186 |
| 171 #undef __ | 187 #undef __ |
| 172 } | 188 } |
| 173 } // namespace v8::internal | 189 } // namespace v8::internal |
| 174 | 190 |
| 175 #endif // V8_TARGET_ARCH_ARM | 191 #endif // V8_TARGET_ARCH_PPC |