OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_X64 | 5 #if V8_TARGET_ARCH_X64 |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/ic/ic.h" | 8 #include "src/ic/ic.h" |
9 #include "src/ic/stub-cache.h" | 9 #include "src/ic/stub-cache.h" |
10 #include "src/interface-descriptors.h" | 10 #include "src/interface-descriptors.h" |
11 | 11 |
12 namespace v8 { | 12 namespace v8 { |
13 namespace internal { | 13 namespace internal { |
14 | 14 |
15 #define __ ACCESS_MASM(masm) | 15 #define __ ACCESS_MASM(masm) |
16 | 16 |
17 static void ProbeTable(Isolate* isolate, MacroAssembler* masm, | 17 static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm, |
18 Code::Flags flags, StubCache::Table table, | 18 Code::Flags flags, StubCache::Table table, |
19 Register receiver, Register name, | 19 Register receiver, Register name, |
20 // The offset is scaled by 4, based on | 20 // The offset is scaled by 4, based on |
21 // kCacheIndexShift, which is two bits | 21 // kCacheIndexShift, which is two bits |
22 Register offset) { | 22 Register offset) { |
23 // We need to scale up the pointer by 2 when the offset is scaled by less | 23 // We need to scale up the pointer by 2 when the offset is scaled by less |
24 // than the pointer size. | 24 // than the pointer size. |
25 DCHECK(kPointerSize == kInt64Size | 25 DCHECK(kPointerSize == kInt64Size |
26 ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1 | 26 ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1 |
27 : kPointerSizeLog2 == StubCache::kCacheIndexShift); | 27 : kPointerSizeLog2 == StubCache::kCacheIndexShift); |
28 ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1; | 28 ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1; |
29 | 29 |
30 DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry)); | 30 DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry)); |
31 // The offset register holds the entry offset times four (due to masking | 31 // The offset register holds the entry offset times four (due to masking |
32 // and shifting optimizations). | 32 // and shifting optimizations). |
33 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | 33 ExternalReference key_offset(stub_cache->key_reference(table)); |
34 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | 34 ExternalReference value_offset(stub_cache->value_reference(table)); |
35 Label miss; | 35 Label miss; |
36 | 36 |
37 // Multiply by 3 because there are 3 fields per entry (name, code, map). | 37 // Multiply by 3 because there are 3 fields per entry (name, code, map). |
38 __ leap(offset, Operand(offset, offset, times_2, 0)); | 38 __ leap(offset, Operand(offset, offset, times_2, 0)); |
39 | 39 |
40 __ LoadAddress(kScratchRegister, key_offset); | 40 __ LoadAddress(kScratchRegister, key_offset); |
41 | 41 |
42 // Check that the key in the entry matches the name. | 42 // Check that the key in the entry matches the name. |
43 __ cmpp(name, Operand(kScratchRegister, offset, scale_factor, 0)); | 43 __ cmpp(name, Operand(kScratchRegister, offset, scale_factor, 0)); |
44 __ j(not_equal, &miss); | 44 __ j(not_equal, &miss); |
45 | 45 |
46 // Get the map entry from the cache. | 46 // Get the map entry from the cache. |
47 // Use key_offset + kPointerSize * 2, rather than loading map_offset. | 47 // Use key_offset + kPointerSize * 2, rather than loading map_offset. |
48 DCHECK(isolate->stub_cache()->map_reference(table).address() - | 48 DCHECK(stub_cache->map_reference(table).address() - |
49 isolate->stub_cache()->key_reference(table).address() == | 49 stub_cache->key_reference(table).address() == |
50 kPointerSize * 2); | 50 kPointerSize * 2); |
51 __ movp(kScratchRegister, | 51 __ movp(kScratchRegister, |
52 Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2)); | 52 Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2)); |
53 __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset)); | 53 __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset)); |
54 __ j(not_equal, &miss); | 54 __ j(not_equal, &miss); |
55 | 55 |
56 // Get the code entry from the cache. | 56 // Get the code entry from the cache. |
57 __ LoadAddress(kScratchRegister, value_offset); | 57 __ LoadAddress(kScratchRegister, value_offset); |
58 __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0)); | 58 __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0)); |
59 | 59 |
(...skipping 11 matching lines...) | |
71 } | 71 } |
72 #endif | 72 #endif |
73 | 73 |
74 // Jump to the first instruction in the code stub. | 74 // Jump to the first instruction in the code stub. |
75 __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag)); | 75 __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag)); |
76 __ jmp(kScratchRegister); | 76 __ jmp(kScratchRegister); |
77 | 77 |
78 __ bind(&miss); | 78 __ bind(&miss); |
79 } | 79 } |
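
Editor's note: the `leap` above computes `offset + offset * 2`, i.e. `offset * 3`, and the `times_2` scale factor in the subsequent memory operands supplies the remaining factor of 2, so the index (already scaled by 4 via `kCacheIndexShift`) lands on 24-byte entries when pointers are 8 bytes. A minimal C++ sketch of the emitted probe sequence, assuming 8-byte pointers and the 3-word entry layout asserted by the DCHECKs; `Entry` and `Probe` are illustrative names, not V8 API:

```cpp
#include <cstdint>

struct Entry {      // sizeof(Entry) == 3 * kPointerSize, as DCHECKed above
  void* key;        // name
  void* value;      // code
  void* map;        // receiver map, at key_offset + kPointerSize * 2
};

// offset_times_4 is the masked hash: entry_index << kCacheIndexShift (2),
// i.e. entry_index * 4.
void* Probe(Entry* table, uint32_t offset_times_4, void* name, void* map) {
  // offset * 3 (the leap) * 2 (times_2 scale) == entry_index * 24
  //   == entry_index * sizeof(Entry) for 8-byte pointers.
  Entry* e = reinterpret_cast<Entry*>(
      reinterpret_cast<uintptr_t>(table) + offset_times_4 * 3 * 2);
  if (e->key != name) return nullptr;  // key mismatch -> miss
  if (e->map != map) return nullptr;   // map mismatch -> miss
  return e->value;                     // code object; the caller jumps into it
}
```
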
80 | 80 |
81 | 81 void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver, |
82 void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind, | |
83 Code::Flags flags, Register receiver, | |
84 Register name, Register scratch, Register extra, | 82 Register name, Register scratch, Register extra, |
85 Register extra2, Register extra3) { | 83 Register extra2, Register extra3) { |
86 Isolate* isolate = masm->isolate(); | 84 Code::Flags flags = |
85 Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind_)); | |
86 | |
87 Label miss; | 87 Label miss; |
88 USE(extra); // The register extra is not used on the X64 platform. | 88 USE(extra); // The register extra is not used on the X64 platform. |
89 USE(extra2); // The register extra2 is not used on the X64 platform. | 89 USE(extra2); // The register extra2 is not used on the X64 platform. |
90 USE(extra3); // The register extra3 is not used on the X64 platform. | 90 USE(extra3); // The register extra3 is not used on the X64 platform. |
91 // Make sure that code is valid. The multiplying code relies on the | 91 // Make sure that code is valid. The multiplying code relies on the |
92 // entry size being 3 * kPointerSize. | 92 // entry size being 3 * kPointerSize. |
93 DCHECK(sizeof(Entry) == 3 * kPointerSize); | 93 DCHECK(sizeof(Entry) == 3 * kPointerSize); |
94 | 94 |
95 // Make sure that there are no register conflicts. | 95 // Make sure that there are no register conflicts. |
96 DCHECK(!scratch.is(receiver)); | 96 DCHECK(!scratch.is(receiver)); |
97 DCHECK(!scratch.is(name)); | 97 DCHECK(!scratch.is(name)); |
98 | 98 |
99 // Check scratch register is valid, extra2 and extra3 are unused. | 99 // Check scratch register is valid, extra2 and extra3 are unused. |
100 DCHECK(!scratch.is(no_reg)); | 100 DCHECK(!scratch.is(no_reg)); |
101 DCHECK(extra2.is(no_reg)); | 101 DCHECK(extra2.is(no_reg)); |
102 DCHECK(extra3.is(no_reg)); | 102 DCHECK(extra3.is(no_reg)); |
103 | 103 |
104 #ifdef DEBUG | 104 #ifdef DEBUG |
105 // If vector-based ics are in use, ensure that scratch doesn't conflict with | 105 // If vector-based ics are in use, ensure that scratch doesn't conflict with |
106 // the vector and slot registers, which need to be preserved for a handler | 106 // the vector and slot registers, which need to be preserved for a handler |
107 // call or miss. | 107 // call or miss. |
108 if (IC::ICUseVector(ic_kind)) { | 108 if (IC::ICUseVector(ic_kind_)) { |
Jakob Kummerow 2016/07/13 09:54:47:
This can be a DCHECK now (or dropped entirely); al…
109 if (ic_kind == Code::LOAD_IC || ic_kind == Code::LOAD_GLOBAL_IC || | 109 if (ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC) { |
110 ic_kind == Code::KEYED_LOAD_IC) { | |
111 Register vector = LoadWithVectorDescriptor::VectorRegister(); | 110 Register vector = LoadWithVectorDescriptor::VectorRegister(); |
112 Register slot = LoadDescriptor::SlotRegister(); | 111 Register slot = LoadDescriptor::SlotRegister(); |
113 DCHECK(!AreAliased(vector, slot, scratch)); | 112 DCHECK(!AreAliased(vector, slot, scratch)); |
114 } else { | 113 } else { |
115 DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC); | 114 DCHECK(ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC); |
116 Register vector = VectorStoreICDescriptor::VectorRegister(); | 115 Register vector = VectorStoreICDescriptor::VectorRegister(); |
117 Register slot = VectorStoreICDescriptor::SlotRegister(); | 116 Register slot = VectorStoreICDescriptor::SlotRegister(); |
118 DCHECK(!AreAliased(vector, slot, scratch)); | 117 DCHECK(!AreAliased(vector, slot, scratch)); |
119 } | 118 } |
120 } | 119 } |
121 #endif | 120 #endif |
122 | 121 |
123 Counters* counters = masm->isolate()->counters(); | 122 Counters* counters = masm->isolate()->counters(); |
124 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); | 123 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); |
125 | 124 |
126 // Check that the receiver isn't a smi. | 125 // Check that the receiver isn't a smi. |
127 __ JumpIfSmi(receiver, &miss); | 126 __ JumpIfSmi(receiver, &miss); |
128 | 127 |
129 // Get the map of the receiver and compute the hash. | 128 // Get the map of the receiver and compute the hash. |
130 __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); | 129 __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); |
131 // Use only the low 32 bits of the map pointer. | 130 // Use only the low 32 bits of the map pointer. |
132 __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); | 131 __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); |
133 __ xorp(scratch, Immediate(flags)); | 132 __ xorp(scratch, Immediate(flags)); |
134 // We mask out the last two bits because they are not part of the hash and | 133 // We mask out the last two bits because they are not part of the hash and |
135 // they are always 01 for maps; the same masking is applied in the two 'and' instructions below. | 134 // they are always 01 for maps; the same masking is applied in the two 'and' instructions below. |
136 __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); | 135 __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); |
137 | 136 |
138 // Probe the primary table. | 137 // Probe the primary table. |
139 ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch); | 138 ProbeTable(this, masm, flags, kPrimary, receiver, name, scratch); |
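
Editor's note: a hedged C++ sketch of the primary-offset computation emitted just above. The constants are illustrative stand-ins for the StubCache values (`kCacheIndexShift == 2` per the comment in ProbeTable's signature); the function name is hypothetical:

```cpp
#include <cstdint>

constexpr uint32_t kCacheIndexShift = 2;      // two low bits, always 01 for maps
constexpr uint32_t kPrimaryTableSize = 2048;  // illustrative value

// Mirrors the movl/addl/xorp/andp sequence above: add the name's hash field
// to the low 32 bits of the map pointer, fold in the code flags, then mask
// down to a table index scaled by 4 (kCacheIndexShift).
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_low32,
                       uint32_t flags) {
  uint32_t hash = (name_hash_field + map_low32) ^ flags;
  return hash & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
```
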
140 | 139 |
141 // Primary miss: Compute hash for secondary probe. | 140 // Primary miss: Compute hash for secondary probe. |
142 __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); | 141 __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); |
143 __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); | 142 __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); |
144 __ xorp(scratch, Immediate(flags)); | 143 __ xorp(scratch, Immediate(flags)); |
145 __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); | 144 __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); |
146 __ subl(scratch, name); | 145 __ subl(scratch, name); |
147 __ addl(scratch, Immediate(flags)); | 146 __ addl(scratch, Immediate(flags)); |
148 __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift)); | 147 __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift)); |
149 | 148 |
150 // Probe the secondary table. | 149 // Probe the secondary table. |
151 ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch); | 150 ProbeTable(this, masm, flags, kSecondary, receiver, name, scratch); |
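
Editor's note: on a primary miss, the code first recomputes the primary offset and then derives the secondary index from it. A companion sketch under the same illustrative constants:

```cpp
#include <cstdint>

constexpr uint32_t kCacheIndexShift = 2;       // as in the sketch above
constexpr uint32_t kSecondaryTableSize = 512;  // illustrative value

// Mirrors the subl/addl/andp sequence above: subtract the low 32 bits of
// the name pointer from the recomputed primary offset (32-bit wraparound,
// matching subl), add the flags back in, and mask to the secondary table's
// index range (still scaled by 4).
uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_low32,
                         uint32_t flags) {
  uint32_t hash = primary_offset - name_low32 + flags;
  return hash & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
```
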
152 | 151 |
153 // Cache miss: Fall-through and let caller handle the miss by | 152 // Cache miss: Fall-through and let caller handle the miss by |
154 // entering the runtime system. | 153 // entering the runtime system. |
155 __ bind(&miss); | 154 __ bind(&miss); |
156 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1); | 155 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1); |
157 } | 156 } |
158 | 157 |
159 | 158 |
160 #undef __ | 159 #undef __ |
161 } // namespace internal | 160 } // namespace internal |
162 } // namespace v8 | 161 } // namespace v8 |
163 | 162 |
164 #endif // V8_TARGET_ARCH_X64 | 163 #endif // V8_TARGET_ARCH_X64 |