OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
15 // | 15 // |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if V8_TARGET_ARCH_ARM | 30 #if V8_TARGET_ARCH_A64 |
31 | 31 |
32 #include "ic-inl.h" | 32 #include "ic-inl.h" |
33 #include "codegen.h" | 33 #include "codegen.h" |
34 #include "stub-cache.h" | 34 #include "stub-cache.h" |
35 | 35 |
36 namespace v8 { | 36 namespace v8 { |
37 namespace internal { | 37 namespace internal { |
38 | 38 |
| 39 |
39 #define __ ACCESS_MASM(masm) | 40 #define __ ACCESS_MASM(masm) |
40 | 41 |
41 | 42 |
42 static void ProbeTable(Isolate* isolate, | |
43 MacroAssembler* masm, | |
44 Code::Flags flags, | |
45 StubCache::Table table, | |
46 Register receiver, | |
47 Register name, | |
48 // Number of the cache entry, not scaled. | |
49 Register offset, | |
50 Register scratch, | |
51 Register scratch2, | |
52 Register offset_scratch) { | |
53 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | |
54 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | |
55 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | |
56 | |
57 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | |
58 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | |
59 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | |
60 | |
61 // Check the relative positions of the address fields. | |
62 ASSERT(value_off_addr > key_off_addr); | |
63 ASSERT((value_off_addr - key_off_addr) % 4 == 0); | |
64 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); | |
65 ASSERT(map_off_addr > key_off_addr); | |
66 ASSERT((map_off_addr - key_off_addr) % 4 == 0); | |
67 ASSERT((map_off_addr - key_off_addr) < (256 * 4)); | |
68 | |
69 Label miss; | |
70 Register base_addr = scratch; | |
71 scratch = no_reg; | |
72 | |
73 // Multiply by 3 because there are 3 fields per entry (name, code, map). | |
74 __ add(offset_scratch, offset, Operand(offset, LSL, 1)); | |
75 | |
76 // Calculate the base address of the entry. | |
77 __ mov(base_addr, Operand(key_offset)); | |
78 __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); | |
79 | |
80 // Check that the key in the entry matches the name. | |
81 __ ldr(ip, MemOperand(base_addr, 0)); | |
82 __ cmp(name, ip); | |
83 __ b(ne, &miss); | |
84 | |
85 // Check the map matches. | |
86 __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); | |
87 __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
88 __ cmp(ip, scratch2); | |
89 __ b(ne, &miss); | |
90 | |
91 // Get the code entry from the cache. | |
92 Register code = scratch2; | |
93 scratch2 = no_reg; | |
94 __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); | |
95 | |
96 // Check that the flags match what we're looking for. | |
97 Register flags_reg = base_addr; | |
98 base_addr = no_reg; | |
99 __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); | |
100 // It's a nice optimization if this constant is encodable in the bic insn. | |
101 | |
102 uint32_t mask = Code::kFlagsNotUsedInLookup; | |
103 ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); | |
104 __ bic(flags_reg, flags_reg, Operand(mask)); | |
105 __ cmp(flags_reg, Operand(flags)); | |
106 __ b(ne, &miss); | |
107 | |
108 #ifdef DEBUG | |
109 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { | |
110 __ jmp(&miss); | |
111 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { | |
112 __ jmp(&miss); | |
113 } | |
114 #endif | |
115 | |
116 // Jump to the first instruction in the code stub. | |
117 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
118 | |
119 // Miss: fall through. | |
120 __ bind(&miss); | |
121 } | |
122 | |
123 | |
124 void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, | 43 void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, |
125 Label* miss_label, | 44 Label* miss_label, |
126 Register receiver, | 45 Register receiver, |
127 Handle<Name> name, | 46 Handle<Name> name, |
128 Register scratch0, | 47 Register scratch0, |
129 Register scratch1) { | 48 Register scratch1) { |
| 49 ASSERT(!AreAliased(receiver, scratch0, scratch1)); |
130 ASSERT(name->IsUniqueName()); | 50 ASSERT(name->IsUniqueName()); |
131 ASSERT(!receiver.is(scratch0)); | |
132 Counters* counters = masm->isolate()->counters(); | 51 Counters* counters = masm->isolate()->counters(); |
133 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); | 52 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); |
134 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 53 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
135 | 54 |
136 Label done; | 55 Label done; |
137 | 56 |
138 const int kInterceptorOrAccessCheckNeededMask = | 57 const int kInterceptorOrAccessCheckNeededMask = |
139 (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); | 58 (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); |
140 | 59 |
141 // Bail out if the receiver has a named interceptor or requires access checks. | 60 // Bail out if the receiver has a named interceptor or requires access checks. |
142 Register map = scratch1; | 61 Register map = scratch1; |
143 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 62 __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
144 __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); | 63 __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
145 __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); | 64 __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask); |
146 __ b(ne, miss_label); | 65 __ B(ne, miss_label); |
147 | 66 |
148 // Check that receiver is a JSObject. | 67 // Check that receiver is a JSObject. |
149 __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 68 __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
150 __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 69 __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE); |
151 __ b(lt, miss_label); | 70 __ B(lt, miss_label); |
152 | 71 |
153 // Load properties array. | 72 // Load properties array. |
154 Register properties = scratch0; | 73 Register properties = scratch0; |
155 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 74 __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
156 // Check that the properties array is a dictionary. | 75 // Check that the properties array is a dictionary. |
157 __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); | 76 __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
158 Register tmp = properties; | 77 __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label); |
159 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); | |
160 __ cmp(map, tmp); | |
161 __ b(ne, miss_label); | |
162 | |
163 // Restore the temporarily used register. | |
164 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | |
165 | |
166 | 78 |
167 NameDictionaryLookupStub::GenerateNegativeLookup(masm, | 79 NameDictionaryLookupStub::GenerateNegativeLookup(masm, |
168 miss_label, | 80 miss_label, |
169 &done, | 81 &done, |
170 receiver, | 82 receiver, |
171 properties, | 83 properties, |
172 name, | 84 name, |
173 scratch1); | 85 scratch1); |
174 __ bind(&done); | 86 __ Bind(&done); |
175 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 87 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
176 } | 88 } |
177 | 89 |
178 | 90 |
 | 91 // Probe the primary or secondary table. |
 | 92 // If the entry is found in the cache, the generated code jumps to the first |
 | 93 // instruction of the stub in the cache. |
 | 94 // If there is a miss, the code falls through. |
| 95 // |
 | 96 // The 'receiver', 'name' and 'offset' registers are preserved on a miss. |
| 97 static void ProbeTable(Isolate* isolate, |
| 98 MacroAssembler* masm, |
| 99 Code::Flags flags, |
| 100 StubCache::Table table, |
| 101 Register receiver, |
| 102 Register name, |
| 103 Register offset, |
| 104 Register scratch, |
| 105 Register scratch2, |
| 106 Register scratch3) { |
| 107 // Some code below relies on the fact that the Entry struct contains |
| 108 // 3 pointers (name, code, map). |
| 109 STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize)); |
| 110 |
| 111 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
| 112 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
| 113 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); |
| 114 |
| 115 uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address()); |
| 116 uintptr_t value_off_addr = |
| 117 reinterpret_cast<uintptr_t>(value_offset.address()); |
| 118 uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address()); |
| 119 |
| 120 Label miss; |
| 121 |
| 122 ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3)); |
| 123 |
| 124 // Multiply by 3 because there are 3 fields per entry. |
| 125 __ Add(scratch3, offset, Operand(offset, LSL, 1)); |
| 126 |
| 127 // Calculate the base address of the entry. |
| 128 __ Mov(scratch, Operand(key_offset)); |
| 129 __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2)); |
| 130 |
| 131 // Check that the key in the entry matches the name. |
| 132 __ Ldr(scratch2, MemOperand(scratch)); |
| 133 __ Cmp(name, scratch2); |
| 134 __ B(ne, &miss); |
| 135 |
| 136 // Check the map matches. |
| 137 __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr)); |
| 138 __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 139 __ Cmp(scratch2, scratch3); |
| 140 __ B(ne, &miss); |
| 141 |
| 142 // Get the code entry from the cache. |
| 143 __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr)); |
| 144 |
| 145 // Check that the flags match what we're looking for. |
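 | // Code::Flags is a 32-bit field, hence the W register views used below. |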
| 146 __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset)); |
| 147 __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup); |
| 148 __ Cmp(scratch2.W(), flags); |
| 149 __ B(ne, &miss); |
| 150 |
| 151 #ifdef DEBUG |
| 152 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
| 153 __ B(&miss); |
| 154 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
| 155 __ B(&miss); |
| 156 } |
| 157 #endif |
| 158 |
| 159 // Jump to the first instruction in the code stub. |
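 | // The code object is a tagged pointer, so adding Code::kHeaderSize minus the |
 | // heap object tag yields the address of the first instruction. |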
| 160 __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag); |
| 161 __ Br(scratch); |
| 162 |
| 163 // Miss: fall through. |
| 164 __ Bind(&miss); |
| 165 } |
| 166 |
| 167 |
179 void StubCache::GenerateProbe(MacroAssembler* masm, | 168 void StubCache::GenerateProbe(MacroAssembler* masm, |
180 Code::Flags flags, | 169 Code::Flags flags, |
181 Register receiver, | 170 Register receiver, |
182 Register name, | 171 Register name, |
183 Register scratch, | 172 Register scratch, |
184 Register extra, | 173 Register extra, |
185 Register extra2, | 174 Register extra2, |
186 Register extra3) { | 175 Register extra3) { |
187 Isolate* isolate = masm->isolate(); | 176 Isolate* isolate = masm->isolate(); |
188 Label miss; | 177 Label miss; |
189 | 178 |
190 // Make sure that code is valid. The multiplying code relies on the | |
191 // entry size being 12. | |
192 ASSERT(sizeof(Entry) == 12); | |
193 | |
194 // Make sure the flags do not name a specific type. | 179 // Make sure the flags do not name a specific type. |
195 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); | 180 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); |
196 | 181 |
197 // Make sure that there are no register conflicts. | 182 // Make sure that there are no register conflicts. |
198 ASSERT(!scratch.is(receiver)); | 183 ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); |
199 ASSERT(!scratch.is(name)); | |
200 ASSERT(!extra.is(receiver)); | |
201 ASSERT(!extra.is(name)); | |
202 ASSERT(!extra.is(scratch)); | |
203 ASSERT(!extra2.is(receiver)); | |
204 ASSERT(!extra2.is(name)); | |
205 ASSERT(!extra2.is(scratch)); | |
206 ASSERT(!extra2.is(extra)); | |
207 | 184 |
208 // Check scratch, extra and extra2 registers are valid. | 185 // Make sure the extra, extra2 and extra3 registers are valid. |
209 ASSERT(!scratch.is(no_reg)); | |
210 ASSERT(!extra.is(no_reg)); | 186 ASSERT(!extra.is(no_reg)); |
211 ASSERT(!extra2.is(no_reg)); | 187 ASSERT(!extra2.is(no_reg)); |
212 ASSERT(!extra3.is(no_reg)); | 188 ASSERT(!extra3.is(no_reg)); |
213 | 189 |
214 Counters* counters = masm->isolate()->counters(); | 190 Counters* counters = masm->isolate()->counters(); |
215 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, | 191 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, |
216 extra2, extra3); | 192 extra2, extra3); |
217 | 193 |
218 // Check that the receiver isn't a smi. | 194 // Check that the receiver isn't a smi. |
219 __ JumpIfSmi(receiver, &miss); | 195 __ JumpIfSmi(receiver, &miss); |
220 | 196 |
221 // Get the map of the receiver and compute the hash. | 197 // Compute the hash for the primary table. |
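 | // The index is ((name_hash + map) ^ flags) >> kHeapObjectTagSize, masked to |
 | // the primary table size. |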
222 __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); | 198 __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
223 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 199 __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
224 __ add(scratch, scratch, Operand(ip)); | 200 __ Add(scratch, scratch, extra); |
225 uint32_t mask = kPrimaryTableSize - 1; | 201 __ Eor(scratch, scratch, flags); |
226 // We shift out the last two bits because they are not part of the hash and | 202 // We shift out the last two bits because they are not part of the hash. |
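 | // Ubfx extracts log2(kPrimaryTableSize) bits starting at kHeapObjectTagSize, |
 | // performing the shift and mask in a single instruction. |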
227 // they are always 01 for maps. | 203 __ Ubfx(scratch, scratch, kHeapObjectTagSize, |
228 __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize)); | 204 CountTrailingZeros(kPrimaryTableSize, 64)); |
229 // Mask down the eor argument to the minimum to keep the immediate | |
230 // ARM-encodable. | |
231 __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); | |
232 // Prefer and_ to ubfx here because ubfx takes 2 cycles. | |
233 __ and_(scratch, scratch, Operand(mask)); | |
234 | 205 |
235 // Probe the primary table. | 206 // Probe the primary table. |
236 ProbeTable(isolate, | 207 ProbeTable(isolate, masm, flags, kPrimary, receiver, name, |
237 masm, | 208 scratch, extra, extra2, extra3); |
238 flags, | |
239 kPrimary, | |
240 receiver, | |
241 name, | |
242 scratch, | |
243 extra, | |
244 extra2, | |
245 extra3); | |
246 | 209 |
247 // Primary miss: Compute hash for secondary probe. | 210 // Primary miss: compute the hash for the secondary table. |
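 | // The secondary index is (primary_index - (name >> kHeapObjectTagSize) + |
 | // (flags >> kHeapObjectTagSize)), masked to the secondary table size. |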
248 __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); | 211 __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); |
249 uint32_t mask2 = kSecondaryTableSize - 1; | 212 __ Add(scratch, scratch, flags >> kHeapObjectTagSize); |
250 __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); | 213 __ And(scratch, scratch, kSecondaryTableSize - 1); |
251 __ and_(scratch, scratch, Operand(mask2)); | |
252 | 214 |
253 // Probe the secondary table. | 215 // Probe the secondary table. |
254 ProbeTable(isolate, | 216 ProbeTable(isolate, masm, flags, kSecondary, receiver, name, |
255 masm, | 217 scratch, extra, extra2, extra3); |
256 flags, | |
257 kSecondary, | |
258 receiver, | |
259 name, | |
260 scratch, | |
261 extra, | |
262 extra2, | |
263 extra3); | |
264 | 218 |
265 // Cache miss: Fall-through and let caller handle the miss by | 219 // Cache miss: Fall-through and let caller handle the miss by |
266 // entering the runtime system. | 220 // entering the runtime system. |
267 __ bind(&miss); | 221 __ Bind(&miss); |
268 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, | 222 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, |
269 extra2, extra3); | 223 extra2, extra3); |
270 } | 224 } |
271 | 225 |
272 | 226 |
273 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, | 227 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, |
274 int index, | 228 int index, |
275 Register prototype) { | 229 Register prototype) { |
276 // Load the global or builtins object from the current context. | 230 // Load the global or builtins object from the current context. |
277 __ ldr(prototype, | 231 __ Ldr(prototype, GlobalObjectMemOperand()); |
278 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | |
279 // Load the native context from the global or builtins object. | 232 // Load the native context from the global or builtins object. |
280 __ ldr(prototype, | 233 __ Ldr(prototype, |
281 FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); | 234 FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); |
282 // Load the function from the native context. | 235 // Load the function from the native context. |
283 __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index))); | 236 __ Ldr(prototype, ContextMemOperand(prototype, index)); |
284 // Load the initial map. The global functions all have initial maps. | 237 // Load the initial map. The global functions all have initial maps. |
285 __ ldr(prototype, | 238 __ Ldr(prototype, |
286 FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); | 239 FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); |
287 // Load the prototype from the initial map. | 240 // Load the prototype from the initial map. |
288 __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); | 241 __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
289 } | 242 } |
290 | 243 |
291 | 244 |
292 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( | 245 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( |
293 MacroAssembler* masm, | 246 MacroAssembler* masm, |
294 int index, | 247 int index, |
295 Register prototype, | 248 Register prototype, |
296 Label* miss) { | 249 Label* miss) { |
297 Isolate* isolate = masm->isolate(); | 250 Isolate* isolate = masm->isolate(); |
298 // Get the global function with the given index. | 251 // Get the global function with the given index. |
299 Handle<JSFunction> function( | 252 Handle<JSFunction> function( |
300 JSFunction::cast(isolate->native_context()->get(index))); | 253 JSFunction::cast(isolate->native_context()->get(index))); |
301 | 254 |
302 // Check we're still in the same context. | 255 // Check we're still in the same context. |
303 Register scratch = prototype; | 256 Register scratch = prototype; |
304 const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); | 257 __ Ldr(scratch, GlobalObjectMemOperand()); |
305 __ ldr(scratch, MemOperand(cp, offset)); | 258 __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
306 __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 259 __ Ldr(scratch, ContextMemOperand(scratch, index)); |
307 __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index))); | 260 __ Cmp(scratch, Operand(function)); |
308 __ Move(ip, function); | 261 __ B(ne, miss); |
309 __ cmp(ip, scratch); | |
310 __ b(ne, miss); | |
311 | 262 |
312 // Load its initial map. The global functions all have initial maps. | 263 // Load its initial map. The global functions all have initial maps. |
313 __ Move(prototype, Handle<Map>(function->initial_map())); | 264 __ Mov(prototype, Operand(Handle<Map>(function->initial_map()))); |
314 // Load the prototype from the initial map. | 265 // Load the prototype from the initial map. |
315 __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); | 266 __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
316 } | 267 } |
317 | 268 |
318 | 269 |
319 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, | 270 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, |
320 Register dst, | 271 Register dst, |
321 Register src, | 272 Register src, |
322 bool inobject, | 273 bool inobject, |
323 int index, | 274 int index, |
324 Representation representation) { | 275 Representation representation) { |
325 ASSERT(!FLAG_track_double_fields || !representation.IsDouble()); | 276 ASSERT(!FLAG_track_double_fields || !representation.IsDouble()); |
326 int offset = index * kPointerSize; | 277 USE(representation); |
327 if (!inobject) { | 278 if (inobject) { |
| 279 int offset = index * kPointerSize; |
| 280 __ Ldr(dst, FieldMemOperand(src, offset)); |
| 281 } else { |
328 // Calculate the offset into the properties array. | 282 // Calculate the offset into the properties array. |
329 offset = offset + FixedArray::kHeaderSize; | 283 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
330 __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); | 284 __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); |
331 src = dst; | 285 __ Ldr(dst, FieldMemOperand(dst, offset)); |
332 } | 286 } |
333 __ ldr(dst, FieldMemOperand(src, offset)); | |
334 } | 287 } |
335 | 288 |
336 | 289 |
337 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, | 290 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, |
338 Register receiver, | 291 Register receiver, |
339 Register scratch, | 292 Register scratch, |
340 Label* miss_label) { | 293 Label* miss_label) { |
| 294 ASSERT(!AreAliased(receiver, scratch)); |
| 295 |
341 // Check that the receiver isn't a smi. | 296 // Check that the receiver isn't a smi. |
342 __ JumpIfSmi(receiver, miss_label); | 297 __ JumpIfSmi(receiver, miss_label); |
343 | 298 |
344 // Check that the object is a JS array. | 299 // Check that the object is a JS array. |
345 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); | 300 __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE, |
346 __ b(ne, miss_label); | 301 miss_label); |
347 | 302 |
348 // Load length directly from the JS array. | 303 // Load length directly from the JS array. |
349 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 304 __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
350 __ Ret(); | 305 __ Ret(); |
351 } | 306 } |
352 | 307 |
353 | 308 |
354 // Generate code to check if an object is a string. If the object is a | 309 // Generate code to check if an object is a string. If the object is a |
355 // heap object, its map's instance type is left in the scratch1 register. | 310 // heap object, its map's instance type is left in the scratch1 register. |
356 // If this is not needed, scratch1 and scratch2 may be the same register. | |
357 static void GenerateStringCheck(MacroAssembler* masm, | 311 static void GenerateStringCheck(MacroAssembler* masm, |
358 Register receiver, | 312 Register receiver, |
359 Register scratch1, | 313 Register scratch1, |
360 Register scratch2, | |
361 Label* smi, | 314 Label* smi, |
362 Label* non_string_object) { | 315 Label* non_string_object) { |
363 // Check that the receiver isn't a smi. | 316 // Check that the receiver isn't a smi. |
364 __ JumpIfSmi(receiver, smi); | 317 __ JumpIfSmi(receiver, smi); |
365 | 318 |
366 // Check that the object is a string. | 319 // Get the object's instance type field. |
367 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 320 __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
368 __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 321 __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
369 __ and_(scratch2, scratch1, Operand(kIsNotStringMask)); | 322 // Check if the "not string" bit is set. |
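 | // kNotStringTag is a single-bit tag, so MaskToBit converts it to the bit |
 | // index tested by Tbnz. |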
370 // The cast is to resolve the overload for the argument of 0x0. | 323 __ Tbnz(scratch1, MaskToBit(kNotStringTag), non_string_object); |
371 __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag))); | |
372 __ b(ne, non_string_object); | |
373 } | 324 } |
374 | 325 |
375 | 326 |
376 // Generate code to load the length from a string object and return the length. | 327 // Generate code to load the length from a string object and return the length. |
377 // If the receiver object is not a string or a wrapped string object the | 328 // If the receiver object is not a string or a wrapped string object the |
378 // execution continues at the miss label. The register containing the | 329 // execution continues at the miss label. The register containing the |
379 // receiver is potentially clobbered. | 330 // receiver is not clobbered if the receiver is not a string. |
380 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, | 331 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, |
381 Register receiver, | 332 Register receiver, |
382 Register scratch1, | 333 Register scratch1, |
383 Register scratch2, | 334 Register scratch2, |
384 Label* miss) { | 335 Label* miss) { |
| 336 // Input registers can't alias because we don't want to clobber the |
| 337 // receiver register if the object is not a string. |
| 338 ASSERT(!AreAliased(receiver, scratch1, scratch2)); |
| 339 |
385 Label check_wrapper; | 340 Label check_wrapper; |
386 | 341 |
387 // Check if the object is a string leaving the instance type in the | 342 // Check if the object is a string leaving the instance type in the |
388 // scratch1 register. | 343 // scratch1 register. |
389 GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper); | 344 GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper); |
390 | 345 |
391 // Load length directly from the string. | 346 // Load length directly from the string. |
392 __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); | 347 __ Ldr(x0, FieldMemOperand(receiver, String::kLengthOffset)); |
393 __ Ret(); | 348 __ Ret(); |
394 | 349 |
395 // Check if the object is a JSValue wrapper. | 350 // Check if the object is a JSValue wrapper. |
396 __ bind(&check_wrapper); | 351 __ Bind(&check_wrapper); |
397 __ cmp(scratch1, Operand(JS_VALUE_TYPE)); | 352 __ Cmp(scratch1, Operand(JS_VALUE_TYPE)); |
398 __ b(ne, miss); | 353 __ B(ne, miss); |
399 | 354 |
400 // Unwrap the value and check if the wrapped value is a string. | 355 // Unwrap the value and check if the wrapped value is a string. |
401 __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); | 356 __ Ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); |
402 GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); | 357 GenerateStringCheck(masm, scratch1, scratch2, miss, miss); |
403 __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); | 358 __ Ldr(x0, FieldMemOperand(scratch1, String::kLengthOffset)); |
404 __ Ret(); | 359 __ Ret(); |
405 } | 360 } |
406 | 361 |
407 | 362 |
408 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, | 363 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, |
409 Register receiver, | 364 Register receiver, |
410 Register scratch1, | 365 Register scratch1, |
411 Register scratch2, | 366 Register scratch2, |
412 Label* miss_label) { | 367 Label* miss_label) { |
413 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); | 368 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); |
414 __ mov(r0, scratch1); | 369 // TryGetFunctionPrototype can't put the result directly in x0 because the |
 | 370 // three input registers can't alias and we call this function from |
 | 371 // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly |
 | 372 // move the result into x0. |
| 373 __ Mov(x0, scratch1); |
415 __ Ret(); | 374 __ Ret(); |
416 } | 375 } |
417 | 376 |
418 | 377 |
419 // Generate code to check that a global property cell is empty. Create | 378 // Generate code to check that a global property cell is empty. Create |
420 // the property cell at compilation time if no cell exists for the | 379 // the property cell at compilation time if no cell exists for the |
421 // property. | 380 // property. |
422 void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, | 381 void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, |
423 Handle<JSGlobalObject> global, | 382 Handle<JSGlobalObject> global, |
424 Handle<Name> name, | 383 Handle<Name> name, |
425 Register scratch, | 384 Register scratch, |
426 Label* miss) { | 385 Label* miss) { |
427 Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); | 386 Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); |
428 ASSERT(cell->value()->IsTheHole()); | 387 ASSERT(cell->value()->IsTheHole()); |
429 __ mov(scratch, Operand(cell)); | 388 __ Mov(scratch, Operand(cell)); |
430 __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); | 389 __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); |
431 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 390 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss); |
432 __ cmp(scratch, ip); | |
433 __ b(ne, miss); | |
434 } | 391 } |
435 | 392 |
436 | 393 |
437 void StoreStubCompiler::GenerateNegativeHolderLookup( | 394 void StoreStubCompiler::GenerateNegativeHolderLookup( |
438 MacroAssembler* masm, | 395 MacroAssembler* masm, |
439 Handle<JSObject> holder, | 396 Handle<JSObject> holder, |
440 Register holder_reg, | 397 Register holder_reg, |
441 Handle<Name> name, | 398 Handle<Name> name, |
442 Label* miss) { | 399 Label* miss) { |
443 if (holder->IsJSGlobalObject()) { | 400 if (holder->IsJSGlobalObject()) { |
444 GenerateCheckPropertyCell( | 401 GenerateCheckPropertyCell( |
445 masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss); | 402 masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss); |
446 } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { | 403 } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { |
447 GenerateDictionaryNegativeLookup( | 404 GenerateDictionaryNegativeLookup( |
448 masm, miss, holder_reg, name, scratch1(), scratch2()); | 405 masm, miss, holder_reg, name, scratch1(), scratch2()); |
449 } | 406 } |
450 } | 407 } |
451 | 408 |
452 | 409 |
453 // Generate StoreTransition code, value is passed in r0 register. | 410 // Generate StoreTransition code, value is passed in x0 register. |
454 // When leaving generated code after success, the receiver_reg and name_reg | 411 // When leaving generated code after success, the receiver_reg and storage_reg |
455 // may be clobbered. Upon branch to miss_label, the receiver and name | 412 // may be clobbered. Upon branch to miss_label, the receiver and name registers |
456 // registers have their original values. | 413 // have their original values. |
457 void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, | 414 void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
458 Handle<JSObject> object, | 415 Handle<JSObject> object, |
459 LookupResult* lookup, | 416 LookupResult* lookup, |
460 Handle<Map> transition, | 417 Handle<Map> transition, |
461 Handle<Name> name, | 418 Handle<Name> name, |
462 Register receiver_reg, | 419 Register receiver_reg, |
463 Register storage_reg, | 420 Register storage_reg, |
464 Register value_reg, | 421 Register value_reg, |
465 Register scratch1, | 422 Register scratch1, |
466 Register scratch2, | 423 Register scratch2, |
467 Register scratch3, | 424 Register scratch3, |
468 Label* miss_label, | 425 Label* miss_label, |
469 Label* slow) { | 426 Label* slow) { |
470 // r0 : value | |
471 Label exit; | 427 Label exit; |
472 | 428 |
| 429 ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg, |
| 430 scratch1, scratch2, scratch3)); |
| 431 |
| 432 // We don't need scratch3. |
| 433 scratch3 = NoReg; |
| 434 |
473 int descriptor = transition->LastAdded(); | 435 int descriptor = transition->LastAdded(); |
474 DescriptorArray* descriptors = transition->instance_descriptors(); | 436 DescriptorArray* descriptors = transition->instance_descriptors(); |
475 PropertyDetails details = descriptors->GetDetails(descriptor); | 437 PropertyDetails details = descriptors->GetDetails(descriptor); |
476 Representation representation = details.representation(); | 438 Representation representation = details.representation(); |
477 ASSERT(!representation.IsNone()); | 439 ASSERT(!representation.IsNone()); |
478 | 440 |
479 if (details.type() == CONSTANT) { | 441 if (details.type() == CONSTANT) { |
480 Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); | 442 Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); |
481 __ Move(scratch1, constant); | 443 __ LoadObject(scratch1, constant); |
482 __ cmp(value_reg, scratch1); | 444 __ Cmp(value_reg, scratch1); |
483 __ b(ne, miss_label); | 445 __ B(ne, miss_label); |
484 } else if (FLAG_track_fields && representation.IsSmi()) { | 446 } else if (FLAG_track_fields && representation.IsSmi()) { |
485 __ JumpIfNotSmi(value_reg, miss_label); | 447 __ JumpIfNotSmi(value_reg, miss_label); |
486 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { | 448 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { |
487 __ JumpIfSmi(value_reg, miss_label); | 449 __ JumpIfSmi(value_reg, miss_label); |
488 } else if (FLAG_track_double_fields && representation.IsDouble()) { | 450 } else if (FLAG_track_double_fields && representation.IsDouble()) { |
489 Label do_store, heap_number; | 451 Label do_store, heap_number; |
490 __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); | 452 __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2); |
491 __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow); | |
492 | 453 |
| 454 // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register? |
| 455 // It's only used in Fcmp, but it's not really safe to use it like this. |
493 __ JumpIfNotSmi(value_reg, &heap_number); | 456 __ JumpIfNotSmi(value_reg, &heap_number); |
494 __ SmiUntag(scratch1, value_reg); | 457 __ SmiUntagToDouble(fp_scratch, value_reg); |
495 __ vmov(s0, scratch1); | 458 __ B(&do_store); |
496 __ vcvt_f64_s32(d0, s0); | |
497 __ jmp(&do_store); | |
498 | 459 |
499 __ bind(&heap_number); | 460 __ Bind(&heap_number); |
500 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, | 461 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, |
501 miss_label, DONT_DO_SMI_CHECK); | 462 miss_label, DONT_DO_SMI_CHECK); |
502 __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 463 __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
503 | 464 |
504 __ bind(&do_store); | 465 __ Bind(&do_store); |
505 __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); | 466 __ Str(fp_scratch, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); |
506 } | 467 } |
507 | 468 |
508 // Stub never generated for non-global objects that require access | 469 // Stub never generated for non-global objects that require access checks. |
509 // checks. | |
510 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); | 470 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
511 | 471 |
512 // Perform map transition for the receiver if necessary. | 472 // Perform map transition for the receiver if necessary. |
513 if (details.type() == FIELD && | 473 if ((details.type() == FIELD) && |
514 object->map()->unused_property_fields() == 0) { | 474 (object->map()->unused_property_fields() == 0)) { |
515 // The properties must be extended before we can store the value. | 475 // The properties must be extended before we can store the value. |
516 // We jump to a runtime call that extends the properties array. | 476 // We jump to a runtime call that extends the properties array. |
517 __ push(receiver_reg); | 477 __ Mov(scratch1, Operand(transition)); |
518 __ mov(r2, Operand(transition)); | 478 __ Push(receiver_reg, scratch1, value_reg); |
519 __ Push(r2, r0); | |
520 __ TailCallExternalReference( | 479 __ TailCallExternalReference( |
521 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), | 480 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), |
522 masm->isolate()), | 481 masm->isolate()), |
523 3, | 482 3, |
524 1); | 483 1); |
525 return; | 484 return; |
526 } | 485 } |
527 | 486 |
528 // Update the map of the object. | 487 // Update the map of the object. |
529 __ mov(scratch1, Operand(transition)); | 488 __ Mov(scratch1, Operand(transition)); |
530 __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); | 489 __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); |
531 | 490 |
532 // Update the write barrier for the map field. | 491 // Update the write barrier for the map field. |
533 __ RecordWriteField(receiver_reg, | 492 __ RecordWriteField(receiver_reg, |
534 HeapObject::kMapOffset, | 493 HeapObject::kMapOffset, |
535 scratch1, | 494 scratch1, |
536 scratch2, | 495 scratch2, |
537 kLRHasNotBeenSaved, | 496 kLRHasNotBeenSaved, |
538 kDontSaveFPRegs, | 497 kDontSaveFPRegs, |
539 OMIT_REMEMBERED_SET, | 498 OMIT_REMEMBERED_SET, |
540 OMIT_SMI_CHECK); | 499 OMIT_SMI_CHECK); |
541 | 500 |
542 if (details.type() == CONSTANT) { | 501 if (details.type() == CONSTANT) { |
543 ASSERT(value_reg.is(r0)); | 502 ASSERT(value_reg.is(x0)); |
544 __ Ret(); | 503 __ Ret(); |
545 return; | 504 return; |
546 } | 505 } |
547 | 506 |
548 int index = transition->instance_descriptors()->GetFieldIndex( | 507 int index = transition->instance_descriptors()->GetFieldIndex( |
549 transition->LastAdded()); | 508 transition->LastAdded()); |
550 | 509 |
551 // Adjust for the number of properties stored in the object. Even in the | 510 // Adjust for the number of properties stored in the object. Even in the |
552 // face of a transition we can use the old map here because the size of the | 511 // face of a transition we can use the old map here because the size of the |
553 // object and the number of in-object properties is not going to change. | 512 // object and the number of in-object properties is not going to change. |
554 index -= object->map()->inobject_properties(); | 513 index -= object->map()->inobject_properties(); |
555 | 514 |
556 // TODO(verwaest): Share this code as a code stub. | 515 // TODO(verwaest): Share this code as a code stub. |
557 SmiCheck smi_check = representation.IsTagged() | 516 SmiCheck smi_check = representation.IsTagged() |
558 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; | 517 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
559 if (index < 0) { | 518 if (index < 0) { |
560 // Set the property straight into the object. | 519 // Set the property straight into the object. |
561 int offset = object->map()->instance_size() + (index * kPointerSize); | 520 int offset = object->map()->instance_size() + (index * kPointerSize); |
| 521 // TODO(jbramley): This construct appears in several places in this |
| 522 // function. Try to clean it up, perhaps using a result_reg. |
562 if (FLAG_track_double_fields && representation.IsDouble()) { | 523 if (FLAG_track_double_fields && representation.IsDouble()) { |
563 __ str(storage_reg, FieldMemOperand(receiver_reg, offset)); | 524 __ Str(storage_reg, FieldMemOperand(receiver_reg, offset)); |
564 } else { | 525 } else { |
565 __ str(value_reg, FieldMemOperand(receiver_reg, offset)); | 526 __ Str(value_reg, FieldMemOperand(receiver_reg, offset)); |
566 } | 527 } |
567 | 528 |
568 if (!FLAG_track_fields || !representation.IsSmi()) { | 529 if (!FLAG_track_fields || !representation.IsSmi()) { |
569 // Update the write barrier for the array address. | 530 // Update the write barrier for the array address. |
570 if (!FLAG_track_double_fields || !representation.IsDouble()) { | 531 if (!FLAG_track_double_fields || !representation.IsDouble()) { |
571 __ mov(storage_reg, value_reg); | 532 __ Mov(storage_reg, value_reg); |
572 } | 533 } |
573 __ RecordWriteField(receiver_reg, | 534 __ RecordWriteField(receiver_reg, |
574 offset, | 535 offset, |
575 storage_reg, | 536 storage_reg, |
576 scratch1, | 537 scratch1, |
577 kLRHasNotBeenSaved, | 538 kLRHasNotBeenSaved, |
578 kDontSaveFPRegs, | 539 kDontSaveFPRegs, |
579 EMIT_REMEMBERED_SET, | 540 EMIT_REMEMBERED_SET, |
580 smi_check); | 541 smi_check); |
581 } | 542 } |
582 } else { | 543 } else { |
583 // Write to the properties array. | 544 // Write to the properties array. |
584 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 545 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
585 // Get the properties array | 546 // Get the properties array |
586 __ ldr(scratch1, | 547 __ Ldr(scratch1, |
587 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 548 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
588 if (FLAG_track_double_fields && representation.IsDouble()) { | 549 if (FLAG_track_double_fields && representation.IsDouble()) { |
589 __ str(storage_reg, FieldMemOperand(scratch1, offset)); | 550 __ Str(storage_reg, FieldMemOperand(scratch1, offset)); |
590 } else { | 551 } else { |
591 __ str(value_reg, FieldMemOperand(scratch1, offset)); | 552 __ Str(value_reg, FieldMemOperand(scratch1, offset)); |
592 } | 553 } |
593 | 554 |
594 if (!FLAG_track_fields || !representation.IsSmi()) { | 555 if (!FLAG_track_fields || !representation.IsSmi()) { |
595 // Update the write barrier for the array address. | 556 // Update the write barrier for the array address. |
596 if (!FLAG_track_double_fields || !representation.IsDouble()) { | 557 if (!FLAG_track_double_fields || !representation.IsDouble()) { |
597 __ mov(storage_reg, value_reg); | 558 __ Mov(storage_reg, value_reg); |
598 } | 559 } |
599 __ RecordWriteField(scratch1, | 560 __ RecordWriteField(scratch1, |
600 offset, | 561 offset, |
601 storage_reg, | 562 storage_reg, |
602 receiver_reg, | 563 receiver_reg, |
603 kLRHasNotBeenSaved, | 564 kLRHasNotBeenSaved, |
604 kDontSaveFPRegs, | 565 kDontSaveFPRegs, |
605 EMIT_REMEMBERED_SET, | 566 EMIT_REMEMBERED_SET, |
606 smi_check); | 567 smi_check); |
607 } | 568 } |
608 } | 569 } |
609 | 570 |
610 // Return the value (register r0). | 571 __ Bind(&exit); |
611 ASSERT(value_reg.is(r0)); | 572 // Return the value (register x0). |
612 __ bind(&exit); | 573 ASSERT(value_reg.is(x0)); |
613 __ Ret(); | 574 __ Ret(); |
614 } | 575 } |
615 | 576 |
616 | 577 |
617 // Generate StoreField code, value is passed in r0 register. | 578 // Generate StoreField code, value is passed in x0 register. |
618 // When leaving generated code after success, the receiver_reg and name_reg | 579 // When leaving generated code after success, the receiver_reg and name_reg may |
619 // may be clobbered. Upon branch to miss_label, the receiver and name | 580 // be clobbered. Upon branch to miss_label, the receiver and name registers have |
620 // registers have their original values. | 581 // their original values. |
621 void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, | 582 void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
622 Handle<JSObject> object, | 583 Handle<JSObject> object, |
623 LookupResult* lookup, | 584 LookupResult* lookup, |
624 Register receiver_reg, | 585 Register receiver_reg, |
625 Register name_reg, | 586 Register name_reg, |
626 Register value_reg, | 587 Register value_reg, |
627 Register scratch1, | 588 Register scratch1, |
628 Register scratch2, | 589 Register scratch2, |
629 Label* miss_label) { | 590 Label* miss_label) { |
630 // r0 : value | 591 // x0 : value |
631 Label exit; | 592 Label exit; |
632 | 593 |
633 // Stub never generated for non-global objects that require access | 594 // Stub never generated for non-global objects that require access |
634 // checks. | 595 // checks. |
635 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); | 596 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
636 | 597 |
637 int index = lookup->GetFieldIndex().field_index(); | 598 int index = lookup->GetFieldIndex().field_index(); |
638 | 599 |
639 // Adjust for the number of properties stored in the object. Even in the | 600 // Adjust for the number of properties stored in the object. Even in the |
640 // face of a transition we can use the old map here because the size of the | 601 // face of a transition we can use the old map here because the size of the |
641 // object and the number of in-object properties is not going to change. | 602 // object and the number of in-object properties is not going to change. |
642 index -= object->map()->inobject_properties(); | 603 index -= object->map()->inobject_properties(); |
643 | 604 |
644 Representation representation = lookup->representation(); | 605 Representation representation = lookup->representation(); |
645 ASSERT(!representation.IsNone()); | 606 ASSERT(!representation.IsNone()); |
646 if (FLAG_track_fields && representation.IsSmi()) { | 607 if (FLAG_track_fields && representation.IsSmi()) { |
647 __ JumpIfNotSmi(value_reg, miss_label); | 608 __ JumpIfNotSmi(value_reg, miss_label); |
648 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { | 609 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { |
649 __ JumpIfSmi(value_reg, miss_label); | 610 __ JumpIfSmi(value_reg, miss_label); |
650 } else if (FLAG_track_double_fields && representation.IsDouble()) { | 611 } else if (FLAG_track_double_fields && representation.IsDouble()) { |
651 // Load the double storage. | 612 // Load the double storage. |
652 if (index < 0) { | 613 if (index < 0) { |
653 int offset = object->map()->instance_size() + (index * kPointerSize); | 614 int offset = (index * kPointerSize) + object->map()->instance_size(); |
654 __ ldr(scratch1, FieldMemOperand(receiver_reg, offset)); | 615 __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset)); |
655 } else { | 616 } else { |
656 __ ldr(scratch1, | 617 int offset = (index * kPointerSize) + FixedArray::kHeaderSize; |
| 618 __ Ldr(scratch1, |
657 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 619 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
658 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 620 __ Ldr(scratch1, FieldMemOperand(scratch1, offset)); |
659 __ ldr(scratch1, FieldMemOperand(scratch1, offset)); | |
660 } | 621 } |
661 | 622 |
662 // Store the value into the storage. | 623 // Store the value into the storage. |
663 Label do_store, heap_number; | 624 Label do_store, heap_number; |
| 625 // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register? |
| 626 // It's only used in Fcmp, but it's not really safe to use it like this. |
664 __ JumpIfNotSmi(value_reg, &heap_number); | 627 __ JumpIfNotSmi(value_reg, &heap_number); |
665 __ SmiUntag(scratch2, value_reg); | 628 __ SmiUntagToDouble(fp_scratch, value_reg); |
666 __ vmov(s0, scratch2); | 629 __ B(&do_store); |
667 __ vcvt_f64_s32(d0, s0); | |
668 __ jmp(&do_store); | |
669 | 630 |
670 __ bind(&heap_number); | 631 __ Bind(&heap_number); |
671 __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, | 632 __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, |
672 miss_label, DONT_DO_SMI_CHECK); | 633 miss_label, DONT_DO_SMI_CHECK); |
673 __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 634 __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
674 | 635 |
675 __ bind(&do_store); | 636 __ Bind(&do_store); |
676 __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); | 637 __ Str(fp_scratch, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); |
677 // Return the value (register r0). | 638 |
678 ASSERT(value_reg.is(r0)); | 639 // Return the value (register x0). |
| 640 ASSERT(value_reg.is(x0)); |
679 __ Ret(); | 641 __ Ret(); |
680 return; | 642 return; |
681 } | 643 } |
682 | 644 |
683 // TODO(verwaest): Share this code as a code stub. | 645 // TODO(verwaest): Share this code as a code stub. |
684 SmiCheck smi_check = representation.IsTagged() | 646 SmiCheck smi_check = representation.IsTagged() |
685 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; | 647 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
686 if (index < 0) { | 648 if (index < 0) { |
687 // Set the property straight into the object. | 649 // Set the property straight into the object. |
688 int offset = object->map()->instance_size() + (index * kPointerSize); | 650 int offset = object->map()->instance_size() + (index * kPointerSize); |
689 __ str(value_reg, FieldMemOperand(receiver_reg, offset)); | 651 __ Str(value_reg, FieldMemOperand(receiver_reg, offset)); |
690 | 652 |
691 if (!FLAG_track_fields || !representation.IsSmi()) { | 653 if (!FLAG_track_fields || !representation.IsSmi()) { |
692 // Skip updating write barrier if storing a smi. | 654 // Skip updating write barrier if storing a smi. |
693 __ JumpIfSmi(value_reg, &exit); | 655 __ JumpIfSmi(value_reg, &exit); |
694 | 656 |
695 // Update the write barrier for the array address. | 657 // Update the write barrier for the array address. |
696 // Pass the now unused name_reg as a scratch register. | 658 // Pass the now unused name_reg as a scratch register. |
697 __ mov(name_reg, value_reg); | 659 __ Mov(name_reg, value_reg); |
698 __ RecordWriteField(receiver_reg, | 660 __ RecordWriteField(receiver_reg, |
699 offset, | 661 offset, |
700 name_reg, | 662 name_reg, |
701 scratch1, | 663 scratch1, |
702 kLRHasNotBeenSaved, | 664 kLRHasNotBeenSaved, |
703 kDontSaveFPRegs, | 665 kDontSaveFPRegs, |
704 EMIT_REMEMBERED_SET, | 666 EMIT_REMEMBERED_SET, |
705 smi_check); | 667 smi_check); |
706 } | 668 } |
707 } else { | 669 } else { |
708 // Write to the properties array. | 670 // Write to the properties array. |
709 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 671 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
710 // Get the properties array | 672 // Get the properties array |
711 __ ldr(scratch1, | 673 __ Ldr(scratch1, |
712 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 674 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
713 __ str(value_reg, FieldMemOperand(scratch1, offset)); | 675 __ Str(value_reg, FieldMemOperand(scratch1, offset)); |
714 | 676 |
715 if (!FLAG_track_fields || !representation.IsSmi()) { | 677 if (!FLAG_track_fields || !representation.IsSmi()) { |
716 // Skip updating write barrier if storing a smi. | 678 // Skip updating write barrier if storing a smi. |
717 __ JumpIfSmi(value_reg, &exit); | 679 __ JumpIfSmi(value_reg, &exit); |
718 | 680 |
719 // Update the write barrier for the array address. | 681 // Update the write barrier for the array address. |
720 // Ok to clobber receiver_reg and name_reg, since we return. | 682 // Ok to clobber receiver_reg and name_reg, since we return. |
721 __ mov(name_reg, value_reg); | 683 __ Mov(name_reg, value_reg); |
722 __ RecordWriteField(scratch1, | 684 __ RecordWriteField(scratch1, |
723 offset, | 685 offset, |
724 name_reg, | 686 name_reg, |
725 receiver_reg, | 687 receiver_reg, |
726 kLRHasNotBeenSaved, | 688 kLRHasNotBeenSaved, |
727 kDontSaveFPRegs, | 689 kDontSaveFPRegs, |
728 EMIT_REMEMBERED_SET, | 690 EMIT_REMEMBERED_SET, |
729 smi_check); | 691 smi_check); |
730 } | 692 } |
731 } | 693 } |
732 | 694 |
733 // Return the value (register r0). | 695 __ Bind(&exit); |
734 ASSERT(value_reg.is(r0)); | 696 // Return the value (register x0). |
735 __ bind(&exit); | 697 ASSERT(value_reg.is(x0)); |
736 __ Ret(); | 698 __ Ret(); |
737 } | 699 } |
738 | 700 |
739 | 701 |
740 void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, | 702 void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, |
741 Label* label, | 703 Label* label, |
742 Handle<Name> name) { | 704 Handle<Name> name) { |
743 if (!label->is_unused()) { | 705 if (!label->is_unused()) { |
744 __ bind(label); | 706 __ Bind(label); |
745 __ mov(this->name(), Operand(name)); | 707 __ Mov(this->name(), Operand(name)); |
746 } | 708 } |
747 } | 709 } |
748 | 710 |
749 | 711 |
750 static void PushInterceptorArguments(MacroAssembler* masm, | 712 static void PushInterceptorArguments(MacroAssembler* masm, |
751 Register receiver, | 713 Register receiver, |
752 Register holder, | 714 Register holder, |
753 Register name, | 715 Register name, |
754 Handle<JSObject> holder_obj) { | 716 Handle<JSObject> holder_obj) { |
755 STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0); | 717 STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0); |
756 STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1); | 718 STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1); |
757 STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); | 719 STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); |
758 STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); | 720 STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); |
759 STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); | 721 STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); |
760 __ push(name); | 722 |
| 723 __ Push(name); |
761 Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); | 724 Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); |
762 ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); | 725 ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); |
763 Register scratch = name; | 726 Register scratch = name; |
764 __ mov(scratch, Operand(interceptor)); | 727 __ Mov(scratch, Operand(interceptor)); |
765 __ push(scratch); | 728 __ Push(scratch, receiver, holder); |
766 __ push(receiver); | |
767 __ push(holder); | |
768 } | 729 } |
769 | 730 |
770 | 731 |
771 static void CompileCallLoadPropertyWithInterceptor( | 732 static void CompileCallLoadPropertyWithInterceptor( |
772 MacroAssembler* masm, | 733 MacroAssembler* masm, |
773 Register receiver, | 734 Register receiver, |
774 Register holder, | 735 Register holder, |
775 Register name, | 736 Register name, |
776 Handle<JSObject> holder_obj, | 737 Handle<JSObject> holder_obj, |
777 IC::UtilityId id) { | 738 IC::UtilityId id) { |
778 PushInterceptorArguments(masm, receiver, holder, name, holder_obj); | 739 PushInterceptorArguments(masm, receiver, holder, name, holder_obj); |
| 740 |
779 __ CallExternalReference( | 741 __ CallExternalReference( |
780 ExternalReference(IC_Utility(id), masm->isolate()), | 742 ExternalReference(IC_Utility(id), masm->isolate()), |
781 StubCache::kInterceptorArgsLength); | 743 StubCache::kInterceptorArgsLength); |
782 } | 744 } |
783 | 745 |
784 | 746 |
785 // Generate a call to an API function. | 747 // Generate a call to an API function. |
786 static void GenerateFastApiCall(MacroAssembler* masm, | 748 static void GenerateFastApiCall(MacroAssembler* masm, |
787 const CallOptimization& optimization, | 749 const CallOptimization& optimization, |
788 Handle<Map> receiver_map, | 750 Handle<Map> receiver_map, |
789 Register receiver, | 751 Register receiver, |
790 Register scratch_in, | 752 Register scratch, |
791 int argc, | 753 int argc, |
792 Register* values) { | 754 Register* values) { |
793 ASSERT(!receiver.is(scratch_in)); | 755 ASSERT(!AreAliased(receiver, scratch)); |
794 __ push(receiver); | 756 __ Push(receiver); |
795 // Write the arguments to stack frame. | 757 // Write the arguments to stack frame. |
796 for (int i = 0; i < argc; i++) { | 758 for (int i = 0; i < argc; i++) { |
| 759 // TODO(jbramley): Push these in as few Push() calls as possible. |
797 Register arg = values[argc-1-i]; | 760 Register arg = values[argc-1-i]; |
798 ASSERT(!receiver.is(arg)); | 761 ASSERT(!AreAliased(receiver, scratch, arg)); |
799 ASSERT(!scratch_in.is(arg)); | 762 __ Push(arg); |
800 __ push(arg); | |
801 } | 763 } |
| 764 |
802 ASSERT(optimization.is_simple_api_call()); | 765 ASSERT(optimization.is_simple_api_call()); |
803 | 766 |
804 // Abi for CallApiFunctionStub. | 767 // Abi for CallApiFunctionStub. |
805 Register callee = r0; | 768 Register callee = x0; |
806 Register call_data = r4; | 769 Register call_data = x4; |
807 Register holder = r2; | 770 Register holder = x2; |
808 Register api_function_address = r1; | 771 Register api_function_address = x1; |
809 | 772 |
810 // Put holder in place. | 773 // Put holder in place. |
811 CallOptimization::HolderLookup holder_lookup; | 774 CallOptimization::HolderLookup holder_lookup; |
812 Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( | 775 Handle<JSObject> api_holder = |
813 receiver_map, | 776 optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup); |
814 &holder_lookup); | |
815 switch (holder_lookup) { | 777 switch (holder_lookup) { |
816 case CallOptimization::kHolderIsReceiver: | 778 case CallOptimization::kHolderIsReceiver: |
817 __ Move(holder, receiver); | 779 __ Mov(holder, receiver); |
818 break; | 780 break; |
819 case CallOptimization::kHolderFound: | 781 case CallOptimization::kHolderFound: |
820 __ Move(holder, api_holder); | 782 __ LoadObject(holder, api_holder); |
821 break; | 783 break; |
822 case CallOptimization::kHolderNotFound: | 784 case CallOptimization::kHolderNotFound: |
823 UNREACHABLE(); | 785 UNREACHABLE(); |
824 break; | 786 break; |
825 } | 787 } |
826 | 788 |
827 Isolate* isolate = masm->isolate(); | 789 Isolate* isolate = masm->isolate(); |
828 Handle<JSFunction> function = optimization.constant_function(); | 790 Handle<JSFunction> function = optimization.constant_function(); |
829 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); | 791 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); |
830 Handle<Object> call_data_obj(api_call_info->data(), isolate); | 792 Handle<Object> call_data_obj(api_call_info->data(), isolate); |
831 | 793 |
832 // Put callee in place. | 794 // Put callee in place. |
833 __ Move(callee, function); | 795 __ LoadObject(callee, function); |
834 | 796 |
835 bool call_data_undefined = false; | 797 bool call_data_undefined = false; |
836 // Put call_data in place. | 798 // Put call_data in place. |
837 if (isolate->heap()->InNewSpace(*call_data_obj)) { | 799 if (isolate->heap()->InNewSpace(*call_data_obj)) { |
838 __ Move(call_data, api_call_info); | 800 __ LoadObject(call_data, api_call_info); |
839 __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); | 801 __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); |
840 } else if (call_data_obj->IsUndefined()) { | 802 } else if (call_data_obj->IsUndefined()) { |
841 call_data_undefined = true; | 803 call_data_undefined = true; |
842 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); | 804 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); |
843 } else { | 805 } else { |
844 __ Move(call_data, call_data_obj); | 806 __ LoadObject(call_data, call_data_obj); |
845 } | 807 } |
846 | 808 |
847 // Put api_function_address in place. | 809 // Put api_function_address in place. |
848 Address function_address = v8::ToCData<Address>(api_call_info->callback()); | 810 Address function_address = v8::ToCData<Address>(api_call_info->callback()); |
849 ApiFunction fun(function_address); | 811 ApiFunction fun(function_address); |
850 ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; | |
851 ExternalReference ref = ExternalReference(&fun, | 812 ExternalReference ref = ExternalReference(&fun, |
852 type, | 813 ExternalReference::DIRECT_API_CALL, |
853 masm->isolate()); | 814 masm->isolate()); |
854 __ mov(api_function_address, Operand(ref)); | 815 __ Mov(api_function_address, Operand(ref)); |
855 | 816 |
856 // Jump to stub. | 817 // Jump to stub. |
857 CallApiFunctionStub stub(true, call_data_undefined, argc); | 818 CallApiFunctionStub stub(true, call_data_undefined, argc); |
858 __ TailCallStub(&stub); | 819 __ TailCallStub(&stub); |
859 } | 820 } |
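A side note on the argument loop above: it pushes values[argc-1] first, so values[0] ends up nearest the stack pointer on the downward-growing stack. A tiny model of that ordering (plain C++, with a vector standing in for the stack):

    #include <cstdio>
    #include <vector>

    int main() {
      const char* values[] = {"arg0", "arg1", "arg2"};
      const int argc = 3;
      std::vector<const char*> stack;  // back() models the top of stack
      for (int i = 0; i < argc; i++) {
        stack.push_back(values[argc - 1 - i]);  // reverse source order
      }
      std::printf("top of stack: %s\n", stack.back());  // arg0
      return 0;
    }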
860 | 821 |
861 | 822 |
862 void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { | 823 void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { |
863 __ Jump(code, RelocInfo::CODE_TARGET); | 824 __ Jump(code, RelocInfo::CODE_TARGET); |
864 } | 825 } |
865 | 826 |
866 | 827 |
867 #undef __ | 828 #undef __ |
868 #define __ ACCESS_MASM(masm()) | 829 #define __ ACCESS_MASM(masm()) |
869 | 830 |
870 | 831 |
871 Register StubCompiler::CheckPrototypes(Handle<HeapType> type, | 832 Register StubCompiler::CheckPrototypes(Handle<HeapType> type, |
872 Register object_reg, | 833 Register object_reg, |
873 Handle<JSObject> holder, | 834 Handle<JSObject> holder, |
874 Register holder_reg, | 835 Register holder_reg, |
875 Register scratch1, | 836 Register scratch1, |
876 Register scratch2, | 837 Register scratch2, |
877 Handle<Name> name, | 838 Handle<Name> name, |
878 Label* miss, | 839 Label* miss, |
879 PrototypeCheckType check) { | 840 PrototypeCheckType check) { |
880 Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); | 841 Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); |
881 // Make sure that the type feedback oracle harvests the receiver map. | 842 // Make sure that the type feedback oracle harvests the receiver map. |
882 // TODO(svenpanne) Remove this hack when all ICs are reworked. | 843 // TODO(svenpanne) Remove this hack when all ICs are reworked. |
883 __ mov(scratch1, Operand(receiver_map)); | 844 __ Mov(scratch1, Operand(receiver_map)); |
884 | 845 |
885 // Make sure there's no overlap between holder and object registers. | 846 // object_reg and holder_reg may alias. |
886 ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); | 847 ASSERT(!AreAliased(object_reg, scratch1, scratch2)); |
887 ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) | 848 ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); |
888 && !scratch2.is(scratch1)); | |
889 | 849 |
890 // Keep track of the current object in register reg. | 850 // Keep track of the current object in register reg. |
891 Register reg = object_reg; | 851 Register reg = object_reg; |
892 int depth = 0; | 852 int depth = 0; |
893 | 853 |
894 Handle<JSObject> current = Handle<JSObject>::null(); | 854 Handle<JSObject> current = Handle<JSObject>::null(); |
895 if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant()); | 855 if (type->IsConstant()) { |
| 856 current = Handle<JSObject>::cast(type->AsConstant()); |
| 857 } |
896 Handle<JSObject> prototype = Handle<JSObject>::null(); | 858 Handle<JSObject> prototype = Handle<JSObject>::null(); |
897 Handle<Map> current_map = receiver_map; | 859 Handle<Map> current_map = receiver_map; |
898 Handle<Map> holder_map(holder->map()); | 860 Handle<Map> holder_map(holder->map()); |
899 // Traverse the prototype chain and check the maps in the prototype chain for | 861 // Traverse the prototype chain and check the maps in the prototype chain for |
900 // fast and global objects or do negative lookup for normal objects. | 862 // fast and global objects or do negative lookup for normal objects. |
901 while (!current_map.is_identical_to(holder_map)) { | 863 while (!current_map.is_identical_to(holder_map)) { |
902 ++depth; | 864 ++depth; |
903 | 865 |
904 // Only global objects and objects that do not require access | 866 // Only global objects and objects that do not require access |
905 // checks are allowed in stubs. | 867 // checks are allowed in stubs. |
906 ASSERT(current_map->IsJSGlobalProxyMap() || | 868 ASSERT(current_map->IsJSGlobalProxyMap() || |
907 !current_map->is_access_check_needed()); | 869 !current_map->is_access_check_needed()); |
908 | 870 |
909 prototype = handle(JSObject::cast(current_map->prototype())); | 871 prototype = handle(JSObject::cast(current_map->prototype())); |
910 if (current_map->is_dictionary_map() && | 872 if (current_map->is_dictionary_map() && |
911 !current_map->IsJSGlobalObjectMap() && | 873 !current_map->IsJSGlobalObjectMap() && |
912 !current_map->IsJSGlobalProxyMap()) { | 874 !current_map->IsJSGlobalProxyMap()) { |
913 if (!name->IsUniqueName()) { | 875 if (!name->IsUniqueName()) { |
914 ASSERT(name->IsString()); | 876 ASSERT(name->IsString()); |
915 name = factory()->InternalizeString(Handle<String>::cast(name)); | 877 name = factory()->InternalizeString(Handle<String>::cast(name)); |
916 } | 878 } |
917 ASSERT(current.is_null() || | 879 ASSERT(current.is_null() || |
918 current->property_dictionary()->FindEntry(*name) == | 880 (current->property_dictionary()->FindEntry(*name) == |
919 NameDictionary::kNotFound); | 881 NameDictionary::kNotFound)); |
920 | 882 |
921 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, | 883 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, |
922 scratch1, scratch2); | 884 scratch1, scratch2); |
923 | 885 |
924 __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); | 886 __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
925 reg = holder_reg; // From now on the object will be in holder_reg. | 887 reg = holder_reg; // From now on the object will be in holder_reg. |
926 __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); | 888 __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
927 } else { | 889 } else { |
928 Register map_reg = scratch1; | 890 Register map_reg = scratch1; |
| 891 // TODO(jbramley): Skip this load when we don't need the map. |
| 892 __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 893 |
929 if (depth != 1 || check == CHECK_ALL_MAPS) { | 894 if (depth != 1 || check == CHECK_ALL_MAPS) { |
930 // CheckMap implicitly loads the map of |reg| into |map_reg|. | 895 __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK); |
931 __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); | |
932 } else { | |
933 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | |
934 } | 896 } |
935 | 897 |
936 // Check access rights to the global object. This has to happen after | 898 // Check access rights to the global object. This has to happen after |
937 // the map check so that we know that the object is actually a global | 899 // the map check so that we know that the object is actually a global |
938 // object. | 900 // object. |
939 if (current_map->IsJSGlobalProxyMap()) { | 901 if (current_map->IsJSGlobalProxyMap()) { |
940 __ CheckAccessGlobalProxy(reg, scratch2, miss); | 902 __ CheckAccessGlobalProxy(reg, scratch2, miss); |
941 } else if (current_map->IsJSGlobalObjectMap()) { | 903 } else if (current_map->IsJSGlobalObjectMap()) { |
942 GenerateCheckPropertyCell( | 904 GenerateCheckPropertyCell( |
943 masm(), Handle<JSGlobalObject>::cast(current), name, | 905 masm(), Handle<JSGlobalObject>::cast(current), name, |
944 scratch2, miss); | 906 scratch2, miss); |
945 } | 907 } |
946 | 908 |
947 reg = holder_reg; // From now on the object will be in holder_reg. | 909 reg = holder_reg; // From now on the object will be in holder_reg. |
948 | 910 |
949 if (heap()->InNewSpace(*prototype)) { | 911 if (heap()->InNewSpace(*prototype)) { |
950 // The prototype is in new space; we cannot store a reference to it | 912 // The prototype is in new space; we cannot store a reference to it |
951 // in the code. Load it from the map. | 913 // in the code. Load it from the map. |
952 __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); | 914 __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); |
953 } else { | 915 } else { |
954 // The prototype is in old space; load it directly. | 916 // The prototype is in old space; load it directly. |
955 __ mov(reg, Operand(prototype)); | 917 __ Mov(reg, Operand(prototype)); |
956 } | 918 } |
957 } | 919 } |
958 | 920 |
959 // Go to the next object in the prototype chain. | 921 // Go to the next object in the prototype chain. |
960 current = prototype; | 922 current = prototype; |
961 current_map = handle(current->map()); | 923 current_map = handle(current->map()); |
962 } | 924 } |
963 | 925 |
964 // Log the check depth. | 926 // Log the check depth. |
965 LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); | 927 LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); |
966 | 928 |
967 if (depth != 0 || check == CHECK_ALL_MAPS) { | 930 if (depth != 0 || check == CHECK_ALL_MAPS) { |
968 // Check the holder map. | 931 // Check the holder map. |
969 __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK); | 932 __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK); |
970 } | 933 } |
971 | 934 |
972 // Perform security check for access to the global object. | 935 // Perform security check for access to the global object. |
973 ASSERT(current_map->IsJSGlobalProxyMap() || | 936 ASSERT(current_map->IsJSGlobalProxyMap() || |
974 !current_map->is_access_check_needed()); | 937 !current_map->is_access_check_needed()); |
975 if (current_map->IsJSGlobalProxyMap()) { | 938 if (current_map->IsJSGlobalProxyMap()) { |
976 __ CheckAccessGlobalProxy(reg, scratch1, miss); | 939 __ CheckAccessGlobalProxy(reg, scratch1, miss); |
977 } | 940 } |
978 | 941 |
979 // Return the register containing the holder. | 942 // Return the register containing the holder. |
980 return reg; | 943 return reg; |
981 } | 944 } |
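CheckPrototypes compiles the receiver-to-holder walk into straight-line checks: one map compare (or negative dictionary lookup) per hop, branching to the miss label on any mismatch. A rough standalone model of the control flow the stub encodes, with hypothetical Object/Map structs in place of V8's heap layout:

    #include <cstdio>

    struct Map;
    struct Object { const Map* map; };
    struct Map { Object* prototype; };

    // expected[] holds the maps recorded when the stub was compiled; any
    // mismatch at runtime corresponds to branching to the miss label.
    Object* CheckPrototypes(Object* receiver, const Map* const* expected,
                            int depth) {
      Object* current = receiver;
      for (int i = 0; i < depth; i++) {
        if (current->map != expected[i]) return nullptr;  // miss
        current = current->map->prototype;                // next hop
      }
      return current;  // the holder, left in a register by the real stub
    }

    int main() {
      Map proto_map{nullptr};
      Object proto{&proto_map};
      Map recv_map{&proto};
      Object receiver{&recv_map};
      const Map* expected[] = {&recv_map};
      std::printf("holder found: %d\n",
                  CheckPrototypes(&receiver, expected, 1) != nullptr);
      return 0;
    }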
982 | 945 |
983 | 946 |
984 void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { | 947 void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { |
985 if (!miss->is_unused()) { | 948 if (!miss->is_unused()) { |
986 Label success; | 949 Label success; |
987 __ b(&success); | 950 __ B(&success); |
988 __ bind(miss); | 951 |
| 952 __ Bind(miss); |
989 TailCallBuiltin(masm(), MissBuiltin(kind())); | 953 TailCallBuiltin(masm(), MissBuiltin(kind())); |
990 __ bind(&success); | 954 |
| 955 __ Bind(&success); |
991 } | 956 } |
992 } | 957 } |
993 | 958 |
994 | 959 |
995 void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { | 960 void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { |
996 if (!miss->is_unused()) { | 961 if (!miss->is_unused()) { |
997 Label success; | 962 Label success; |
998 __ b(&success); | 963 __ B(&success); |
| 964 |
999 GenerateRestoreName(masm(), miss, name); | 965 GenerateRestoreName(masm(), miss, name); |
1000 TailCallBuiltin(masm(), MissBuiltin(kind())); | 966 TailCallBuiltin(masm(), MissBuiltin(kind())); |
1001 __ bind(&success); | 967 |
| 968 __ Bind(&success); |
1002 } | 969 } |
1003 } | 970 } |
1004 | 971 |
1005 | 972 |
1006 Register LoadStubCompiler::CallbackHandlerFrontend( | 973 Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type, |
1007 Handle<HeapType> type, | 974 Register object_reg, |
1008 Register object_reg, | 975 Handle<JSObject> holder, |
1009 Handle<JSObject> holder, | 976 Handle<Name> name, |
1010 Handle<Name> name, | 977 Handle<Object> callback) { |
1011 Handle<Object> callback) { | |
1012 Label miss; | 978 Label miss; |
1013 | 979 |
1014 Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); | 980 Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); |
1015 | 981 |
 | 982 // TODO(jbramley): HandlerFrontendHeader returns its result in scratch1(), so |
| 983 // we can't use it below, but that isn't very obvious. Is there a better way |
| 984 // of handling this? |
| 985 |
1016 if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { | 986 if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { |
1017 ASSERT(!reg.is(scratch2())); | 987 ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4())); |
1018 ASSERT(!reg.is(scratch3())); | |
1019 ASSERT(!reg.is(scratch4())); | |
1020 | 988 |
1021 // Load the properties dictionary. | 989 // Load the properties dictionary. |
1022 Register dictionary = scratch4(); | 990 Register dictionary = scratch4(); |
1023 __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); | 991 __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); |
1024 | 992 |
1025 // Probe the dictionary. | 993 // Probe the dictionary. |
1026 Label probe_done; | 994 Label probe_done; |
1027 NameDictionaryLookupStub::GeneratePositiveLookup(masm(), | 995 NameDictionaryLookupStub::GeneratePositiveLookup(masm(), |
1028 &miss, | 996 &miss, |
1029 &probe_done, | 997 &probe_done, |
1030 dictionary, | 998 dictionary, |
1031 this->name(), | 999 this->name(), |
1032 scratch2(), | 1000 scratch2(), |
1033 scratch3()); | 1001 scratch3()); |
1034 __ bind(&probe_done); | 1002 __ Bind(&probe_done); |
1035 | 1003 |
1036 // If probing finds an entry in the dictionary, scratch3 contains the | 1004 // If probing finds an entry in the dictionary, scratch3 contains the |
1037 // pointer into the dictionary. Check that the value is the callback. | 1005 // pointer into the dictionary. Check that the value is the callback. |
1038 Register pointer = scratch3(); | 1006 Register pointer = scratch3(); |
1039 const int kElementsStartOffset = NameDictionary::kHeaderSize + | 1007 const int kElementsStartOffset = NameDictionary::kHeaderSize + |
1040 NameDictionary::kElementsStartIndex * kPointerSize; | 1008 NameDictionary::kElementsStartIndex * kPointerSize; |
1041 const int kValueOffset = kElementsStartOffset + kPointerSize; | 1009 const int kValueOffset = kElementsStartOffset + kPointerSize; |
1042 __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); | 1010 __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); |
1043 __ cmp(scratch2(), Operand(callback)); | 1011 __ Cmp(scratch2(), Operand(callback)); |
1044 __ b(ne, &miss); | 1012 __ B(ne, &miss); |
1045 } | 1013 } |
1046 | 1014 |
1047 HandlerFrontendFooter(name, &miss); | 1015 HandlerFrontendFooter(name, &miss); |
1048 return reg; | 1016 return reg; |
1049 } | 1017 } |
1050 | 1018 |
1051 | 1019 |
1052 void LoadStubCompiler::GenerateLoadField(Register reg, | 1020 void LoadStubCompiler::GenerateLoadField(Register reg, |
1053 Handle<JSObject> holder, | 1021 Handle<JSObject> holder, |
1054 PropertyIndex field, | 1022 PropertyIndex field, |
1055 Representation representation) { | 1023 Representation representation) { |
1056 if (!reg.is(receiver())) __ mov(receiver(), reg); | 1024 __ Mov(receiver(), reg); |
1057 if (kind() == Code::LOAD_IC) { | 1025 if (kind() == Code::LOAD_IC) { |
1058 LoadFieldStub stub(field.is_inobject(holder), | 1026 LoadFieldStub stub(field.is_inobject(holder), |
1059 field.translate(holder), | 1027 field.translate(holder), |
1060 representation); | 1028 representation); |
1061 GenerateTailCall(masm(), stub.GetCode(isolate())); | 1029 GenerateTailCall(masm(), stub.GetCode(isolate())); |
1062 } else { | 1030 } else { |
1063 KeyedLoadFieldStub stub(field.is_inobject(holder), | 1031 KeyedLoadFieldStub stub(field.is_inobject(holder), |
1064 field.translate(holder), | 1032 field.translate(holder), |
1065 representation); | 1033 representation); |
1066 GenerateTailCall(masm(), stub.GetCode(isolate())); | 1034 GenerateTailCall(masm(), stub.GetCode(isolate())); |
1067 } | 1035 } |
1068 } | 1036 } |
1069 | 1037 |
1070 | 1038 |
1071 void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { | 1039 void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { |
1072 // Return the constant value. | 1040 // Return the constant value. |
1073 __ Move(r0, value); | 1041 __ LoadObject(x0, value); |
1074 __ Ret(); | 1042 __ Ret(); |
1075 } | 1043 } |
1076 | 1044 |
1077 | 1045 |
1078 void LoadStubCompiler::GenerateLoadCallback( | 1046 void LoadStubCompiler::GenerateLoadCallback( |
1079 const CallOptimization& call_optimization, | 1047 const CallOptimization& call_optimization, |
1080 Handle<Map> receiver_map) { | 1048 Handle<Map> receiver_map) { |
1081 GenerateFastApiCall( | 1049 GenerateFastApiCall( |
1082 masm(), call_optimization, receiver_map, | 1050 masm(), call_optimization, receiver_map, receiver(), scratch3(), 0, NULL); |
1083 receiver(), scratch3(), 0, NULL); | |
1084 } | 1051 } |
1085 | 1052 |
1086 | 1053 |
1087 void LoadStubCompiler::GenerateLoadCallback( | 1054 void LoadStubCompiler::GenerateLoadCallback( |
1088 Register reg, | 1055 Register reg, |
1089 Handle<ExecutableAccessorInfo> callback) { | 1056 Handle<ExecutableAccessorInfo> callback) { |
1090 // Build AccessorInfo::args_ list on the stack and push property name below | 1057 ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg)); |
1091 // the exit frame to make GC aware of them and store pointers to them. | 1058 |
| 1059 // Build ExecutableAccessorInfo::args_ list on the stack and push property |
| 1060 // name below the exit frame to make GC aware of them and store pointers to |
| 1061 // them. |
1092 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); | 1062 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); |
1093 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); | 1063 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); |
1094 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); | 1064 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); |
1095 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); | 1065 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); |
1096 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); | 1066 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); |
1097 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); | 1067 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); |
1098 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); | 1068 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); |
1099 ASSERT(!scratch2().is(reg)); | 1069 |
1100 ASSERT(!scratch3().is(reg)); | 1070 __ Push(receiver()); |
1101 ASSERT(!scratch4().is(reg)); | 1071 |
1102 __ push(receiver()); | |
1103 if (heap()->InNewSpace(callback->data())) { | 1072 if (heap()->InNewSpace(callback->data())) { |
1104 __ Move(scratch3(), callback); | 1073 __ Mov(scratch3(), Operand(callback)); |
1105 __ ldr(scratch3(), FieldMemOperand(scratch3(), | 1074 __ Ldr(scratch3(), FieldMemOperand(scratch3(), |
1106 ExecutableAccessorInfo::kDataOffset)); | 1075 ExecutableAccessorInfo::kDataOffset)); |
1107 } else { | 1076 } else { |
1108 __ Move(scratch3(), Handle<Object>(callback->data(), isolate())); | 1077 __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate()))); |
1109 } | 1078 } |
1110 __ push(scratch3()); | 1079 // TODO(jbramley): Find another scratch register and combine the pushes |
1111 __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); | 1080 // together. Can we use scratch1() here? |
1112 __ mov(scratch4(), scratch3()); | 1081 __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex); |
1113 __ Push(scratch3(), scratch4()); | 1082 __ Push(scratch3(), scratch4()); |
1114 __ mov(scratch4(), | 1083 __ Mov(scratch3(), Operand(ExternalReference::isolate_address(isolate()))); |
1115 Operand(ExternalReference::isolate_address(isolate()))); | 1084 __ Push(scratch4(), scratch3(), reg, name()); |
1116 __ Push(scratch4(), reg); | |
1117 __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_ | |
1118 __ push(name()); | |
1119 | 1085 |
1120 // Abi for CallApiGetter | 1086 Register args_addr = scratch2(); |
1121 Register getter_address_reg = r2; | 1087 __ Add(args_addr, __ StackPointer(), kPointerSize); |
1122 | 1088 |
| 1089 // Stack at this point: |
| 1090 // sp[40] callback data |
| 1091 // sp[32] undefined |
| 1092 // sp[24] undefined |
| 1093 // sp[16] isolate |
| 1094 // args_addr -> sp[8] reg |
| 1095 // sp[0] name |
| 1096 |
| 1097 // Abi for CallApiGetter. |
| 1098 Register getter_address_reg = x2; |
| 1099 |
| 1100 // Set up the call. |
1123 Address getter_address = v8::ToCData<Address>(callback->getter()); | 1101 Address getter_address = v8::ToCData<Address>(callback->getter()); |
1124 ApiFunction fun(getter_address); | 1102 ApiFunction fun(getter_address); |
1125 ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; | 1103 ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; |
1126 ExternalReference ref = ExternalReference(&fun, type, isolate()); | 1104 ExternalReference ref = ExternalReference(&fun, type, isolate()); |
1127 __ mov(getter_address_reg, Operand(ref)); | 1105 __ Mov(getter_address_reg, Operand(ref)); |
1128 | 1106 |
1129 CallApiGetterStub stub; | 1107 CallApiGetterStub stub; |
1130 __ TailCallStub(&stub); | 1108 __ TailCallStub(&stub); |
1131 } | 1109 } |
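The stack-layout comment above follows directly from the six STATIC_ASSERTed indices and the 8-byte A64 slot size: args_ entry i sits (i + 1) * kPointerSize above the name slot at sp[0]. A quick arithmetic check, with the constants mirrored from the asserts (not taken from V8 headers):

    #include <cstdio>

    enum {  // mirrored from the STATIC_ASSERTs above
      kHolderIndex = 0,
      kIsolateIndex = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueOffset = 3,
      kDataIndex = 4,
      kThisIndex = 5,
      kArgsLength = 6,
    };

    int main() {
      const int kPointerSize = 8;  // A64 slot size
      for (int i = 0; i < kArgsLength; i++) {
        // name is at sp[0], so args_[i] lives at sp[(i + 1) * 8]
        std::printf("args_[%d] -> sp[%d]\n", i, (i + 1) * kPointerSize);
      }
      return 0;
    }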
1132 | 1110 |
1133 | 1111 |
1134 void LoadStubCompiler::GenerateLoadInterceptor( | 1112 void LoadStubCompiler::GenerateLoadInterceptor( |
1135 Register holder_reg, | 1113 Register holder_reg, |
1136 Handle<Object> object, | 1114 Handle<Object> object, |
1137 Handle<JSObject> interceptor_holder, | 1115 Handle<JSObject> interceptor_holder, |
1138 LookupResult* lookup, | 1116 LookupResult* lookup, |
1139 Handle<Name> name) { | 1117 Handle<Name> name) { |
| 1118 ASSERT(!AreAliased(receiver(), this->name(), |
| 1119 scratch1(), scratch2(), scratch3())); |
1140 ASSERT(interceptor_holder->HasNamedInterceptor()); | 1120 ASSERT(interceptor_holder->HasNamedInterceptor()); |
1141 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); | 1121 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); |
1142 | 1122 |
1143 // So far the most popular follow-ups for interceptor loads are FIELD | 1123 // So far the most popular follow-ups for interceptor loads are FIELD |
1144 // and CALLBACKS, so inline only them; other cases may be added | 1124 // and CALLBACKS, so inline only them; other cases may be added later. |
1145 // later. | |
1146 bool compile_followup_inline = false; | 1125 bool compile_followup_inline = false; |
1147 if (lookup->IsFound() && lookup->IsCacheable()) { | 1126 if (lookup->IsFound() && lookup->IsCacheable()) { |
1148 if (lookup->IsField()) { | 1127 if (lookup->IsField()) { |
1149 compile_followup_inline = true; | 1128 compile_followup_inline = true; |
1150 } else if (lookup->type() == CALLBACKS && | 1129 } else if (lookup->type() == CALLBACKS && |
1151 lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { | 1130 lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { |
1152 ExecutableAccessorInfo* callback = | 1131 ExecutableAccessorInfo* callback = |
1153 ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); | 1132 ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); |
1154 compile_followup_inline = callback->getter() != NULL && | 1133 compile_followup_inline = callback->getter() != NULL && |
1155 callback->IsCompatibleReceiver(*object); | 1134 callback->IsCompatibleReceiver(*object); |
1156 } | 1135 } |
1157 } | 1136 } |
1158 | 1137 |
1159 if (compile_followup_inline) { | 1138 if (compile_followup_inline) { |
1160 // Compile the interceptor call, followed by inline code to load the | 1139 // Compile the interceptor call, followed by inline code to load the |
1161 // property from further up the prototype chain if the call fails. | 1140 // property from further up the prototype chain if the call fails. |
1162 // Check that the maps haven't changed. | 1141 // Check that the maps haven't changed. |
1163 ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); | 1142 ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); |
1164 | 1143 |
1165 // Preserve the receiver register explicitly whenever it is different from | 1144 // Preserve the receiver register explicitly whenever it is different from |
1166 // the holder and it is needed should the interceptor return without any | 1145 // the holder and it is needed should the interceptor return without any |
1167 // result. The CALLBACKS case needs the receiver to be passed into C++ code; | 1146 // result. The CALLBACKS case needs the receiver to be passed into C++ code; |
1168 // the FIELD case might cause a miss during the prototype check. | 1147 // the FIELD case might cause a miss during the prototype check. |
1169 bool must_perform_prototype_check = *interceptor_holder != lookup->holder(); | 1148 bool must_perform_prototype_check = *interceptor_holder != lookup->holder(); |
1170 bool must_preserve_receiver_reg = !receiver().is(holder_reg) && | 1149 bool must_preserve_receiver_reg = !receiver().Is(holder_reg) && |
1171 (lookup->type() == CALLBACKS || must_perform_prototype_check); | 1150 (lookup->type() == CALLBACKS || must_perform_prototype_check); |
1172 | 1151 |
1173 // Save necessary data before invoking an interceptor. | 1152 // Save necessary data before invoking an interceptor. |
1174 // Requires a frame to make GC aware of pushed pointers. | 1153 // Requires a frame to make GC aware of pushed pointers. |
1175 { | 1154 { |
1176 FrameScope frame_scope(masm(), StackFrame::INTERNAL); | 1155 FrameScope frame_scope(masm(), StackFrame::INTERNAL); |
1177 if (must_preserve_receiver_reg) { | 1156 if (must_preserve_receiver_reg) { |
1178 __ Push(receiver(), holder_reg, this->name()); | 1157 __ Push(receiver(), holder_reg, this->name()); |
1179 } else { | 1158 } else { |
1180 __ Push(holder_reg, this->name()); | 1159 __ Push(holder_reg, this->name()); |
1181 } | 1160 } |
1182 // Invoke an interceptor. Note: map checks from receiver to | 1161 // Invoke an interceptor. Note: map checks from receiver to |
1183 // interceptor's holder have been compiled before (see a caller | 1162 // interceptor's holder have been compiled before (see a caller |
1184 // of this method). | 1163 // of this method). |
1185 CompileCallLoadPropertyWithInterceptor( | 1164 CompileCallLoadPropertyWithInterceptor( |
1186 masm(), receiver(), holder_reg, this->name(), interceptor_holder, | 1165 masm(), receiver(), holder_reg, this->name(), interceptor_holder, |
1187 IC::kLoadPropertyWithInterceptorOnly); | 1166 IC::kLoadPropertyWithInterceptorOnly); |
1188 | 1167 |
1189 // Check if the interceptor provided a value for the property. If so, | 1168 // Check if the interceptor provided a value for the property. If so, |
1190 // return immediately. | 1169 // return immediately. |
1191 Label interceptor_failed; | 1170 Label interceptor_failed; |
1192 __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex); | 1171 __ JumpIfRoot(x0, |
1193 __ cmp(r0, scratch1()); | 1172 Heap::kNoInterceptorResultSentinelRootIndex, |
1194 __ b(eq, &interceptor_failed); | 1173 &interceptor_failed); |
1195 frame_scope.GenerateLeaveFrame(); | 1174 frame_scope.GenerateLeaveFrame(); |
1196 __ Ret(); | 1175 __ Ret(); |
1197 | 1176 |
1198 __ bind(&interceptor_failed); | 1177 __ Bind(&interceptor_failed); |
1199 __ pop(this->name()); | |
1200 __ pop(holder_reg); | |
1201 if (must_preserve_receiver_reg) { | 1178 if (must_preserve_receiver_reg) { |
1202 __ pop(receiver()); | 1179 __ Pop(this->name(), holder_reg, receiver()); |
| 1180 } else { |
| 1181 __ Pop(this->name(), holder_reg); |
1203 } | 1182 } |
1204 // Leave the internal frame. | 1183 // Leave the internal frame. |
1205 } | 1184 } |
1206 | |
1207 GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); | 1185 GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); |
1208 } else { // !compile_followup_inline | 1186 } else { // !compile_followup_inline |
1209 // Call the runtime system to load the interceptor. | 1187 // Call the runtime system to load the interceptor. |
1210 // Check that the maps haven't changed. | 1188 // Check that the maps haven't changed. |
1211 PushInterceptorArguments(masm(), receiver(), holder_reg, | 1189 PushInterceptorArguments( |
1212 this->name(), interceptor_holder); | 1190 masm(), receiver(), holder_reg, this->name(), interceptor_holder); |
1213 | 1191 |
1214 ExternalReference ref = | 1192 ExternalReference ref = |
1215 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), | 1193 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), |
1216 isolate()); | 1194 isolate()); |
1217 __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1); | 1195 __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1); |
1218 } | 1196 } |
1219 } | 1197 } |
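In the inline path above, the interceptor call and the fallback load compose as: call the interceptor; if it returns the dedicated no-result sentinel, restore the saved registers and load the property the ordinary way. A sketch of that shape, with plain functions standing in for generated code:

    #include <cstdio>

    // The sentinel models Heap::kNoInterceptorResultSentinelRootIndex: a
    // value the interceptor returns to say "I have nothing for this name".
    static const int kNoInterceptorResult = -1;

    int LoadWithInterceptor(int (*interceptor)(), int (*post_load)()) {
      int result = interceptor();
      if (result != kNoInterceptorResult) return result;  // Ret()
      return post_load();  // GenerateLoadPostInterceptor(...)
    }

    int main() {
      auto declined = [] { return kNoInterceptorResult; };
      auto produced = [] { return 7; };
      std::printf("%d %d\n",
                  LoadWithInterceptor(declined, [] { return 42; }),
                  LoadWithInterceptor(produced, [] { return 42; }));
      return 0;
    }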
1220 | 1198 |
1221 | 1199 |
1222 void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { | 1200 void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { |
1223 Label success; | 1201 Label success; |
1224 // Check that the object is a boolean. | 1202 // Check that the object is a boolean. |
1225 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 1203 // TODO(all): Optimize this like LCodeGen::DoDeferredTaggedToI. |
1226 __ cmp(object, ip); | 1204 __ JumpIfRoot(object, Heap::kTrueValueRootIndex, &success); |
1227 __ b(eq, &success); | 1205 __ JumpIfNotRoot(object, Heap::kFalseValueRootIndex, miss); |
1228 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 1206 __ Bind(&success); |
1229 __ cmp(object, ip); | |
1230 __ b(ne, miss); | |
1231 __ bind(&success); | |
1232 } | 1207 } |
1233 | 1208 |
1234 | 1209 |
1235 Handle<Code> StoreStubCompiler::CompileStoreCallback( | 1210 Handle<Code> StoreStubCompiler::CompileStoreCallback( |
1236 Handle<JSObject> object, | 1211 Handle<JSObject> object, |
1237 Handle<JSObject> holder, | 1212 Handle<JSObject> holder, |
1238 Handle<Name> name, | 1213 Handle<Name> name, |
1239 Handle<ExecutableAccessorInfo> callback) { | 1214 Handle<ExecutableAccessorInfo> callback) { |
| 1215 ASM_LOCATION("StoreStubCompiler::CompileStoreCallback"); |
1240 Register holder_reg = HandlerFrontend( | 1216 Register holder_reg = HandlerFrontend( |
1241 IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); | 1217 IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); |
1242 | 1218 |
1243 // Stub never generated for non-global objects that require access checks. | 1219 // Stub never generated for non-global objects that require access checks. |
1244 ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); | 1220 ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); |
1245 | 1221 |
1246 __ push(receiver()); // receiver | 1222 // TODO(jbramley): Make Push take more than four arguments and combine these |
1247 __ push(holder_reg); | 1223 // two calls. |
1248 __ mov(ip, Operand(callback)); // callback info | 1224 __ Push(receiver(), holder_reg); |
1249 __ push(ip); | 1225 __ Mov(scratch1(), Operand(callback)); |
1250 __ mov(ip, Operand(name)); | 1226 __ Mov(scratch2(), Operand(name)); |
1251 __ Push(ip, value()); | 1227 __ Push(scratch1(), scratch2(), value()); |
1252 | 1228 |
1253 // Do tail-call to the runtime system. | 1229 // Do tail-call to the runtime system. |
1254 ExternalReference store_callback_property = | 1230 ExternalReference store_callback_property = |
1255 ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); | 1231 ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); |
1256 __ TailCallExternalReference(store_callback_property, 5, 1); | 1232 __ TailCallExternalReference(store_callback_property, 5, 1); |
1257 | 1233 |
1258 // Return the generated code. | 1234 // Return the generated code. |
1259 return GetCode(kind(), Code::FAST, name); | 1235 return GetCode(kind(), Code::FAST, name); |
1260 } | 1236 } |
1261 | 1237 |
1262 | 1238 |
1263 Handle<Code> StoreStubCompiler::CompileStoreCallback( | |
1264 Handle<JSObject> object, | |
1265 Handle<JSObject> holder, | |
1266 Handle<Name> name, | |
1267 const CallOptimization& call_optimization) { | |
1268 HandlerFrontend(IC::CurrentTypeOf(object, isolate()), | |
1269 receiver(), holder, name); | |
1270 | |
1271 Register values[] = { value() }; | |
1272 GenerateFastApiCall( | |
1273 masm(), call_optimization, handle(object->map()), | |
1274 receiver(), scratch3(), 1, values); | |
1275 | |
1276 // Return the generated code. | |
1277 return GetCode(kind(), Code::FAST, name); | |
1278 } | |
1279 | |
1280 | |
1281 #undef __ | 1239 #undef __ |
1282 #define __ ACCESS_MASM(masm) | 1240 #define __ ACCESS_MASM(masm) |
1283 | 1241 |
1284 | 1242 |
1285 void StoreStubCompiler::GenerateStoreViaSetter( | 1243 void StoreStubCompiler::GenerateStoreViaSetter( |
1286 MacroAssembler* masm, | 1244 MacroAssembler* masm, |
1287 Handle<HeapType> type, | 1245 Handle<HeapType> type, |
1288 Handle<JSFunction> setter) { | 1246 Handle<JSFunction> setter) { |
1289 // ----------- S t a t e ------------- | 1247 // ----------- S t a t e ------------- |
1290 // -- r0 : value | 1248 // -- x0 : value |
1291 // -- r1 : receiver | 1249 // -- x1 : receiver |
1292 // -- r2 : name | 1250 // -- x2 : name |
1293 // -- lr : return address | 1251 // -- lr : return address |
1294 // ----------------------------------- | 1252 // ----------------------------------- |
| 1253 Register value = x0; |
| 1254 Register receiver = x1; |
| 1255 Label miss; |
| 1256 |
1295 { | 1257 { |
1296 FrameScope scope(masm, StackFrame::INTERNAL); | 1258 FrameScope scope(masm, StackFrame::INTERNAL); |
1297 Register receiver = r1; | |
1298 Register value = r0; | |
1299 | 1259 |
1300 // Save value register, so we can restore it later. | 1260 // Save value register, so we can restore it later. |
1301 __ push(value); | 1261 __ Push(value); |
1302 | 1262 |
1303 if (!setter.is_null()) { | 1263 if (!setter.is_null()) { |
1304 // Call the JavaScript setter with receiver and value on the stack. | 1264 // Call the JavaScript setter with receiver and value on the stack. |
1305 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { | 1265 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
1306 // Swap in the global receiver. | 1266 // Swap in the global receiver. |
1307 __ ldr(receiver, | 1267 __ Ldr(receiver, |
1308 FieldMemOperand( | 1268 FieldMemOperand( |
1309 receiver, JSGlobalObject::kGlobalReceiverOffset)); | 1269 receiver, JSGlobalObject::kGlobalReceiverOffset)); |
1310 } | 1270 } |
1311 __ Push(receiver, value); | 1271 __ Push(receiver, value); |
1312 ParameterCount actual(1); | 1272 ParameterCount actual(1); |
1313 ParameterCount expected(setter); | 1273 ParameterCount expected(setter); |
1314 __ InvokeFunction(setter, expected, actual, | 1274 __ InvokeFunction(setter, expected, actual, |
1315 CALL_FUNCTION, NullCallWrapper()); | 1275 CALL_FUNCTION, NullCallWrapper()); |
1316 } else { | 1276 } else { |
1317 // If we generate a global code snippet for deoptimization only, remember | 1277 // If we generate a global code snippet for deoptimization only, remember |
1318 // the place to continue after deoptimization. | 1278 // the place to continue after deoptimization. |
1319 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); | 1279 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); |
1320 } | 1280 } |
1321 | 1281 |
1322 // We have to return the passed value, not the return value of the setter. | 1282 // We have to return the passed value, not the return value of the setter. |
1323 __ pop(r0); | 1283 __ Pop(value); |
1324 | 1284 |
1325 // Restore context register. | 1285 // Restore context register. |
1326 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1286 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1327 } | 1287 } |
1328 __ Ret(); | 1288 __ Ret(); |
1329 } | 1289 } |
1330 | 1290 |
1331 | 1291 |
1332 #undef __ | 1292 #undef __ |
1333 #define __ ACCESS_MASM(masm()) | 1293 #define __ ACCESS_MASM(masm()) |
1334 | 1294 |
1335 | 1295 |
1336 Handle<Code> StoreStubCompiler::CompileStoreInterceptor( | 1296 Handle<Code> StoreStubCompiler::CompileStoreInterceptor( |
1337 Handle<JSObject> object, | 1297 Handle<JSObject> object, |
1338 Handle<Name> name) { | 1298 Handle<Name> name) { |
1339 Label miss; | 1299 Label miss; |
1340 | 1300 |
| 1301 ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor"); |
| 1302 |
1341 // Check that the map of the object hasn't changed. | 1303 // Check that the map of the object hasn't changed. |
1342 __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss, | 1304 __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss, |
1343 DO_SMI_CHECK); | 1305 DO_SMI_CHECK); |
1344 | 1306 |
1345 // Perform global security token check if needed. | 1307 // Perform global security token check if needed. |
1346 if (object->IsJSGlobalProxy()) { | 1308 if (object->IsJSGlobalProxy()) { |
1347 __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); | 1309 __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); |
1348 } | 1310 } |
1349 | 1311 |
1350 // Stub is never generated for non-global objects that require access | 1312 // Stub is never generated for non-global objects that require access checks. |
1351 // checks. | |
1352 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); | 1313 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
1353 | 1314 |
1354 __ Push(receiver(), this->name(), value()); | 1315 __ Push(receiver(), this->name(), value()); |
1355 | 1316 |
1356 // Do tail-call to the runtime system. | 1317 // Do tail-call to the runtime system. |
1357 ExternalReference store_ic_property = | 1318 ExternalReference store_ic_property = |
1358 ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); | 1319 ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); |
1359 __ TailCallExternalReference(store_ic_property, 3, 1); | 1320 __ TailCallExternalReference(store_ic_property, 3, 1); |
1360 | 1321 |
1361 // Handle store cache miss. | 1322 // Handle store cache miss. |
1362 __ bind(&miss); | 1323 __ Bind(&miss); |
1363 TailCallBuiltin(masm(), MissBuiltin(kind())); | 1324 TailCallBuiltin(masm(), MissBuiltin(kind())); |
1364 | 1325 |
1365 // Return the generated code. | 1326 // Return the generated code. |
1366 return GetCode(kind(), Code::FAST, name); | 1327 return GetCode(kind(), Code::FAST, name); |
1367 } | 1328 } |
1368 | 1329 |
1369 | 1330 |
1370 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, | 1331 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, |
1371 Handle<JSObject> last, | 1332 Handle<JSObject> last, |
1372 Handle<Name> name) { | 1333 Handle<Name> name) { |
1373 NonexistentHandlerFrontend(type, last, name); | 1334 NonexistentHandlerFrontend(type, last, name); |
1374 | 1335 |
1375 // Return undefined if maps of the full prototype chain are still the | 1336 // Return undefined if maps of the full prototype chain are still the |
1376 // same and no global property with this name contains a value. | 1337 // same and no global property with this name contains a value. |
1377 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); | 1338 __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
1378 __ Ret(); | 1339 __ Ret(); |
1379 | 1340 |
1380 // Return the generated code. | 1341 // Return the generated code. |
1381 return GetCode(kind(), Code::FAST, name); | 1342 return GetCode(kind(), Code::FAST, name); |
1382 } | 1343 } |
1383 | 1344 |
1384 | 1345 |
| 1346 // TODO(all): The so-called scratch registers are significant in some cases. For |
| 1347 // example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for |
 | 1348 // KeyedStoreStubCompiler::transition_map(). We should verify which registers are |
| 1349 // actually scratch registers, and which are important. For now, we use the same |
| 1350 // assignments as ARM to remain on the safe side. |
| 1351 |
1385 Register* LoadStubCompiler::registers() { | 1352 Register* LoadStubCompiler::registers() { |
1386 // receiver, name, scratch1, scratch2, scratch3, scratch4. | 1353 // receiver, name, scratch1, scratch2, scratch3, scratch4. |
1387 static Register registers[] = { r0, r2, r3, r1, r4, r5 }; | 1354 static Register registers[] = { x0, x2, x3, x1, x4, x5 }; |
1388 return registers; | 1355 return registers; |
1389 } | 1356 } |
1390 | 1357 |
1391 | 1358 |
1392 Register* KeyedLoadStubCompiler::registers() { | 1359 Register* KeyedLoadStubCompiler::registers() { |
1393 // receiver, name, scratch1, scratch2, scratch3, scratch4. | 1360 // receiver, name/key, scratch1, scratch2, scratch3, scratch4. |
1394 static Register registers[] = { r1, r0, r2, r3, r4, r5 }; | 1361 static Register registers[] = { x1, x0, x2, x3, x4, x5 }; |
1395 return registers; | 1362 return registers; |
1396 } | 1363 } |
1397 | 1364 |
1398 | 1365 |
1399 Register* StoreStubCompiler::registers() { | 1366 Register* StoreStubCompiler::registers() { |
1400 // receiver, name, value, scratch1, scratch2, scratch3. | 1367 // receiver, name, value, scratch1, scratch2, scratch3. |
1401 static Register registers[] = { r1, r2, r0, r3, r4, r5 }; | 1368 static Register registers[] = { x1, x2, x0, x3, x4, x5 }; |
1402 return registers; | 1369 return registers; |
1403 } | 1370 } |
1404 | 1371 |
1405 | 1372 |
1406 Register* KeyedStoreStubCompiler::registers() { | 1373 Register* KeyedStoreStubCompiler::registers() { |
1407 // receiver, name, value, scratch1, scratch2, scratch3. | 1374 // receiver, name, value, scratch1, scratch2, scratch3. |
1408 static Register registers[] = { r2, r1, r0, r3, r4, r5 }; | 1375 static Register registers[] = { x2, x1, x0, x3, x4, x5 }; |
1409 return registers; | 1376 return registers; |
1410 } | 1377 } |
1411 | 1378 |
1412 | 1379 |
1413 #undef __ | 1380 #undef __ |
1414 #define __ ACCESS_MASM(masm) | 1381 #define __ ACCESS_MASM(masm) |
1415 | 1382 |
1416 | |
1417 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, | 1383 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, |
1418 Handle<HeapType> type, | 1384 Handle<HeapType> type, |
1419 Register receiver, | 1385 Register receiver, |
1420 Handle<JSFunction> getter) { | 1386 Handle<JSFunction> getter) { |
1421 // ----------- S t a t e ------------- | |
1422 // -- r0 : receiver | |
1423 // -- r2 : name | |
1424 // -- lr : return address | |
1425 // ----------------------------------- | |
1426 { | 1387 { |
1427 FrameScope scope(masm, StackFrame::INTERNAL); | 1388 FrameScope scope(masm, StackFrame::INTERNAL); |
1428 | 1389 |
1429 if (!getter.is_null()) { | 1390 if (!getter.is_null()) { |
1430 // Call the JavaScript getter with the receiver on the stack. | 1391 // Call the JavaScript getter with the receiver on the stack. |
1431 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { | 1392 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
1432 // Swap in the global receiver. | 1393 // Swap in the global receiver. |
1433 __ ldr(receiver, | 1394 __ Ldr(receiver, |
1434 FieldMemOperand( | 1395 FieldMemOperand( |
1435 receiver, JSGlobalObject::kGlobalReceiverOffset)); | 1396 receiver, JSGlobalObject::kGlobalReceiverOffset)); |
1436 } | 1397 } |
1437 __ push(receiver); | 1398 __ Push(receiver); |
1438 ParameterCount actual(0); | 1399 ParameterCount actual(0); |
1439 ParameterCount expected(getter); | 1400 ParameterCount expected(getter); |
1440 __ InvokeFunction(getter, expected, actual, | 1401 __ InvokeFunction(getter, expected, actual, |
1441 CALL_FUNCTION, NullCallWrapper()); | 1402 CALL_FUNCTION, NullCallWrapper()); |
1442 } else { | 1403 } else { |
1443 // If we generate a global code snippet for deoptimization only, remember | 1404 // If we generate a global code snippet for deoptimization only, remember |
1444 // the place to continue after deoptimization. | 1405 // the place to continue after deoptimization. |
1445 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); | 1406 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); |
1446 } | 1407 } |
1447 | 1408 |
1448 // Restore context register. | 1409 // Restore context register. |
1449 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1410 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1450 } | 1411 } |
1451 __ Ret(); | 1412 __ Ret(); |
1452 } | 1413 } |
1453 | 1414 |
1454 | 1415 |
1455 #undef __ | 1416 #undef __ |
1456 #define __ ACCESS_MASM(masm()) | 1417 #define __ ACCESS_MASM(masm()) |
1457 | 1418 |
1458 | 1419 |
1459 Handle<Code> LoadStubCompiler::CompileLoadGlobal( | 1420 Handle<Code> LoadStubCompiler::CompileLoadGlobal( |
1460 Handle<HeapType> type, | 1421 Handle<HeapType> type, |
1461 Handle<GlobalObject> global, | 1422 Handle<GlobalObject> global, |
1462 Handle<PropertyCell> cell, | 1423 Handle<PropertyCell> cell, |
1463 Handle<Name> name, | 1424 Handle<Name> name, |
1464 bool is_dont_delete) { | 1425 bool is_dont_delete) { |
1465 Label miss; | 1426 Label miss; |
1466 HandlerFrontendHeader(type, receiver(), global, name, &miss); | 1427 HandlerFrontendHeader(type, receiver(), global, name, &miss); |
1467 | 1428 |
1468 // Get the value from the cell. | 1429 // Get the value from the cell. |
1469 __ mov(r3, Operand(cell)); | 1430 __ Mov(x3, Operand(cell)); |
1470 __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset)); | 1431 __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset)); |
1471 | 1432 |
1472 // Check for deleted property if property can actually be deleted. | 1433 // Check for deleted property if property can actually be deleted. |
1473 if (!is_dont_delete) { | 1434 if (!is_dont_delete) { |
1474 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 1435 __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss); |
1475 __ cmp(r4, ip); | |
1476 __ b(eq, &miss); | |
1477 } | 1436 } |
1478 | 1437 |
1479 Counters* counters = isolate()->counters(); | 1438 Counters* counters = isolate()->counters(); |
1480 __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); | 1439 __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3); |
1481 __ mov(r0, r4); | 1440 __ Mov(x0, x4); |
1482 __ Ret(); | 1441 __ Ret(); |
1483 | 1442 |
1484 HandlerFrontendFooter(name, &miss); | 1443 HandlerFrontendFooter(name, &miss); |
1485 | 1444 |
1486 // Return the generated code. | 1445 // Return the generated code. |
1487 return GetCode(kind(), Code::NORMAL, name); | 1446 return GetCode(kind(), Code::NORMAL, name); |
1488 } | 1447 } |
1489 | 1448 |
1490 | 1449 |
1491 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( | 1450 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( |
1492 TypeHandleList* types, | 1451 TypeHandleList* types, |
1493 CodeHandleList* handlers, | 1452 CodeHandleList* handlers, |
1494 Handle<Name> name, | 1453 Handle<Name> name, |
1495 Code::StubType type, | 1454 Code::StubType type, |
1496 IcCheckType check) { | 1455 IcCheckType check) { |
1497 Label miss; | 1456 Label miss; |
1498 | 1457 |
1499 if (check == PROPERTY && | 1458 if (check == PROPERTY && |
1500 (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { | 1459 (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { |
1501 __ cmp(this->name(), Operand(name)); | 1460 __ CompareAndBranch(this->name(), Operand(name), ne, &miss); |
1502 __ b(ne, &miss); | |
1503 } | 1461 } |
1504 | 1462 |
1505 Label number_case; | 1463 Label number_case; |
1506 Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; | 1464 Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; |
1507 __ JumpIfSmi(receiver(), smi_target); | 1465 __ JumpIfSmi(receiver(), smi_target); |
1508 | 1466 |
1509 Register map_reg = scratch1(); | 1467 Register map_reg = scratch1(); |
1510 | 1468 __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
1511 int receiver_count = types->length(); | 1469 int receiver_count = types->length(); |
1512 int number_of_handled_maps = 0; | 1470 int number_of_handled_maps = 0; |
1513 __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); | |
1514 for (int current = 0; current < receiver_count; ++current) { | 1471 for (int current = 0; current < receiver_count; ++current) { |
1515 Handle<HeapType> type = types->at(current); | 1472 Handle<HeapType> type = types->at(current); |
1516 Handle<Map> map = IC::TypeToMap(*type, isolate()); | 1473 Handle<Map> map = IC::TypeToMap(*type, isolate()); |
1517 if (!map->is_deprecated()) { | 1474 if (!map->is_deprecated()) { |
1518 number_of_handled_maps++; | 1475 number_of_handled_maps++; |
1519 __ mov(ip, Operand(map)); | 1476 Label try_next; |
1520 __ cmp(map_reg, ip); | 1477 __ Cmp(map_reg, Operand(map)); |
| 1478 __ B(ne, &try_next); |
1521 if (type->Is(HeapType::Number())) { | 1479 if (type->Is(HeapType::Number())) { |
1522 ASSERT(!number_case.is_unused()); | 1480 ASSERT(!number_case.is_unused()); |
1523 __ bind(&number_case); | 1481 __ Bind(&number_case); |
1524 } | 1482 } |
1525 __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq); | 1483 __ Jump(handlers->at(current), RelocInfo::CODE_TARGET); |
| 1484 __ Bind(&try_next); |
1526 } | 1485 } |
1527 } | 1486 } |
1528 ASSERT(number_of_handled_maps != 0); | 1487 ASSERT(number_of_handled_maps != 0); |
1529 | 1488 |
1530 __ bind(&miss); | 1489 __ Bind(&miss); |
1531 TailCallBuiltin(masm(), MissBuiltin(kind())); | 1490 TailCallBuiltin(masm(), MissBuiltin(kind())); |
1532 | 1491 |
1533 // Return the generated code. | 1492 // Return the generated code. |
1534 InlineCacheState state = | 1493 InlineCacheState state = |
1535 number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC; | 1494 (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC; |
1536 return GetICCode(kind(), type, name, state); | 1495 return GetICCode(kind(), type, name, state); |
1537 } | 1496 } |
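The dispatch this emits is a linear chain of map compares, one per undeprecated (type, handler) pair, falling through to the miss builtin when nothing matches. Conceptually, with function pointers standing in for the compiled handler code objects:

    #include <cstdio>

    using Handler = void (*)();
    struct Case { const void* map; Handler handler; };

    // One compare-and-jump per case, then the miss builtin: the shape of
    // the code CompilePolymorphicIC lays down.
    void Dispatch(const void* receiver_map, const Case* cases, int n,
                  Handler miss) {
      for (int i = 0; i < n; i++) {
        if (receiver_map == cases[i].map) {
          cases[i].handler();  // __ Jump(handlers->at(current), ...)
          return;
        }
      }
      miss();  // TailCallBuiltin(masm(), MissBuiltin(kind()))
    }

    static void HandlerA() { std::puts("handler A"); }
    static void Miss() { std::puts("miss"); }

    int main() {
      int map_a = 0;
      Case cases[] = {{&map_a, HandlerA}};
      Dispatch(&map_a, cases, 1, Miss);   // handler A
      Dispatch(nullptr, cases, 1, Miss);  // miss
      return 0;
    }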
1538 | 1497 |
1539 | 1498 |
1540 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( | 1499 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( |
1541 MapHandleList* receiver_maps, | 1500 MapHandleList* receiver_maps, |
1542 CodeHandleList* handler_stubs, | 1501 CodeHandleList* handler_stubs, |
1543 MapHandleList* transitioned_maps) { | 1502 MapHandleList* transitioned_maps) { |
1544 Label miss; | 1503 Label miss; |
| 1504 |
| 1505 ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic"); |
| 1506 |
1545 __ JumpIfSmi(receiver(), &miss); | 1507 __ JumpIfSmi(receiver(), &miss); |
1546 | 1508 |
1547 int receiver_count = receiver_maps->length(); | 1509 int receiver_count = receiver_maps->length(); |
1548 __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); | 1510 __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
1549 for (int i = 0; i < receiver_count; ++i) { | 1511 for (int i = 0; i < receiver_count; i++) { |
1550 __ mov(ip, Operand(receiver_maps->at(i))); | 1512 __ Cmp(scratch1(), Operand(receiver_maps->at(i))); |
1551 __ cmp(scratch1(), ip); | 1513 |
1552 if (transitioned_maps->at(i).is_null()) { | 1514 Label skip; |
1553 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); | 1515 __ B(&skip, ne); |
1554 } else { | 1516 if (!transitioned_maps->at(i).is_null()) { |
1555 Label next_map; | 1517 // This argument is used by the handler stub. For example, see |
1556 __ b(ne, &next_map); | 1518 // ElementsTransitionGenerator::GenerateMapChangeElementsTransition. |
1557 __ mov(transition_map(), Operand(transitioned_maps->at(i))); | 1519 __ Mov(transition_map(), Operand(transitioned_maps->at(i))); |
1558 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); | |
1559 __ bind(&next_map); | |
1560 } | 1520 } |
| 1521 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
| 1522 __ Bind(&skip); |
1561 } | 1523 } |
1562 | 1524 |
1563 __ bind(&miss); | 1525 __ Bind(&miss); |
1564 TailCallBuiltin(masm(), MissBuiltin(kind())); | 1526 TailCallBuiltin(masm(), MissBuiltin(kind())); |
1565 | 1527 |
1566 // Return the generated code. | |
1567 return GetICCode( | 1528 return GetICCode( |
1568 kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); | 1529 kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); |
1569 } | 1530 } |
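
The keyed-store variant above adds one wrinkle: a matching map may require an elements-kind transition, in which case the transition map is materialized in transition_map() before the jump so the handler can install it on the receiver. A self-contained sketch of that shape (Map, Handler, StoreCase, and Miss are again hypothetical stand-ins):

    #include <cstdio>
    #include <cstddef>
    #include <vector>

    struct Map { int id; };             // hypothetical stand-in
    typedef void (*Handler)();          // hypothetical stand-in

    struct StoreCase {
      const Map* map;                   // receiver map to match
      const Map* transition;            // NULL when no transition is needed
      Handler handler;
    };

    static void Miss() { std::printf("tail-call miss builtin\n"); }

    static void KeyedStoreDispatch(const Map* receiver_map,
                                   const std::vector<StoreCase>& cases) {
      for (size_t i = 0; i < cases.size(); i++) {
        if (cases[i].map != receiver_map) continue;  // B(&skip, ne)
        if (cases[i].transition != NULL) {
          // Mov(transition_map(), Operand(transitioned_maps->at(i))): the
          // handler reads this register to install the new map.
        }
        cases[i].handler();             // Jump(handler_stubs->at(i), ...)
        return;
      }
      Miss();
    }
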
1570 | 1531 |
1571 | 1532 |
| 1533 Handle<Code> StoreStubCompiler::CompileStoreCallback( |
| 1534 Handle<JSObject> object, |
| 1535 Handle<JSObject> holder, |
| 1536 Handle<Name> name, |
| 1537 const CallOptimization& call_optimization) { |
| 1538 HandlerFrontend(IC::CurrentTypeOf(object, isolate()), |
| 1539 receiver(), holder, name); |
| 1540 |
| 1541 Register values[] = { value() }; |
| 1542 GenerateFastApiCall(masm(), call_optimization, handle(object->map()), |
| 1543 receiver(), scratch3(), 1, values); |
| 1544 |
| 1545 // Return the generated code. |
| 1546 return GetCode(kind(), Code::FAST, name); |
| 1547 } |
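
CompileStoreCallback is new in this file: HandlerFrontend validates the receiver chain, then the stored value is handed to the API function as the single element of a register array. Roughly, in plain C++ (Object and ApiSetter are hypothetical stand-ins for illustration):

    struct Object { int tag; };         // hypothetical stand-in
    typedef void (*ApiSetter)(Object* receiver, Object* value);

    // The call passes exactly one extra argument, matching the one-element
    // Register values[] = { value() } array handed to GenerateFastApiCall.
    static void StoreViaCallback(Object* receiver, Object* value,
                                 ApiSetter setter) {
      // ... receiver/holder map checks elided (HandlerFrontend) ...
      setter(receiver, value);          // GenerateFastApiCall, argc == 1
    }
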
| 1548 |
| 1549 |
1572 #undef __ | 1550 #undef __ |
1573 #define __ ACCESS_MASM(masm) | 1551 #define __ ACCESS_MASM(masm) |
1574 | 1552 |
1575 | |
1576 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( | 1553 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( |
1577 MacroAssembler* masm) { | 1554 MacroAssembler* masm) { |
1578 // ---------- S t a t e -------------- | 1555 // ---------- S t a t e -------------- |
1579 // -- lr : return address | 1556 // -- lr : return address |
1580 // -- r0 : key | 1557 // -- x0 : key |
1581 // -- r1 : receiver | 1558 // -- x1 : receiver |
1582 // ----------------------------------- | 1559 // ----------------------------------- |
1583 Label slow, miss; | 1560 Label slow, miss; |
1584 | 1561 |
1585 Register key = r0; | 1562 Register result = x0; |
1586 Register receiver = r1; | 1563 Register key = x0; |
| 1564 Register receiver = x1; |
1587 | 1565 |
1588 __ UntagAndJumpIfNotSmi(r2, key, &miss); | 1566 __ JumpIfNotSmi(key, &miss); |
1589 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 1567 __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
1590 __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); | 1568 __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6); |
1591 __ Ret(); | 1569 __ Ret(); |
1592 | 1570 |
1593 __ bind(&slow); | 1571 __ Bind(&slow); |
1594 __ IncrementCounter( | 1572 __ IncrementCounter( |
1595 masm->isolate()->counters()->keyed_load_external_array_slow(), | 1573 masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3); |
1596 1, r2, r3); | |
1597 | |
1598 // ---------- S t a t e -------------- | |
1599 // -- lr : return address | |
1600 // -- r0 : key | |
1601 // -- r1 : receiver | |
1602 // ----------------------------------- | |
1603 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); | 1574 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); |
1604 | 1575 |
1605 // Miss case, call the runtime. | 1576 // Miss case, call the runtime. |
1606 __ bind(&miss); | 1577 __ Bind(&miss); |
1607 | |
1608 // ---------- S t a t e -------------- | |
1609 // -- lr : return address | |
1610 // -- r0 : key | |
1611 // -- r1 : receiver | |
1612 // ----------------------------------- | |
1613 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); | 1578 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); |
1614 } | 1579 } |
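
LoadFromNumberDictionary probes the receiver's number dictionary inline and only reaches the slow label when the bounded probe sequence fails. A simplified model is below; the fixed probe count, linear probing, and the raw key standing in for V8's seeded integer hash are all simplifications for brevity.

    #include <stdint.h>
    #include <vector>

    struct Slot { uint32_t key; int value; bool used; };

    // Bounded inline probe; capacity is assumed to be a power of two so that
    // "& mask" wraps. Returning false corresponds to branching to &slow.
    static bool ProbeNumberDictionary(const std::vector<Slot>& table,
                                      uint32_t key, int* value_out) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      for (uint32_t probe = 0; probe < 4; probe++) {
        const Slot& slot = table[(key + probe) & mask];  // linear, for brevity
        if (slot.used && slot.key == key) {
          *value_out = slot.value;
          return true;                                   // Ret() with result
        }
      }
      return false;                                      // Bind(&slow) path
    }
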
1615 | 1580 |
1616 | 1581 |
1617 #undef __ | |
1618 | |
1619 } } // namespace v8::internal | 1582 } } // namespace v8::internal |
1620 | 1583 |
1621 #endif // V8_TARGET_ARCH_ARM | 1584 #endif // V8_TARGET_ARCH_A64 |