Chromium Code Reviews

Unified Diff: src/ic/arm64/ic-arm64.cc

Issue 483683005: Move IC code into a subdir and move ic-compilation related code from stub-cache into ic-compiler (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix BUILD.gn (created 6 years, 4 months ago)
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/v8.h"
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/arm64/assembler-arm64.h"
-#include "src/code-stubs.h"
 #include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
 #define __ ACCESS_MASM(masm)
 
 
 // "type" holds an instance type on entry and is not clobbered.
 // Generated code branch on "global_object" if type is any kind of global
 // JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
-                                            Register type,
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                             Label* global_object) {
   __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
   __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
   __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
   __ B(eq, global_object);
 }
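
Note: the Cmp/Ccmp chain folds three equality tests into a single branch. Each conditional compare only performs the next comparison while the previous test has failed; once a test matches, the ZFlag argument keeps the flags reading "equal", so the final B(eq) fires if any of the three types matched. A rough scalar equivalent, as a sketch with stand-in enum values (not V8's actual definitions):

    #include <cstdint>

    // Stand-in instance-type values, for illustration only.
    enum InstanceType : uint8_t {
      JS_GLOBAL_OBJECT_TYPE = 1,
      JS_BUILTINS_OBJECT_TYPE = 2,
      JS_GLOBAL_PROXY_TYPE = 3,
    };

    // What GenerateGlobalInstanceTypeCheck decides: branch to 'global_object'
    // when 'type' is any kind of global JS object.
    static bool IsGlobalObjectType(InstanceType type) {
      return type == JS_GLOBAL_OBJECT_TYPE ||
             type == JS_BUILTINS_OBJECT_TYPE ||
             type == JS_GLOBAL_PROXY_TYPE;
    }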
 
 
 // Helper function used from LoadIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
 //           label is done.
 // name:     Property name. It is not clobbered if a jump to the miss label is
 //           done
 // result:   Register for the result. It is only updated if a jump to the miss
 //           label is not done.
 // The scratch registers need to be different from elements, name and result.
 // The generated code assumes that the receiver has slow properties,
 // is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss,
-                                   Register elements,
-                                   Register name,
-                                   Register result,
-                                   Register scratch1,
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+                                   Register elements, Register name,
+                                   Register result, Register scratch1,
                                    Register scratch2) {
   DCHECK(!AreAliased(elements, name, scratch1, scratch2));
   DCHECK(!AreAliased(result, scratch1, scratch2));
 
   Label done;
 
   // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
 
   // If probing finds an entry check that the value is a normal property.
   __ Bind(&done);
 
-  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+  static const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
   static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
   __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
   __ B(ne, miss);
 
   // Get the value at the masked, scaled index and return.
   __ Ldr(result,
          FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
 }
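
Note: after the probe, scratch2 holds the dictionary address biased by the found entry's masked, scaled index, so fixed offsets reach the entry's value and details slots. Each entry is a (key, value, details) triple laid out after the table header, which is where the +1 and +2 pointer offsets come from. A compilable sketch of that offset arithmetic, with assumed header constants (V8's real values live in NameDictionary):

    #include <cassert>

    constexpr int kPointerSize = 8;                // arm64
    constexpr int kHeaderSize = 3 * kPointerSize;  // assumed header size
    constexpr int kElementsStartIndex = 3;         // assumed
    constexpr int kElementsStartOffset =
        kHeaderSize + kElementsStartIndex * kPointerSize;
    // (key, value, details) triples explain the +1/+2 pointer offsets:
    constexpr int kValueOffset = kElementsStartOffset + 1 * kPointerSize;
    constexpr int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;

    int main() {
      assert(kDetailsOffset - kValueOffset == kPointerSize);
      return 0;
    }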
 
 
 // Helper function used from StoreIC::GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
 //           label is done.
 // name:     Property name. It is not clobbered if a jump to the miss label is
 //           done
 // value:    The value to store (never clobbered).
 //
 // The generated code assumes that the receiver has slow properties,
 // is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
-                                    Label* miss,
-                                    Register elements,
-                                    Register name,
-                                    Register value,
-                                    Register scratch1,
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+                                    Register elements, Register name,
+                                    Register value, Register scratch1,
                                     Register scratch2) {
   DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
 
   Label done;
 
   // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
 
   // If probing finds an entry in the dictionary check that the value
   // is a normal property that is not read only.
   __ Bind(&done);
 
-  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+  static const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
   static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   static const int kTypeAndReadOnlyMask =
       PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY);
   __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
   __ Tst(scratch1, kTypeAndReadOnlyMask);
   __ B(ne, miss);
 
   // Store the value at the masked, scaled index and return.
   static const int kValueOffset = kElementsStartOffset + kPointerSize;
   __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
   __ Str(value, MemOperand(scratch2));
 
   // Update the write barrier. Make sure not to clobber the value.
   __ Mov(scratch1, value);
-  __ RecordWrite(
-      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs);
 }
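
Note: compared with the load path, the store additionally insists the property is writable: the details word must have a zero type field and a clear READ_ONLY attribute before the value slot is overwritten. A sketch of that combined mask test, with a stand-in bit layout (not V8's real PropertyDetails encoding):

    #include <cstdint>

    constexpr uint32_t kTypeFieldMask = 0x3;    // assumed: 0 == normal property
    constexpr uint32_t kReadOnlyBit = 1u << 3;  // assumed attribute bit

    // Mirrors the Tst/B(ne, miss) pair above: any set bit means "miss".
    static bool CanStoreNormalProperty(uint32_t details) {
      return (details & (kTypeFieldMask | kReadOnlyBit)) == 0;
    }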
 
 
 // Checks the receiver for special cases (value type, slow case bits).
 // Falls through for regular JS object and return the map of the
 // receiver in 'map_scratch' if the receiver is not a SMI.
 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                            Register receiver,
                                            Register map_scratch,
                                            Register scratch,
-                                           int interceptor_bit,
-                                           Label* slow) {
+                                           int interceptor_bit, Label* slow) {
   DCHECK(!AreAliased(map_scratch, scratch));
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(receiver, slow);
   // Get the map of the receiver.
   __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check bit field.
   __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
   __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
   __ Tbnz(scratch, interceptor_bit, slow);
(...skipping 20 matching lines...)
 //
 // elements - holds the elements of the receiver on exit.
 //
 // elements_map - holds the elements map on exit if the not_fast_array branch is
 //                taken. Otherwise, this is used as a scratch register.
 //
 // result - holds the result on exit if the load succeeded.
 //          Allowed to be the the same as 'receiver' or 'key'.
 //          Unchanged on bailout so 'receiver' and 'key' can be safely
 //          used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
-                                  Register receiver,
-                                  Register key,
-                                  Register elements,
-                                  Register elements_map,
-                                  Register scratch2,
-                                  Register result,
-                                  Label* not_fast_array,
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register elements_map, Register scratch2,
+                                  Register result, Label* not_fast_array,
                                   Label* slow) {
   DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
 
   // Check for fast array.
   __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   if (not_fast_array != NULL) {
     // Check that the object is in fast mode and writable.
     __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
     __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                      not_fast_array);
(...skipping 24 matching lines...)
   // preserved if we jump to 'slow'.
   __ Mov(result, scratch2);
 }
 
 
 // Checks whether a key is an array index string or a unique name.
 // Falls through if a key is a unique name.
 // The map of the key is returned in 'map_scratch'.
 // If the jump to 'index_string' is done the hash of the key is left
 // in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
-                                 Register key,
-                                 Register map_scratch,
-                                 Register hash_scratch,
-                                 Label* index_string,
-                                 Label* not_unique) {
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map_scratch, Register hash_scratch,
+                                 Label* index_string, Label* not_unique) {
   DCHECK(!AreAliased(key, map_scratch, hash_scratch));
 
   // Is the key a name?
   Label unique;
   __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                       not_unique, hi);
   STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
   __ B(eq, &unique);
 
   // Is the string an array index with cached numeric value?
   __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
-  __ TestAndBranchIfAllClear(hash_scratch,
-                             Name::kContainsCachedArrayIndexMask,
+  __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
                              index_string);
 
   // Is the string internalized? We know it's a string, so a single bit test is
   // enough.
   __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kInternalizedTag == 0);
   __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
 
   __ Bind(&unique);
   // Fall through if the key is a unique name.
 }
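
Note: the routine classifies a key with two bit tests on fields it has already loaded: a string whose hash field carries a cached array index is redirected to 'index_string', and only internalized strings fall through as unique names (kInternalizedTag == 0, per the STATIC_ASSERT). A sketch of those predicates with stand-in masks (not V8's real field layout):

    #include <cstdint>

    constexpr uint32_t kContainsCachedArrayIndexMask = 1u << 1;  // assumed
    constexpr uint32_t kIsNotInternalizedMask = 1u << 0;         // assumed

    // TestAndBranchIfAllClear branches when all mask bits are zero.
    static bool HasCachedArrayIndex(uint32_t hash_field) {
      return (hash_field & kContainsCachedArrayIndexMask) == 0;
    }

    // Internalized means the "not internalized" bit is clear.
    static bool IsInternalized(uint32_t instance_type) {
      return (instance_type & kIsNotInternalizedMask) == 0;
    }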
 
 
 // Neither 'object' nor 'key' are modified by this function.
 //
 // If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
 // left with the object's elements map. Otherwise, it is used as a scratch
 // register.
 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
-                                                Register object,
-                                                Register key,
-                                                Register map,
-                                                Register scratch1,
+                                                Register object, Register key,
+                                                Register map, Register scratch1,
                                                 Register scratch2,
                                                 Label* unmapped_case,
                                                 Label* slow_case) {
   DCHECK(!AreAliased(object, key, map, scratch1, scratch2));
 
   Heap* heap = masm->isolate()->heap();
 
   // Check that the receiver is a JSObject. Because of the elements
   // map check later, we do not need to check for interceptors or
   // whether it requires access checks.
   __ JumpIfSmi(object, slow_case);
   // Check that the object is some kind of JSObject.
-  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
-                      slow_case, lt);
+  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
+                      lt);
 
   // Check that the key is a positive smi.
   __ JumpIfNotSmi(key, slow_case);
   __ Tbnz(key, kXSignBit, slow_case);
 
   // Load the elements object and check its map.
   Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
   __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
   __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
 
(...skipping 32 matching lines...)
                                                   Register scratch,
                                                   Label* slow_case) {
   DCHECK(!AreAliased(key, parameter_map, scratch));
 
   // Element is in arguments backing store, which is referenced by the
   // second element of the parameter_map.
   const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
   Register backing_store = parameter_map;
   __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
   Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(
-      backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+              DONT_DO_SMI_CHECK);
   __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
   __ Cmp(key, scratch);
   __ B(hs, slow_case);
 
-  __ Add(backing_store,
-         backing_store,
+  __ Add(backing_store, backing_store,
          FixedArray::kHeaderSize - kHeapObjectTag);
   __ SmiUntag(scratch, key);
   return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
 }
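
Note: the key arrives as a smi and is untagged before it can scale the returned MemOperand. On 64-bit V8 a smi keeps its 32-bit payload in the upper word with tag bit 0 clear, so tagging and untagging are plain shifts; this is also why the positive-smi check earlier can test the sign bit (kXSignBit) directly. A sketch assuming that 64-bit smi layout:

    #include <cstdint>

    constexpr int kSmiShift = 32;  // assumed 64-bit smi layout

    static int64_t SmiTag(int64_t value) { return value << kSmiShift; }
    static int64_t SmiUntag(int64_t smi) { return smi >> kSmiShift; }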
 
 
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is in lr.
   Register receiver = ReceiverRegister();
   Register name = NameRegister();
   DCHECK(receiver.is(x1));
   DCHECK(name.is(x2));
 
   // Probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::LOAD_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, receiver, name, x3, x4, x5, x6);
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3,
+                                               x4, x5, x6);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
 }
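
Note: the megamorphic path hashes the (name, receiver map) pair into the isolate's stub cache, a fixed-size table of previously compiled handlers, and only falls through to GenerateMiss when no entry matches. A conceptual sketch of such a probe (illustrative; V8's real cache also folds the code flags into the hash and keeps a secondary table):

    #include <cstdint>

    struct StubCacheEntry { void* name; void* map; void* handler; };

    constexpr int kTableBits = 7;  // assumed table size: 128 entries
    static StubCacheEntry primary_table[1 << kTableBits];

    static void* Probe(void* name, void* map) {
      uintptr_t hash = (reinterpret_cast<uintptr_t>(name) ^
                        reinterpret_cast<uintptr_t>(map)) >> 3;
      StubCacheEntry& entry = primary_table[hash & ((1 << kTableBits) - 1)];
      if (entry.name == name && entry.map == map) return entry.handler;
      return nullptr;  // cache miss: caller jumps to the runtime
    }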
 
 
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = x0;
   DCHECK(!dictionary.is(ReceiverRegister()));
   DCHECK(!dictionary.is(NameRegister()));
(...skipping 12 matching lines...)
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in lr.
   Isolate* isolate = masm->isolate();
   ASM_LOCATION("LoadIC::GenerateMiss");
 
   __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
 
   // Perform tail call to the entry.
   __ Push(ReceiverRegister(), NameRegister());
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
   __ TailCallExternalReference(ref, 2, 1);
 }
 
 
 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
   __ Push(ReceiverRegister(), NameRegister());
   __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
 }
 
(...skipping 37 matching lines...)
   DCHECK(key.is(x2));
   DCHECK(value.is(x0));
 
   Register map = x3;
 
   // These registers are used by GenerateMappedArgumentsLookup to build a
   // MemOperand. They are live for as long as the MemOperand is live.
   Register mapped1 = x4;
   Register mapped2 = x5;
 
-  MemOperand mapped =
-      GenerateMappedArgumentsLookup(masm, receiver, key, map,
-                                    mapped1, mapped2,
-                                    &notin, &slow);
+  MemOperand mapped = GenerateMappedArgumentsLookup(
+      masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
   Operand mapped_offset = mapped.OffsetAsOperand();
   __ Str(value, mapped);
   __ Add(x10, mapped.base(), mapped_offset);
   __ Mov(x11, value);
   __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
 
   __ Bind(&notin);
 
   // These registers are used by GenerateMappedArgumentsLookup to build a
   // MemOperand. They are live for as long as the MemOperand is live.
   Register unmapped1 = map;  // This is assumed to alias 'map'.
   Register unmapped2 = x4;
   MemOperand unmapped =
       GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
   Operand unmapped_offset = unmapped.OffsetAsOperand();
   __ Str(value, unmapped);
   __ Add(x10, unmapped.base(), unmapped_offset);
   __ Mov(x11, value);
-  __ RecordWrite(unmapped.base(), x10, x11,
-                 kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs);
   __ Ret();
   __ Bind(&slow);
   GenerateMiss(masm);
 }
 
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in lr.
   Isolate* isolate = masm->isolate();
 
(...skipping 23 matching lines...)
   DCHECK(FLAG_vector_ics);
   return x3;
 }
 
 
 const Register StoreIC::ReceiverRegister() { return x1; }
 const Register StoreIC::NameRegister() { return x2; }
 const Register StoreIC::ValueRegister() { return x0; }
 
 
-const Register KeyedStoreIC::MapRegister() {
-  return x3;
-}
+const Register KeyedStoreIC::MapRegister() { return x3; }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
   __ Push(ReceiverRegister(), NameRegister());
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 }
 
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
-                                        Register key,
-                                        Register receiver,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Register scratch4,
-                                        Register scratch5,
-                                        Label *slow) {
-  DCHECK(!AreAliased(
-      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
+                                        Register receiver, Register scratch1,
+                                        Register scratch2, Register scratch3,
+                                        Register scratch4, Register scratch5,
+                                        Label* slow) {
+  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+                     scratch5));
 
   Isolate* isolate = masm->isolate();
   Label check_number_dictionary;
   // If we can load the value, it should be returned in x0.
   Register result = x0;
 
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
+  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+                                 Map::kHasIndexedInterceptor, slow);
 
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
 
-  GenerateFastArrayLoad(
-      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
-  __ IncrementCounter(
-      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
+  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
+                        result, NULL, slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
+                      scratch1, scratch2);
   __ Ret();
 
   __ Bind(&check_number_dictionary);
   __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
 
   // Check whether we have a number dictionary.
   __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
 
-  __ LoadFromNumberDictionary(
-      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
+  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
+                              scratch4, scratch5);
   __ Ret();
 }
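
Note: when the elements are not a fast array, LoadFromNumberDictionary falls back to a hash-table lookup keyed by the integer index. A minimal open-addressing lookup in the same spirit (illustrative only; V8 hashes the key with a seed and uses its own probe sequence):

    #include <cstdint>
    #include <vector>

    struct Entry { uint32_t key; uint64_t value; bool used; };

    // Table size is assumed to be a power of two, as in V8's dictionaries.
    static bool NumberDictionaryLookup(const std::vector<Entry>& table,
                                       uint32_t key, uint64_t* value_out) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      uint32_t index = key & mask;  // stand-in for the seeded integer hash
      for (uint32_t step = 1; step <= table.size(); ++step) {
        const Entry& entry = table[index];
        if (!entry.used) return false;  // empty slot: key is absent
        if (entry.key == key) { *value_out = entry.value; return true; }
        index = (index + step) & mask;  // quadratic-style probing
      }
      return false;
    }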
 
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
-                                         Register key,
-                                         Register receiver,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Register scratch4,
-                                         Register scratch5,
-                                         Label *slow) {
-  DCHECK(!AreAliased(
-      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
+                                         Register receiver, Register scratch1,
+                                         Register scratch2, Register scratch3,
+                                         Register scratch4, Register scratch5,
+                                         Label* slow) {
+  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+                     scratch5));
 
   Isolate* isolate = masm->isolate();
   Label probe_dictionary, property_array_property;
   // If we can load the value, it should be returned in x0.
   Register result = x0;
 
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
+  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+                                 Map::kHasNamedInterceptor, slow);
 
   // If the receiver is a fast-case object, check the keyed lookup cache.
   // Otherwise probe the dictionary.
   __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
   __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
 
   // We keep the map of the receiver in scratch1.
   Register receiver_map = scratch1;
 
(...skipping 54 matching lines...)
     __ Subs(scratch4, scratch4, scratch5);
     __ B(ge, &property_array_property);
     if (i != 0) {
       __ B(&load_in_object_property);
     }
   }
 
   // Load in-object property.
   __ Bind(&load_in_object_property);
   __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
   __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
   __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
   __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1, scratch1, scratch2);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      scratch1, scratch2);
   __ Ret();
 
   // Load property array property.
   __ Bind(&property_array_property);
   __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
   __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1, scratch1, scratch2);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      scratch1, scratch2);
   __ Ret();
 
   // Do a quick inline probe of the receiver's dictionary, if it exists.
   __ Bind(&probe_dictionary);
   __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
   // Load the property.
   GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
-                      1, scratch1, scratch2);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
+                      scratch1, scratch2);
   __ Ret();
 }
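
Note: the two load paths above reflect where a named property's value can live: in one of the object's in-object slots (indexed from the start of the object, hence the kHeapObjectTag subtraction) or in the out-of-line properties array. A sketch of the two address computations with stand-in constants (not V8's actual object layout):

    #include <cstdint>

    constexpr int kPointerSizeLog2 = 3;        // 8-byte slots on arm64
    constexpr int kHeapObjectTag = 1;          // tagged pointers are offset by 1
    constexpr int kFixedArrayHeaderSize = 16;  // assumed header size

    // In-object slot: indexed from the start of the object itself.
    static uintptr_t InObjectSlot(uintptr_t tagged_object, unsigned index) {
      return tagged_object - kHeapObjectTag +
             (static_cast<uintptr_t>(index) << kPointerSizeLog2);
    }

    // Out-of-line slot: indexed past the properties array's header.
    static uintptr_t PropertyArraySlot(uintptr_t tagged_props, unsigned index) {
      return tagged_props - kHeapObjectTag + kFixedArrayHeaderSize +
             (static_cast<uintptr_t>(index) << kPointerSizeLog2);
    }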
 
 
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // The return address is in lr.
   Label slow, check_name, index_smi, index_name;
 
   Register key = NameRegister();
   Register receiver = ReceiverRegister();
   DCHECK(key.is(x2));
   DCHECK(receiver.is(x1));
 
   __ JumpIfNotSmi(key, &check_name);
   __ Bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
   GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
 
   // Slow case.
   __ Bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3);
+  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
+                      x4, x3);
   GenerateRuntimeGetProperty(masm);
 
   __ Bind(&check_name);
   GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
 
   GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
 
   __ Bind(&index_name);
   __ IndexFromHash(x3, key);
   // Now jump to the place where smi keys are handled.
   __ B(&index_smi);
 }
 
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   // Return address is in lr.
   Label miss;
 
   Register receiver = ReceiverRegister();
   Register index = NameRegister();
   Register result = x0;
   Register scratch = x3;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch,
-                                          result,
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
(...skipping 18 matching lines...)
   // Check that the key is an array index, that is Uint32.
   __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
 
   // Get the map of the receiver.
   Register map = scratch1;
   __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
 
   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
   __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
-  DCHECK(kSlowCaseBitFieldMask ==
-      ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
+  DCHECK(kSlowCaseBitFieldMask == ((1 << Map::kIsAccessCheckNeeded) |
+                                   (1 << Map::kHasIndexedInterceptor)));
   __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
   __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);
 
   // Everything is fine, call runtime.
   __ Push(receiver, key);
   __ TailCallExternalReference(
       ExternalReference(IC_Utility(kLoadElementWithInterceptor),
                         masm->isolate()),
       2, 1);
 
(...skipping 37 matching lines...)
 
   // Push strict_mode for runtime call.
   __ Mov(x10, Smi::FromInt(strict_mode));
   __ Push(x10);
 
   __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
 }
 
 
 static void KeyedStoreGenerateGenericHelper(
-    MacroAssembler* masm,
-    Label* fast_object,
-    Label* fast_double,
-    Label* slow,
-    KeyedStoreCheckMap check_map,
-    KeyedStoreIncrementLength increment_length,
-    Register value,
-    Register key,
-    Register receiver,
-    Register receiver_map,
-    Register elements_map,
-    Register elements) {
-  DCHECK(!AreAliased(
-      value, key, receiver, receiver_map, elements_map, elements, x10, x11));
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+    Register value, Register key, Register receiver, Register receiver_map,
+    Register elements_map, Register elements) {
+  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+                     x10, x11));
 
   Label transition_smi_elements;
   Label transition_double_elements;
   Label fast_double_without_map_check;
   Label non_double_value;
   Label finish_store;
 
   __ Bind(fast_object);
   if (check_map == kCheckMap) {
     __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
(...skipping 29 matching lines...)
   Register address = x11;
   __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
   __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
   __ Str(value, MemOperand(address));
 
   Label dont_record_write;
   __ JumpIfSmi(value, &dont_record_write);
 
   // Update write barrier for the elements array address.
   __ Mov(x10, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements,
-                 address,
-                 x10,
-                 kLRHasNotBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
+  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   __ Bind(&dont_record_write);
   __ Ret();
 
 
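Note: RecordWrite is the generational write barrier: having stored a heap pointer into the elements array, the code must record the slot so the GC can find the old-to-new edge. The value is copied into x10 first because the barrier clobbers its scratch registers, and OMIT_SMI_CHECK is sound only because the smi case already branched to dont_record_write. A conceptual sketch of such a barrier (illustrative, not V8's implementation):

    #include <set>

    static std::set<void**> remembered_set;  // slots the GC must rescan

    static bool InNewSpace(const void* p) {
      // Stand-in predicate; a real GC checks which page/space 'p' lies in.
      return false;
    }

    static void RecordWriteSketch(void* host, void** slot, void* value) {
      // Only old-space objects pointing at new-space values need recording.
      if (!InNewSpace(host) && InNewSpace(value)) remembered_set.insert(slot);
    }
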
   __ Bind(fast_double);
   if (check_map == kCheckMap) {
     // Check for fast double array case. If this fails, call through to the
     // runtime.
     __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
   }
 
   // HOLECHECK: guards "A[i] double hole?"
   // We have to see if the double version of the hole is present. If so go to
   // the runtime.
   __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
   __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
   __ Ldr(x11, MemOperand(x10));
   __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
   __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
 
   __ Bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 elements,
-                                 x10,
-                                 d0,
+  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                  &transition_double_elements);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
     __ Add(x10, key, Smi::FromInt(1));
     __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   __ Ret();
 
 
   __ Bind(&transition_smi_elements);
   // Transition the array appropriately depending on the value type.
   __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
   __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
 
   // Value is a double. Transition FAST_SMI_ELEMENTS ->
   // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         receiver_map,
-                                         x10,
-                                         x11,
-                                         slow);
-  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
-                                                    FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(
-      masm, receiver, key, value, receiver_map, mode, slow);
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   receiver_map, mode, slow);
   __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ B(&fast_double_without_map_check);
 
   __ Bind(&non_double_value);
   // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         x10,
-                                         x11,
-                                         slow);
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, x10, x11, slow);
 
   mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
   ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
       masm, receiver, key, value, receiver_map, mode, slow);
 
   __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ B(&finish_store);
 
   __ Bind(&transition_double_elements);
   // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
   // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
   // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         x10,
-                                         x11,
-                                         slow);
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, x10, x11, slow);
   mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
   ElementsTransitionGenerator::GenerateDoubleToObject(
       masm, receiver, key, value, receiver_map, mode, slow);
   __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ B(&finish_store);
 }
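
Note: the three LoadTransitionedArrayMapConditional calls implement the one-way elements-kind generalization used here: FAST_SMI_ELEMENTS widens to FAST_DOUBLE_ELEMENTS when the incoming value is a heap number and to FAST_ELEMENTS for any other non-smi, while FAST_DOUBLE_ELEMENTS can only widen to FAST_ELEMENTS. A sketch of that decision (stand-in enum; V8's real kinds include more states and holey variants):

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    // Mirrors the transition_smi_elements / transition_double_elements paths.
    static ElementsKind TargetKind(ElementsKind current,
                                   bool value_is_heap_number) {
      if (current == FAST_SMI_ELEMENTS) {
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      }
      return FAST_ELEMENTS;  // FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS
    }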
 
 
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                    StrictMode strict_mode) {
(...skipping 78 matching lines...)
   // is the length is always a smi.
 
   __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
   // Check the key against the length in the array.
   __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
   __ Cmp(x10, Operand::UntagSmi(key));
   __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
   __ B(lo, &slow);
 
-  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
-                                  &slow, kCheckMap, kDontIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
+  KeyedStoreGenerateGenericHelper(
+      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+      value, key, receiver, receiver_map, elements_map, elements);
   KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
-                                  &slow, kDontCheckMap, kIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
+                                  &slow, kDontCheckMap, kIncrementLength, value,
+                                  key, receiver, receiver_map, elements_map,
+                                  elements);
 }
 
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Register receiver = ReceiverRegister();
   Register name = NameRegister();
   DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));
 
   // Probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, receiver, name, x3, x4, x5, x6);
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3,
+                                               x4, x5, x6);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
 
   // Tail call to the entry.
(...skipping 73 matching lines...)
       return ge;
     default:
       UNREACHABLE();
       return al;
   }
 }
 
 
 bool CompareIC::HasInlinedSmiCode(Address address) {
   // The address of the instruction following the call.
-  Address info_address =
-      Assembler::return_address_from_call_start(address);
+  Address info_address = Assembler::return_address_from_call_start(address);
 
   InstructionSequence* patch_info = InstructionSequence::At(info_address);
   return patch_info->IsInlineData();
 }
 
 
 // Activate a SMI fast-path by patching the instructions generated by
 // JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
 // JumpPatchSite::EmitPatchInfo().
 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The patch information is encoded in the instruction stream using
   // instructions which have no side effects, so we can safely execute them.
   // The patch information is encoded directly after the call to the helper
   // function which is requesting this patch operation.
-  Address info_address =
-      Assembler::return_address_from_call_start(address);
+  Address info_address = Assembler::return_address_from_call_start(address);
   InlineSmiCheckInfo info(info_address);
 
   // Check and decode the patch information instruction.
   if (!info.HasSmiCheck()) {
     return;
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
-           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
+    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address,
+           info_address, reinterpret_cast<void*>(info.SmiCheck()));
   }
 
   // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
   // and JumpPatchSite::EmitJumpIfSmi().
   // Changing
   //   tb(n)z xzr, #0, <target>
   // to
   //   tb(!n)z test_reg, #0, <target>
   Instruction* to_patch = info.SmiCheck();
   PatchingAssembler patcher(to_patch, 1);
(...skipping 17 matching lines...)
 
   if (to_patch->Mask(TestBranchMask) == TBZ) {
     // This is JumpIfNotSmi(smi_reg, branch_imm).
     patcher.tbnz(smi_reg, 0, branch_imm);
   } else {
     DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
     // This is JumpIfSmi(smi_reg, branch_imm).
     patcher.tbz(smi_reg, 0, branch_imm);
   }
 }
-
-
-} }  // namespace v8::internal
+}
+}  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM64
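
Note on the patching scheme at the end of the file: JumpPatchSite initially emits a test-bit branch on xzr, which is never (tbz) or always (tbnz) taken, and PatchInlinedSmiCode later rewrites that single instruction to test bit 0 of the real register with the opposite polarity. Since smis have tag bit 0 clear, tbz then means "is a smi" and tbnz "is not a smi". A sketch of the one-instruction rewrite, assuming the usual A64 test-and-branch encoding (polarity in bit 24, Rt in bits 4:0; treat both as assumptions here):

    #include <cstdint>

    constexpr uint32_t kTbPolarityBit = 1u << 24;  // assumed TBZ/TBNZ op bit

    // Flip tbz <-> tbnz and retarget the tested register.
    static uint32_t PatchTestBranch(uint32_t instr, unsigned smi_reg) {
      instr ^= kTbPolarityBit;                       // invert the polarity
      instr = (instr & ~0x1Fu) | (smi_reg & 0x1Fu);  // xzr -> smi register
      return instr;
    }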