Chromium Code Reviews

Side by Side Diff: src/ic/mips/ic-mips.cc

Issue 496393002: MIPS: Move IC code into a subdir and move ic-compilation related code from stub-cache into ic-compi… (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix formatting | Created 6 years, 4 months ago
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 5
6
7 #include "src/v8.h" 6 #include "src/v8.h"
8 7
9 #if V8_TARGET_ARCH_MIPS 8 #if V8_TARGET_ARCH_MIPS
10 9
11 #include "src/code-stubs.h"
12 #include "src/codegen.h" 10 #include "src/codegen.h"
13 #include "src/ic-inl.h" 11 #include "src/ic/ic.h"
14 #include "src/runtime.h" 12 #include "src/ic/stub-cache.h"
15 #include "src/stub-cache.h"
16 13
17 namespace v8 { 14 namespace v8 {
18 namespace internal { 15 namespace internal {
19 16
20 17
21 // ---------------------------------------------------------------------------- 18 // ----------------------------------------------------------------------------
22 // Static IC stub generators. 19 // Static IC stub generators.
23 // 20 //
24 21
25 #define __ ACCESS_MASM(masm) 22 #define __ ACCESS_MASM(masm)
26 23
27 24
28 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, 25 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
29 Register type,
30 Label* global_object) { 26 Label* global_object) {
31 // Register usage: 27 // Register usage:
32 // type: holds the receiver instance type on entry. 28 // type: holds the receiver instance type on entry.
33 __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE)); 29 __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
34 __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE)); 30 __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
35 __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE)); 31 __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
36 } 32 }
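
A minimal sketch in plain C++ (not V8 code) of the check this helper emits: three equality branches on the receiver's instance type, taken for any of the global-ish object types. The enum values below are illustrative stand-ins.

enum InstanceType {
  JS_GLOBAL_OBJECT_TYPE,
  JS_BUILTINS_OBJECT_TYPE,
  JS_GLOBAL_PROXY_TYPE,
  JS_OBJECT_TYPE
};

// Mirrors the three eq-branches on the `type` register above.
bool IsGlobalInstanceType(InstanceType type) {
  return type == JS_GLOBAL_OBJECT_TYPE ||
         type == JS_BUILTINS_OBJECT_TYPE ||
         type == JS_GLOBAL_PROXY_TYPE;
}
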
37 33
38 34
39 // Helper function used from LoadIC GenerateNormal. 35 // Helper function used from LoadIC GenerateNormal.
40 // 36 //
41 // elements: Property dictionary. It is not clobbered if a jump to the miss 37 // elements: Property dictionary. It is not clobbered if a jump to the miss
42 // label is done. 38 // label is done.
43 // name: Property name. It is not clobbered if a jump to the miss label is 39 // name: Property name. It is not clobbered if a jump to the miss label is
44 // done 40 // done
45 // result: Register for the result. It is only updated if a jump to the miss 41 // result: Register for the result. It is only updated if a jump to the miss
46 // label is not done. Can be the same as elements or name clobbering 42 // label is not done. Can be the same as elements or name clobbering
47 // one of these in the case of not jumping to the miss label. 43 // one of these in the case of not jumping to the miss label.
48 // The two scratch registers need to be different from elements, name and 44 // The two scratch registers need to be different from elements, name and
49 // result. 45 // result.
50 // The generated code assumes that the receiver has slow properties, 46 // The generated code assumes that the receiver has slow properties,
51 // is not a global object and does not have interceptors. 47 // is not a global object and does not have interceptors.
52 // The address returned from GenerateStringDictionaryProbes() in scratch2 48 // The address returned from GenerateStringDictionaryProbes() in scratch2
53 // is used. 49 // is used.
54 static void GenerateDictionaryLoad(MacroAssembler* masm, 50 static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
55 Label* miss, 51 Register elements, Register name,
56 Register elements, 52 Register result, Register scratch1,
57 Register name,
58 Register result,
59 Register scratch1,
60 Register scratch2) { 53 Register scratch2) {
61 // Main use of the scratch registers. 54 // Main use of the scratch registers.
62 // scratch1: Used as temporary and to hold the capacity of the property 55 // scratch1: Used as temporary and to hold the capacity of the property
63 // dictionary. 56 // dictionary.
64 // scratch2: Used as temporary. 57 // scratch2: Used as temporary.
65 Label done; 58 Label done;
66 59
67 // Probe the dictionary. 60 // Probe the dictionary.
68 NameDictionaryLookupStub::GeneratePositiveLookup(masm, 61 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
69 miss, 62 name, scratch1, scratch2);
70 &done,
71 elements,
72 name,
73 scratch1,
74 scratch2);
75 63
76 // If probing finds an entry check that the value is a normal 64 // If probing finds an entry check that the value is a normal
77 // property. 65 // property.
78 __ bind(&done); // scratch2 == elements + 4 * index. 66 __ bind(&done); // scratch2 == elements + 4 * index.
79 const int kElementsStartOffset = NameDictionary::kHeaderSize + 67 const int kElementsStartOffset =
68 NameDictionary::kHeaderSize +
80 NameDictionary::kElementsStartIndex * kPointerSize; 69 NameDictionary::kElementsStartIndex * kPointerSize;
81 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; 70 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
82 __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); 71 __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
83 __ And(at, 72 __ And(at, scratch1,
84 scratch1,
85 Operand(PropertyDetails::TypeField::kMask << kSmiTagSize)); 73 Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
86 __ Branch(miss, ne, at, Operand(zero_reg)); 74 __ Branch(miss, ne, at, Operand(zero_reg));
87 75
88 // Get the value at the masked, scaled index and return. 76 // Get the value at the masked, scaled index and return.
89 __ lw(result, 77 __ lw(result,
90 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); 78 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
91 } 79 }
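
A sketch of the offset arithmetic behind GenerateDictionaryLoad, with assumed constants: each NameDictionary entry is a (key, value, details) triple of words, and scratch2 points at the probed entry's key slot, so the value and details sit one and two pointers further on. The details word is a smi, hence the shift before masking.

const int kPointerSize = 4;                // 32-bit MIPS word
const int kSmiTagSize = 1;
const int kHeaderSize = 4 * kPointerSize;  // assumed, for illustration
const int kElementsStartIndex = 3;         // assumed, for illustration
const int kTypeFieldMask = 0x7;            // assumed type-field width

const int kElementsStartOffset =
    kHeaderSize + kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + 1 * kPointerSize;    // value slot
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;  // details slot

// A property is "normal" (plain data in the dictionary) when the type bits
// of its smi-encoded details word are zero; otherwise the stub misses.
bool IsNormalProperty(int details) {
  return (details & (kTypeFieldMask << kSmiTagSize)) == 0;
}
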
92 80
93 81
94 // Helper function used from StoreIC::GenerateNormal. 82 // Helper function used from StoreIC::GenerateNormal.
95 // 83 //
96 // elements: Property dictionary. It is not clobbered if a jump to the miss 84 // elements: Property dictionary. It is not clobbered if a jump to the miss
97 // label is done. 85 // label is done.
98 // name: Property name. It is not clobbered if a jump to the miss label is 86 // name: Property name. It is not clobbered if a jump to the miss label is
99 // done 87 // done
100 // value: The value to store. 88 // value: The value to store.
101 // The two scratch registers need to be different from elements, name and 89 // The two scratch registers need to be different from elements, name and
102 // result. 90 // result.
103 // The generated code assumes that the receiver has slow properties, 91 // The generated code assumes that the receiver has slow properties,
104 // is not a global object and does not have interceptors. 92 // is not a global object and does not have interceptors.
105 // The address returned from GenerateStringDictionaryProbes() in scratch2 93 // The address returned from GenerateStringDictionaryProbes() in scratch2
106 // is used. 94 // is used.
107 static void GenerateDictionaryStore(MacroAssembler* masm, 95 static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
108 Label* miss, 96 Register elements, Register name,
109 Register elements, 97 Register value, Register scratch1,
110 Register name,
111 Register value,
112 Register scratch1,
113 Register scratch2) { 98 Register scratch2) {
114 // Main use of the scratch registers. 99 // Main use of the scratch registers.
115 // scratch1: Used as temporary and to hold the capacity of the property 100 // scratch1: Used as temporary and to hold the capacity of the property
116 // dictionary. 101 // dictionary.
117 // scratch2: Used as temporary. 102 // scratch2: Used as temporary.
118 Label done; 103 Label done;
119 104
120 // Probe the dictionary. 105 // Probe the dictionary.
121 NameDictionaryLookupStub::GeneratePositiveLookup(masm, 106 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
122 miss, 107 name, scratch1, scratch2);
123 &done,
124 elements,
125 name,
126 scratch1,
127 scratch2);
128 108
129 // If probing finds an entry in the dictionary check that the value 109 // If probing finds an entry in the dictionary check that the value
130 // is a normal property that is not read only. 110 // is a normal property that is not read only.
131 __ bind(&done); // scratch2 == elements + 4 * index. 111 __ bind(&done); // scratch2 == elements + 4 * index.
132 const int kElementsStartOffset = NameDictionary::kHeaderSize + 112 const int kElementsStartOffset =
113 NameDictionary::kHeaderSize +
133 NameDictionary::kElementsStartIndex * kPointerSize; 114 NameDictionary::kElementsStartIndex * kPointerSize;
134 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; 115 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
135 const int kTypeAndReadOnlyMask = 116 const int kTypeAndReadOnlyMask =
136 (PropertyDetails::TypeField::kMask | 117 (PropertyDetails::TypeField::kMask |
137 PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize; 118 PropertyDetails::AttributesField::encode(READ_ONLY))
119 << kSmiTagSize;
138 __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); 120 __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
139 __ And(at, scratch1, Operand(kTypeAndReadOnlyMask)); 121 __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
140 __ Branch(miss, ne, at, Operand(zero_reg)); 122 __ Branch(miss, ne, at, Operand(zero_reg));
141 123
142 // Store the value at the masked, scaled index and return. 124 // Store the value at the masked, scaled index and return.
143 const int kValueOffset = kElementsStartOffset + kPointerSize; 125 const int kValueOffset = kElementsStartOffset + kPointerSize;
144 __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); 126 __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
145 __ sw(value, MemOperand(scratch2)); 127 __ sw(value, MemOperand(scratch2));
146 128
147 // Update the write barrier. Make sure not to clobber the value. 129 // Update the write barrier. Make sure not to clobber the value.
148 __ mov(scratch1, value); 130 __ mov(scratch1, value);
149 __ RecordWrite( 131 __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
150 elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs); 132 kDontSaveFPRegs);
151 } 133 }
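
The store path widens that mask with the READ_ONLY attribute bit, so a store proceeds only when the property is both normal and writable. A hedged sketch, reusing the same illustrative constants:

const int kSmiTagSize = 1;
const int kTypeFieldMask = 0x7;            // assumed, as above
const int kReadOnlyAttributeBit = 1 << 3;  // assumed READ_ONLY encoding

bool IsWritableNormalProperty(int details) {
  const int kTypeAndReadOnlyMask =
      (kTypeFieldMask | kReadOnlyAttributeBit) << kSmiTagSize;
  return (details & kTypeAndReadOnlyMask) == 0;  // both bit groups clear
}
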
152 134
153 135
154 // Checks the receiver for special cases (value type, slow case bits). 136 // Checks the receiver for special cases (value type, slow case bits).
155 // Falls through for regular JS object. 137 // Falls through for regular JS object.
156 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, 138 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
157 Register receiver, 139 Register receiver, Register map,
158 Register map,
159 Register scratch, 140 Register scratch,
160 int interceptor_bit, 141 int interceptor_bit, Label* slow) {
161 Label* slow) {
162 // Check that the object isn't a smi. 142 // Check that the object isn't a smi.
163 __ JumpIfSmi(receiver, slow); 143 __ JumpIfSmi(receiver, slow);
164 // Get the map of the receiver. 144 // Get the map of the receiver.
165 __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); 145 __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
166 // Check bit field. 146 // Check bit field.
167 __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 147 __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
168 __ And(at, scratch, 148 __ And(at, scratch,
169 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); 149 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
170 __ Branch(slow, ne, at, Operand(zero_reg)); 150 __ Branch(slow, ne, at, Operand(zero_reg));
171 // Check that the object is some kind of JS object EXCEPT JS Value type. 151 // Check that the object is some kind of JS object EXCEPT JS Value type.
172 // In the case that the object is a value-wrapper object, 152 // In the case that the object is a value-wrapper object,
173 // we enter the runtime system to make sure that indexing into string 153 // we enter the runtime system to make sure that indexing into string
174 // objects work as intended. 154 // objects work as intended.
175 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); 155 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
176 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 156 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
177 __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE)); 157 __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
178 } 158 }
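
A sketch, with assumed bit positions, of the fast-case filter this emits: smis are rejected first (JumpIfSmi above), then maps that need access checks or carry the given interceptor bit, then instance types below JS_OBJECT_TYPE, which routes JSValue wrappers to the runtime so indexing into string objects works as intended.

struct Map {
  unsigned bit_field;  // per-map flag bits
  int instance_type;
};

bool ReceiverIsFastCase(const Map& map, int interceptor_bit,
                        int access_check_bit, int js_object_type) {
  unsigned mask = (1u << access_check_bit) | (1u << interceptor_bit);
  if (map.bit_field & mask) return false;      // slow: checks or interceptor
  return map.instance_type >= js_object_type;  // slow below JS_OBJECT_TYPE
}
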
179 159
180 160
181 // Loads an indexed element from a fast case array. 161 // Loads an indexed element from a fast case array.
182 // If not_fast_array is NULL, doesn't perform the elements map check. 162 // If not_fast_array is NULL, doesn't perform the elements map check.
183 static void GenerateFastArrayLoad(MacroAssembler* masm, 163 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
184 Register receiver, 164 Register key, Register elements,
185 Register key, 165 Register scratch1, Register scratch2,
186 Register elements, 166 Register result, Label* not_fast_array,
187 Register scratch1,
188 Register scratch2,
189 Register result,
190 Label* not_fast_array,
191 Label* out_of_range) { 167 Label* out_of_range) {
192 // Register use: 168 // Register use:
193 // 169 //
194 // receiver - holds the receiver on entry. 170 // receiver - holds the receiver on entry.
195 // Unchanged unless 'result' is the same register. 171 // Unchanged unless 'result' is the same register.
196 // 172 //
197 // key - holds the smi key on entry. 173 // key - holds the smi key on entry.
198 // Unchanged unless 'result' is the same register. 174 // Unchanged unless 'result' is the same register.
199 // 175 //
200 // elements - holds the elements of the receiver on exit. 176 // elements - holds the elements of the receiver on exit.
(...skipping 36 matching lines...)
237 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 213 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
238 // In case the loaded value is the_hole we have to consult GetProperty 214 // In case the loaded value is the_hole we have to consult GetProperty
239 // to ensure the prototype chain is searched. 215 // to ensure the prototype chain is searched.
240 __ Branch(out_of_range, eq, scratch2, Operand(at)); 216 __ Branch(out_of_range, eq, scratch2, Operand(at));
241 __ mov(result, scratch2); 217 __ mov(result, scratch2);
242 } 218 }
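
The visible tail of GenerateFastArrayLoad is the hole check. A small sketch of that step, with kTheHole as a stand-in sentinel: loading the_hole means the element is absent, and returning nullopt models the branch to out_of_range that makes the caller consult GetProperty so the prototype chain is searched.

#include <optional>

const int kTheHole = -1;  // stand-in for Heap::kTheHoleValue

std::optional<int> FastElementLoad(const int* elements, int index) {
  int value = elements[index];                 // bounds checked by the stub
  if (value == kTheHole) return std::nullopt;  // fall back to GetProperty
  return value;
}
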
243 219
244 220
245 // Checks whether a key is an array index string or a unique name. 221 // Checks whether a key is an array index string or a unique name.
246 // Falls through if a key is a unique name. 222 // Falls through if a key is a unique name.
247 static void GenerateKeyNameCheck(MacroAssembler* masm, 223 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
248 Register key, 224 Register map, Register hash,
249 Register map, 225 Label* index_string, Label* not_unique) {
250 Register hash,
251 Label* index_string,
252 Label* not_unique) {
253 // The key is not a smi. 226 // The key is not a smi.
254 Label unique; 227 Label unique;
255 // Is it a name? 228 // Is it a name?
256 __ GetObjectType(key, map, hash); 229 __ GetObjectType(key, map, hash);
257 __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE)); 230 __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
258 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); 231 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
259 __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE)); 232 __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
260 233
261 // Is the string an array index, with cached numeric value? 234 // Is the string an array index, with cached numeric value?
262 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset)); 235 __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
(...skipping 15 matching lines...)
278 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { 251 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
279 // The return address is in lr. 252 // The return address is in lr.
280 Register receiver = ReceiverRegister(); 253 Register receiver = ReceiverRegister();
281 Register name = NameRegister(); 254 Register name = NameRegister();
282 DCHECK(receiver.is(a1)); 255 DCHECK(receiver.is(a1));
283 DCHECK(name.is(a2)); 256 DCHECK(name.is(a2));
284 257
285 // Probe the stub cache. 258 // Probe the stub cache.
286 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( 259 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
287 Code::ComputeHandlerFlags(Code::LOAD_IC)); 260 Code::ComputeHandlerFlags(Code::LOAD_IC));
288 masm->isolate()->stub_cache()->GenerateProbe( 261 masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, a3,
289 masm, flags, receiver, name, a3, t0, t1, t2); 262 t0, t1, t2);
290 263
291 // Cache miss: Jump to runtime. 264 // Cache miss: Jump to runtime.
292 GenerateMiss(masm); 265 GenerateMiss(masm);
293 } 266 }
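
GenerateProbe emits the stub-cache lookup in MIPS assembly, using a3/t0/t1/t2 as scratch registers. A rough C++ sketch of the shape of that lookup, with the hashing and types simplified (the real code also folds the code flags into the index and probes a secondary table before missing):

#include <cstddef>
#include <cstdint>

struct Entry {
  const void* name;
  const void* map;
  const void* handler;
};

const void* ProbeStubCache(const Entry* table, std::size_t size_mask,
                           const void* name, const void* map) {
  std::size_t index = (reinterpret_cast<std::uintptr_t>(name) ^
                       reinterpret_cast<std::uintptr_t>(map)) & size_mask;
  const Entry& e = table[index];
  if (e.name == name && e.map == map) return e.handler;  // cache hit
  return nullptr;  // miss: fall through to GenerateMiss
}
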
294 267
295 268
296 void LoadIC::GenerateNormal(MacroAssembler* masm) { 269 void LoadIC::GenerateNormal(MacroAssembler* masm) {
297 Register dictionary = a0; 270 Register dictionary = a0;
298 DCHECK(!dictionary.is(ReceiverRegister())); 271 DCHECK(!dictionary.is(ReceiverRegister()));
299 DCHECK(!dictionary.is(NameRegister())); 272 DCHECK(!dictionary.is(NameRegister()));
(...skipping 33 matching lines...)
333 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { 306 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
334 // The return address is in ra. 307 // The return address is in ra.
335 308
336 __ mov(LoadIC_TempRegister(), ReceiverRegister()); 309 __ mov(LoadIC_TempRegister(), ReceiverRegister());
337 __ Push(LoadIC_TempRegister(), NameRegister()); 310 __ Push(LoadIC_TempRegister(), NameRegister());
338 311
339 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); 312 __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
340 } 313 }
341 314
342 315
343 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, 316 static MemOperand GenerateMappedArgumentsLookup(
344 Register object, 317 MacroAssembler* masm, Register object, Register key, Register scratch1,
345 Register key, 318 Register scratch2, Register scratch3, Label* unmapped_case,
346 Register scratch1, 319 Label* slow_case) {
347 Register scratch2,
348 Register scratch3,
349 Label* unmapped_case,
350 Label* slow_case) {
351 Heap* heap = masm->isolate()->heap(); 320 Heap* heap = masm->isolate()->heap();
352 321
353 // Check that the receiver is a JSObject. Because of the map check 322 // Check that the receiver is a JSObject. Because of the map check
354 // later, we do not need to check for interceptors or whether it 323 // later, we do not need to check for interceptors or whether it
355 // requires access checks. 324 // requires access checks.
356 __ JumpIfSmi(object, slow_case); 325 __ JumpIfSmi(object, slow_case);
357 // Check that the object is some kind of JSObject. 326 // Check that the object is some kind of JSObject.
358 __ GetObjectType(object, scratch1, scratch2); 327 __ GetObjectType(object, scratch1, scratch2);
359 __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE)); 328 __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
360 329
361 // Check that the key is a positive smi. 330 // Check that the key is a positive smi.
362 __ And(scratch1, key, Operand(0x80000001)); 331 __ And(scratch1, key, Operand(0x80000001));
363 __ Branch(slow_case, ne, scratch1, Operand(zero_reg)); 332 __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
364 333
365 // Load the elements into scratch1 and check its map. 334 // Load the elements into scratch1 and check its map.
366 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); 335 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
367 __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); 336 __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
368 __ CheckMap(scratch1, 337 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
369 scratch2,
370 arguments_map,
371 slow_case,
372 DONT_DO_SMI_CHECK);
373 // Check if element is in the range of mapped arguments. If not, jump 338 // Check if element is in the range of mapped arguments. If not, jump
374 // to the unmapped lookup with the parameter map in scratch1. 339 // to the unmapped lookup with the parameter map in scratch1.
375 __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); 340 __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
376 __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2))); 341 __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
377 __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2)); 342 __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
378 343
379 // Load element index and check whether it is the hole. 344 // Load element index and check whether it is the hole.
380 const int kOffset = 345 const int kOffset =
381 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; 346 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
382 347
(...skipping 23 matching lines...)
406 Register parameter_map, 371 Register parameter_map,
407 Register scratch, 372 Register scratch,
408 Label* slow_case) { 373 Label* slow_case) {
409 // Element is in arguments backing store, which is referenced by the 374 // Element is in arguments backing store, which is referenced by the
410 // second element of the parameter_map. The parameter_map register 375 // second element of the parameter_map. The parameter_map register
411 // must be loaded with the parameter map of the arguments object and is 376 // must be loaded with the parameter map of the arguments object and is
412 // overwritten. 377 // overwritten.
413 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; 378 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
414 Register backing_store = parameter_map; 379 Register backing_store = parameter_map;
415 __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); 380 __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
416 __ CheckMap(backing_store, 381 __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
417 scratch,
418 Heap::kFixedArrayMapRootIndex,
419 slow_case,
420 DONT_DO_SMI_CHECK); 382 DONT_DO_SMI_CHECK);
421 __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); 383 __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
422 __ Branch(slow_case, Ugreater_equal, key, Operand(scratch)); 384 __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
423 __ li(scratch, Operand(kPointerSize >> 1)); 385 __ li(scratch, Operand(kPointerSize >> 1));
424 __ Mul(scratch, key, scratch); 386 __ Mul(scratch, key, scratch);
425 __ Addu(scratch, 387 __ Addu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
426 scratch,
427 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
428 __ Addu(scratch, backing_store, scratch); 388 __ Addu(scratch, backing_store, scratch);
429 return MemOperand(scratch); 389 return MemOperand(scratch);
430 } 390 }
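
One detail worth calling out in GenerateMappedArgumentsLookup above: the key test And(scratch1, key, Operand(0x80000001)) accepts exactly the positive smis, since a 32-bit smi keeps tag 0 in bit 0 and a non-negative value keeps bit 31 clear. A sketch:

#include <cstdint>

bool IsPositiveSmi(uint32_t key) {
  // Bit 0 clear (smi tag) and bit 31 clear (non-negative).
  return (key & 0x80000001u) == 0;
}
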
431 391
432 392
433 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { 393 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
434 // The return address is in ra. 394 // The return address is in ra.
435 Register receiver = ReceiverRegister(); 395 Register receiver = ReceiverRegister();
436 Register key = NameRegister(); 396 Register key = NameRegister();
437 DCHECK(receiver.is(a1)); 397 DCHECK(receiver.is(a1));
438 DCHECK(key.is(a2)); 398 DCHECK(key.is(a2));
439 399
440 Label slow, notin; 400 Label slow, notin;
441 MemOperand mapped_location = 401 MemOperand mapped_location = GenerateMappedArgumentsLookup(
442 GenerateMappedArgumentsLookup( 402 masm, receiver, key, a0, a3, t0, &notin, &slow);
443 masm, receiver, key, a0, a3, t0, &notin, &slow);
444 __ Ret(USE_DELAY_SLOT); 403 __ Ret(USE_DELAY_SLOT);
445 __ lw(v0, mapped_location); 404 __ lw(v0, mapped_location);
446 __ bind(&notin); 405 __ bind(&notin);
447 // The unmapped lookup expects that the parameter map is in a0. 406 // The unmapped lookup expects that the parameter map is in a0.
448 MemOperand unmapped_location = 407 MemOperand unmapped_location =
449 GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow); 408 GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
450 __ lw(a0, unmapped_location); 409 __ lw(a0, unmapped_location);
451 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); 410 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
452 __ Branch(&slow, eq, a0, Operand(a3)); 411 __ Branch(&slow, eq, a0, Operand(a3));
453 __ Ret(USE_DELAY_SLOT); 412 __ Ret(USE_DELAY_SLOT);
454 __ mov(v0, a0); 413 __ mov(v0, a0);
455 __ bind(&slow); 414 __ bind(&slow);
456 GenerateMiss(masm); 415 GenerateMiss(masm);
457 } 416 }
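
A sketch of the two-level lookup these stubs wire together for sloppy arguments, with the FixedArray layout simplified to vectors: a mapped entry holds a context slot index (the argument aliases a context variable), while the_hole sends the lookup to the plain backing store referenced by the parameter map.

#include <vector>

const int kHole = -1;  // stand-in for the_hole

int LoadSloppyArgument(const std::vector<int>& mapped,   // slot index or kHole
                       const std::vector<int>& context,  // context slots
                       const std::vector<int>& backing,  // unmapped store
                       int key) {  // assumed non-negative, as the stub checks
  if (key < static_cast<int>(mapped.size()) && mapped[key] != kHole)
    return context[mapped[key]];  // mapped case: read through the context
  return backing[key];            // unmapped case (bounds checked in the stub)
}
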
458 417
459 418
460 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { 419 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
461 Register receiver = ReceiverRegister(); 420 Register receiver = ReceiverRegister();
462 Register key = NameRegister(); 421 Register key = NameRegister();
463 Register value = ValueRegister(); 422 Register value = ValueRegister();
464 DCHECK(value.is(a0)); 423 DCHECK(value.is(a0));
465 424
466 Label slow, notin; 425 Label slow, notin;
467 // Store address is returned in register (of MemOperand) mapped_location. 426 // Store address is returned in register (of MemOperand) mapped_location.
468 MemOperand mapped_location = GenerateMappedArgumentsLookup( 427 MemOperand mapped_location = GenerateMappedArgumentsLookup(
469 masm, receiver, key, a3, t0, t1, &notin, &slow); 428 masm, receiver, key, a3, t0, t1, &notin, &slow);
470 __ sw(value, mapped_location); 429 __ sw(value, mapped_location);
471 __ mov(t5, value); 430 __ mov(t5, value);
472 DCHECK_EQ(mapped_location.offset(), 0); 431 DCHECK_EQ(mapped_location.offset(), 0);
473 __ RecordWrite(a3, mapped_location.rm(), t5, 432 __ RecordWrite(a3, mapped_location.rm(), t5, kRAHasNotBeenSaved,
474 kRAHasNotBeenSaved, kDontSaveFPRegs); 433 kDontSaveFPRegs);
475 __ Ret(USE_DELAY_SLOT); 434 __ Ret(USE_DELAY_SLOT);
476 __ mov(v0, value); // (In delay slot) return the value stored in v0. 435 __ mov(v0, value); // (In delay slot) return the value stored in v0.
477 __ bind(&notin); 436 __ bind(&notin);
478 // The unmapped lookup expects that the parameter map is in a3. 437 // The unmapped lookup expects that the parameter map is in a3.
479 // Store address is returned in register (of MemOperand) unmapped_location. 438 // Store address is returned in register (of MemOperand) unmapped_location.
480 MemOperand unmapped_location = 439 MemOperand unmapped_location =
481 GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow); 440 GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow);
482 __ sw(value, unmapped_location); 441 __ sw(value, unmapped_location);
483 __ mov(t5, value); 442 __ mov(t5, value);
484 DCHECK_EQ(unmapped_location.offset(), 0); 443 DCHECK_EQ(unmapped_location.offset(), 0);
485 __ RecordWrite(a3, unmapped_location.rm(), t5, 444 __ RecordWrite(a3, unmapped_location.rm(), t5, kRAHasNotBeenSaved,
486 kRAHasNotBeenSaved, kDontSaveFPRegs); 445 kDontSaveFPRegs);
487 __ Ret(USE_DELAY_SLOT); 446 __ Ret(USE_DELAY_SLOT);
488 __ mov(v0, a0); // (In delay slot) return the value stored in v0. 447 __ mov(v0, a0); // (In delay slot) return the value stored in v0.
489 __ bind(&slow); 448 __ bind(&slow);
490 GenerateMiss(masm); 449 GenerateMiss(masm);
491 } 450 }
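
Both stores above are followed by RecordWrite because a generational collector has to learn about pointer stores into heap objects. A toy sketch of the idea only; V8's barrier additionally skips smis, consults page flags, and clobbers its scratch registers, which is why value is copied into t5 first.

#include <unordered_set>

std::unordered_set<void**> remembered_set;  // slots the GC must revisit

void StoreWithBarrier(void** slot, void* value) {
  *slot = value;
  remembered_set.insert(slot);  // conservative: remember every pointer store
}
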
492 451
493 452
494 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { 453 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
495 // The return address is in ra. 454 // The return address is in ra.
496 Isolate* isolate = masm->isolate(); 455 Isolate* isolate = masm->isolate();
(...skipping 25 matching lines...)
522 DCHECK(FLAG_vector_ics); 481 DCHECK(FLAG_vector_ics);
523 return a3; 482 return a3;
524 } 483 }
525 484
526 485
527 const Register StoreIC::ReceiverRegister() { return a1; } 486 const Register StoreIC::ReceiverRegister() { return a1; }
528 const Register StoreIC::NameRegister() { return a2; } 487 const Register StoreIC::NameRegister() { return a2; }
529 const Register StoreIC::ValueRegister() { return a0; } 488 const Register StoreIC::ValueRegister() { return a0; }
530 489
531 490
532 const Register KeyedStoreIC::MapRegister() { 491 const Register KeyedStoreIC::MapRegister() { return a3; }
533 return a3;
534 }
535 492
536 493
537 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { 494 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
538 // The return address is in ra. 495 // The return address is in ra.
539 496
540 __ Push(ReceiverRegister(), NameRegister()); 497 __ Push(ReceiverRegister(), NameRegister());
541 498
542 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); 499 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
543 } 500 }
544 501
545 502
546 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { 503 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
547 // The return address is in ra. 504 // The return address is in ra.
548 Label slow, check_name, index_smi, index_name, property_array_property; 505 Label slow, check_name, index_smi, index_name, property_array_property;
549 Label probe_dictionary, check_number_dictionary; 506 Label probe_dictionary, check_number_dictionary;
550 507
551 Register key = NameRegister(); 508 Register key = NameRegister();
552 Register receiver = ReceiverRegister(); 509 Register receiver = ReceiverRegister();
553 DCHECK(key.is(a2)); 510 DCHECK(key.is(a2));
554 DCHECK(receiver.is(a1)); 511 DCHECK(receiver.is(a1));
555 512
556 Isolate* isolate = masm->isolate(); 513 Isolate* isolate = masm->isolate();
557 514
558 // Check that the key is a smi. 515 // Check that the key is a smi.
559 __ JumpIfNotSmi(key, &check_name); 516 __ JumpIfNotSmi(key, &check_name);
560 __ bind(&index_smi); 517 __ bind(&index_smi);
561 // Now the key is known to be a smi. This place is also jumped to from below 518 // Now the key is known to be a smi. This place is also jumped to from below
562 // where a numeric string is converted to a smi. 519 // where a numeric string is converted to a smi.
563 520
564 GenerateKeyedLoadReceiverCheck( 521 GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
565 masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow); 522 Map::kHasIndexedInterceptor, &slow);
566 523
567 // Check the receiver's map to see if it has fast elements. 524 // Check the receiver's map to see if it has fast elements.
568 __ CheckFastElements(a0, a3, &check_number_dictionary); 525 __ CheckFastElements(a0, a3, &check_number_dictionary);
569 526
570 GenerateFastArrayLoad( 527 GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
571 masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
572 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3); 528 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
573 __ Ret(); 529 __ Ret();
574 530
575 __ bind(&check_number_dictionary); 531 __ bind(&check_number_dictionary);
576 __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset)); 532 __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
577 __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset)); 533 __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
578 534
579 // Check whether the elements is a number dictionary. 535 // Check whether the elements is a number dictionary.
580 // a3: elements map 536 // a3: elements map
581 // t0: elements 537 // t0: elements
582 __ LoadRoot(at, Heap::kHashTableMapRootIndex); 538 __ LoadRoot(at, Heap::kHashTableMapRootIndex);
583 __ Branch(&slow, ne, a3, Operand(at)); 539 __ Branch(&slow, ne, a3, Operand(at));
584 __ sra(a0, key, kSmiTagSize); 540 __ sra(a0, key, kSmiTagSize);
585 __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1); 541 __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
586 __ Ret(); 542 __ Ret();
587 543
588 // Slow case, key and receiver still in a2 and a1. 544 // Slow case, key and receiver still in a2 and a1.
589 __ bind(&slow); 545 __ bind(&slow);
590 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 546 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
591 1,
592 t0,
593 a3); 547 a3);
594 GenerateRuntimeGetProperty(masm); 548 GenerateRuntimeGetProperty(masm);
595 549
596 __ bind(&check_name); 550 __ bind(&check_name);
597 GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow); 551 GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
598 552
599 GenerateKeyedLoadReceiverCheck( 553 GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
600 masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow); 554 Map::kHasNamedInterceptor, &slow);
601 555
602 556
603 // If the receiver is a fast-case object, check the keyed lookup 557 // If the receiver is a fast-case object, check the keyed lookup
604 // cache. Otherwise probe the dictionary. 558 // cache. Otherwise probe the dictionary.
605 __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 559 __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
606 __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset)); 560 __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
607 __ LoadRoot(at, Heap::kHashTableMapRootIndex); 561 __ LoadRoot(at, Heap::kHashTableMapRootIndex);
608 __ Branch(&probe_dictionary, eq, t0, Operand(at)); 562 __ Branch(&probe_dictionary, eq, t0, Operand(at));
609 563
610 // Load the map of the receiver, compute the keyed lookup cache hash 564 // Load the map of the receiver, compute the keyed lookup cache hash
(...skipping 53 matching lines...)
664 } 618 }
665 619
666 // Load in-object property. 620 // Load in-object property.
667 __ bind(&load_in_object_property); 621 __ bind(&load_in_object_property);
668 __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset)); 622 __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset));
669 __ addu(t2, t2, t1); // Index from start of object. 623 __ addu(t2, t2, t1); // Index from start of object.
670 __ Subu(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. 624 __ Subu(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
671 __ sll(at, t2, kPointerSizeLog2); 625 __ sll(at, t2, kPointerSizeLog2);
672 __ addu(at, receiver, at); 626 __ addu(at, receiver, at);
673 __ lw(v0, MemOperand(at)); 627 __ lw(v0, MemOperand(at));
674 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 628 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
675 1, 629 t0, a3);
676 t0,
677 a3);
678 __ Ret(); 630 __ Ret();
679 631
680 // Load property array property. 632 // Load property array property.
681 __ bind(&property_array_property); 633 __ bind(&property_array_property);
682 __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 634 __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
683 __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag); 635 __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
684 __ sll(v0, t1, kPointerSizeLog2); 636 __ sll(v0, t1, kPointerSizeLog2);
685 __ Addu(v0, v0, receiver); 637 __ Addu(v0, v0, receiver);
686 __ lw(v0, MemOperand(v0)); 638 __ lw(v0, MemOperand(v0));
687 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 639 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
688 1, 640 t0, a3);
689 t0,
690 a3);
691 __ Ret(); 641 __ Ret();
692 642
693 643
694 // Do a quick inline probe of the receiver's dictionary, if it 644 // Do a quick inline probe of the receiver's dictionary, if it
695 // exists. 645 // exists.
696 __ bind(&probe_dictionary); 646 __ bind(&probe_dictionary);
697 // a3: elements 647 // a3: elements
698 __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset)); 648 __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
699 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); 649 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
700 GenerateGlobalInstanceTypeCheck(masm, a0, &slow); 650 GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
701 // Load the property to v0. 651 // Load the property to v0.
702 GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0); 652 GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
703 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 653 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, t0,
704 1,
705 t0,
706 a3); 654 a3);
707 __ Ret(); 655 __ Ret();
708 656
709 __ bind(&index_name); 657 __ bind(&index_name);
710 __ IndexFromHash(a3, key); 658 __ IndexFromHash(a3, key);
711 // Now jump to the place where smi keys are handled. 659 // Now jump to the place where smi keys are handled.
712 __ Branch(&index_smi); 660 __ Branch(&index_smi);
713 } 661 }
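
The number-dictionary path above untags the key with sra a0, key, kSmiTagSize. A sketch of 32-bit smi tagging, where the payload sits in the upper 31 bits over a zero tag bit:

#include <cstdint>

int32_t SmiTag(int32_t value) {  // shift the payload over the zero tag bit
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

int32_t SmiUntag(int32_t tagged) {
  return tagged >> 1;  // arithmetic shift, like the sra above
}
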
714 662
715 663
716 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { 664 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
717 // Return address is in ra. 665 // Return address is in ra.
718 Label miss; 666 Label miss;
719 667
720 Register receiver = ReceiverRegister(); 668 Register receiver = ReceiverRegister();
721 Register index = NameRegister(); 669 Register index = NameRegister();
722 Register scratch = a3; 670 Register scratch = a3;
723 Register result = v0; 671 Register result = v0;
724 DCHECK(!scratch.is(receiver) && !scratch.is(index)); 672 DCHECK(!scratch.is(receiver) && !scratch.is(index));
725 673
726 StringCharAtGenerator char_at_generator(receiver, 674 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
727 index,
728 scratch,
729 result,
730 &miss, // When not a string. 675 &miss, // When not a string.
731 &miss, // When not a number. 676 &miss, // When not a number.
732 &miss, // When index out of range. 677 &miss, // When index out of range.
733 STRING_INDEX_IS_ARRAY_INDEX); 678 STRING_INDEX_IS_ARRAY_INDEX);
734 char_at_generator.GenerateFast(masm); 679 char_at_generator.GenerateFast(masm);
735 __ Ret(); 680 __ Ret();
736 681
737 StubRuntimeCallHelper call_helper; 682 StubRuntimeCallHelper call_helper;
738 char_at_generator.GenerateSlow(masm, call_helper); 683 char_at_generator.GenerateSlow(masm, call_helper);
739 684
740 __ bind(&miss); 685 __ bind(&miss);
741 GenerateMiss(masm); 686 GenerateMiss(masm);
742 } 687 }
743 688
744 689
745 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, 690 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
746 StrictMode strict_mode) { 691 StrictMode strict_mode) {
747 // Push receiver, key and value for runtime call. 692 // Push receiver, key and value for runtime call.
748 __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); 693 __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
749 __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode. 694 __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
750 __ Push(a0); 695 __ Push(a0);
751 696
752 __ TailCallRuntime(Runtime::kSetProperty, 4, 1); 697 __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
753 } 698 }
754 699
755 700
756 static void KeyedStoreGenerateGenericHelper( 701 static void KeyedStoreGenerateGenericHelper(
757 MacroAssembler* masm, 702 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
758 Label* fast_object, 703 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
759 Label* fast_double, 704 Register value, Register key, Register receiver, Register receiver_map,
760 Label* slow, 705 Register elements_map, Register elements) {
761 KeyedStoreCheckMap check_map,
762 KeyedStoreIncrementLength increment_length,
763 Register value,
764 Register key,
765 Register receiver,
766 Register receiver_map,
767 Register elements_map,
768 Register elements) {
769 Label transition_smi_elements; 706 Label transition_smi_elements;
770 Label finish_object_store, non_double_value, transition_double_elements; 707 Label finish_object_store, non_double_value, transition_double_elements;
771 Label fast_double_without_map_check; 708 Label fast_double_without_map_check;
772 709
773 // Fast case: Do the store, could be either Object or double. 710 // Fast case: Do the store, could be either Object or double.
774 __ bind(fast_object); 711 __ bind(fast_object);
775 Register scratch_value = t0; 712 Register scratch_value = t0;
776 Register address = t1; 713 Register address = t1;
777 if (check_map == kCheckMap) { 714 if (check_map == kCheckMap) {
778 __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); 715 __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
(...skipping 43 matching lines...)
822 // Add 1 to receiver->length. 759 // Add 1 to receiver->length.
823 __ Addu(scratch_value, key, Operand(Smi::FromInt(1))); 760 __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
824 __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); 761 __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
825 } 762 }
826 __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 763 __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
827 __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize); 764 __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
828 __ Addu(address, address, scratch_value); 765 __ Addu(address, address, scratch_value);
829 __ sw(value, MemOperand(address)); 766 __ sw(value, MemOperand(address));
830 // Update write barrier for the elements array address. 767 // Update write barrier for the elements array address.
831 __ mov(scratch_value, value); // Preserve the value which is returned. 768 __ mov(scratch_value, value); // Preserve the value which is returned.
832 __ RecordWrite(elements, 769 __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
833 address, 770 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
834 scratch_value,
835 kRAHasNotBeenSaved,
836 kDontSaveFPRegs,
837 EMIT_REMEMBERED_SET,
838 OMIT_SMI_CHECK);
839 __ Ret(); 771 __ Ret();
840 772
841 __ bind(fast_double); 773 __ bind(fast_double);
842 if (check_map == kCheckMap) { 774 if (check_map == kCheckMap) {
843 // Check for fast double array case. If this fails, call through to the 775 // Check for fast double array case. If this fails, call through to the
844 // runtime. 776 // runtime.
845 __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); 777 __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
846 __ Branch(slow, ne, elements_map, Operand(at)); 778 __ Branch(slow, ne, elements_map, Operand(at));
847 } 779 }
848 780
849 // HOLECHECK: guards "A[i] double hole?" 781 // HOLECHECK: guards "A[i] double hole?"
850 // We have to see if the double version of the hole is present. If so 782 // We have to see if the double version of the hole is present. If so
851 // go to the runtime. 783 // go to the runtime.
852 __ Addu(address, elements, 784 __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
853 Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset 785 kHoleNanUpper32Offset - kHeapObjectTag));
854 - kHeapObjectTag));
855 __ sll(at, key, kPointerSizeLog2); 786 __ sll(at, key, kPointerSizeLog2);
856 __ addu(address, address, at); 787 __ addu(address, address, at);
857 __ lw(scratch_value, MemOperand(address)); 788 __ lw(scratch_value, MemOperand(address));
858 __ Branch(&fast_double_without_map_check, ne, scratch_value, 789 __ Branch(&fast_double_without_map_check, ne, scratch_value,
859 Operand(kHoleNanUpper32)); 790 Operand(kHoleNanUpper32));
860 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, 791 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
861 slow); 792 slow);
862 793
863 __ bind(&fast_double_without_map_check); 794 __ bind(&fast_double_without_map_check);
864 __ StoreNumberToDoubleElements(value, 795 __ StoreNumberToDoubleElements(value, key,
865 key,
866 elements, // Overwritten. 796 elements, // Overwritten.
867 a3, // Scratch regs... 797 a3, // Scratch regs...
868 t0, 798 t0, t1, &transition_double_elements);
869 t1,
870 &transition_double_elements);
871 if (increment_length == kIncrementLength) { 799 if (increment_length == kIncrementLength) {
872 // Add 1 to receiver->length. 800 // Add 1 to receiver->length.
873 __ Addu(scratch_value, key, Operand(Smi::FromInt(1))); 801 __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
874 __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); 802 __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
875 } 803 }
876 __ Ret(); 804 __ Ret();
877 805
878 __ bind(&transition_smi_elements); 806 __ bind(&transition_smi_elements);
879 // Transition the array appropriately depending on the value type. 807 // Transition the array appropriately depending on the value type.
880 __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset)); 808 __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
881 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 809 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
882 __ Branch(&non_double_value, ne, t0, Operand(at)); 810 __ Branch(&non_double_value, ne, t0, Operand(at));
883 811
884 // Value is a double. Transition FAST_SMI_ELEMENTS -> 812 // Value is a double. Transition FAST_SMI_ELEMENTS ->
885 // FAST_DOUBLE_ELEMENTS and complete the store. 813 // FAST_DOUBLE_ELEMENTS and complete the store.
886 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, 814 __ LoadTransitionedArrayMapConditional(
887 FAST_DOUBLE_ELEMENTS, 815 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow);
888 receiver_map, 816 AllocationSiteMode mode =
889 t0, 817 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
890 slow); 818 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
891 AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, 819 receiver_map, mode, slow);
892 FAST_DOUBLE_ELEMENTS);
893 ElementsTransitionGenerator::GenerateSmiToDouble(
894 masm, receiver, key, value, receiver_map, mode, slow);
895 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 820 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
896 __ jmp(&fast_double_without_map_check); 821 __ jmp(&fast_double_without_map_check);
897 822
898 __ bind(&non_double_value); 823 __ bind(&non_double_value);
899 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS 824 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
900 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, 825 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
901 FAST_ELEMENTS, 826 receiver_map, t0, slow);
902 receiver_map,
903 t0,
904 slow);
905 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); 827 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
906 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( 828 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
907 masm, receiver, key, value, receiver_map, mode, slow); 829 masm, receiver, key, value, receiver_map, mode, slow);
908 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 830 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
909 __ jmp(&finish_object_store); 831 __ jmp(&finish_object_store);
910 832
911 __ bind(&transition_double_elements); 833 __ bind(&transition_double_elements);
912 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a 834 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
913 // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and 835 // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
914 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS 836 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
915 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, 837 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
916 FAST_ELEMENTS, 838 receiver_map, t0, slow);
917 receiver_map,
918 t0,
919 slow);
920 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); 839 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
921 ElementsTransitionGenerator::GenerateDoubleToObject( 840 ElementsTransitionGenerator::GenerateDoubleToObject(
922 masm, receiver, key, value, receiver_map, mode, slow); 841 masm, receiver, key, value, receiver_map, mode, slow);
923 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 842 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
924 __ jmp(&finish_object_store); 843 __ jmp(&finish_object_store);
925 } 844 }
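
A sketch of the elements-kind lattice this helper walks when a store does not fit the current representation (enum values illustrative): smi-only arrays transition to double arrays for heap-number values and to tagged-object arrays otherwise, and double arrays transition to tagged-object arrays for non-number values.

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

ElementsKind TransitionFor(ElementsKind current, bool value_is_smi,
                           bool value_is_heap_number) {
  if (value_is_smi) return current;  // a smi fits in every kind
  if (current == FAST_SMI_ELEMENTS)
    return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
  if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number)
    return FAST_ELEMENTS;  // the transition_double_elements path
  return current;
}
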
926 845
927 846
928 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, 847 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
929 StrictMode strict_mode) { 848 StrictMode strict_mode) {
(...skipping 19 matching lines...)
949 868
950 // Check that the key is a smi. 869 // Check that the key is a smi.
951 __ JumpIfNotSmi(key, &slow); 870 __ JumpIfNotSmi(key, &slow);
952 // Check that the object isn't a smi. 871 // Check that the object isn't a smi.
953 __ JumpIfSmi(receiver, &slow); 872 __ JumpIfSmi(receiver, &slow);
954 // Get the map of the object. 873 // Get the map of the object.
955 __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); 874 __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
956 // Check that the receiver does not require access checks and is not observed. 875 // Check that the receiver does not require access checks and is not observed.
957 // The generic stub does not perform map checks or handle observed objects. 876 // The generic stub does not perform map checks or handle observed objects.
958 __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); 877 __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
959 __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded | 878 __ And(t0, t0,
960 1 << Map::kIsObserved)); 879 Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
961 __ Branch(&slow, ne, t0, Operand(zero_reg)); 880 __ Branch(&slow, ne, t0, Operand(zero_reg));
962 // Check if the object is a JS array or not. 881 // Check if the object is a JS array or not.
963 __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); 882 __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
964 __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE)); 883 __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
965 // Check that the object is some kind of JSObject. 884 // Check that the object is some kind of JSObject.
966 __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE)); 885 __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
967 886
968 // Object case: Check key against length in the elements array. 887 // Object case: Check key against length in the elements array.
969 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 888 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
970 // Check array bounds. Both the key and the length of FixedArray are smis. 889 // Check array bounds. Both the key and the length of FixedArray are smis.
(...skipping 13 matching lines...)
984 // element to the array by writing to array[array.length]. 903 // element to the array by writing to array[array.length].
985 __ bind(&extra); 904 __ bind(&extra);
986 // Condition code from comparing key and array length is still available. 905 // Condition code from comparing key and array length is still available.
987 // Only support writing to array[array.length]. 906 // Only support writing to array[array.length].
988 __ Branch(&slow, ne, key, Operand(t0)); 907 __ Branch(&slow, ne, key, Operand(t0));
989 // Check for room in the elements backing store. 908 // Check for room in the elements backing store.
990 // Both the key and the length of FixedArray are smis. 909 // Both the key and the length of FixedArray are smis.
991 __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); 910 __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
992 __ Branch(&slow, hs, key, Operand(t0)); 911 __ Branch(&slow, hs, key, Operand(t0));
993 __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); 912 __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
994 __ Branch( 913 __ Branch(&check_if_double_array, ne, elements_map,
995 &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex); 914 Heap::kFixedArrayMapRootIndex);
996 915
997 __ jmp(&fast_object_grow); 916 __ jmp(&fast_object_grow);
998 917
999 __ bind(&check_if_double_array); 918 __ bind(&check_if_double_array);
1000 __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex); 919 __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
1001 __ jmp(&fast_double_grow); 920 __ jmp(&fast_double_grow);
1002 921
1003 // Array case: Get the length and the elements array from the JS 922 // Array case: Get the length and the elements array from the JS
1004 // array. Check that the array is in fast mode (and writable); if it 923 // array. Check that the array is in fast mode (and writable); if it
1005 // is the length is always a smi. 924 // is the length is always a smi.
1006 __ bind(&array); 925 __ bind(&array);
1007 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 926 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1008 927
1009 // Check the key against the length in the array. 928 // Check the key against the length in the array.
1010 __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset)); 929 __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1011 __ Branch(&extra, hs, key, Operand(t0)); 930 __ Branch(&extra, hs, key, Operand(t0));
1012 931
1013 KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, 932 KeyedStoreGenerateGenericHelper(
1014 &slow, kCheckMap, kDontIncrementLength, 933 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
1015 value, key, receiver, receiver_map, 934 value, key, receiver, receiver_map, elements_map, elements);
1016 elements_map, elements);
1017 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, 935 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
1018 &slow, kDontCheckMap, kIncrementLength, 936 &slow, kDontCheckMap, kIncrementLength, value,
1019 value, key, receiver, receiver_map, 937 key, receiver, receiver_map, elements_map,
1020 elements_map, elements); 938 elements);
1021 } 939 }
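
A sketch of the grow-on-store rule implemented by the &extra block: only a store at exactly array.length may grow a JSArray, and only while the elements backing store already has room; everything else falls through to the runtime.

bool TryStoreWithGrow(int* elements, int capacity, int& length,
                      int key, int value) {
  if (key < length) {  // in-bounds fast store
    elements[key] = value;
    return true;
  }
  if (key == length && key < capacity) {  // store at array[array.length]
    elements[key] = value;
    length = key + 1;  // add 1 to receiver->length
    return true;
  }
  return false;  // key > length or no room: runtime slow path
}
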


 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   // Return address is in ra.
   Label slow;

   Register receiver = ReceiverRegister();
   Register key = NameRegister();
   Register scratch1 = a3;
(...skipping 13 matching lines...)

   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
   __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
   __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
   __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor));
   // Everything is fine, call runtime.
   __ Push(receiver, key);  // Receiver, key.

   // Perform tail call to the entry.
-  __ TailCallExternalReference(ExternalReference(
-      IC_Utility(kLoadElementWithInterceptor), masm->isolate()), 2, 1);
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(kLoadElementWithInterceptor),
+                        masm->isolate()),
+      2, 1);

   __ bind(&slow);
   GenerateMiss(masm);
 }
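The interceptor fast path above reduces to a single bit test: after masking the map's bit field with kSlowCaseBitFieldMask, the result must equal exactly the indexed-interceptor bit, meaning an interceptor is present and access checks are off. A self-contained sketch of that predicate; the bit positions here are placeholders, the real ones are defined on Map in src/objects.h:

    #include <cstdint>

    // Placeholder bit positions; illustrative only.
    constexpr uint8_t kHasIndexedInterceptor = 1u << 4;
    constexpr uint8_t kIsAccessCheckNeeded = 1u << 5;
    constexpr uint8_t kSlowCaseBitFieldMask =
        kHasIndexedInterceptor | kIsAccessCheckNeeded;

    // Mirrors the And + Branch pair: mask out the interesting bits, then
    // require that exactly the indexed-interceptor bit survives.
    bool TakesInterceptorFastPath(uint8_t map_bit_field) {
      return (map_bit_field & kSlowCaseBitFieldMask) == kHasIndexedInterceptor;
    }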


 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // Push receiver, key and value for runtime call.
   __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

(...skipping 32 matching lines...)
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Register receiver = ReceiverRegister();
   Register name = NameRegister();
   DCHECK(receiver.is(a1));
   DCHECK(name.is(a2));
   DCHECK(ValueRegister().is(a0));

   // Get the receiver from the stack and probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, receiver, name, a3, t0, t1, t2);
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, a3,
+                                               t0, t1, t2);

   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
 }
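GenerateProbe emits the inline probe of the megamorphic stub cache: two fixed-size tables keyed by a hash of (name, receiver map), with a secondary table tried under a different hash when the primary misses. A rough standalone model of the lookup shape; the table size and hash mixing are invented, the real scheme lives in src/ic/stub-cache.cc:

    #include <cstddef>
    #include <cstdint>

    struct Entry {
      const void* name;
      const void* map;
      const void* handler;  // code to jump to on a hit
    };

    constexpr size_t kTableSize = 512;  // invented; real sizes differ
    Entry primary[kTableSize];
    Entry secondary[kTableSize];

    const void* Probe(const void* name, const void* map) {
      uintptr_t n = reinterpret_cast<uintptr_t>(name);
      uintptr_t m = reinterpret_cast<uintptr_t>(map);
      size_t p = (n ^ m) % kTableSize;  // primary hash
      if (primary[p].name == name && primary[p].map == map)
        return primary[p].handler;
      size_t s = (p + m) % kTableSize;  // secondary hash
      if (secondary[s].name == name && secondary[s].map == map)
        return secondary[s].handler;
      return nullptr;  // miss: generated code falls through to GenerateMiss
    }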


 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
   // Perform tail call to the entry.
-  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
-                                            masm->isolate());
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }


 void StoreIC::GenerateNormal(MacroAssembler* masm) {
   Label miss;
   Register receiver = ReceiverRegister();
   Register name = NameRegister();
   Register value = ValueRegister();
   Register dictionary = a3;
(...skipping 51 matching lines...)

 bool CompareIC::HasInlinedSmiCode(Address address) {
   // The address of the instruction following the call.
   Address andi_instruction_address =
       address + Assembler::kCallTargetAddressOffset;

   // If the instruction following the call is not an andi at, rx, #yyy,
   // nothing was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
   return Assembler::IsAndImmediate(instr) &&
          Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
 }
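The marker being tested here is architecturally cheap: andi is a MIPS I-type instruction (opcode in bits 31:26, rs 25:21, rt 20:16, imm 15:0), and a write to $zero is discarded, so an "andi zero_reg, rx, #delta" after the call site is a no-op at runtime that merely records metadata. A standalone decoder for the fields the code reads; the field layout is architectural, but the helper names are mine, not V8's:

    #include <cstdint>

    constexpr uint32_t kAndiOpcode = 0x0C;  // MIPS: andi rt, rs, imm16

    bool IsAndImmediate(uint32_t instr) { return (instr >> 26) == kAndiOpcode; }
    uint32_t GetRs(uint32_t instr) { return (instr >> 21) & 0x1F; }
    uint32_t GetRt(uint32_t instr) { return (instr >> 16) & 0x1F; }
    uint32_t GetImmediate16(uint32_t instr) { return instr & 0xFFFF; }

    // Mirrors HasInlinedSmiCode: the marker is an andi whose destination
    // register (rt) is $zero, i.e. a deliberate no-op.
    bool IsSmiCheckMarker(uint32_t instr) {
      return IsAndImmediate(instr) && GetRt(instr) == 0 /* zero_reg */;
    }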


 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address andi_instruction_address =
       address + Assembler::kCallTargetAddressOffset;

   // If the instruction following the call is not an andi at, rx, #yyy,
   // nothing was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
   if (!(Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
     return;
   }

   // The delta to the start of the map check instruction and the
   // condition code used at the patched jump.
   int delta = Assembler::GetImmediate16(instr);
   delta += Assembler::GetRs(instr) * kImm16Mask;
   // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
   // signals that nothing was inlined.
   if (delta == 0) {
     return;
   }

   if (FLAG_trace_ic) {
-    PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
-           address, andi_instruction_address, delta);
+    PrintF("[ patching ic at %p, andi=%p, delta=%d\n", address,
+           andi_instruction_address, delta);
   }

   Address patch_address =
       andi_instruction_address - delta * Instruction::kInstrSize;
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
   // This is patching a conditional "jump if not smi/jump if smi" site.
   // Enabling by changing from
   //   andi at, rx, 0
(...skipping 14 matching lines...)
     patcher.masm()->andi(at, reg, 0);
   }
   DCHECK(Assembler::IsBranch(branch_instr));
   if (Assembler::IsBeq(branch_instr)) {
     patcher.ChangeBranchCondition(ne);
   } else {
     DCHECK(Assembler::IsBne(branch_instr));
     patcher.ChangeBranchCondition(eq);
   }
 }
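ChangeBranchCondition leans on a convenient property of the MIPS encoding: beq (opcode 0b000100) and bne (0b000101) differ in a single opcode bit, so toggling a smi check between "jump if smi" and "jump if not smi" only rewrites that bit plus the andi mask. A sketch of the bit manipulation under those encoding facts, ignoring the CodePatcher and instruction-cache flushing the real code goes through:

    #include <cstdint>

    constexpr uint32_t kOpcodeMask = 0x3Fu << 26;
    constexpr uint32_t kBeq = 4u << 26;  // beq rs, rt, offset
    constexpr uint32_t kBne = 5u << 26;  // bne rs, rt, offset

    // Swap beq <-> bne, preserving registers and branch offset.
    uint32_t FlipBranchCondition(uint32_t branch_instr) {
      uint32_t opcode = branch_instr & kOpcodeMask;
      uint32_t flipped = (opcode == kBeq) ? kBne : kBeq;
      return (branch_instr & ~kOpcodeMask) | flipped;
    }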
-
-
-} }  // namespace v8::internal
+}
+}  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_MIPS