OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_PPC | 5 #if V8_TARGET_ARCH_S390 |
6 | 6 |
| 7 #include "src/ic/ic.h" |
7 #include "src/codegen.h" | 8 #include "src/codegen.h" |
8 #include "src/ic/ic.h" | |
9 #include "src/ic/ic-compiler.h" | 9 #include "src/ic/ic-compiler.h" |
10 #include "src/ic/stub-cache.h" | 10 #include "src/ic/stub-cache.h" |
11 | 11 |
12 namespace v8 { | 12 namespace v8 { |
13 namespace internal { | 13 namespace internal { |
14 | 14 |
15 | |
16 // ---------------------------------------------------------------------------- | 15 // ---------------------------------------------------------------------------- |
17 // Static IC stub generators. | 16 // Static IC stub generators. |
18 // | 17 // |
19 | 18 |
20 #define __ ACCESS_MASM(masm) | 19 #define __ ACCESS_MASM(masm) |
21 | 20 |
22 | |
23 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, | 21 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, |
24 Label* global_object) { | 22 Label* global_object) { |
25 // Register usage: | 23 // Register usage: |
26 // type: holds the receiver instance type on entry. | 24 // type: holds the receiver instance type on entry. |
27 __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE)); | 25 __ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE)); |
28 __ beq(global_object); | 26 __ beq(global_object); |
29 __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE)); | 27 __ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE)); |
30 __ beq(global_object); | 28 __ beq(global_object); |
31 } | 29 } |
32 | 30 |
33 | |
34 // Helper function used from LoadIC GenerateNormal. | 31 // Helper function used from LoadIC GenerateNormal. |
35 // | 32 // |
36 // elements: Property dictionary. It is not clobbered if a jump to the miss | 33 // elements: Property dictionary. It is not clobbered if a jump to the miss |
37 // label is done. | 34 // label is done. |
38 // name: Property name. It is not clobbered if a jump to the miss label is | 35 // name: Property name. It is not clobbered if a jump to the miss label is |
39 // done. | 36 // done. |
40 // result: Register for the result. It is only updated if a jump to the miss | 37 // result: Register for the result. It is only updated if a jump to the miss |
41 // label is not done. Can be the same as elements or name, clobbering | 38 // label is not done. Can be the same as elements or name, clobbering |
42 // one of these in the case of not jumping to the miss label. | 39 // one of these in the case of not jumping to the miss label. |
43 // The two scratch registers need to be different from elements, name and | 40 // The two scratch registers need to be different from elements, name and |
(...skipping 15 matching lines...) |
59 name, scratch1, scratch2); | 56 name, scratch1, scratch2); |
60 | 57 |
61 // If probing finds an entry check that the value is a normal | 58 // If probing finds an entry check that the value is a normal |
62 // property. | 59 // property. |
63 __ bind(&done); // scratch2 == elements + 4 * index | 60 __ bind(&done); // scratch2 == elements + 4 * index |
64 const int kElementsStartOffset = | 61 const int kElementsStartOffset = |
65 NameDictionary::kHeaderSize + | 62 NameDictionary::kHeaderSize + |
66 NameDictionary::kElementsStartIndex * kPointerSize; | 63 NameDictionary::kElementsStartIndex * kPointerSize; |
67 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 64 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
68 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 65 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
69 __ mr(r0, scratch2); | 66 __ LoadRR(r0, scratch2); |
70 __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask)); | 67 __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask)); |
71 __ and_(scratch2, scratch1, scratch2, SetRC); | 68 __ AndP(scratch2, scratch1); |
72 __ bne(miss, cr0); | 69 __ bne(miss); |
73 __ mr(scratch2, r0); | 70 __ LoadRR(scratch2, r0); |
74 | 71 |
75 // Get the value at the masked, scaled index and return. | 72 // Get the value at the masked, scaled index and return. |
76 __ LoadP(result, | 73 __ LoadP(result, |
77 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); | 74 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); |
78 } | 75 } |
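
The probe leaves scratch2 pointing at the entry's key slot; the value and details words sit one and two pointers past it, which is where kElementsStartOffset + 1 * kPointerSize and kDetailsOffset come from. The following is a minimal standalone C++ sketch of that offset arithmetic; the header size, start index, and entry width are illustrative assumptions, not V8's actual constants.

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;                // assumption: 64-bit build
      const int kHeaderSize = 3 * kPointerSize;  // hypothetical table header
      const int kElementsStartIndex = 3;         // hypothetical start index
      const int kEntrySize = 3;                  // entries are (key, value, details)

      const int kElementsStartOffset =
          kHeaderSize + kElementsStartIndex * kPointerSize;
      int entry = 5;  // index the probe found
      int key_offset     = kElementsStartOffset + entry * kEntrySize * kPointerSize;
      int value_offset   = key_offset + 1 * kPointerSize;  // the loaded result
      int details_offset = key_offset + 2 * kPointerSize;  // the checked details
      printf("key@%d value@%d details@%d\n", key_offset, value_offset, details_offset);
      return 0;
    }
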
79 | 76 |
80 | |
81 // Helper function used from StoreIC::GenerateNormal. | 77 // Helper function used from StoreIC::GenerateNormal. |
82 // | 78 // |
83 // elements: Property dictionary. It is not clobbered if a jump to the miss | 79 // elements: Property dictionary. It is not clobbered if a jump to the miss |
84 // label is done. | 80 // label is done. |
85 // name: Property name. It is not clobbered if a jump to the miss label is | 81 // name: Property name. It is not clobbered if a jump to the miss label is |
86 // done. | 82 // done. |
87 // value: The value to store. | 83 // value: The value to store. |
88 // The two scratch registers need to be different from elements, name and | 84 // The two scratch registers need to be different from elements, name and |
89 // result. | 85 // result. |
90 // The generated code assumes that the receiver has slow properties, | 86 // The generated code assumes that the receiver has slow properties, |
(...skipping 16 matching lines...) |
107 // is a normal property that is not read only. | 103 // is a normal property that is not read only. |
108 __ bind(&done); // scratch2 == elements + 4 * index | 104 __ bind(&done); // scratch2 == elements + 4 * index |
109 const int kElementsStartOffset = | 105 const int kElementsStartOffset = |
110 NameDictionary::kHeaderSize + | 106 NameDictionary::kHeaderSize + |
111 NameDictionary::kElementsStartIndex * kPointerSize; | 107 NameDictionary::kElementsStartIndex * kPointerSize; |
112 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 108 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
113 int kTypeAndReadOnlyMask = | 109 int kTypeAndReadOnlyMask = |
114 PropertyDetails::TypeField::kMask | | 110 PropertyDetails::TypeField::kMask | |
115 PropertyDetails::AttributesField::encode(READ_ONLY); | 111 PropertyDetails::AttributesField::encode(READ_ONLY); |
116 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 112 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
117 __ mr(r0, scratch2); | 113 __ LoadRR(r0, scratch2); |
118 __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask)); | 114 __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask)); |
119 __ and_(scratch2, scratch1, scratch2, SetRC); | 115 __ AndP(scratch2, scratch1); |
120 __ bne(miss, cr0); | 116 __ bne(miss /*, cr0*/); |
121 __ mr(scratch2, r0); | 117 __ LoadRR(scratch2, r0); |
122 | 118 |
123 // Store the value at the masked, scaled index and return. | 119 // Store the value at the masked, scaled index and return. |
124 const int kValueOffset = kElementsStartOffset + kPointerSize; | 120 const int kValueOffset = kElementsStartOffset + kPointerSize; |
125 __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); | 121 __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag)); |
126 __ StoreP(value, MemOperand(scratch2)); | 122 __ StoreP(value, MemOperand(scratch2)); |
127 | 123 |
128 // Update the write barrier. Make sure not to clobber the value. | 124 // Update the write barrier. Make sure not to clobber the value. |
129 __ mr(scratch1, value); | 125 __ LoadRR(scratch1, value); |
130 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, | 126 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, |
131 kDontSaveFPRegs); | 127 kDontSaveFPRegs); |
132 } | 128 } |
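
The store variant extends the details check with the READ_ONLY attribute bit: a single AND against the combined mask sends both non-normal and read-only properties to the miss label. Below is a minimal sketch of that combined-mask test, with made-up field layouts rather than V8's real PropertyDetails encoding.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kTypeMask    = 0x3;      // hypothetical type field bits
      const uint32_t kReadOnlyBit = 1u << 2;  // hypothetical READ_ONLY attribute
      const uint32_t kTypeAndReadOnlyMask = kTypeMask | kReadOnlyBit;

      uint32_t details = kReadOnlyBit;  // e.g. a read-only normal data property
      if (details & kTypeAndReadOnlyMask) {
        printf("miss: not a writable normal property\n");  // take the slow path
      } else {
        printf("fast path: store in place, then write barrier\n");
      }
      return 0;
    }
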
133 | 129 |
134 | |
135 // Checks the receiver for special cases (value type, slow case bits). | 130 // Checks the receiver for special cases (value type, slow case bits). |
136 // Falls through for regular JS object. | 131 // Falls through for regular JS object. |
137 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, | 132 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, |
138 Register receiver, Register map, | 133 Register receiver, Register map, |
139 Register scratch, | 134 Register scratch, |
140 int interceptor_bit, Label* slow) { | 135 int interceptor_bit, Label* slow) { |
141 // Check that the object isn't a smi. | 136 // Check that the object isn't a smi. |
142 __ JumpIfSmi(receiver, slow); | 137 __ JumpIfSmi(receiver, slow); |
143 // Get the map of the receiver. | 138 // Get the map of the receiver. |
144 __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 139 __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
145 // Check bit field. | 140 // Check bit field. |
146 __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); | 141 __ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
147 DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000); | 142 DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000); |
148 __ andi(r0, scratch, | 143 __ mov(r0, |
149 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); | 144 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); |
150 __ bne(slow, cr0); | 145 __ AndP(r0, scratch); |
| 146 __ bne(slow /*, cr0*/); |
151 // Check that the object is some kind of JS object EXCEPT JS Value type. | 147 // Check that the object is some kind of JS object EXCEPT JS Value type. |
152 // In the case that the object is a value-wrapper object, | 148 // In the case that the object is a value-wrapper object, |
153 // we enter the runtime system to make sure that indexing into string | 149 // we enter the runtime system to make sure that indexing into string |
154 // objects works as intended. | 150 // objects works as intended. |
155 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); | 151 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); |
156 __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 152 __ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
157 __ cmpi(scratch, Operand(JS_OBJECT_TYPE)); | 153 __ CmpP(scratch, Operand(JS_OBJECT_TYPE)); |
158 __ blt(slow); | 154 __ blt(slow); |
159 } | 155 } |
160 | 156 |
161 | |
162 // Loads an indexed element from a fast case array. | 157 // Loads an indexed element from a fast case array. |
163 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, | 158 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, |
164 Register key, Register elements, | 159 Register key, Register elements, |
165 Register scratch1, Register scratch2, | 160 Register scratch1, Register scratch2, |
166 Register result, Label* slow) { | 161 Register result, Label* slow) { |
167 // Register use: | 162 // Register use: |
168 // | 163 // |
169 // receiver - holds the receiver on entry. | 164 // receiver - holds the receiver on entry. |
170 // Unchanged unless 'result' is the same register. | 165 // Unchanged unless 'result' is the same register. |
171 // | 166 // |
(...skipping 13 matching lines...) |
185 // | 180 // |
186 // scratch2 - used to hold maps, prototypes, and the loaded value. | 181 // scratch2 - used to hold maps, prototypes, and the loaded value. |
187 Label check_prototypes, check_next_prototype; | 182 Label check_prototypes, check_next_prototype; |
188 Label done, in_bounds, absent; | 183 Label done, in_bounds, absent; |
189 | 184 |
190 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 185 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
191 __ AssertFastElements(elements); | 186 __ AssertFastElements(elements); |
192 | 187 |
193 // Check that the key (index) is within bounds. | 188 // Check that the key (index) is within bounds. |
194 __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 189 __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
195 __ cmpl(key, scratch1); | 190 __ CmpLogicalP(key, scratch1); |
196 __ blt(&in_bounds); | 191 __ blt(&in_bounds, Label::kNear); |
197 // Out-of-bounds. Check the prototype chain to see if we can just return | 192 // Out-of-bounds. Check the prototype chain to see if we can just return |
198 // 'undefined'. | 193 // 'undefined'. |
199 __ cmpi(key, Operand::Zero()); | 194 __ CmpP(key, Operand::Zero()); |
200 __ blt(slow); // Negative keys can't take the fast OOB path. | 195 __ blt(slow); // Negative keys can't take the fast OOB path. |
201 __ bind(&check_prototypes); | 196 __ bind(&check_prototypes); |
202 __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 197 __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
203 __ bind(&check_next_prototype); | 198 __ bind(&check_next_prototype); |
204 __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset)); | 199 __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset)); |
205 // scratch2: current prototype | 200 // scratch2: current prototype |
206 __ CompareRoot(scratch2, Heap::kNullValueRootIndex); | 201 __ CompareRoot(scratch2, Heap::kNullValueRootIndex); |
207 __ beq(&absent); | 202 __ beq(&absent, Label::kNear); |
208 __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset)); | 203 __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset)); |
209 __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset)); | 204 __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset)); |
210 // elements: elements of current prototype | 205 // elements: elements of current prototype |
211 // scratch2: map of current prototype | 206 // scratch2: map of current prototype |
212 __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE); | 207 __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE); |
213 __ blt(slow); | 208 __ blt(slow); |
214 __ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset)); | 209 __ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset)); |
215 __ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) | | 210 __ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) | |
216 (1 << Map::kHasIndexedInterceptor))); | 211 (1 << Map::kHasIndexedInterceptor))); |
217 __ bne(slow, cr0); | 212 __ bne(slow); |
218 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex); | 213 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex); |
219 __ bne(slow); | 214 __ bne(slow); |
220 __ jmp(&check_next_prototype); | 215 __ jmp(&check_next_prototype); |
221 | 216 |
222 __ bind(&absent); | 217 __ bind(&absent); |
223 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 218 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
224 __ jmp(&done); | 219 __ jmp(&done); |
225 | 220 |
226 __ bind(&in_bounds); | 221 __ bind(&in_bounds); |
227 // Fast case: Do the load. | 222 // Fast case: Do the load. |
228 __ addi(scratch1, elements, | 223 __ AddP(scratch1, elements, |
229 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 224 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
230 // The key is a smi. | 225 // The key is a smi. |
231 __ SmiToPtrArrayOffset(scratch2, key); | 226 __ SmiToPtrArrayOffset(scratch2, key); |
232 __ LoadPX(scratch2, MemOperand(scratch2, scratch1)); | 227 __ LoadP(scratch2, MemOperand(scratch2, scratch1)); |
233 __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex); | 228 __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex); |
234 // In case the loaded value is the_hole we have to check the prototype chain. | 229 // In case the loaded value is the_hole we have to check the prototype chain. |
235 __ beq(&check_prototypes); | 230 __ beq(&check_prototypes); |
236 __ mr(result, scratch2); | 231 __ LoadRR(result, scratch2); |
237 __ bind(&done); | 232 __ bind(&done); |
238 } | 233 } |
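
GenerateFastArrayLoad only returns 'undefined' for an out-of-bounds or hole access after proving that every prototype up the chain has empty fast elements and no interceptors or access checks; anything suspicious bails to the slow path. A compilable C++ sketch of that control flow follows, with heavily simplified stand-in types; interceptor and access-check tests are collapsed into the "prototype has its own elements" check.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Object { bool is_hole; int value; };
    struct Proto {
      std::vector<Object> elements;
      Proto* prototype;  // nullptr plays the role of the null prototype
    };

    // Returns nullptr for 'undefined'; *go_slow mirrors the stub's slow label.
    const Object* FastArrayLoad(Proto* receiver, size_t key, bool* go_slow) {
      *go_slow = false;
      if (key < receiver->elements.size() && !receiver->elements[key].is_hole)
        return &receiver->elements[key];
      // OOB or the hole: walk prototypes; any prototype with elements of its
      // own (or, in the real stub, interceptors) forces the slow path.
      for (Proto* p = receiver->prototype; p != nullptr; p = p->prototype) {
        if (!p->elements.empty()) { *go_slow = true; return nullptr; }
      }
      return nullptr;  // stands in for 'undefined'
    }

    int main() {
      Proto proto{{}, nullptr};
      Proto obj{{{false, 42}}, &proto};
      bool slow;
      const Object* r = FastArrayLoad(&obj, 0, &slow);
      printf("%d slow=%d\n", r ? r->value : -1, (int)slow);  // 42 slow=0
      return 0;
    }
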
239 | 234 |
240 | |
241 // Checks whether a key is an array index string or a unique name. | 235 // Checks whether a key is an array index string or a unique name. |
242 // Falls through if a key is a unique name. | 236 // Falls through if a key is a unique name. |
243 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, | 237 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, |
244 Register map, Register hash, | 238 Register map, Register hash, |
245 Label* index_string, Label* not_unique) { | 239 Label* index_string, Label* not_unique) { |
246 // The key is not a smi. | 240 // The key is not a smi. |
247 Label unique; | 241 Label unique; |
248 // Is it a name? | 242 // Is it a name? |
249 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); | 243 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); |
250 __ bgt(not_unique); | 244 __ bgt(not_unique); |
251 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); | 245 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); |
252 __ beq(&unique); | 246 __ beq(&unique, Label::kNear); |
253 | 247 |
254 // Is the string an array index, with cached numeric value? | 248 // Is the string an array index, with cached numeric value? |
255 __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 249 __ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
256 __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask)); | 250 __ mov(r7, Operand(Name::kContainsCachedArrayIndexMask)); |
257 __ and_(r0, hash, r8, SetRC); | 251 __ AndP(r0, hash, r7); |
258 __ beq(index_string, cr0); | 252 __ beq(index_string); |
259 | 253 |
260 // Is the string internalized? We know it's a string, so a single | 254 // Is the string internalized? We know it's a string, so a single |
261 // bit test is enough. | 255 // bit test is enough. |
262 // map: key map | 256 // map: key map |
263 __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 257 __ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
264 STATIC_ASSERT(kInternalizedTag == 0); | 258 STATIC_ASSERT(kInternalizedTag == 0); |
265 __ andi(r0, hash, Operand(kIsNotInternalizedMask)); | 259 __ tmll(hash, Operand(kIsNotInternalizedMask)); |
266 __ bne(not_unique, cr0); | 260 __ bne(not_unique); |
267 | 261 |
268 __ bind(&unique); | 262 __ bind(&unique); |
269 } | 263 } |
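
GenerateKeyNameCheck sorts string keys into three buckets using two bit tests on cached metadata: a hash-field bit says whether a numeric array index is cached in the hash word itself, and an instance-type bit says whether the string is internalized (unique). Here is a minimal sketch with illustrative masks; V8's real encodings differ.

    #include <cstdint>
    #include <cstdio>

    const uint32_t kContainsCachedArrayIndexMask = 1u << 0;  // assumption
    const uint32_t kIsNotInternalizedMask = 1u << 0;         // assumption

    enum class KeyClass { IndexString, Unique, NotUnique };

    KeyClass Classify(uint32_t hash_field, uint32_t instance_type) {
      if ((hash_field & kContainsCachedArrayIndexMask) == 0)
        return KeyClass::IndexString;  // numeric index cached in the hash word
      if (instance_type & kIsNotInternalizedMask)
        return KeyClass::NotUnique;    // must go through the runtime
      return KeyClass::Unique;         // safe for dictionary/stub-cache probing
    }

    int main() {
      printf("%d\n", (int)Classify(kContainsCachedArrayIndexMask, 0));  // Unique
      return 0;
    }
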
270 | 264 |
271 void LoadIC::GenerateNormal(MacroAssembler* masm) { | 265 void LoadIC::GenerateNormal(MacroAssembler* masm) { |
272 Register dictionary = r3; | 266 Register dictionary = r2; |
273 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); | 267 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); |
274 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); | 268 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); |
275 | 269 |
276 Label slow; | 270 Label slow; |
277 | 271 |
278 __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), | 272 __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), |
279 JSObject::kPropertiesOffset)); | 273 JSObject::kPropertiesOffset)); |
280 GenerateDictionaryLoad(masm, &slow, dictionary, | 274 GenerateDictionaryLoad(masm, &slow, dictionary, |
281 LoadDescriptor::NameRegister(), r3, r6, r7); | 275 LoadDescriptor::NameRegister(), r2, r5, r6); |
282 __ Ret(); | 276 __ Ret(); |
283 | 277 |
284 // Dictionary load failed, go slow (but don't miss). | 278 // Dictionary load failed, go slow (but don't miss). |
285 __ bind(&slow); | 279 __ bind(&slow); |
286 GenerateRuntimeGetProperty(masm); | 280 GenerateRuntimeGetProperty(masm); |
287 } | 281 } |
288 | 282 |
289 | |
290 // A register that isn't one of the parameters to the load ic. | 283 // A register that isn't one of the parameters to the load ic. |
291 static const Register LoadIC_TempRegister() { return r6; } | 284 static const Register LoadIC_TempRegister() { return r5; } |
292 | |
293 | 285 |
294 static void LoadIC_PushArgs(MacroAssembler* masm) { | 286 static void LoadIC_PushArgs(MacroAssembler* masm) { |
295 Register receiver = LoadDescriptor::ReceiverRegister(); | 287 Register receiver = LoadDescriptor::ReceiverRegister(); |
296 Register name = LoadDescriptor::NameRegister(); | 288 Register name = LoadDescriptor::NameRegister(); |
297 Register slot = LoadDescriptor::SlotRegister(); | 289 Register slot = LoadDescriptor::SlotRegister(); |
298 Register vector = LoadWithVectorDescriptor::VectorRegister(); | 290 Register vector = LoadWithVectorDescriptor::VectorRegister(); |
299 | 291 |
300 __ Push(receiver, name, slot, vector); | 292 __ Push(receiver, name, slot, vector); |
301 } | 293 } |
302 | 294 |
303 | |
304 void LoadIC::GenerateMiss(MacroAssembler* masm) { | 295 void LoadIC::GenerateMiss(MacroAssembler* masm) { |
305 // The return address is in lr. | 296 // The return address is in lr. |
306 Isolate* isolate = masm->isolate(); | 297 Isolate* isolate = masm->isolate(); |
307 | 298 |
308 DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(), | 299 DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(), |
309 LoadWithVectorDescriptor::VectorRegister())); | 300 LoadWithVectorDescriptor::VectorRegister())); |
310 __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r7, r8); | 301 __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7); |
311 | 302 |
312 LoadIC_PushArgs(masm); | 303 LoadIC_PushArgs(masm); |
313 | 304 |
314 // Perform tail call to the entry. | 305 // Perform tail call to the entry. |
315 __ TailCallRuntime(Runtime::kLoadIC_Miss); | 306 __ TailCallRuntime(Runtime::kLoadIC_Miss); |
316 } | 307 } |
317 | 308 |
318 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 309 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
319 // The return address is in lr. | 310 // The return address is in lr. |
320 | 311 |
321 __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); | 312 __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); |
322 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); | 313 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); |
323 | 314 |
324 // Do tail-call to runtime routine. | 315 // Do tail-call to runtime routine. |
325 __ TailCallRuntime(Runtime::kGetProperty); | 316 __ TailCallRuntime(Runtime::kGetProperty); |
326 } | 317 } |
327 | 318 |
328 | |
329 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { | 319 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { |
330 // The return address is in lr. | 320 // The return address is in lr. |
331 Isolate* isolate = masm->isolate(); | 321 Isolate* isolate = masm->isolate(); |
332 | 322 |
333 DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(), | 323 DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(), |
334 LoadWithVectorDescriptor::VectorRegister())); | 324 LoadWithVectorDescriptor::VectorRegister())); |
335 __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r7, r8); | 325 __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7); |
336 | 326 |
337 LoadIC_PushArgs(masm); | 327 LoadIC_PushArgs(masm); |
338 | 328 |
339 // Perform tail call to the entry. | 329 // Perform tail call to the entry. |
340 __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); | 330 __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss); |
341 } | 331 } |
342 | 332 |
343 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 333 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
344 // The return address is in lr. | 334 // The return address is in lr. |
345 | 335 |
346 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); | 336 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); |
347 | 337 |
348 // Do tail-call to runtime routine. | 338 // Do tail-call to runtime routine. |
349 __ TailCallRuntime(Runtime::kKeyedGetProperty); | 339 __ TailCallRuntime(Runtime::kKeyedGetProperty); |
350 } | 340 } |
351 | 341 |
352 void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) { | 342 void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) { |
353 // The return address is in lr. | 343 // The return address is in lr. |
354 Label slow, check_name, index_smi, index_name, property_array_property; | 344 Label slow, check_name, index_smi, index_name, property_array_property; |
355 Label probe_dictionary, check_number_dictionary; | 345 Label probe_dictionary, check_number_dictionary; |
356 | 346 |
357 Register key = LoadDescriptor::NameRegister(); | 347 Register key = LoadDescriptor::NameRegister(); |
358 Register receiver = LoadDescriptor::ReceiverRegister(); | 348 Register receiver = LoadDescriptor::ReceiverRegister(); |
359 DCHECK(key.is(r5)); | 349 DCHECK(key.is(r4)); |
360 DCHECK(receiver.is(r4)); | 350 DCHECK(receiver.is(r3)); |
361 | 351 |
362 Isolate* isolate = masm->isolate(); | 352 Isolate* isolate = masm->isolate(); |
363 | 353 |
364 // Check that the key is a smi. | 354 // Check that the key is a smi. |
365 __ JumpIfNotSmi(key, &check_name); | 355 __ JumpIfNotSmi(key, &check_name); |
366 __ bind(&index_smi); | 356 __ bind(&index_smi); |
367 // Now the key is known to be a smi. This place is also jumped to from below | 357 // Now the key is known to be a smi. This place is also jumped to from below |
368 // where a numeric string is converted to a smi. | 358 // where a numeric string is converted to a smi. |
369 | 359 |
370 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, | 360 GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5, |
371 Map::kHasIndexedInterceptor, &slow); | 361 Map::kHasIndexedInterceptor, &slow); |
372 | 362 |
373 // Check the receiver's map to see if it has fast elements. | 363 // Check the receiver's map to see if it has fast elements. |
374 __ CheckFastElements(r3, r6, &check_number_dictionary); | 364 __ CheckFastElements(r2, r5, &check_number_dictionary); |
375 | 365 |
376 GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow); | 366 GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow); |
377 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r7, | 367 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6, |
378 r6); | 368 r5); |
379 __ Ret(); | 369 __ Ret(); |
380 | 370 |
381 __ bind(&check_number_dictionary); | 371 __ bind(&check_number_dictionary); |
382 __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 372 __ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
383 __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset)); | 373 __ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset)); |
384 | 374 |
385 // Check whether the elements object is a number dictionary. | 375 // Check whether the elements object is a number dictionary. |
386 // r6: elements map | 376 // r5: elements map |
387 // r7: elements | 377 // r6: elements |
388 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 378 __ CompareRoot(r5, Heap::kHashTableMapRootIndex); |
389 __ cmp(r6, ip); | 379 __ bne(&slow, Label::kNear); |
390 __ bne(&slow); | 380 __ SmiUntag(r2, key); |
391 __ SmiUntag(r3, key); | 381 __ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7); |
392 __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8); | |
393 __ Ret(); | 382 __ Ret(); |
394 | 383 |
395 // Slow case, key and receiver still in r3 and r4. | 384 // Slow case, key and receiver still in r2 and r3. |
396 __ bind(&slow); | 385 __ bind(&slow); |
397 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r7, | 386 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6, |
398 r6); | 387 r5); |
399 GenerateRuntimeGetProperty(masm); | 388 GenerateRuntimeGetProperty(masm); |
400 | 389 |
401 __ bind(&check_name); | 390 __ bind(&check_name); |
402 GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow); | 391 GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow); |
403 | 392 |
404 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, | 393 GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5, |
405 Map::kHasNamedInterceptor, &slow); | 394 Map::kHasNamedInterceptor, &slow); |
406 | 395 |
407 // If the receiver is a fast-case object, check the stub cache. Otherwise | 396 // If the receiver is a fast-case object, check the stub cache. Otherwise |
408 // probe the dictionary. | 397 // probe the dictionary. |
409 __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 398 __ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
410 __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset)); | 399 __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset)); |
411 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 400 __ CompareRoot(r6, Heap::kHashTableMapRootIndex); |
412 __ cmp(r7, ip); | |
413 __ beq(&probe_dictionary); | 401 __ beq(&probe_dictionary); |
414 | 402 |
415 | |
416 // The handlers in the stub cache expect a vector and slot. Since we won't | 403 // The handlers in the stub cache expect a vector and slot. Since we won't |
417 // change the IC from any downstream misses, a dummy vector can be used. | 404 // change the IC from any downstream misses, a dummy vector can be used. |
418 Register vector = LoadWithVectorDescriptor::VectorRegister(); | 405 Register vector = LoadWithVectorDescriptor::VectorRegister(); |
419 Register slot = LoadWithVectorDescriptor::SlotRegister(); | 406 Register slot = LoadWithVectorDescriptor::SlotRegister(); |
420 DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10)); | 407 DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9)); |
421 Handle<TypeFeedbackVector> dummy_vector = | 408 Handle<TypeFeedbackVector> dummy_vector = |
422 TypeFeedbackVector::DummyVector(masm->isolate()); | 409 TypeFeedbackVector::DummyVector(masm->isolate()); |
423 int slot_index = dummy_vector->GetIndex( | 410 int slot_index = dummy_vector->GetIndex( |
424 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot)); | 411 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot)); |
425 __ LoadRoot(vector, Heap::kDummyVectorRootIndex); | 412 __ LoadRoot(vector, Heap::kDummyVectorRootIndex); |
426 __ LoadSmiLiteral(slot, Smi::FromInt(slot_index)); | 413 __ LoadSmiLiteral(slot, Smi::FromInt(slot_index)); |
427 | 414 |
428 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( | 415 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( |
429 Code::ComputeHandlerFlags(Code::LOAD_IC)); | 416 Code::ComputeHandlerFlags(Code::LOAD_IC)); |
430 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags, | 417 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags, |
431 receiver, key, r7, r8, r9, r10); | 418 receiver, key, r6, r7, r8, r9); |
432 // Cache miss. | 419 // Cache miss. |
433 GenerateMiss(masm); | 420 GenerateMiss(masm); |
434 | 421 |
435 // Do a quick inline probe of the receiver's dictionary, if it | 422 // Do a quick inline probe of the receiver's dictionary, if it |
436 // exists. | 423 // exists. |
437 __ bind(&probe_dictionary); | 424 __ bind(&probe_dictionary); |
438 // r6: elements | 425 // r5: elements |
439 __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 426 __ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
440 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 427 __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); |
441 GenerateGlobalInstanceTypeCheck(masm, r3, &slow); | 428 GenerateGlobalInstanceTypeCheck(masm, r2, &slow); |
442 // Load the property to r3. | 429 // Load the property to r2. |
443 GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7); | 430 GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6); |
444 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1, | 431 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1, |
445 r7, r6); | 432 r6, r5); |
446 __ Ret(); | 433 __ Ret(); |
447 | 434 |
448 __ bind(&index_name); | 435 __ bind(&index_name); |
449 __ IndexFromHash(r6, key); | 436 __ IndexFromHash(r5, key); |
450 // Now jump to the place where smi keys are handled. | 437 // Now jump to the place where smi keys are handled. |
451 __ b(&index_smi); | 438 __ b(&index_smi); |
452 } | 439 } |
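
Taken together, the megamorphic keyed load tries progressively slower paths: smi keys probe fast elements and then the number dictionary; string keys carrying a cached index loop back to the smi path; unique names probe the stub cache and then the property dictionary; everything else falls through to the runtime. A compilable sketch of that dispatch order follows; the boolean predicates are hypothetical stand-ins for the stub's map and key checks.

    #include <cstdio>

    enum class Path { FastElements, NumberDict, StubCache, NameDict, Runtime };

    Path KeyedLoadGeneric(bool key_is_smi, bool has_fast_elements,
                          bool key_is_unique_name, bool props_are_dictionary) {
      if (key_is_smi)
        return has_fast_elements ? Path::FastElements : Path::NumberDict;
      if (!key_is_unique_name) return Path::Runtime;  // slow path
      return props_are_dictionary ? Path::NameDict : Path::StubCache;
    }

    int main() {
      printf("%d\n", (int)KeyedLoadGeneric(true, true, false, false));  // FastElements
      return 0;
    }
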
453 | 440 |
454 | |
455 static void StoreIC_PushArgs(MacroAssembler* masm) { | 441 static void StoreIC_PushArgs(MacroAssembler* masm) { |
456 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), | 442 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), |
457 StoreDescriptor::ValueRegister(), | 443 StoreDescriptor::ValueRegister(), |
458 VectorStoreICDescriptor::SlotRegister(), | 444 VectorStoreICDescriptor::SlotRegister(), |
459 VectorStoreICDescriptor::VectorRegister()); | 445 VectorStoreICDescriptor::VectorRegister()); |
460 } | 446 } |
461 | 447 |
462 | |
463 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { | 448 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { |
464 StoreIC_PushArgs(masm); | 449 StoreIC_PushArgs(masm); |
465 | 450 |
466 __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); | 451 __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss); |
467 } | 452 } |
468 | 453 |
469 | |
470 static void KeyedStoreGenerateMegamorphicHelper( | 454 static void KeyedStoreGenerateMegamorphicHelper( |
471 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, | 455 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, |
472 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, | 456 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, |
473 Register value, Register key, Register receiver, Register receiver_map, | 457 Register value, Register key, Register receiver, Register receiver_map, |
474 Register elements_map, Register elements) { | 458 Register elements_map, Register elements) { |
475 Label transition_smi_elements; | 459 Label transition_smi_elements; |
476 Label finish_object_store, non_double_value, transition_double_elements; | 460 Label finish_object_store, non_double_value, transition_double_elements; |
477 Label fast_double_without_map_check; | 461 Label fast_double_without_map_check; |
478 | 462 |
479 // Fast case: Do the store, could be either Object or double. | 463 // Fast case: Do the store, could be either Object or double. |
480 __ bind(fast_object); | 464 __ bind(fast_object); |
481 Register scratch = r7; | 465 Register scratch = r6; |
482 Register address = r8; | 466 Register address = r7; |
483 DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, | 467 DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, |
484 scratch, address)); | 468 scratch, address)); |
485 | 469 |
486 if (check_map == kCheckMap) { | 470 if (check_map == kCheckMap) { |
487 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 471 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
488 __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map())); | 472 __ CmpP(elements_map, |
489 __ cmp(elements_map, scratch); | 473 Operand(masm->isolate()->factory()->fixed_array_map())); |
490 __ bne(fast_double); | 474 __ bne(fast_double); |
491 } | 475 } |
492 | 476 |
493 // HOLECHECK: guards "A[i] = V" | 477 // HOLECHECK: guards "A[i] = V" |
494 // We have to go to the runtime if the current value is the hole because | 478 // We have to go to the runtime if the current value is the hole because |
495 // there may be a callback on the element | 479 // there may be a callback on the element |
496 Label holecheck_passed1; | 480 Label holecheck_passed1; |
497 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 481 // @TODO(joransiu) : Fold AddP into memref of LoadP |
| 482 __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
498 __ SmiToPtrArrayOffset(scratch, key); | 483 __ SmiToPtrArrayOffset(scratch, key); |
499 __ LoadPX(scratch, MemOperand(address, scratch)); | 484 __ LoadP(scratch, MemOperand(address, scratch)); |
500 __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0); | 485 __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value())); |
501 __ bne(&holecheck_passed1); | 486 __ bne(&holecheck_passed1, Label::kNear); |
502 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); | 487 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); |
503 | 488 |
504 __ bind(&holecheck_passed1); | 489 __ bind(&holecheck_passed1); |
505 | 490 |
506 // Smi stores don't require further checks. | 491 // Smi stores don't require further checks. |
507 Label non_smi_value; | 492 Label non_smi_value; |
508 __ JumpIfNotSmi(value, &non_smi_value); | 493 __ JumpIfNotSmi(value, &non_smi_value); |
509 | 494 |
510 if (increment_length == kIncrementLength) { | 495 if (increment_length == kIncrementLength) { |
511 // Add 1 to receiver->length. | 496 // Add 1 to receiver->length. |
512 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); | 497 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); |
513 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); | 498 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
514 } | 499 } |
515 // It's irrelevant whether array is smi-only or not when writing a smi. | 500 // It's irrelevant whether array is smi-only or not when writing a smi. |
516 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 501 __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
517 __ SmiToPtrArrayOffset(scratch, key); | 502 __ SmiToPtrArrayOffset(scratch, key); |
518 __ StorePX(value, MemOperand(address, scratch)); | 503 __ StoreP(value, MemOperand(address, scratch)); |
519 __ Ret(); | 504 __ Ret(); |
520 | 505 |
521 __ bind(&non_smi_value); | 506 __ bind(&non_smi_value); |
522 // Escape to elements kind transition case. | 507 // Escape to elements kind transition case. |
523 __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); | 508 __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements); |
524 | 509 |
525 // Fast elements array, store the value to the elements backing store. | 510 // Fast elements array, store the value to the elements backing store. |
526 __ bind(&finish_object_store); | 511 __ bind(&finish_object_store); |
527 if (increment_length == kIncrementLength) { | 512 if (increment_length == kIncrementLength) { |
528 // Add 1 to receiver->length. | 513 // Add 1 to receiver->length. |
529 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); | 514 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); |
530 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); | 515 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
531 } | 516 } |
532 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 517 __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
533 __ SmiToPtrArrayOffset(scratch, key); | 518 __ SmiToPtrArrayOffset(scratch, key); |
534 __ StorePUX(value, MemOperand(address, scratch)); | 519 __ StoreP(value, MemOperand(address, scratch)); |
| 520 __ la(address, MemOperand(address, scratch)); |
535 // Update write barrier for the elements array address. | 521 // Update write barrier for the elements array address. |
536 __ mr(scratch, value); // Preserve the value which is returned. | 522 __ LoadRR(scratch, value); // Preserve the value which is returned. |
537 __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved, | 523 __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved, |
538 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 524 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
539 __ Ret(); | 525 __ Ret(); |
540 | 526 |
541 __ bind(fast_double); | 527 __ bind(fast_double); |
542 if (check_map == kCheckMap) { | 528 if (check_map == kCheckMap) { |
543 // Check for fast double array case. If this fails, call through to the | 529 // Check for fast double array case. If this fails, call through to the |
544 // runtime. | 530 // runtime. |
545 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); | 531 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); |
546 __ bne(slow); | 532 __ bne(slow); |
547 } | 533 } |
548 | 534 |
549 // HOLECHECK: guards "A[i] double hole?" | 535 // HOLECHECK: guards "A[i] double hole?" |
550 // We have to see if the double version of the hole is present. If so | 536 // We have to see if the double version of the hole is present. If so |
551 // go to the runtime. | 537 // go to the runtime. |
552 __ addi(address, elements, | 538 // @TODO(joransiu) : Fold AddP Operand into LoadlW |
| 539 __ AddP(address, elements, |
553 Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - | 540 Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - |
554 kHeapObjectTag))); | 541 kHeapObjectTag))); |
555 __ SmiToDoubleArrayOffset(scratch, key); | 542 __ SmiToDoubleArrayOffset(scratch, key); |
556 __ lwzx(scratch, MemOperand(address, scratch)); | 543 __ LoadlW(scratch, MemOperand(address, scratch)); |
557 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); | 544 __ CmpP(scratch, Operand(kHoleNanUpper32)); |
558 __ bne(&fast_double_without_map_check); | 545 __ bne(&fast_double_without_map_check, Label::kNear); |
559 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); | 546 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow); |
560 | 547 |
561 __ bind(&fast_double_without_map_check); | 548 __ bind(&fast_double_without_map_check); |
562 __ StoreNumberToDoubleElements(value, key, elements, scratch, d0, | 549 __ StoreNumberToDoubleElements(value, key, elements, scratch, d0, |
563 &transition_double_elements); | 550 &transition_double_elements); |
564 if (increment_length == kIncrementLength) { | 551 if (increment_length == kIncrementLength) { |
565 // Add 1 to receiver->length. | 552 // Add 1 to receiver->length. |
566 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); | 553 __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0); |
567 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0); | 554 __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
568 } | 555 } |
569 __ Ret(); | 556 __ Ret(); |
570 | 557 |
571 __ bind(&transition_smi_elements); | 558 __ bind(&transition_smi_elements); |
572 // Transition the array appropriately depending on the value type. | 559 // Transition the array appropriately depending on the value type. |
573 __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); | 560 __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
574 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); | 561 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); |
575 __ bne(&non_double_value); | 562 __ bne(&non_double_value); |
576 | 563 |
577 // Value is a double. Transition FAST_SMI_ELEMENTS -> | 564 // Value is a double. Transition FAST_SMI_ELEMENTS -> |
(...skipping 23 matching lines...) |
601 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS | 588 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS |
602 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, | 589 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, |
603 receiver_map, scratch, slow); | 590 receiver_map, scratch, slow); |
604 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); | 591 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); |
605 ElementsTransitionGenerator::GenerateDoubleToObject( | 592 ElementsTransitionGenerator::GenerateDoubleToObject( |
606 masm, receiver, key, value, receiver_map, mode, slow); | 593 masm, receiver, key, value, receiver_map, mode, slow); |
607 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 594 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
608 __ b(&finish_object_store); | 595 __ b(&finish_object_store); |
609 } | 596 } |
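
The transition labels above encode a one-way widening lattice: a smi-only backing store widens to a double array when a heap number arrives and to a generic object array for anything else, and a double array widens to a generic object array when a non-number arrives. A small sketch of that decision, assuming simplified value tests:

    #include <cstdio>

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    ElementsKind TransitionFor(ElementsKind current, bool value_is_smi,
                               bool value_is_heap_number) {
      if (value_is_smi) return current;  // smis fit in every fast kind
      if (current == FAST_SMI_ELEMENTS)
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number)
        return FAST_ELEMENTS;            // transition_double_elements
      return current;
    }

    int main() {
      printf("%d\n", TransitionFor(FAST_SMI_ELEMENTS, false, true));  // DOUBLE
      return 0;
    }
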
610 | 597 |
611 | |
612 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, | 598 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, |
613 LanguageMode language_mode) { | 599 LanguageMode language_mode) { |
614 // ---------- S t a t e -------------- | 600 // ---------- S t a t e -------------- |
615 // -- r3 : value | 601 // -- r2 : value |
616 // -- r4 : key | 602 // -- r3 : key |
617 // -- r5 : receiver | 603 // -- r4 : receiver |
618 // -- lr : return address | 604 // -- lr : return address |
619 // ----------------------------------- | 605 // ----------------------------------- |
620 Label slow, fast_object, fast_object_grow; | 606 Label slow, fast_object, fast_object_grow; |
621 Label fast_double, fast_double_grow; | 607 Label fast_double, fast_double_grow; |
622 Label array, extra, check_if_double_array, maybe_name_key, miss; | 608 Label array, extra, check_if_double_array, maybe_name_key, miss; |
623 | 609 |
624 // Register usage. | 610 // Register usage. |
625 Register value = StoreDescriptor::ValueRegister(); | 611 Register value = StoreDescriptor::ValueRegister(); |
626 Register key = StoreDescriptor::NameRegister(); | 612 Register key = StoreDescriptor::NameRegister(); |
627 Register receiver = StoreDescriptor::ReceiverRegister(); | 613 Register receiver = StoreDescriptor::ReceiverRegister(); |
628 DCHECK(receiver.is(r4)); | 614 DCHECK(receiver.is(r3)); |
629 DCHECK(key.is(r5)); | 615 DCHECK(key.is(r4)); |
630 DCHECK(value.is(r3)); | 616 DCHECK(value.is(r2)); |
631 Register receiver_map = r6; | 617 Register receiver_map = r5; |
632 Register elements_map = r9; | 618 Register elements_map = r8; |
633 Register elements = r10; // Elements array of the receiver. | 619 Register elements = r9; // Elements array of the receiver. |
634 // r7 and r8 are used as general scratch registers. | 620 // r6 and r7 are used as general scratch registers. |
635 | 621 |
636 // Check that the key is a smi. | 622 // Check that the key is a smi. |
637 __ JumpIfNotSmi(key, &maybe_name_key); | 623 __ JumpIfNotSmi(key, &maybe_name_key); |
638 // Check that the object isn't a smi. | 624 // Check that the object isn't a smi. |
639 __ JumpIfSmi(receiver, &slow); | 625 __ JumpIfSmi(receiver, &slow); |
640 // Get the map of the object. | 626 // Get the map of the object. |
641 __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 627 __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
642 // Check that the receiver does not require access checks and is not observed. | 628 // Check that the receiver does not require access checks and is not observed. |
643 // The generic stub does not perform map checks or handle observed objects. | 629 // The generic stub does not perform map checks or handle observed objects. |
644 __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); | 630 __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); |
645 __ andi(r0, ip, | 631 __ AndP(r0, ip, |
646 Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); | 632 Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); |
647 __ bne(&slow, cr0); | 633 __ bne(&slow, Label::kNear); |
648 // Check if the object is a JS array or not. | 634 // Check if the object is a JS array or not. |
649 __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); | 635 __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); |
650 __ cmpi(r7, Operand(JS_ARRAY_TYPE)); | 636 __ CmpP(r6, Operand(JS_ARRAY_TYPE)); |
651 __ beq(&array); | 637 __ beq(&array); |
652 // Check that the object is some kind of JSObject. | 638 // Check that the object is some kind of JSObject. |
653 __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE)); | 639 __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE)); |
654 __ blt(&slow); | 640 __ blt(&slow, Label::kNear); |
655 | 641 |
656 // Object case: Check key against length in the elements array. | 642 // Object case: Check key against length in the elements array. |
657 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 643 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
658 // Check array bounds. Both the key and the length of FixedArray are smis. | 644 // Check array bounds. Both the key and the length of FixedArray are smis. |
659 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 645 __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
660 __ cmpl(key, ip); | |
661 __ blt(&fast_object); | 646 __ blt(&fast_object); |
662 | 647 |
663 // Slow case, handle jump to runtime. | 648 // Slow case, handle jump to runtime. |
664 __ bind(&slow); | 649 __ bind(&slow); |
665 // Entry registers are intact. | 650 // Entry registers are intact. |
666 // r3: value. | 651 // r2: value. |
667 // r4: key. | 652 // r3: key. |
668 // r5: receiver. | 653 // r4: receiver. |
669 PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode); | 654 PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode); |
670 // Never returns to here. | 655 // Never returns to here. |
671 | 656 |
672 __ bind(&maybe_name_key); | 657 __ bind(&maybe_name_key); |
673 __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset)); | 658 __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset)); |
674 __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); | 659 __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset)); |
675 __ JumpIfNotUniqueNameInstanceType(r7, &slow); | 660 __ JumpIfNotUniqueNameInstanceType(r6, &slow); |
676 | 661 |
677 // The handlers in the stub cache expect a vector and slot. Since we won't | 662 // The handlers in the stub cache expect a vector and slot. Since we won't |
678 // change the IC from any downstream misses, a dummy vector can be used. | 663 // change the IC from any downstream misses, a dummy vector can be used. |
679 Register vector = VectorStoreICDescriptor::VectorRegister(); | 664 Register vector = VectorStoreICDescriptor::VectorRegister(); |
680 Register slot = VectorStoreICDescriptor::SlotRegister(); | 665 Register slot = VectorStoreICDescriptor::SlotRegister(); |
681 DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11)); | 666 DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip)); |
682 Handle<TypeFeedbackVector> dummy_vector = | 667 Handle<TypeFeedbackVector> dummy_vector = |
683 TypeFeedbackVector::DummyVector(masm->isolate()); | 668 TypeFeedbackVector::DummyVector(masm->isolate()); |
684 int slot_index = dummy_vector->GetIndex( | 669 int slot_index = dummy_vector->GetIndex( |
685 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); | 670 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)); |
686 __ LoadRoot(vector, Heap::kDummyVectorRootIndex); | 671 __ LoadRoot(vector, Heap::kDummyVectorRootIndex); |
687 __ LoadSmiLiteral(slot, Smi::FromInt(slot_index)); | 672 __ LoadSmiLiteral(slot, Smi::FromInt(slot_index)); |
688 | 673 |
689 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( | 674 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( |
690 Code::ComputeHandlerFlags(Code::STORE_IC)); | 675 Code::ComputeHandlerFlags(Code::STORE_IC)); |
691 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, | 676 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, |
692 receiver, key, r8, r9, r10, r11); | 677 receiver, key, r7, r8, r9, ip); |
693 // Cache miss. | 678 // Cache miss. |
694 __ b(&miss); | 679 __ b(&miss); |
695 | 680 |
696 // Extra capacity case: Check if there is extra capacity to | 681 // Extra capacity case: Check if there is extra capacity to |
697 // perform the store and update the length. Used for adding one | 682 // perform the store and update the length. Used for adding one |
698 // element to the array by writing to array[array.length]. | 683 // element to the array by writing to array[array.length]. |
699 __ bind(&extra); | 684 __ bind(&extra); |
700 // Condition code from comparing key and array length is still available. | 685 // Condition code from comparing key and array length is still available. |
701 __ bne(&slow); // Only support writing to array[array.length]. | 686 __ bne(&slow); // Only support writing to array[array.length]. |
702 // Check for room in the elements backing store. | 687 // Check for room in the elements backing store. |
703 // Both the key and the length of FixedArray are smis. | 688 // Both the key and the length of FixedArray are smis. |
704 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 689 __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
705 __ cmpl(key, ip); | |
706 __ bge(&slow); | 690 __ bge(&slow); |
707 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 691 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
708 __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map())); | 692 __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map())); |
709 __ cmp(elements_map, ip); // PPC - I think I can re-use ip here | 693 __ bne(&check_if_double_array, Label::kNear); |
710 __ bne(&check_if_double_array); | |
711 __ b(&fast_object_grow); | 694 __ b(&fast_object_grow); |
712 | 695 |
713 __ bind(&check_if_double_array); | 696 __ bind(&check_if_double_array); |
714 __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map())); | 697 __ CmpP(elements_map, |
715 __ cmp(elements_map, ip); // PPC - another ip re-use | 698 Operand(masm->isolate()->factory()->fixed_double_array_map())); |
716 __ bne(&slow); | 699 __ bne(&slow); |
717 __ b(&fast_double_grow); | 700 __ b(&fast_double_grow); |
718 | 701 |
719 // Array case: Get the length and the elements array from the JS | 702 // Array case: Get the length and the elements array from the JS |
720 // array. Check that the array is in fast mode (and writable); if it | 703 // array. Check that the array is in fast mode (and writable); if it |
721 // is the length is always a smi. | 704 // is the length is always a smi. |
722 __ bind(&array); | 705 __ bind(&array); |
723 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 706 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
724 | 707 |
725 // Check the key against the length in the array. | 708 // Check the key against the length in the array. |
726 __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 709 __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
727 __ cmpl(key, ip); | |
728 __ bge(&extra); | 710 __ bge(&extra); |
729 | 711 |
730 KeyedStoreGenerateMegamorphicHelper( | 712 KeyedStoreGenerateMegamorphicHelper( |
731 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, | 713 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, |
732 value, key, receiver, receiver_map, elements_map, elements); | 714 value, key, receiver, receiver_map, elements_map, elements); |
733 KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow, | 715 KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow, |
734 &fast_double_grow, &slow, kDontCheckMap, | 716 &fast_double_grow, &slow, kDontCheckMap, |
735 kIncrementLength, value, key, receiver, | 717 kIncrementLength, value, key, receiver, |
736 receiver_map, elements_map, elements); | 718 receiver_map, elements_map, elements); |
737 __ bind(&miss); | 719 __ bind(&miss); |
738 GenerateMiss(masm); | 720 GenerateMiss(masm); |
739 } | 721 } |
740 | 722 |
741 | |
742 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 723 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
743 Register receiver = StoreDescriptor::ReceiverRegister(); | 724 Register receiver = StoreDescriptor::ReceiverRegister(); |
744 Register name = StoreDescriptor::NameRegister(); | 725 Register name = StoreDescriptor::NameRegister(); |
745 DCHECK(receiver.is(r4)); | 726 DCHECK(receiver.is(r3)); |
746 DCHECK(name.is(r5)); | 727 DCHECK(name.is(r4)); |
747 DCHECK(StoreDescriptor::ValueRegister().is(r3)); | 728 DCHECK(StoreDescriptor::ValueRegister().is(r2)); |
748 | 729 |
749 // Get the receiver from the stack and probe the stub cache. | 730 // Get the receiver from the stack and probe the stub cache. |
750 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( | 731 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( |
751 Code::ComputeHandlerFlags(Code::STORE_IC)); | 732 Code::ComputeHandlerFlags(Code::STORE_IC)); |
752 | 733 |
753 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, | 734 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, |
754 receiver, name, r6, r7, r8, r9); | 735 receiver, name, r5, r6, r7, r8); |
755 | 736 |
756 // Cache miss: Jump to runtime. | 737 // Cache miss: Jump to runtime. |
757 GenerateMiss(masm); | 738 GenerateMiss(masm); |
758 } | 739 } |
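[Note] Every register moves down by one in the port: the store descriptor's receiver/name/value go from r4/r5/r3 to r3/r4/r2, and the four probe scratches from r6-r9 to r5-r8, presumably to fit S390's register conventions. The probe itself keeps the usual stub-cache shape: try a primary table, then a secondary table, else miss. A toy model of that shape; the hashing and table layout are placeholders, not V8's actual scheme:

    #include <cstdint>
    #include <cstdio>

    struct Entry { uint32_t key; const char* handler; };
    constexpr int kEntries = 8;
    Entry primary[kEntries] = {};    // key 0 means "empty" in this toy
    Entry secondary[kEntries] = {};

    const char* Probe(uint32_t key) {
      if (primary[key % kEntries].key == key) return primary[key % kEntries].handler;
      if (secondary[key % kEntries].key == key) return secondary[key % kEntries].handler;
      return nullptr;  // cache miss: fall through to GenerateMiss -> runtime
    }

    int main() {
      primary[42 % kEntries] = {42, "primary handler"};
      const char* h = Probe(42);
      std::printf("%s\n", h ? h : "miss");
      return 0;
    }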
759 | 740 |
760 | |
761 void StoreIC::GenerateMiss(MacroAssembler* masm) { | 741 void StoreIC::GenerateMiss(MacroAssembler* masm) { |
762 StoreIC_PushArgs(masm); | 742 StoreIC_PushArgs(masm); |
763 | 743 |
764 // Perform tail call to the entry. | 744 // Perform tail call to the entry. |
765 __ TailCallRuntime(Runtime::kStoreIC_Miss); | 745 __ TailCallRuntime(Runtime::kStoreIC_Miss); |
766 } | 746 } |
767 | 747 |
768 | |
769 void StoreIC::GenerateNormal(MacroAssembler* masm) { | 748 void StoreIC::GenerateNormal(MacroAssembler* masm) { |
770 Label miss; | 749 Label miss; |
771 Register receiver = StoreDescriptor::ReceiverRegister(); | 750 Register receiver = StoreDescriptor::ReceiverRegister(); |
772 Register name = StoreDescriptor::NameRegister(); | 751 Register name = StoreDescriptor::NameRegister(); |
773 Register value = StoreDescriptor::ValueRegister(); | 752 Register value = StoreDescriptor::ValueRegister(); |
774 Register dictionary = r8; | 753 Register dictionary = r7; |
775 DCHECK(receiver.is(r4)); | 754 DCHECK(receiver.is(r3)); |
776 DCHECK(name.is(r5)); | 755 DCHECK(name.is(r4)); |
777 DCHECK(value.is(r3)); | 756 DCHECK(value.is(r2)); |
778 DCHECK(VectorStoreICDescriptor::VectorRegister().is(r6)); | 757 DCHECK(VectorStoreICDescriptor::VectorRegister().is(r5)); |
779 DCHECK(VectorStoreICDescriptor::SlotRegister().is(r7)); | 758 DCHECK(VectorStoreICDescriptor::SlotRegister().is(r6)); |
780 | 759 |
781 __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 760 __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
782 | 761 |
783 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10); | 762 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9); |
784 Counters* counters = masm->isolate()->counters(); | 763 Counters* counters = masm->isolate()->counters(); |
785 __ IncrementCounter(counters->ic_store_normal_hit(), 1, r9, r10); | 764 __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9); |
786 __ Ret(); | 765 __ Ret(); |
787 | 766 |
788 __ bind(&miss); | 767 __ bind(&miss); |
789 __ IncrementCounter(counters->ic_store_normal_miss(), 1, r9, r10); | 768 __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9); |
790 GenerateMiss(masm); | 769 GenerateMiss(masm); |
791 } | 770 } |
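[Note] GenerateNormal is the dictionary-mode store path: load the receiver's properties dictionary, probe it for the name, and on a hit store the value in place, bumping the hit counter; otherwise count a miss and defer to the runtime. A simplified stand-in for that flow, with a linear scan where V8's NameDictionary probes by hash, and a boolean where V8 checks the property's details:

    #include <cstdio>
    #include <cstring>

    struct Property { const char* name; int value; bool is_normal; };
    Property dict[4] = {{"x", 1, true}, {"len", 3, false}};

    bool DictionaryStore(const char* name, int value) {
      for (Property& p : dict) {
        if (p.name != nullptr && std::strcmp(p.name, name) == 0) {
          if (!p.is_normal) return false;  // e.g. accessor: miss -> runtime
          p.value = value;                 // hit: store in place
          return true;
        }
      }
      return false;  // absent: miss -> runtime
    }

    int main() {
      std::printf("%d %d\n", DictionaryStore("x", 42), DictionaryStore("len", 0));
      return 0;
    }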
792 | 771 |
793 | |
794 #undef __ | 772 #undef __ |
795 | 773 |
796 | |
797 Condition CompareIC::ComputeCondition(Token::Value op) { | 774 Condition CompareIC::ComputeCondition(Token::Value op) { |
798 switch (op) { | 775 switch (op) { |
799 case Token::EQ_STRICT: | 776 case Token::EQ_STRICT: |
800 case Token::EQ: | 777 case Token::EQ: |
801 return eq; | 778 return eq; |
802 case Token::LT: | 779 case Token::LT: |
803 return lt; | 780 return lt; |
804 case Token::GT: | 781 case Token::GT: |
805 return gt; | 782 return gt; |
806 case Token::LTE: | 783 case Token::LTE: |
807 return le; | 784 return le; |
808 case Token::GTE: | 785 case Token::GTE: |
809 return ge; | 786 return ge; |
810 default: | 787 default: |
811 UNREACHABLE(); | 788 UNREACHABLE(); |
812 return kNoCondition; | 789 return kNoCondition; |
813 } | 790 } |
814 } | 791 } |
815 | 792 |
816 | |
817 bool CompareIC::HasInlinedSmiCode(Address address) { | 793 bool CompareIC::HasInlinedSmiCode(Address address) { |
818 // The address of the instruction following the call. | 794 // The address of the instruction following the call. |
819 Address cmp_instruction_address = | 795 Address cmp_instruction_address = |
820 Assembler::return_address_from_call_start(address); | 796 Assembler::return_address_from_call_start(address); |
821 | 797 |
822 // If the instruction following the call is not a cmp rx, #yyy, nothing | 798 // If the instruction following the call is not a CHI, nothing |
823 // was inlined. | 799 // was inlined. |
824 Instr instr = Assembler::instr_at(cmp_instruction_address); | 800 return (Instruction::S390OpcodeValue(cmp_instruction_address) == CHI); |
825 return Assembler::IsCmpImmediate(instr); | |
826 } | 801 } |
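[Note] On S390 the marker for an inlined smi check is a CHI (compare halfword immediate) right after the call, and S390OpcodeValue digs the opcode out of the variable-width encoding. In the z/Architecture RI format that means opcode byte 0xA7 with extension 0xE in the low nibble of the second byte; a byte-level version of the same test (sample encodings are illustrative):

    #include <cstdint>
    #include <cstdio>

    // RI format: A7 (R1 << 4 | 0xE) imm16  ==  chi R1, imm16.
    bool LooksLikeCHI(const uint8_t* instr) {
      return instr[0] == 0xA7 && (instr[1] & 0x0F) == 0x0E;
    }

    int main() {
      const uint8_t chi_r2_42[] = {0xA7, 0x2E, 0x00, 0x2A};  // chi r2, 42
      const uint8_t lr_r0_r0[] = {0x18, 0x00};               // lr r0, r0
      std::printf("%d %d\n", LooksLikeCHI(chi_r2_42), LooksLikeCHI(lr_r0_r0));
      return 0;
    }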
827 | 802 |
828 | |
829 // | 803 // |
830 // This code is paired with the JumpPatchSite class in full-codegen-ppc.cc | 804 // This code is paired with the JumpPatchSite class in full-codegen-s390.cc |
831 // | 805 // |
832 void PatchInlinedSmiCode(Isolate* isolate, Address address, | 806 void PatchInlinedSmiCode(Isolate* isolate, Address address, |
833 InlinedSmiCheck check) { | 807 InlinedSmiCheck check) { |
834 Address cmp_instruction_address = | 808 Address cmp_instruction_address = |
835 Assembler::return_address_from_call_start(address); | 809 Assembler::return_address_from_call_start(address); |
836 | 810 |
837 // If the instruction following the call is not a cmp rx, #yyy, nothing | 811 // If the instruction following the call is not a CHI, nothing |
838 // was inlined. | 812 // was inlined. |
839 Instr instr = Assembler::instr_at(cmp_instruction_address); | 813 Instr instr = Assembler::instr_at(cmp_instruction_address); |
840 if (!Assembler::IsCmpImmediate(instr)) { | 814 if (Instruction::S390OpcodeValue(cmp_instruction_address) != CHI) { |
841 return; | 815 return; |
842 } | 816 } |
843 | 817 |
| 818 if (Instruction::S390OpcodeValue(address) != BRASL) { |
| 819 return; |
| 820 } |
844 // The delta to the start of the map check instruction and the | 821 // The delta back to the start of the map check instruction is |
845 // condition code uses at the patched jump. | 822 // encoded in the low 16 bits of the CHI immediate. |
846 int delta = Assembler::GetCmpImmediateRawImmediate(instr); | 823 int delta = instr & 0x0000ffff; |
847 delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask; | 824 |
848 // If the delta is 0 the instruction is cmp r0, #0 which also signals that | 825 // If the delta is 0 the instruction is chi r0, 0, which also signals that |
849 // nothing was inlined. | 826 // nothing was inlined. |
850 if (delta == 0) { | 827 if (delta == 0) { |
851 return; | 828 return; |
852 } | 829 } |
853 | 830 |
854 if (FLAG_trace_ic) { | 831 if (FLAG_trace_ic) { |
855 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address, | 832 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address, |
856 cmp_instruction_address, delta); | 833 cmp_instruction_address, delta); |
857 } | 834 } |
858 | 835 |
859 Address patch_address = | 836 // The smi check is enabled by changing the following sequence |
860 cmp_instruction_address - delta * Instruction::kInstrSize; | 837 // CR/CGR Rx, Rx // 2 / 4 bytes |
| 838 // LR R0, R0 // 2 bytes // 31-bit only! |
| 839 // BRC/BRCL // 4 / 6 bytes |
| 840 // into |
| 841 // TMLL Rx, XXX // 4 bytes |
| 842 // BRC/BRCL // 4 / 6 bytes |
| 843 // and disabled by the reverse rewrite. |
| 844 |
| 845 // Size of the patch area without the branch: CR + LR (31-bit) or CGR (64-bit) == 4 bytes. |
| 846 const int kPatchAreaSizeNoBranch = 4; |
| 847 Address patch_address = cmp_instruction_address - delta; |
| 848 Address branch_address = patch_address + kPatchAreaSizeNoBranch; |
| 849 |
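[Note] Unlike PPC, which scaled the delta by its fixed 4-byte instruction size, S390 instructions are variable-width, so the CHI immediate carries the delta in bytes directly. The patch area is 4 bytes in both modes (CGR alone on 64-bit, CR plus a 2-byte LR pad on 31-bit), so the 4-byte TMLL can overwrite it in place and branch_address lands on the BRC/BRCL. A worked instance with illustrative numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Suppose the inlined compare sits 28 bytes before the CHI marker.
      uintptr_t cmp_instruction_address = 0x5000;
      uint32_t chi = 0xA70E001C;                // chi r0, 28 (delta in bytes)
      int delta = chi & 0x0000FFFF;             // 28
      uintptr_t patch_address = cmp_instruction_address - delta;  // 0x4FE4
      uintptr_t branch_address = patch_address + 4;  // skip CGR or CR+LR
      std::printf("patch=%#zx branch=%#zx\n", (size_t)patch_address,
                  (size_t)branch_address);
      return 0;
    }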
861 Instr instr_at_patch = Assembler::instr_at(patch_address); | 850 Instr instr_at_patch = Assembler::instr_at(patch_address); |
862 Instr branch_instr = | 851 SixByteInstr branch_instr = Assembler::instr_at(branch_address); |
863 Assembler::instr_at(patch_address + Instruction::kInstrSize); | 852 |
864 // This is patching a conditional "jump if not smi/jump if smi" site. | 853 // This is patching a conditional "jump if not smi/jump if smi" site. |
865 // Enabling by changing from | 854 size_t patch_size = 0; |
866 // cmp cr0, rx, rx | 855 if (Instruction::S390OpcodeValue(branch_address) == BRC) { |
867 // to | 856 patch_size = kPatchAreaSizeNoBranch + 4; |
868 // rlwinm(r0, value, 0, 31, 31, SetRC); | 857 } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) { |
869 // bc(label, BT/BF, 2) | 858 patch_size = kPatchAreaSizeNoBranch + 6; |
870 // and vice-versa to be disabled again. | 859 } else { |
871 CodePatcher patcher(isolate, patch_address, 2); | 860 DCHECK(false); |
872 Register reg = Assembler::GetRA(instr_at_patch); | 861 } |
| 862 CodePatcher patcher(isolate, patch_address, patch_size); |
| 863 Register reg; |
| 864 reg.reg_code = instr_at_patch & 0xf; |
873 if (check == ENABLE_INLINED_SMI_CHECK) { | 865 if (check == ENABLE_INLINED_SMI_CHECK) { |
874 DCHECK(Assembler::IsCmpRegister(instr_at_patch)); | 866 patcher.masm()->TestIfSmi(reg); |
875 DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(), | |
876 Assembler::GetRB(instr_at_patch).code()); | |
877 patcher.masm()->TestIfSmi(reg, r0); | |
878 } else { | 867 } else { |
| 868 // Emit a NOP after the 2-byte CmpP so the patch area keeps its full |
| 869 // 4-byte width (this pair is overwritten by TMLL when re-enabled). |
879 DCHECK(check == DISABLE_INLINED_SMI_CHECK); | 870 DCHECK(check == DISABLE_INLINED_SMI_CHECK); |
880 DCHECK(Assembler::IsAndi(instr_at_patch)); | 871 patcher.masm()->CmpP(reg, reg); |
881 patcher.masm()->cmp(reg, reg, cr0); | 872 #ifndef V8_TARGET_ARCH_S390X |
| 873 patcher.masm()->nop(); |
| 874 #endif |
882 } | 875 } |
883 DCHECK(Assembler::IsBranch(branch_instr)); | |
884 | 876 |
885 // Invert the logic of the branch | 877 Condition cc = al; |
886 if (Assembler::GetCondition(branch_instr) == eq) { | 878 if (Instruction::S390OpcodeValue(branch_address) == BRC) { |
887 patcher.EmitCondition(ne); | 879 cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20); |
| 880 DCHECK((cc == ne) || (cc == eq)); |
| 881 cc = (cc == ne) ? eq : ne; |
| 882 patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1)); |
| 883 } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) { |
| 884 cc = static_cast<Condition>( |
| 885 (branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36); |
| 886 DCHECK((cc == ne) || (cc == eq)); |
| 887 cc = (cc == ne) ? eq : ne; |
| 888 patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1)); |
888 } else { | 889 } else { |
889 DCHECK(Assembler::GetCondition(branch_instr) == ne); | 890 DCHECK(false); |
890 patcher.EmitCondition(eq); | |
891 } | 891 } |
892 } | 892 } |
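[Note] The branch flip at the end reads the 4-bit condition mask out of the encoded branch, inverts eq and ne, and re-emits the branch with the same target. For the 4-byte BRC the mask is bits 8-11 (hence & 0x00f00000 >> 20); for the 6-byte BRCL it sits at bits 36-39 of the 48-bit instruction. The offset fields count halfwords, which is why both re-emits shift the extracted offset left by one to get bytes back. A worked example of the BRC case, assuming V8's s390 Condition values eq = 0x8 and ne = 0x7 (the hardware CC-mask bits):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // BRC encoding: A7 (mask << 4 | 0x4) imm16, imm16 in halfwords.
      uint32_t brc = 0xA7840012;                     // brc eq, +0x12 halfwords
      uint32_t mask = (brc & 0x00F00000) >> 20;      // 0x8 -> eq
      uint32_t flipped = (mask == 0x8) ? 0x7 : 0x8;  // eq <-> ne
      uint32_t patched = (brc & 0xFF0FFFFF) | (flipped << 20);
      int byte_offset = (brc & 0xFFFF) << 1;         // halfwords -> bytes
      std::printf("%08X -> %08X, target %+d bytes\n", brc, patched, byte_offset);
      return 0;
    }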
| 893 |
893 } // namespace internal | 894 } // namespace internal |
894 } // namespace v8 | 895 } // namespace v8 |
895 | 896 |
896 #endif // V8_TARGET_ARCH_PPC | 897 #endif // V8_TARGET_ARCH_S390 |