OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 22 matching lines...) |
33 Array_getLength(assembler); | 33 Array_getLength(assembler); |
34 } | 34 } |
35 | 35 |
36 | 36 |
37 void Intrinsifier::Array_getIndexed(Assembler* assembler) { | 37 void Intrinsifier::Array_getIndexed(Assembler* assembler) { |
38 Label fall_through; | 38 Label fall_through; |
39 | 39 |
40 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index | 40 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index |
41 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array | 41 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array |
42 | 42 |
43 __ tst(R0, ShifterOperand(kSmiTagMask)); | 43 __ tst(R0, Operand(kSmiTagMask)); |
44 __ b(&fall_through, NE); // Index is not a smi, fall through | 44 __ b(&fall_through, NE); // Index is not a smi, fall through |
45 | 45 |
46 // Range check. | 46 // Range check. |
47 __ ldr(R6, FieldAddress(R1, Array::length_offset())); | 47 __ ldr(R6, FieldAddress(R1, Array::length_offset())); |
48 __ cmp(R0, ShifterOperand(R6)); | 48 __ cmp(R0, Operand(R6)); |
49 | 49 |
50 ASSERT(kSmiTagShift == 1); | 50 ASSERT(kSmiTagShift == 1); |
51 // array element at R1 + R0*2 + Array::data_offset - 1 | 51 // array element at R1 + R0*2 + Array::data_offset - 1 |
52 __ add(R6, R1, ShifterOperand(R0, LSL, 1), CC); | 52 __ add(R6, R1, Operand(R0, LSL, 1), CC); |
53 __ ldr(R0, FieldAddress(R6, Array::data_offset()), CC); | 53 __ ldr(R0, FieldAddress(R6, Array::data_offset()), CC); |
54 __ bx(LR, CC); | 54 __ bx(LR, CC); |
55 __ Bind(&fall_through); | 55 __ Bind(&fall_through); |
56 } | 56 } |
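Note: the checks above lean on Dart's Smi tagging. A small integer is stored shifted left by one with the low bit clear (hence the ASSERT(kSmiTagShift == 1) and the tst against kSmiTagMask), which is also why LSL 1 of a tagged index scales it straight to a byte offset on 32-bit ARM. A minimal C++ sketch of the encoding, using illustrative stand-ins rather than the VM's real helpers:

    #include <cstdint>
    // Illustrative only: mirrors kSmiTagMask == 1 and kSmiTagShift == 1.
    inline bool IsSmi(intptr_t raw) { return (raw & 1) == 0; }     // Tag bit clear => Smi.
    inline intptr_t SmiTag(intptr_t value) { return value << 1; }
    inline intptr_t SmiUntag(intptr_t raw) { return raw >> 1; }
    // A tagged index already holds value * 2, so base + (index << 1)
    // addresses element value * 4 == value * kWordSize on 32-bit ARM.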
57 | 57 |
58 | 58 |
59 void Intrinsifier::ImmutableList_getIndexed(Assembler* assembler) { | 59 void Intrinsifier::ImmutableList_getIndexed(Assembler* assembler) { |
60 Array_getIndexed(assembler); | 60 Array_getIndexed(assembler); |
61 } | 61 } |
62 | 62 |
(...skipping 34 matching lines...) |
97 __ CompareImmediate(R1, raw_null); | 97 __ CompareImmediate(R1, raw_null); |
98 __ b(&checked_ok, EQ); | 98 __ b(&checked_ok, EQ); |
99 | 99 |
100 // Check if it's dynamic. | 100 // Check if it's dynamic. |
101 // Get type at index 0. | 101 // Get type at index 0. |
102 __ ldr(R0, FieldAddress(R1, TypeArguments::type_at_offset(0))); | 102 __ ldr(R0, FieldAddress(R1, TypeArguments::type_at_offset(0))); |
103 __ CompareObject(R0, Type::ZoneHandle(Type::DynamicType())); | 103 __ CompareObject(R0, Type::ZoneHandle(Type::DynamicType())); |
104 __ b(&checked_ok, EQ); | 104 __ b(&checked_ok, EQ); |
105 | 105 |
106 // Check for int and num. | 106 // Check for int and num. |
107 __ tst(R2, ShifterOperand(kSmiTagMask)); // Value is Smi? | 107 __ tst(R2, Operand(kSmiTagMask)); // Value is Smi? |
108 __ b(&fall_through, NE); // Non-smi value. | 108 __ b(&fall_through, NE); // Non-smi value. |
109 __ CompareObject(R0, Type::ZoneHandle(Type::IntType())); | 109 __ CompareObject(R0, Type::ZoneHandle(Type::IntType())); |
110 __ b(&checked_ok, EQ); | 110 __ b(&checked_ok, EQ); |
111 __ CompareObject(R0, Type::ZoneHandle(Type::Number())); | 111 __ CompareObject(R0, Type::ZoneHandle(Type::Number())); |
112 __ b(&fall_through, NE); | 112 __ b(&fall_through, NE); |
113 __ Bind(&checked_ok); | 113 __ Bind(&checked_ok); |
114 } | 114 } |
115 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | 115 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. |
116 __ tst(R1, ShifterOperand(kSmiTagMask)); | 116 __ tst(R1, Operand(kSmiTagMask)); |
117 // Index not Smi. | 117 // Index not Smi. |
118 __ b(&fall_through, NE); | 118 __ b(&fall_through, NE); |
119 __ ldr(R0, Address(SP, 2 * kWordSize)); // Array. | 119 __ ldr(R0, Address(SP, 2 * kWordSize)); // Array. |
120 | 120 |
121 // Range check. | 121 // Range check. |
122 __ ldr(R3, FieldAddress(R0, Array::length_offset())); // Array length. | 122 __ ldr(R3, FieldAddress(R0, Array::length_offset())); // Array length. |
123 __ cmp(R1, ShifterOperand(R3)); | 123 __ cmp(R1, Operand(R3)); |
124 // Runtime throws exception. | 124 // Runtime throws exception. |
125 __ b(&fall_through, CS); | 125 __ b(&fall_through, CS); |
126 | 126 |
127 // Note that R1 is Smi, i.e., times 2. | 127 // Note that R1 is Smi, i.e., times 2. |
128 ASSERT(kSmiTagShift == 1); | 128 ASSERT(kSmiTagShift == 1); |
129 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | 129 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
130 __ add(R1, R0, ShifterOperand(R1, LSL, 1)); // R1 is Smi. | 130 __ add(R1, R0, Operand(R1, LSL, 1)); // R1 is Smi. |
131 __ StoreIntoObject(R0, | 131 __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2); |
132 FieldAddress(R1, Array::data_offset()), | |
133 R2); | |
134 // Caller is responsible for preserving the value if necessary. | 132 // Caller is responsible for preserving the value if necessary. |
135 __ Ret(); | 133 __ Ret(); |
136 __ Bind(&fall_through); | 134 __ Bind(&fall_through); |
137 } | 135 } |
138 | 136 |
139 | 137 |
140 // Allocate a GrowableObjectArray using the backing array specified. | 138 // Allocate a GrowableObjectArray using the backing array specified. |
141 // On stack: type argument (+1), data (+0). | 139 // On stack: type argument (+1), data (+0). |
142 void Intrinsifier::GrowableList_Allocate(Assembler* assembler) { | 140 void Intrinsifier::GrowableList_Allocate(Assembler* assembler) { |
143 // The newly allocated object is returned in R0. | 141 // The newly allocated object is returned in R0. |
(...skipping 11 matching lines...) |
155 | 153 |
156 __ LoadImmediate(R2, heap->TopAddress()); | 154 __ LoadImmediate(R2, heap->TopAddress()); |
157 __ ldr(R0, Address(R2, 0)); | 155 __ ldr(R0, Address(R2, 0)); |
158 __ AddImmediate(R1, R0, fixed_size); | 156 __ AddImmediate(R1, R0, fixed_size); |
159 | 157 |
160 // Check if the allocation fits into the remaining space. | 158 // Check if the allocation fits into the remaining space. |
161 // R0: potential new backing array object start. | 159 // R0: potential new backing array object start. |
162 // R1: potential next object start. | 160 // R1: potential next object start. |
163 __ LoadImmediate(R3, heap->EndAddress()); | 161 __ LoadImmediate(R3, heap->EndAddress()); |
164 __ ldr(R3, Address(R3, 0)); | 162 __ ldr(R3, Address(R3, 0)); |
165 __ cmp(R1, ShifterOperand(R3)); | 163 __ cmp(R1, Operand(R3)); |
166 __ b(&fall_through, CS); | 164 __ b(&fall_through, CS); |
167 | 165 |
168 // Successfully allocated the object(s), now update top to point to | 166 // Successfully allocated the object(s), now update top to point to |
169 // next object start and initialize the object. | 167 // next object start and initialize the object. |
170 __ str(R1, Address(R2, 0)); | 168 __ str(R1, Address(R2, 0)); |
171 __ AddImmediate(R0, kHeapObjectTag); | 169 __ AddImmediate(R0, kHeapObjectTag); |
172 | 170 |
173 // Initialize the tags. | 171 // Initialize the tags. |
174 // R0: new growable array object start as a tagged pointer. | 172 // R0: new growable array object start as a tagged pointer. |
175 const Class& cls = Class::Handle( | 173 const Class& cls = Class::Handle( |
(...skipping 44 matching lines...) |
220 __ Ret(); | 218 __ Ret(); |
221 } | 219 } |
222 | 220 |
223 | 221 |
224 void Intrinsifier::GrowableList_getIndexed(Assembler* assembler) { | 222 void Intrinsifier::GrowableList_getIndexed(Assembler* assembler) { |
225 Label fall_through; | 223 Label fall_through; |
226 | 224 |
227 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index | 225 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index |
228 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array | 226 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array |
229 | 227 |
230 __ tst(R0, ShifterOperand(kSmiTagMask)); | 228 __ tst(R0, Operand(kSmiTagMask)); |
231 __ b(&fall_through, NE); // Index is not a smi, fall through | 229 __ b(&fall_through, NE); // Index is not a smi, fall through |
232 | 230 |
233 // Range check. | 231 // Range check. |
234 __ ldr(R6, FieldAddress(R1, GrowableObjectArray::length_offset())); | 232 __ ldr(R6, FieldAddress(R1, GrowableObjectArray::length_offset())); |
235 __ cmp(R0, ShifterOperand(R6)); | 233 __ cmp(R0, Operand(R6)); |
236 | 234 |
237 ASSERT(kSmiTagShift == 1); | 235 ASSERT(kSmiTagShift == 1); |
238 // array element at R6 + R0 * 2 + Array::data_offset - 1 | 236 // array element at R6 + R0 * 2 + Array::data_offset - 1 |
239 __ ldr(R6, FieldAddress(R1, GrowableObjectArray::data_offset()), CC); // data | 237 __ ldr(R6, FieldAddress(R1, GrowableObjectArray::data_offset()), CC); // data |
240 __ add(R6, R6, ShifterOperand(R0, LSL, 1), CC); | 238 __ add(R6, R6, Operand(R0, LSL, 1), CC); |
241 __ ldr(R0, FieldAddress(R6, Array::data_offset()), CC); | 239 __ ldr(R0, FieldAddress(R6, Array::data_offset()), CC); |
242 __ bx(LR, CC); | 240 __ bx(LR, CC); |
243 __ Bind(&fall_through); | 241 __ Bind(&fall_through); |
244 } | 242 } |
245 | 243 |
246 | 244 |
247 // Set value into growable object array at specified index. | 245 // Set value into growable object array at specified index. |
248 // On stack: growable array (+2), index (+1), value (+0). | 246 // On stack: growable array (+2), index (+1), value (+0). |
249 void Intrinsifier::GrowableList_setIndexed(Assembler* assembler) { | 247 void Intrinsifier::GrowableList_setIndexed(Assembler* assembler) { |
250 if (FLAG_enable_type_checks) { | 248 if (FLAG_enable_type_checks) { |
251 return; | 249 return; |
252 } | 250 } |
253 Label fall_through; | 251 Label fall_through; |
254 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | 252 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. |
255 __ ldr(R0, Address(SP, 2 * kWordSize)); // GrowableArray. | 253 __ ldr(R0, Address(SP, 2 * kWordSize)); // GrowableArray. |
256 __ tst(R1, ShifterOperand(kSmiTagMask)); | 254 __ tst(R1, Operand(kSmiTagMask)); |
257 __ b(&fall_through, NE); // Non-smi index. | 255 __ b(&fall_through, NE); // Non-smi index. |
258 // Range check using _length field. | 256 // Range check using _length field. |
259 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::length_offset())); | 257 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::length_offset())); |
260 __ cmp(R1, ShifterOperand(R2)); | 258 __ cmp(R1, Operand(R2)); |
261 // Runtime throws exception. | 259 // Runtime throws exception. |
262 __ b(&fall_through, CS); | 260 __ b(&fall_through, CS); |
263 __ ldr(R0, FieldAddress(R0, GrowableObjectArray::data_offset())); // data. | 261 __ ldr(R0, FieldAddress(R0, GrowableObjectArray::data_offset())); // data. |
264 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | 262 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
265 // Note that R1 is Smi, i.e., times 2. | 263 // Note that R1 is Smi, i.e., times 2. |
266 ASSERT(kSmiTagShift == 1); | 264 ASSERT(kSmiTagShift == 1); |
267 __ add(R1, R0, ShifterOperand(R1, LSL, 1)); | 265 __ add(R1, R0, Operand(R1, LSL, 1)); |
268 __ StoreIntoObject(R0, | 266 __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2); |
269 FieldAddress(R1, Array::data_offset()), | |
270 R2); | |
271 __ Ret(); | 267 __ Ret(); |
272 __ Bind(&fall_through); | 268 __ Bind(&fall_through); |
273 } | 269 } |
274 | 270 |
275 | 271 |
276 // Set length of growable object array. The length cannot | 272 // Set length of growable object array. The length cannot |
277 // be greater than the length of the data container. | 273 // be greater than the length of the data container. |
278 // On stack: growable array (+1), length (+0). | 274 // On stack: growable array (+1), length (+0). |
279 void Intrinsifier::GrowableList_setLength(Assembler* assembler) { | 275 void Intrinsifier::GrowableList_setLength(Assembler* assembler) { |
280 __ ldr(R0, Address(SP, 1 * kWordSize)); // Growable array. | 276 __ ldr(R0, Address(SP, 1 * kWordSize)); // Growable array. |
281 __ ldr(R1, Address(SP, 0 * kWordSize)); // Length value. | 277 __ ldr(R1, Address(SP, 0 * kWordSize)); // Length value. |
282 __ tst(R1, ShifterOperand(kSmiTagMask)); // Check for Smi. | 278 __ tst(R1, Operand(kSmiTagMask)); // Check for Smi. |
283 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset()), EQ); | 279 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset()), EQ); |
284 __ bx(LR, EQ); | 280 __ bx(LR, EQ); |
285 // Fall through on non-Smi. | 281 // Fall through on non-Smi. |
286 } | 282 } |
287 | 283 |
288 | 284 |
289 // Set data of growable object array. | 285 // Set data of growable object array. |
290 // On stack: growable array (+1), data (+0). | 286 // On stack: growable array (+1), data (+0). |
291 void Intrinsifier::GrowableList_setData(Assembler* assembler) { | 287 void Intrinsifier::GrowableList_setData(Assembler* assembler) { |
292 if (FLAG_enable_type_checks) { | 288 if (FLAG_enable_type_checks) { |
293 return; | 289 return; |
294 } | 290 } |
295 Label fall_through; | 291 Label fall_through; |
296 __ ldr(R1, Address(SP, 0 * kWordSize)); // Data. | 292 __ ldr(R1, Address(SP, 0 * kWordSize)); // Data. |
297 // Check that data is an ObjectArray. | 293 // Check that data is an ObjectArray. |
298 __ tst(R1, ShifterOperand(kSmiTagMask)); | 294 __ tst(R1, Operand(kSmiTagMask)); |
299 __ b(&fall_through, EQ); // Data is Smi. | 295 __ b(&fall_through, EQ); // Data is Smi. |
300 __ CompareClassId(R1, kArrayCid, R0); | 296 __ CompareClassId(R1, kArrayCid, R0); |
301 __ b(&fall_through, NE); | 297 __ b(&fall_through, NE); |
302 __ ldr(R0, Address(SP, 1 * kWordSize)); // Growable array. | 298 __ ldr(R0, Address(SP, 1 * kWordSize)); // Growable array. |
303 __ StoreIntoObject(R0, | 299 __ StoreIntoObject(R0, |
304 FieldAddress(R0, GrowableObjectArray::data_offset()), | 300 FieldAddress(R0, GrowableObjectArray::data_offset()), |
305 R1); | 301 R1); |
306 __ Ret(); | 302 __ Ret(); |
307 __ Bind(&fall_through); | 303 __ Bind(&fall_through); |
308 } | 304 } |
(...skipping 10 matching lines...) |
319 Label fall_through; | 315 Label fall_through; |
320 // R0: Array. | 316 // R0: Array. |
321 __ ldr(R0, Address(SP, 1 * kWordSize)); | 317 __ ldr(R0, Address(SP, 1 * kWordSize)); |
322 // R1: length. | 318 // R1: length. |
323 __ ldr(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); | 319 __ ldr(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); |
324 // R2: data. | 320 // R2: data. |
325 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::data_offset())); | 321 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::data_offset())); |
326 // R3: capacity. | 322 // R3: capacity. |
327 __ ldr(R3, FieldAddress(R2, Array::length_offset())); | 323 __ ldr(R3, FieldAddress(R2, Array::length_offset())); |
328 // Compare length with capacity. | 324 // Compare length with capacity. |
329 __ cmp(R1, ShifterOperand(R3)); | 325 __ cmp(R1, Operand(R3)); |
330 __ b(&fall_through, EQ); // Must grow data. | 326 __ b(&fall_through, EQ); // Must grow data. |
331 const int32_t value_one = reinterpret_cast<int32_t>(Smi::New(1)); | 327 const int32_t value_one = reinterpret_cast<int32_t>(Smi::New(1)); |
332 // len = len + 1; | 328 // len = len + 1; |
333 __ add(R3, R1, ShifterOperand(value_one)); | 329 __ add(R3, R1, Operand(value_one)); |
334 __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset())); | 330 __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset())); |
335 __ ldr(R0, Address(SP, 0 * kWordSize)); // Value. | 331 __ ldr(R0, Address(SP, 0 * kWordSize)); // Value. |
336 ASSERT(kSmiTagShift == 1); | 332 ASSERT(kSmiTagShift == 1); |
337 __ add(R1, R2, ShifterOperand(R1, LSL, 1)); | 333 __ add(R1, R2, Operand(R1, LSL, 1)); |
338 __ StoreIntoObject(R2, | 334 __ StoreIntoObject(R2, FieldAddress(R1, Array::data_offset()), R0); |
339 FieldAddress(R1, Array::data_offset()), | |
340 R0); | |
341 const int32_t raw_null = reinterpret_cast<int32_t>(Object::null()); | 335 const int32_t raw_null = reinterpret_cast<int32_t>(Object::null()); |
342 __ LoadImmediate(R0, raw_null); | 336 __ LoadImmediate(R0, raw_null); |
343 __ Ret(); | 337 __ Ret(); |
344 __ Bind(&fall_through); | 338 __ Bind(&fall_through); |
345 } | 339 } |
346 | 340 |
347 | 341 |
348 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ | 342 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ |
349 Label fall_through; \ | 343 Label fall_through; \ |
350 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ | 344 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ |
351 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 345 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
352 /* Check that length is a positive Smi. */ \ | 346 /* Check that length is a positive Smi. */ \ |
353 /* R2: requested array length argument. */ \ | 347 /* R2: requested array length argument. */ \ |
354 __ tst(R2, ShifterOperand(kSmiTagMask)); \ | 348 __ tst(R2, Operand(kSmiTagMask)); \ |
355 __ b(&fall_through, NE); \ | 349 __ b(&fall_through, NE); \ |
356 __ CompareImmediate(R2, 0); \ | 350 __ CompareImmediate(R2, 0); \ |
357 __ b(&fall_through, LT); \ | 351 __ b(&fall_through, LT); \ |
358 __ SmiUntag(R2); \ | 352 __ SmiUntag(R2); \ |
359 /* Check for maximum allowed length. */ \ | 353 /* Check for maximum allowed length. */ \ |
360 /* R2: untagged array length. */ \ | 354 /* R2: untagged array length. */ \ |
361 __ CompareImmediate(R2, max_len); \ | 355 __ CompareImmediate(R2, max_len); \ |
362 __ b(&fall_through, GT); \ | 356 __ b(&fall_through, GT); \ |
363 __ mov(R2, ShifterOperand(R2, LSL, scale_shift)); \ | 357 __ mov(R2, Operand(R2, LSL, scale_shift)); \ |
364 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ | 358 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ |
365 __ AddImmediate(R2, fixed_size); \ | 359 __ AddImmediate(R2, fixed_size); \ |
366 __ bic(R2, R2, ShifterOperand(kObjectAlignment - 1)); \ | 360 __ bic(R2, R2, Operand(kObjectAlignment - 1)); \ |
367 Heap* heap = Isolate::Current()->heap(); \ | 361 Heap* heap = Isolate::Current()->heap(); \ |
368 \ | 362 \ |
369 __ LoadImmediate(R0, heap->TopAddress()); \ | 363 __ LoadImmediate(R0, heap->TopAddress()); \ |
370 __ ldr(R0, Address(R0, 0)); \ | 364 __ ldr(R0, Address(R0, 0)); \ |
371 \ | 365 \ |
372 /* R2: allocation size. */ \ | 366 /* R2: allocation size. */ \ |
373 __ add(R1, R0, ShifterOperand(R2)); \ | 367 __ add(R1, R0, Operand(R2)); \ |
374 __ b(&fall_through, VS); \ | 368 __ b(&fall_through, VS); \ |
375 \ | 369 \ |
376 /* Check if the allocation fits into the remaining space. */ \ | 370 /* Check if the allocation fits into the remaining space. */ \ |
377 /* R0: potential new object start. */ \ | 371 /* R0: potential new object start. */ \ |
378 /* R1: potential next object start. */ \ | 372 /* R1: potential next object start. */ \ |
379 /* R2: allocation size. */ \ | 373 /* R2: allocation size. */ \ |
380 __ LoadImmediate(R3, heap->EndAddress()); \ | 374 __ LoadImmediate(R3, heap->EndAddress()); \ |
381 __ ldr(R3, Address(R3, 0)); \ | 375 __ ldr(R3, Address(R3, 0)); \ |
382 __ cmp(R1, ShifterOperand(R3)); \ | 376 __ cmp(R1, Operand(R3)); \ |
383 __ b(&fall_through, CS); \ | 377 __ b(&fall_through, CS); \ |
384 \ | 378 \ |
385 /* Successfully allocated the object(s), now update top to point to */ \ | 379 /* Successfully allocated the object(s), now update top to point to */ \ |
386 /* next object start and initialize the object. */ \ | 380 /* next object start and initialize the object. */ \ |
387 __ LoadImmediate(R3, heap->TopAddress()); \ | 381 __ LoadImmediate(R3, heap->TopAddress()); \ |
388 __ str(R1, Address(R3, 0)); \ | 382 __ str(R1, Address(R3, 0)); \ |
389 __ AddImmediate(R0, kHeapObjectTag); \ | 383 __ AddImmediate(R0, kHeapObjectTag); \ |
390 __ UpdateAllocationStatsWithSize(cid, R2, R4); \ | 384 __ UpdateAllocationStatsWithSize(cid, R2, R4); \ |
391 /* Initialize the tags. */ \ | 385 /* Initialize the tags. */ \ |
392 /* R0: new object start as a tagged pointer. */ \ | 386 /* R0: new object start as a tagged pointer. */ \ |
393 /* R1: new object end address. */ \ | 387 /* R1: new object end address. */ \ |
394 /* R2: allocation size. */ \ | 388 /* R2: allocation size. */ \ |
395 { \ | 389 { \ |
396 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \ | 390 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \ |
397 __ mov(R2, ShifterOperand(R2, LSL, \ | 391 __ mov(R2, Operand(R2, LSL, \ |
398 RawObject::kSizeTagPos - kObjectAlignmentLog2), LS); \ | 392 RawObject::kSizeTagPos - kObjectAlignmentLog2), LS); \ |
399 __ mov(R2, ShifterOperand(0), HI); \ | 393 __ mov(R2, Operand(0), HI); \ |
400 \ | 394 \ |
401 /* Get the class index and insert it into the tags. */ \ | 395 /* Get the class index and insert it into the tags. */ \ |
402 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ | 396 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ |
403 __ orr(R2, R2, ShifterOperand(TMP)); \ | 397 __ orr(R2, R2, Operand(TMP)); \ |
404 __ str(R2, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \ | 398 __ str(R2, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \ |
405 } \ | 399 } \ |
406 /* Set the length field. */ \ | 400 /* Set the length field. */ \ |
407 /* R0: new object start as a tagged pointer. */ \ | 401 /* R0: new object start as a tagged pointer. */ \ |
408 /* R1: new object end address. */ \ | 402 /* R1: new object end address. */ \ |
409 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 403 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
410 __ StoreIntoObjectNoBarrier(R0, \ | 404 __ StoreIntoObjectNoBarrier(R0, \ |
411 FieldAddress(R0, type_name::length_offset()), \ | 405 FieldAddress(R0, type_name::length_offset()), \ |
412 R2); \ | 406 R2); \ |
413 /* Initialize all array elements to 0. */ \ | 407 /* Initialize all array elements to 0. */ \ |
414 /* R0: new object start as a tagged pointer. */ \ | 408 /* R0: new object start as a tagged pointer. */ \ |
415 /* R1: new object end address. */ \ | 409 /* R1: new object end address. */ \ |
416 /* R2: iterator which initially points to the start of the variable */ \ | 410 /* R2: iterator which initially points to the start of the variable */ \ |
417 /* data area to be initialized. */ \ | 411 /* data area to be initialized. */ \ |
418 /* R3: scratch register. */ \ | 412 /* R3: scratch register. */ \ |
419 __ LoadImmediate(R3, 0); \ | 413 __ LoadImmediate(R3, 0); \ |
420 __ AddImmediate(R2, R0, sizeof(Raw##type_name) - 1); \ | 414 __ AddImmediate(R2, R0, sizeof(Raw##type_name) - 1); \ |
421 Label init_loop; \ | 415 Label init_loop; \ |
422 __ Bind(&init_loop); \ | 416 __ Bind(&init_loop); \ |
423 __ cmp(R2, ShifterOperand(R1)); \ | 417 __ cmp(R2, Operand(R1)); \ |
424 __ str(R3, Address(R2, 0), CC); \ | 418 __ str(R3, Address(R2, 0), CC); \ |
425 __ add(R2, R2, ShifterOperand(kWordSize), CC); \ | 419 __ add(R2, R2, Operand(kWordSize), CC); \ |
426 __ b(&init_loop, CC); \ | 420 __ b(&init_loop, CC); \ |
427 \ | 421 \ |
428 __ Ret(); \ | 422 __ Ret(); \ |
429 __ Bind(&fall_through); \ | 423 __ Bind(&fall_through); \ |
430 | 424 |
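Note on the size computation in the macro: adding kObjectAlignment - 1 and then bic-ing with kObjectAlignment - 1 is the standard round-up-to-alignment idiom for a power-of-two alignment. The same arithmetic as a hedged C++ sketch:

    #include <cstdint>
    // Round size up to the next multiple of alignment (a power of two);
    // the bic above clears exactly the low bits this mask clears.
    inline intptr_t RoundUp(intptr_t size, intptr_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }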
431 | 425 |
432 // Gets the length of a TypedData. | 426 // Gets the length of a TypedData. |
433 void Intrinsifier::TypedData_getLength(Assembler* assembler) { | 427 void Intrinsifier::TypedData_getLength(Assembler* assembler) { |
434 __ ldr(R0, Address(SP, 0 * kWordSize)); | 428 __ ldr(R0, Address(SP, 0 * kWordSize)); |
435 __ ldr(R0, FieldAddress(R0, TypedData::length_offset())); | 429 __ ldr(R0, FieldAddress(R0, TypedData::length_offset())); |
(...skipping 29 matching lines...) |
465 } | 459 } |
466 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) | 460 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) |
467 #undef TYPED_DATA_ALLOCATOR | 461 #undef TYPED_DATA_ALLOCATOR |
468 | 462 |
469 | 463 |
470 // Loads args from stack into R0 and R1. | 464 // Loads args from stack into R0 and R1. |
471 // Tests if they are smis, jumps to label not_smi if not. | 465 // Tests if they are smis, jumps to label not_smi if not. |
472 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | 466 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { |
473 __ ldr(R0, Address(SP, + 0 * kWordSize)); | 467 __ ldr(R0, Address(SP, + 0 * kWordSize)); |
474 __ ldr(R1, Address(SP, + 1 * kWordSize)); | 468 __ ldr(R1, Address(SP, + 1 * kWordSize)); |
475 __ orr(TMP, R0, ShifterOperand(R1)); | 469 __ orr(TMP, R0, Operand(R1)); |
476 __ tst(TMP, ShifterOperand(kSmiTagMask)); | 470 __ tst(TMP, Operand(kSmiTagMask)); |
477 __ b(not_smi, NE); | 471 __ b(not_smi, NE); |
478 return; | 472 return; |
479 } | 473 } |
480 | 474 |
481 | 475 |
482 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { | 476 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { |
483 Label fall_through; | 477 Label fall_through; |
484 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 478 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
485 __ adds(R0, R0, ShifterOperand(R1)); // Adds. | 479 __ adds(R0, R0, Operand(R1)); // Adds. |
486 __ bx(LR, VC); // Return if no overflow. | 480 __ bx(LR, VC); // Return if no overflow. |
487 // Otherwise fall through. | 481 // Otherwise fall through. |
488 __ Bind(&fall_through); | 482 __ Bind(&fall_through); |
489 } | 483 } |
490 | 484 |
491 | 485 |
492 void Intrinsifier::Integer_add(Assembler* assembler) { | 486 void Intrinsifier::Integer_add(Assembler* assembler) { |
493 Integer_addFromInteger(assembler); | 487 Integer_addFromInteger(assembler); |
494 } | 488 } |
495 | 489 |
496 | 490 |
497 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 491 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
498 Label fall_through; | 492 Label fall_through; |
499 TestBothArgumentsSmis(assembler, &fall_through); | 493 TestBothArgumentsSmis(assembler, &fall_through); |
500 __ subs(R0, R0, ShifterOperand(R1)); // Subtract. | 494 __ subs(R0, R0, Operand(R1)); // Subtract. |
501 __ bx(LR, VC); // Return if no overflow. | 495 __ bx(LR, VC); // Return if no overflow. |
502 // Otherwise fall through. | 496 // Otherwise fall through. |
503 __ Bind(&fall_through); | 497 __ Bind(&fall_through); |
504 } | 498 } |
505 | 499 |
506 | 500 |
507 void Intrinsifier::Integer_sub(Assembler* assembler) { | 501 void Intrinsifier::Integer_sub(Assembler* assembler) { |
508 Label fall_through; | 502 Label fall_through; |
509 TestBothArgumentsSmis(assembler, &fall_through); | 503 TestBothArgumentsSmis(assembler, &fall_through); |
510 __ subs(R0, R1, ShifterOperand(R0)); // Subtract. | 504 __ subs(R0, R1, Operand(R0)); // Subtract. |
511 __ bx(LR, VC); // Return if no overflow. | 505 __ bx(LR, VC); // Return if no overflow. |
512 // Otherwise fall through. | 506 // Otherwise fall through. |
513 __ Bind(&fall_through); | 507 __ Bind(&fall_through); |
514 } | 508 } |
515 | 509 |
516 | 510 |
517 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 511 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
518 Label fall_through; | 512 Label fall_through; |
519 | 513 |
520 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 514 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
521 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. | 515 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. |
522 | 516 |
523 if (TargetCPUFeatures::arm_version() == ARMv7) { | 517 if (TargetCPUFeatures::arm_version() == ARMv7) { |
524 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. | 518 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. |
525 __ cmp(IP, ShifterOperand(R0, ASR, 31)); | 519 __ cmp(IP, Operand(R0, ASR, 31)); |
526 __ bx(LR, EQ); | 520 __ bx(LR, EQ); |
527 } else { | 521 } else { |
528 __ CheckMultSignedOverflow(R0, R1, IP, D0, D1, &fall_through); | 522 __ CheckMultSignedOverflow(R0, R1, IP, D0, D1, &fall_through); |
529 __ mul(R0, R0, R1); | 523 __ mul(R0, R0, R1); |
530 __ Ret(); | 524 __ Ret(); |
531 } | 525 } |
532 | 526 |
533 __ Bind(&fall_through); // Fall through on overflow. | 527 __ Bind(&fall_through); // Fall through on overflow. |
534 } | 528 } |
535 | 529 |
(...skipping 15 matching lines...) |
551 // R1: Untagged result (remainder). | 545 // R1: Untagged result (remainder). |
552 static void EmitRemainderOperation(Assembler* assembler) { | 546 static void EmitRemainderOperation(Assembler* assembler) { |
553 Label modulo; | 547 Label modulo; |
554 const Register left = R1; | 548 const Register left = R1; |
555 const Register right = R0; | 549 const Register right = R0; |
556 const Register result = R1; | 550 const Register result = R1; |
557 const Register tmp = R2; | 551 const Register tmp = R2; |
558 ASSERT(left == result); | 552 ASSERT(left == result); |
559 | 553 |
560 // Check for quick zero results. | 554 // Check for quick zero results. |
561 __ cmp(left, ShifterOperand(0)); | 555 __ cmp(left, Operand(0)); |
562 __ mov(R0, ShifterOperand(0), EQ); | 556 __ mov(R0, Operand(0), EQ); |
563 __ bx(LR, EQ); // left is 0? Return 0. | 557 __ bx(LR, EQ); // left is 0? Return 0. |
564 __ cmp(left, ShifterOperand(right)); | 558 __ cmp(left, Operand(right)); |
565 __ mov(R0, ShifterOperand(0), EQ); | 559 __ mov(R0, Operand(0), EQ); |
566 __ bx(LR, EQ); // left == right? Return 0. | 560 __ bx(LR, EQ); // left == right? Return 0. |
567 | 561 |
568 // Check if result should be left. | 562 // Check if result should be left. |
569 __ cmp(left, ShifterOperand(0)); | 563 __ cmp(left, Operand(0)); |
570 __ b(&modulo, LT); | 564 __ b(&modulo, LT); |
571 // left is positive. | 565 // left is positive. |
572 __ cmp(left, ShifterOperand(right)); | 566 __ cmp(left, Operand(right)); |
573 // left is less than right, result is left. | 567 // left is less than right, result is left. |
574 __ mov(R0, ShifterOperand(left), LT); | 568 __ mov(R0, Operand(left), LT); |
575 __ bx(LR, LT); | 569 __ bx(LR, LT); |
576 | 570 |
577 __ Bind(&modulo); | 571 __ Bind(&modulo); |
578 // result <- left - right * (left / right) | 572 // result <- left - right * (left / right) |
579 __ SmiUntag(left); | 573 __ SmiUntag(left); |
580 __ SmiUntag(right); | 574 __ SmiUntag(right); |
581 | 575 |
582 __ IntegerDivide(tmp, left, right, D1, D0); | 576 __ IntegerDivide(tmp, left, right, D1, D0); |
583 | 577 |
584 __ mls(result, right, tmp, left); // result <- left - right * TMP | 578 __ mls(result, right, tmp, left); // result <- left - right * TMP |
585 return; | 579 return; |
586 } | 580 } |
587 | 581 |
588 | 582 |
589 // Implementation: | 583 // Implementation: |
590 // res = left % right; | 584 // res = left % right; |
591 // if (res < 0) { | 585 // if (res < 0) { |
592 // if (right < 0) { | 586 // if (right < 0) { |
593 // res = res - right; | 587 // res = res - right; |
594 // } else { | 588 // } else { |
595 // res = res + right; | 589 // res = res + right; |
596 // } | 590 // } |
597 // } | 591 // } |
598 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 592 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
599 // Check to see if we have integer division | 593 // Check to see if we have integer division |
600 Label fall_through; | 594 Label fall_through; |
601 __ ldr(R1, Address(SP, + 0 * kWordSize)); | 595 __ ldr(R1, Address(SP, + 0 * kWordSize)); |
602 __ ldr(R0, Address(SP, + 1 * kWordSize)); | 596 __ ldr(R0, Address(SP, + 1 * kWordSize)); |
603 __ orr(TMP, R0, ShifterOperand(R1)); | 597 __ orr(TMP, R0, Operand(R1)); |
604 __ tst(TMP, ShifterOperand(kSmiTagMask)); | 598 __ tst(TMP, Operand(kSmiTagMask)); |
605 __ b(&fall_through, NE); | 599 __ b(&fall_through, NE); |
606 // R1: Tagged left (dividend). | 600 // R1: Tagged left (dividend). |
607 // R0: Tagged right (divisor). | 601 // R0: Tagged right (divisor). |
608 // Check if modulo by zero -> exception thrown in main function. | 602 // Check if modulo by zero -> exception thrown in main function. |
609 __ cmp(R0, ShifterOperand(0)); | 603 __ cmp(R0, Operand(0)); |
610 __ b(&fall_through, EQ); | 604 __ b(&fall_through, EQ); |
611 EmitRemainderOperation(assembler); | 605 EmitRemainderOperation(assembler); |
612 // Untagged right in R0. Untagged remainder result in R1. | 606 // Untagged right in R0. Untagged remainder result in R1. |
613 | 607 |
614 __ cmp(R1, ShifterOperand(0)); | 608 __ cmp(R1, Operand(0)); |
615 __ mov(R0, ShifterOperand(R1, LSL, 1), GE); // Tag and move result to R0. | 609 __ mov(R0, Operand(R1, LSL, 1), GE); // Tag and move result to R0. |
616 __ bx(LR, GE); | 610 __ bx(LR, GE); |
617 | 611 |
618 // Result is negative, adjust it. | 612 // Result is negative, adjust it. |
619 __ cmp(R0, ShifterOperand(0)); | 613 __ cmp(R0, Operand(0)); |
620 __ sub(R0, R1, ShifterOperand(R0), LT); | 614 __ sub(R0, R1, Operand(R0), LT); |
621 __ add(R0, R1, ShifterOperand(R0), GE); | 615 __ add(R0, R1, Operand(R0), GE); |
622 __ SmiTag(R0); | 616 __ SmiTag(R0); |
623 __ Ret(); | 617 __ Ret(); |
624 | 618 |
625 __ Bind(&fall_through); | 619 __ Bind(&fall_through); |
626 } | 620 } |
627 | 621 |
628 | 622 |
629 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { | 623 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { |
630 // Check to see if we have integer division | 624 // Check to see if we have integer division |
631 Label fall_through; | 625 Label fall_through; |
632 | 626 |
633 TestBothArgumentsSmis(assembler, &fall_through); | 627 TestBothArgumentsSmis(assembler, &fall_through); |
634 __ cmp(R0, ShifterOperand(0)); | 628 __ cmp(R0, Operand(0)); |
635 __ b(&fall_through, EQ); // If b is 0, fall through. | 629 __ b(&fall_through, EQ); // If b is 0, fall through. |
636 | 630 |
637 __ SmiUntag(R0); | 631 __ SmiUntag(R0); |
638 __ SmiUntag(R1); | 632 __ SmiUntag(R1); |
639 | 633 |
640 __ IntegerDivide(R0, R1, R0, D1, D0); | 634 __ IntegerDivide(R0, R1, R0, D1, D0); |
641 | 635 |
642 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we | 636 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we |
643 // cannot tag the result. | 637 // cannot tag the result. |
644 __ CompareImmediate(R0, 0x40000000); | 638 __ CompareImmediate(R0, 0x40000000); |
645 __ SmiTag(R0, NE); // Not equal. Okay to tag and return. | 639 __ SmiTag(R0, NE); // Not equal. Okay to tag and return. |
646 __ bx(LR, NE); // Return. | 640 __ bx(LR, NE); // Return. |
647 __ Bind(&fall_through); | 641 __ Bind(&fall_through); |
648 } | 642 } |
649 | 643 |
650 | 644 |
651 void Intrinsifier::Integer_negate(Assembler* assembler) { | 645 void Intrinsifier::Integer_negate(Assembler* assembler) { |
652 Label fall_through; | 646 Label fall_through; |
653 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Grab first argument. | 647 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Grab first argument. |
654 __ tst(R0, ShifterOperand(kSmiTagMask)); // Test for Smi. | 648 __ tst(R0, Operand(kSmiTagMask)); // Test for Smi. |
655 __ b(&fall_through, NE); | 649 __ b(&fall_through, NE); |
656 __ rsbs(R0, R0, ShifterOperand(0)); // R0 is a Smi. R0 <- 0 - R0. | 650 __ rsbs(R0, R0, Operand(0)); // R0 is a Smi. R0 <- 0 - R0. |
657 __ bx(LR, VC); // Return if there wasn't overflow, fall through otherwise. | 651 __ bx(LR, VC); // Return if there wasn't overflow, fall through otherwise. |
658 // R0 is not a Smi. Fall through. | 652 // R0 is not a Smi. Fall through. |
659 __ Bind(&fall_through); | 653 __ Bind(&fall_through); |
660 } | 654 } |
661 | 655 |
662 | 656 |
663 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { | 657 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { |
664 Label fall_through; | 658 Label fall_through; |
665 | 659 |
666 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 660 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
667 __ and_(R0, R0, ShifterOperand(R1)); | 661 __ and_(R0, R0, Operand(R1)); |
668 | 662 |
669 __ Ret(); | 663 __ Ret(); |
670 __ Bind(&fall_through); | 664 __ Bind(&fall_through); |
671 } | 665 } |
672 | 666 |
673 | 667 |
674 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { | 668 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { |
675 Integer_bitAndFromInteger(assembler); | 669 Integer_bitAndFromInteger(assembler); |
676 } | 670 } |
677 | 671 |
678 | 672 |
679 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { | 673 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { |
680 Label fall_through; | 674 Label fall_through; |
681 | 675 |
682 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 676 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
683 __ orr(R0, R0, ShifterOperand(R1)); | 677 __ orr(R0, R0, Operand(R1)); |
684 | 678 |
685 __ Ret(); | 679 __ Ret(); |
686 __ Bind(&fall_through); | 680 __ Bind(&fall_through); |
687 } | 681 } |
688 | 682 |
689 | 683 |
690 void Intrinsifier::Integer_bitOr(Assembler* assembler) { | 684 void Intrinsifier::Integer_bitOr(Assembler* assembler) { |
691 Integer_bitOrFromInteger(assembler); | 685 Integer_bitOrFromInteger(assembler); |
692 } | 686 } |
693 | 687 |
694 | 688 |
695 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { | 689 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { |
696 Label fall_through; | 690 Label fall_through; |
697 | 691 |
698 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 692 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
699 __ eor(R0, R0, ShifterOperand(R1)); | 693 __ eor(R0, R0, Operand(R1)); |
700 | 694 |
701 __ Ret(); | 695 __ Ret(); |
702 __ Bind(&fall_through); | 696 __ Bind(&fall_through); |
703 } | 697 } |
704 | 698 |
705 | 699 |
706 void Intrinsifier::Integer_bitXor(Assembler* assembler) { | 700 void Intrinsifier::Integer_bitXor(Assembler* assembler) { |
707 Integer_bitXorFromInteger(assembler); | 701 Integer_bitXorFromInteger(assembler); |
708 } | 702 } |
709 | 703 |
710 | 704 |
711 void Intrinsifier::Integer_shl(Assembler* assembler) { | 705 void Intrinsifier::Integer_shl(Assembler* assembler) { |
712 ASSERT(kSmiTagShift == 1); | 706 ASSERT(kSmiTagShift == 1); |
713 ASSERT(kSmiTag == 0); | 707 ASSERT(kSmiTag == 0); |
714 Label fall_through; | 708 Label fall_through; |
715 | 709 |
716 TestBothArgumentsSmis(assembler, &fall_through); | 710 TestBothArgumentsSmis(assembler, &fall_through); |
717 __ CompareImmediate(R0, Smi::RawValue(Smi::kBits)); | 711 __ CompareImmediate(R0, Smi::RawValue(Smi::kBits)); |
718 __ b(&fall_through, HI); | 712 __ b(&fall_through, HI); |
719 | 713 |
720 __ SmiUntag(R0); | 714 __ SmiUntag(R0); |
721 | 715 |
722 // Check for overflow by shifting left and shifting back arithmetically. | 716 // Check for overflow by shifting left and shifting back arithmetically. |
723 // If the result is different from the original, there was overflow. | 717 // If the result is different from the original, there was overflow. |
724 __ mov(IP, ShifterOperand(R1, LSL, R0)); | 718 __ mov(IP, Operand(R1, LSL, R0)); |
725 __ cmp(R1, ShifterOperand(IP, ASR, R0)); | 719 __ cmp(R1, Operand(IP, ASR, R0)); |
726 | 720 |
727 // No overflow, result in R0. | 721 // No overflow, result in R0. |
728 __ mov(R0, ShifterOperand(R1, LSL, R0), EQ); | 722 __ mov(R0, Operand(R1, LSL, R0), EQ); |
729 __ bx(LR, EQ); | 723 __ bx(LR, EQ); |
730 | 724 |
731 // Arguments are Smi but the shift produced an overflow to Mint. | 725 // Arguments are Smi but the shift produced an overflow to Mint. |
732 __ CompareImmediate(R1, 0); | 726 __ CompareImmediate(R1, 0); |
733 __ b(&fall_through, LT); | 727 __ b(&fall_through, LT); |
734 __ SmiUntag(R1); | 728 __ SmiUntag(R1); |
735 | 729 |
736 // Pull off high bits that will be shifted off of R1 by making a mask | 730 // Pull off high bits that will be shifted off of R1 by making a mask |
737 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. | 731 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. |
738 // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) | 732 // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) |
739 // lo bits = R1 << R0 | 733 // lo bits = R1 << R0 |
740 __ LoadImmediate(R7, 1); | 734 __ LoadImmediate(R7, 1); |
741 __ mov(R7, ShifterOperand(R7, LSL, R0)); // R7 <- 1 << R0 | 735 __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0 |
742 __ sub(R7, R7, ShifterOperand(1)); // R7 <- R7 - 1 | 736 __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1 |
743 __ rsb(R8, R0, ShifterOperand(32)); // R8 <- 32 - R0 | 737 __ rsb(R8, R0, Operand(32)); // R8 <- 32 - R0 |
744 __ mov(R7, ShifterOperand(R7, LSL, R8)); // R7 <- R7 << R8 | 738 __ mov(R7, Operand(R7, LSL, R8)); // R7 <- R7 << R8 |
745 __ and_(R7, R1, ShifterOperand(R7)); // R7 <- R7 & R1 | 739 __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1 |
746 __ mov(R7, ShifterOperand(R7, LSR, R8)); // R7 <- R7 >> R8 | 740 __ mov(R7, Operand(R7, LSR, R8)); // R7 <- R7 >> R8 |
747 // Now R7 has the bits that fall off of R1 on a left shift. | 741 // Now R7 has the bits that fall off of R1 on a left shift. |
748 __ mov(R1, ShifterOperand(R1, LSL, R0)); // R1 gets the low bits. | 742 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. |
749 | 743 |
750 const Class& mint_class = Class::Handle( | 744 const Class& mint_class = Class::Handle( |
751 Isolate::Current()->object_store()->mint_class()); | 745 Isolate::Current()->object_store()->mint_class()); |
752 __ TryAllocate(mint_class, &fall_through, R0, R2); | 746 __ TryAllocate(mint_class, &fall_through, R0, R2); |
753 | 747 |
754 | 748 |
755 __ str(R1, FieldAddress(R0, Mint::value_offset())); | 749 __ str(R1, FieldAddress(R0, Mint::value_offset())); |
756 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize)); | 750 __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize)); |
757 __ Ret(); | 751 __ Ret(); |
758 __ Bind(&fall_through); | 752 __ Bind(&fall_through); |
759 } | 753 } |
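Note: the overflow test in Integer_shl is a round-trip check: shift left, arithmetic-shift back, and compare with the original. A C++ sketch of the same trick (the unsigned cast sidesteps signed-shift undefined behavior; assumes 0 <= shift < 32):

    #include <cstdint>
    // True if value << shift does not fit in 32 signed bits.
    bool ShlOverflows(int32_t value, int shift) {
      int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(value) << shift);
      return (shifted >> shift) != value;  // The ASR round-trip differs on overflow.
    }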
760 | 754 |
761 | 755 |
762 static void Get64SmiOrMint(Assembler* assembler, | 756 static void Get64SmiOrMint(Assembler* assembler, |
763 Register res_hi, | 757 Register res_hi, |
764 Register res_lo, | 758 Register res_lo, |
765 Register reg, | 759 Register reg, |
766 Label* not_smi_or_mint) { | 760 Label* not_smi_or_mint) { |
767 Label not_smi, done; | 761 Label not_smi, done; |
768 __ tst(reg, ShifterOperand(kSmiTagMask)); | 762 __ tst(reg, Operand(kSmiTagMask)); |
769 __ b(¬_smi, NE); | 763 __ b(¬_smi, NE); |
770 __ SmiUntag(reg); | 764 __ SmiUntag(reg); |
771 | 765 |
772 // Sign extend to 64 bit | 766 // Sign extend to 64 bit |
773 __ mov(res_lo, ShifterOperand(reg)); | 767 __ mov(res_lo, Operand(reg)); |
774 __ mov(res_hi, ShifterOperand(res_lo, ASR, 31)); | 768 __ mov(res_hi, Operand(res_lo, ASR, 31)); |
775 __ b(&done); | 769 __ b(&done); |
776 | 770 |
777 __ Bind(¬_smi); | 771 __ Bind(¬_smi); |
778 __ CompareClassId(reg, kMintCid, res_lo); | 772 __ CompareClassId(reg, kMintCid, res_lo); |
779 __ b(not_smi_or_mint, NE); | 773 __ b(not_smi_or_mint, NE); |
780 | 774 |
781 // Mint. | 775 // Mint. |
782 __ ldr(res_lo, FieldAddress(reg, Mint::value_offset())); | 776 __ ldr(res_lo, FieldAddress(reg, Mint::value_offset())); |
783 __ ldr(res_hi, FieldAddress(reg, Mint::value_offset() + kWordSize)); | 777 __ ldr(res_hi, FieldAddress(reg, Mint::value_offset() + kWordSize)); |
784 __ Bind(&done); | 778 __ Bind(&done); |
785 return; | 779 return; |
786 } | 780 } |
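Note: the sign extension in Get64SmiOrMint is an ASR by 31, so the high word becomes all zeros or all ones depending on the sign bit. In C++ terms (sketch; relies on arithmetic right shift of negative values, which ARM provides):

    #include <cstdint>
    int64_t SignExtend32To64(int32_t lo) {
      int32_t hi = lo >> 31;  // Replicate the sign bit across the high word.
      return (static_cast<int64_t>(hi) << 32) | static_cast<uint32_t>(lo);
    }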
787 | 781 |
788 | 782 |
789 static void CompareIntegers(Assembler* assembler, Condition true_condition) { | 783 static void CompareIntegers(Assembler* assembler, Condition true_condition) { |
790 Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through; | 784 Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through; |
791 TestBothArgumentsSmis(assembler, &try_mint_smi); | 785 TestBothArgumentsSmis(assembler, &try_mint_smi); |
792 // R0 contains the right argument. R1 contains the left argument. | 786 // R0 contains the right argument. R1 contains the left argument. |
793 | 787 |
794 __ cmp(R1, ShifterOperand(R0)); | 788 __ cmp(R1, Operand(R0)); |
795 __ b(&is_true, true_condition); | 789 __ b(&is_true, true_condition); |
796 __ Bind(&is_false); | 790 __ Bind(&is_false); |
797 __ LoadObject(R0, Bool::False()); | 791 __ LoadObject(R0, Bool::False()); |
798 __ Ret(); | 792 __ Ret(); |
799 __ Bind(&is_true); | 793 __ Bind(&is_true); |
800 __ LoadObject(R0, Bool::True()); | 794 __ LoadObject(R0, Bool::True()); |
801 __ Ret(); | 795 __ Ret(); |
802 | 796 |
803 // 64-bit comparison | 797 // 64-bit comparison |
804 Condition hi_true_cond, hi_false_cond, lo_false_cond; | 798 Condition hi_true_cond, hi_false_cond, lo_false_cond; |
(...skipping 18 matching lines...) |
823 __ Bind(&try_mint_smi); | 817 __ Bind(&try_mint_smi); |
824 // Get left as 64 bit integer. | 818 // Get left as 64 bit integer. |
825 Get64SmiOrMint(assembler, R3, R2, R1, &fall_through); | 819 Get64SmiOrMint(assembler, R3, R2, R1, &fall_through); |
826 // Get right as 64 bit integer. | 820 // Get right as 64 bit integer. |
827 Get64SmiOrMint(assembler, R7, R6, R0, &fall_through); | 821 Get64SmiOrMint(assembler, R7, R6, R0, &fall_through); |
828 // R3: left high. | 822 // R3: left high. |
829 // R2: left low. | 823 // R2: left low. |
830 // R7: right high. | 824 // R7: right high. |
831 // R6: right low. | 825 // R6: right low. |
832 | 826 |
833 __ cmp(R3, ShifterOperand(R7)); // Compare left high, right high. | 827 __ cmp(R3, Operand(R7)); // Compare left high, right high. |
834 __ b(&is_false, hi_false_cond); | 828 __ b(&is_false, hi_false_cond); |
835 __ b(&is_true, hi_true_cond); | 829 __ b(&is_true, hi_true_cond); |
836 __ cmp(R2, ShifterOperand(R6)); // Compare left lo, right lo. | 830 __ cmp(R2, Operand(R6)); // Compare left lo, right lo. |
837 __ b(&is_false, lo_false_cond); | 831 __ b(&is_false, lo_false_cond); |
838 // Else is true. | 832 // Else is true. |
839 __ b(&is_true); | 833 __ b(&is_true); |
840 | 834 |
841 __ Bind(&fall_through); | 835 __ Bind(&fall_through); |
842 } | 836 } |
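Note: the mint path compares 64-bit values one word at a time; the signed high words decide, and only on a tie are the low words compared unsigned (CS above is an unsigned condition). A compact sketch of the idea:

    #include <cstdint>
    // left < right for 64-bit values split into signed-high / unsigned-low words.
    bool Less64(int32_t left_hi, uint32_t left_lo,
                int32_t right_hi, uint32_t right_lo) {
      if (left_hi != right_hi) return left_hi < right_hi;  // High words decide (signed).
      return left_lo < right_lo;                           // Tie-break unsigned.
    }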
843 | 837 |
844 | 838 |
845 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { | 839 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { |
846 CompareIntegers(assembler, LT); | 840 CompareIntegers(assembler, LT); |
(...skipping 20 matching lines...) |
867 } | 861 } |
868 | 862 |
869 | 863 |
870 // This is called for Smi, Mint and Bigint receivers. The right argument | 864 // This is called for Smi, Mint and Bigint receivers. The right argument |
871 // can be Smi, Mint, Bigint or double. | 865 // can be Smi, Mint, Bigint or double. |
872 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { | 866 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { |
873 Label fall_through, true_label, check_for_mint; | 867 Label fall_through, true_label, check_for_mint; |
874 // For integer receiver '===' check first. | 868 // For integer receiver '===' check first. |
875 __ ldr(R0, Address(SP, 0 * kWordSize)); | 869 __ ldr(R0, Address(SP, 0 * kWordSize)); |
876 __ ldr(R1, Address(SP, 1 * kWordSize)); | 870 __ ldr(R1, Address(SP, 1 * kWordSize)); |
877 __ cmp(R0, ShifterOperand(R1)); | 871 __ cmp(R0, Operand(R1)); |
878 __ b(&true_label, EQ); | 872 __ b(&true_label, EQ); |
879 | 873 |
880 __ orr(R2, R0, ShifterOperand(R1)); | 874 __ orr(R2, R0, Operand(R1)); |
881 __ tst(R2, ShifterOperand(kSmiTagMask)); | 875 __ tst(R2, Operand(kSmiTagMask)); |
882 __ b(&check_for_mint, NE); // If R0 or R1 is not a smi do Mint checks. | 876 __ b(&check_for_mint, NE); // If R0 or R1 is not a smi do Mint checks. |
883 | 877 |
884 // Both arguments are smi, '===' is good enough. | 878 // Both arguments are smi, '===' is good enough. |
885 __ LoadObject(R0, Bool::False()); | 879 __ LoadObject(R0, Bool::False()); |
886 __ Ret(); | 880 __ Ret(); |
887 __ Bind(&true_label); | 881 __ Bind(&true_label); |
888 __ LoadObject(R0, Bool::True()); | 882 __ LoadObject(R0, Bool::True()); |
889 __ Ret(); | 883 __ Ret(); |
890 | 884 |
891 // At least one of the arguments was not Smi. | 885 // At least one of the arguments was not Smi. |
892 Label receiver_not_smi; | 886 Label receiver_not_smi; |
893 __ Bind(&check_for_mint); | 887 __ Bind(&check_for_mint); |
894 | 888 |
895 __ tst(R1, ShifterOperand(kSmiTagMask)); // Check receiver. | 889 __ tst(R1, Operand(kSmiTagMask)); // Check receiver. |
896 __ b(&receiver_not_smi, NE); | 890 __ b(&receiver_not_smi, NE); |
897 | 891 |
898 // Left (receiver) is Smi, return false if right is not Double. | 892 // Left (receiver) is Smi, return false if right is not Double. |
899 // Note that an instance of Mint or Bigint never contains a value that can be | 893 // Note that an instance of Mint or Bigint never contains a value that can be |
900 // represented by Smi. | 894 // represented by Smi. |
901 | 895 |
902 __ CompareClassId(R0, kDoubleCid, R2); | 896 __ CompareClassId(R0, kDoubleCid, R2); |
903 __ b(&fall_through, EQ); | 897 __ b(&fall_through, EQ); |
904 __ LoadObject(R0, Bool::False()); // Smi == Mint -> false. | 898 __ LoadObject(R0, Bool::False()); // Smi == Mint -> false. |
905 __ Ret(); | 899 __ Ret(); |
906 | 900 |
907 __ Bind(&receiver_not_smi); | 901 __ Bind(&receiver_not_smi); |
908 // R1: receiver. | 902 // R1: receiver. |
909 | 903 |
910 __ CompareClassId(R1, kMintCid, R2); | 904 __ CompareClassId(R1, kMintCid, R2); |
911 __ b(&fall_through, NE); | 905 __ b(&fall_through, NE); |
912 // Receiver is Mint, return false if right is Smi. | 906 // Receiver is Mint, return false if right is Smi. |
913 __ tst(R0, ShifterOperand(kSmiTagMask)); | 907 __ tst(R0, Operand(kSmiTagMask)); |
914 __ b(&fall_through, NE); | 908 __ b(&fall_through, NE); |
915 __ LoadObject(R0, Bool::False()); | 909 __ LoadObject(R0, Bool::False()); |
916 __ Ret(); | 910 __ Ret(); |
917 // TODO(srdjan): Implement Mint == Mint comparison. | 911 // TODO(srdjan): Implement Mint == Mint comparison. |
918 | 912 |
919 __ Bind(&fall_through); | 913 __ Bind(&fall_through); |
920 } | 914 } |
921 | 915 |
922 | 916 |
923 void Intrinsifier::Integer_equal(Assembler* assembler) { | 917 void Intrinsifier::Integer_equal(Assembler* assembler) { |
924 Integer_equalToInteger(assembler); | 918 Integer_equalToInteger(assembler); |
925 } | 919 } |
926 | 920 |
927 | 921 |
928 void Intrinsifier::Integer_sar(Assembler* assembler) { | 922 void Intrinsifier::Integer_sar(Assembler* assembler) { |
929 Label fall_through; | 923 Label fall_through; |
930 | 924 |
931 TestBothArgumentsSmis(assembler, &fall_through); | 925 TestBothArgumentsSmis(assembler, &fall_through); |
932 // Shift amount in R0. Value to shift in R1. | 926 // Shift amount in R0. Value to shift in R1. |
933 | 927 |
934 // Fall through if shift amount is negative. | 928 // Fall through if shift amount is negative. |
935 __ SmiUntag(R0); | 929 __ SmiUntag(R0); |
936 __ CompareImmediate(R0, 0); | 930 __ CompareImmediate(R0, 0); |
937 __ b(&fall_through, LT); | 931 __ b(&fall_through, LT); |
938 | 932 |
939 // If shift amount is bigger than 31, set to 31. | 933 // If shift amount is bigger than 31, set to 31. |
940 __ CompareImmediate(R0, 0x1F); | 934 __ CompareImmediate(R0, 0x1F); |
941 __ LoadImmediate(R0, 0x1F, GT); | 935 __ LoadImmediate(R0, 0x1F, GT); |
942 __ SmiUntag(R1); | 936 __ SmiUntag(R1); |
943 __ mov(R0, ShifterOperand(R1, ASR, R0)); | 937 __ mov(R0, Operand(R1, ASR, R0)); |
944 __ SmiTag(R0); | 938 __ SmiTag(R0); |
945 __ Ret(); | 939 __ Ret(); |
946 __ Bind(&fall_through); | 940 __ Bind(&fall_through); |
947 } | 941 } |
948 | 942 |
949 | 943 |
950 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { | 944 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { |
951 __ ldr(R0, Address(SP, 0 * kWordSize)); | 945 __ ldr(R0, Address(SP, 0 * kWordSize)); |
952 __ mvn(R0, ShifterOperand(R0)); | 946 __ mvn(R0, Operand(R0)); |
953 __ bic(R0, R0, ShifterOperand(kSmiTagMask)); // Remove inverted smi-tag. | 947 __ bic(R0, R0, Operand(kSmiTagMask)); // Remove inverted smi-tag. |
954 __ Ret(); | 948 __ Ret(); |
955 } | 949 } |
956 | 950 |
957 | 951 |
958 void Intrinsifier::Smi_bitLength(Assembler* assembler) { | 952 void Intrinsifier::Smi_bitLength(Assembler* assembler) { |
959 // TODO(sra): Implement as word-length - CLZ. | 953 // TODO(sra): Implement as word-length - CLZ. |
960 } | 954 } |
961 | 955 |
962 | 956 |
963 // Check if the last argument is a double, jump to label 'is_smi' if smi | 957 // Check if the last argument is a double, jump to label 'is_smi' if smi |
964 // (easy to convert to double), otherwise jump to label 'not_double_smi'. | 958 // (easy to convert to double), otherwise jump to label 'not_double_smi'. |
965 // Returns the last argument in R0. | 959 // Returns the last argument in R0. |
966 static void TestLastArgumentIsDouble(Assembler* assembler, | 960 static void TestLastArgumentIsDouble(Assembler* assembler, |
967 Label* is_smi, | 961 Label* is_smi, |
968 Label* not_double_smi) { | 962 Label* not_double_smi) { |
969 __ ldr(R0, Address(SP, 0 * kWordSize)); | 963 __ ldr(R0, Address(SP, 0 * kWordSize)); |
970 __ tst(R0, ShifterOperand(kSmiTagMask)); | 964 __ tst(R0, Operand(kSmiTagMask)); |
971 __ b(is_smi, EQ); | 965 __ b(is_smi, EQ); |
972 __ CompareClassId(R0, kDoubleCid, R1); | 966 __ CompareClassId(R0, kDoubleCid, R1); |
973 __ b(not_double_smi, NE); | 967 __ b(not_double_smi, NE); |
974 // Fall through with Double in R0. | 968 // Fall through with Double in R0. |
975 } | 969 } |
976 | 970 |
977 | 971 |
978 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown | 972 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown |
979 // type. Return true or false object in the register R0. Any NaN argument | 973 // type. Return true or false object in the register R0. Any NaN argument |
980 // returns false. Any non-double arg1 causes control flow to fall through to the | 974 // returns false. Any non-double arg1 causes control flow to fall through to the |
(...skipping 100 matching lines...) |
1081 DoubleArithmeticOperations(assembler, Token::kDIV); | 1075 DoubleArithmeticOperations(assembler, Token::kDIV); |
1082 } | 1076 } |
1083 | 1077 |
1084 | 1078 |
1085 // Left is double, right is integer (Bigint, Mint or Smi). | 1079 // Left is double, right is integer (Bigint, Mint or Smi). |
1086 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { | 1080 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { |
1087 if (TargetCPUFeatures::vfp_supported()) { | 1081 if (TargetCPUFeatures::vfp_supported()) { |
1088 Label fall_through; | 1082 Label fall_through; |
1089 // Only smis allowed. | 1083 // Only smis allowed. |
1090 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1084 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1091 __ tst(R0, ShifterOperand(kSmiTagMask)); | 1085 __ tst(R0, Operand(kSmiTagMask)); |
1092 __ b(&fall_through, NE); | 1086 __ b(&fall_through, NE); |
1093 // Is Smi. | 1087 // Is Smi. |
1094 __ SmiUntag(R0); | 1088 __ SmiUntag(R0); |
1095 __ vmovsr(S0, R0); | 1089 __ vmovsr(S0, R0); |
1096 __ vcvtdi(D1, S0); | 1090 __ vcvtdi(D1, S0); |
1097 __ ldr(R0, Address(SP, 1 * kWordSize)); | 1091 __ ldr(R0, Address(SP, 1 * kWordSize)); |
1098 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1092 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1099 __ vmuld(D0, D0, D1); | 1093 __ vmuld(D0, D0, D1); |
1100 const Class& double_class = Class::Handle( | 1094 const Class& double_class = Class::Handle( |
1101 Isolate::Current()->object_store()->double_class()); | 1095 Isolate::Current()->object_store()->double_class()); |
1102 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. | 1096 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. |
1103 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1097 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1104 __ Ret(); | 1098 __ Ret(); |
1105 __ Bind(&fall_through); | 1099 __ Bind(&fall_through); |
1106 } | 1100 } |
1107 } | 1101 } |
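
[Reviewer note] The fast path above amounts to the following C++ sketch (argument order taken from the stack loads; the arithmetic shift stands in for SmiUntag under the kSmiTagShift == 1 convention asserted elsewhere in this file):

    #include <cstdint>

    // Untag the Smi, convert it to double (vcvtdi), multiply (vmuld).
    double DoubleMulFromSmi(double receiver, intptr_t tagged_smi) {
      intptr_t untagged = tagged_smi >> 1;              // SmiUntag
      return receiver * static_cast<double>(untagged);  // vcvtdi + vmuld
    }
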
1108 | 1102 |
1109 | 1103 |
1110 void Intrinsifier::Double_fromInteger(Assembler* assembler) { | 1104 void Intrinsifier::Double_fromInteger(Assembler* assembler) { |
1111 if (TargetCPUFeatures::vfp_supported()) { | 1105 if (TargetCPUFeatures::vfp_supported()) { |
1112 Label fall_through; | 1106 Label fall_through; |
1113 | 1107 |
1114 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1108 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1115 __ tst(R0, ShifterOperand(kSmiTagMask)); | 1109 __ tst(R0, Operand(kSmiTagMask)); |
1116 __ b(&fall_through, NE); | 1110 __ b(&fall_through, NE); |
1117 // Is Smi. | 1111 // Is Smi. |
1118 __ SmiUntag(R0); | 1112 __ SmiUntag(R0); |
1119 __ vmovsr(S0, R0); | 1113 __ vmovsr(S0, R0); |
1120 __ vcvtdi(D0, S0); | 1114 __ vcvtdi(D0, S0); |
1121 const Class& double_class = Class::Handle( | 1115 const Class& double_class = Class::Handle( |
1122 Isolate::Current()->object_store()->double_class()); | 1116 Isolate::Current()->object_store()->double_class()); |
1123 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. | 1117 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. |
1124 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1118 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1125 __ Ret(); | 1119 __ Ret(); |
(...skipping 31 matching lines...)
1157 __ LoadObject(R0, Bool::True()); | 1151 __ LoadObject(R0, Bool::True()); |
1158 __ Ret(); | 1152 __ Ret(); |
1159 | 1153 |
1160 __ Bind(&is_false); | 1154 __ Bind(&is_false); |
1161 __ LoadObject(R0, Bool::False()); | 1155 __ LoadObject(R0, Bool::False()); |
1162 __ Ret(); | 1156 __ Ret(); |
1163 | 1157 |
1164 __ Bind(&is_zero); | 1158 __ Bind(&is_zero); |
1165 // Check for negative zero by looking at the sign bit. | 1159 // Check for negative zero by looking at the sign bit. |
1166 __ vmovrrd(R0, R1, D0); // R1:R0 <- D0, so sign bit is in bit 31 of R1. | 1160 __ vmovrrd(R0, R1, D0); // R1:R0 <- D0, so sign bit is in bit 31 of R1. |
1167 __ mov(R1, ShifterOperand(R1, LSR, 31)); | 1161 __ mov(R1, Operand(R1, LSR, 31)); |
1168 __ tst(R1, ShifterOperand(1)); | 1162 __ tst(R1, Operand(1)); |
1169 __ b(&is_true, NE); // Sign bit set. | 1163 __ b(&is_true, NE); // Sign bit set. |
1170 __ b(&is_false); | 1164 __ b(&is_false); |
1171 } | 1165 } |
1172 } | 1166 } |
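
[Reviewer note] A VFP compare treats -0.0 as equal to 0.0, which is why the is_zero branch re-examines the raw bits: vmovrrd splits the double into two GPRs, and the sign is bit 31 of the high word. A minimal C++ sketch of that check:

    #include <cstdint>
    #include <cstring>

    // True for any value with the sign bit set, including -0.0.
    bool DoubleIsNegative(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // vmovrrd analogue
      return ((bits >> 63) & 1) != 0;            // bit 31 of the high word
    }
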
1173 | 1167 |
1174 | 1168 |
1175 void Intrinsifier::Double_toInt(Assembler* assembler) { | 1169 void Intrinsifier::Double_toInt(Assembler* assembler) { |
1176 if (TargetCPUFeatures::vfp_supported()) { | 1170 if (TargetCPUFeatures::vfp_supported()) { |
1177 Label fall_through; | 1171 Label fall_through; |
1178 | 1172 |
(...skipping 70 matching lines...)
1249 __ ldr(R1, FieldAddress(R0, state_field.Offset())); // Field '_state'. | 1243 __ ldr(R1, FieldAddress(R0, state_field.Offset())); // Field '_state'. |
1250 // Addresses of _state[0] and _state[1]. | 1244 // Addresses of _state[0] and _state[1]. |
1251 | 1245 |
1252 const int64_t disp_0 = Instance::DataOffsetFor(kTypedDataUint32ArrayCid); | 1246 const int64_t disp_0 = Instance::DataOffsetFor(kTypedDataUint32ArrayCid); |
1253 const int64_t disp_1 = disp_0 + | 1247 const int64_t disp_1 = disp_0 + |
1254 Instance::ElementSizeFor(kTypedDataUint32ArrayCid); | 1248 Instance::ElementSizeFor(kTypedDataUint32ArrayCid); |
1255 | 1249 |
1256 __ LoadImmediate(R0, a_int32_value); | 1250 __ LoadImmediate(R0, a_int32_value); |
1257 __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag); | 1251 __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag); |
1258 __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag); | 1252 __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag); |
1259 __ mov(R6, ShifterOperand(0)); // Zero extend unsigned _state[kSTATE_HI]. | 1253 __ mov(R6, Operand(0)); // Zero extend unsigned _state[kSTATE_HI]. |
1260 // Unsigned 32-bit multiply and 64-bit accumulate into R6:R3. | 1254 // Unsigned 32-bit multiply and 64-bit accumulate into R6:R3. |
1261 __ umlal(R3, R6, R0, R2); // R6:R3 <- R6:R3 + R0 * R2. | 1255 __ umlal(R3, R6, R0, R2); // R6:R3 <- R6:R3 + R0 * R2. |
1262 __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag); | 1256 __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag); |
1263 __ StoreToOffset(kWord, R6, R1, disp_1 - kHeapObjectTag); | 1257 __ StoreToOffset(kWord, R6, R1, disp_1 - kHeapObjectTag); |
1264 __ Ret(); | 1258 __ Ret(); |
1265 } | 1259 } |
1266 } | 1260 } |
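
[Reviewer note] The whole state update is a single unsigned 32x32->64 multiply-accumulate (umlal). As a hedged C++ sketch, with 'a' standing for the constant loaded into R0 (a_int32_value) and the lo/hi slot order taken from the disp_0/disp_1 loads above:

    #include <cstdint>

    // new64 = a * state[0] + state[1]; low word -> state[0], high -> state[1].
    void RandomNextState(uint32_t state[2], uint32_t a) {
      uint64_t acc = static_cast<uint64_t>(a) * state[0] + state[1];  // umlal
      state[0] = static_cast<uint32_t>(acc);
      state[1] = static_cast<uint32_t>(acc >> 32);
    }
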
1267 | 1261 |
1268 | 1262 |
1269 void Intrinsifier::Object_equal(Assembler* assembler) { | 1263 void Intrinsifier::Object_equal(Assembler* assembler) { |
1270 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1264 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1271 __ ldr(R1, Address(SP, 1 * kWordSize)); | 1265 __ ldr(R1, Address(SP, 1 * kWordSize)); |
1272 __ cmp(R0, ShifterOperand(R1)); | 1266 __ cmp(R0, Operand(R1)); |
1273 __ LoadObject(R0, Bool::False(), NE); | 1267 __ LoadObject(R0, Bool::False(), NE); |
1274 __ LoadObject(R0, Bool::True(), EQ); | 1268 __ LoadObject(R0, Bool::True(), EQ); |
1275 __ Ret(); | 1269 __ Ret(); |
1276 } | 1270 } |
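
[Reviewer note] This intrinsic is raw identity on the tagged words, which is also correct for Smis: two equal Smi values have bit-identical tagged representations. Sketch:

    #include <cstdint>

    bool ObjectEqual(uintptr_t raw_a, uintptr_t raw_b) {
      return raw_a == raw_b;  // cmp R0, Operand(R1)
    }
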
1277 | 1271 |
1278 | 1272 |
1279 void Intrinsifier::String_getHashCode(Assembler* assembler) { | 1273 void Intrinsifier::String_getHashCode(Assembler* assembler) { |
1280 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1274 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1281 __ ldr(R0, FieldAddress(R0, String::hash_offset())); | 1275 __ ldr(R0, FieldAddress(R0, String::hash_offset())); |
1282 __ cmp(R0, ShifterOperand(0)); | 1276 __ cmp(R0, Operand(0)); |
1283 __ bx(LR, NE); // Return if already computed. | 1277 __ bx(LR, NE); // Return if already computed. |
1284 } | 1278 } |
1285 | 1279 |
1286 | 1280 |
1287 void Intrinsifier::String_getLength(Assembler* assembler) { | 1281 void Intrinsifier::String_getLength(Assembler* assembler) { |
1288 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1282 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1289 __ ldr(R0, FieldAddress(R0, String::length_offset())); | 1283 __ ldr(R0, FieldAddress(R0, String::length_offset())); |
1290 __ Ret(); | 1284 __ Ret(); |
1291 } | 1285 } |
1292 | 1286 |
1293 | 1287 |
1294 void Intrinsifier::String_codeUnitAt(Assembler* assembler) { | 1288 void Intrinsifier::String_codeUnitAt(Assembler* assembler) { |
1295 Label fall_through, try_two_byte_string; | 1289 Label fall_through, try_two_byte_string; |
1296 | 1290 |
1297 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. | 1291 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. |
1298 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. | 1292 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. |
1299 __ tst(R1, ShifterOperand(kSmiTagMask)); | 1293 __ tst(R1, Operand(kSmiTagMask)); |
1300 __ b(&fall_through, NE); // Index is not a Smi. | 1294 __ b(&fall_through, NE); // Index is not a Smi. |
1301 // Range check. | 1295 // Range check. |
1302 __ ldr(R2, FieldAddress(R0, String::length_offset())); | 1296 __ ldr(R2, FieldAddress(R0, String::length_offset())); |
1303 __ cmp(R1, ShifterOperand(R2)); | 1297 __ cmp(R1, Operand(R2)); |
1304 __ b(&fall_through, CS); // Runtime throws exception. | 1298 __ b(&fall_through, CS); // Runtime throws exception. |
1305 __ CompareClassId(R0, kOneByteStringCid, R3); | 1299 __ CompareClassId(R0, kOneByteStringCid, R3); |
1306 __ b(&try_two_byte_string, NE); | 1300 __ b(&try_two_byte_string, NE); |
1307 __ SmiUntag(R1); | 1301 __ SmiUntag(R1); |
1308 __ AddImmediate(R0, OneByteString::data_offset() - kHeapObjectTag); | 1302 __ AddImmediate(R0, OneByteString::data_offset() - kHeapObjectTag); |
1309 __ ldrb(R0, Address(R0, R1)); | 1303 __ ldrb(R0, Address(R0, R1)); |
1310 __ SmiTag(R0); | 1304 __ SmiTag(R0); |
1311 __ Ret(); | 1305 __ Ret(); |
1312 | 1306 |
1313 __ Bind(&try_two_byte_string); | 1307 __ Bind(&try_two_byte_string); |
1314 __ CompareClassId(R0, kTwoByteStringCid, R3); | 1308 __ CompareClassId(R0, kTwoByteStringCid, R3); |
1315 __ b(&fall_through, NE); | 1309 __ b(&fall_through, NE); |
1316 ASSERT(kSmiTagShift == 1); | 1310 ASSERT(kSmiTagShift == 1); |
1317 __ AddImmediate(R0, TwoByteString::data_offset() - kHeapObjectTag); | 1311 __ AddImmediate(R0, TwoByteString::data_offset() - kHeapObjectTag); |
1318 __ ldrh(R0, Address(R0, R1)); | 1312 __ ldrh(R0, Address(R0, R1)); |
1319 __ SmiTag(R0); | 1313 __ SmiTag(R0); |
1320 __ Ret(); | 1314 __ Ret(); |
1321 | 1315 |
1322 __ Bind(&fall_through); | 1316 __ Bind(&fall_through); |
1323 } | 1317 } |
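
[Reviewer note] The two-byte path above skips SmiUntag on purpose: with kSmiTagShift == 1 a tagged Smi index already equals index * 2, which is exactly the byte offset of a 16-bit code unit. A C++ sketch of that trick:

    #include <cstdint>
    #include <cstring>

    // 'tagged_index' is the Smi as loaded from the stack (index << 1).
    uint16_t CodeUnitAtTwoByte(const uint8_t* data, intptr_t tagged_index) {
      uint16_t unit;
      std::memcpy(&unit, data + tagged_index, sizeof(unit));  // ldrh
      return unit;
    }
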
1324 | 1318 |
1325 | 1319 |
1326 void Intrinsifier::String_getIsEmpty(Assembler* assembler) { | 1320 void Intrinsifier::String_getIsEmpty(Assembler* assembler) { |
1327 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1321 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1328 __ ldr(R0, FieldAddress(R0, String::length_offset())); | 1322 __ ldr(R0, FieldAddress(R0, String::length_offset())); |
1329 __ cmp(R0, ShifterOperand(Smi::RawValue(0))); | 1323 __ cmp(R0, Operand(Smi::RawValue(0))); |
1330 __ LoadObject(R0, Bool::True(), EQ); | 1324 __ LoadObject(R0, Bool::True(), EQ); |
1331 __ LoadObject(R0, Bool::False(), NE); | 1325 __ LoadObject(R0, Bool::False(), NE); |
1332 __ Ret(); | 1326 __ Ret(); |
1333 } | 1327 } |
1334 | 1328 |
1335 | 1329 |
1336 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { | 1330 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { |
1337 __ ldr(R1, Address(SP, 0 * kWordSize)); | 1331 __ ldr(R1, Address(SP, 0 * kWordSize)); |
1338 __ ldr(R0, FieldAddress(R1, String::hash_offset())); | 1332 __ ldr(R0, FieldAddress(R1, String::hash_offset())); |
1339 __ cmp(R0, ShifterOperand(0)); | 1333 __ cmp(R0, Operand(0)); |
1340 __ bx(LR, NE); // Return if already computed. | 1334 __ bx(LR, NE); // Return if already computed. |
1341 | 1335 |
1342 __ ldr(R2, FieldAddress(R1, String::length_offset())); | 1336 __ ldr(R2, FieldAddress(R1, String::length_offset())); |
1343 | 1337 |
1344 Label done; | 1338 Label done; |
1345 // If the string is empty, set the hash to 1, and return. | 1339 // If the string is empty, set the hash to 1, and return. |
1346 __ cmp(R2, ShifterOperand(Smi::RawValue(0))); | 1340 __ cmp(R2, Operand(Smi::RawValue(0))); |
1347 __ b(&done, EQ); | 1341 __ b(&done, EQ); |
1348 | 1342 |
1349 __ SmiUntag(R2); | 1343 __ SmiUntag(R2); |
1350 __ mov(R3, ShifterOperand(0)); | 1344 __ mov(R3, Operand(0)); |
1351 __ AddImmediate(R6, R1, OneByteString::data_offset() - kHeapObjectTag); | 1345 __ AddImmediate(R6, R1, OneByteString::data_offset() - kHeapObjectTag); |
1352 // R1: Instance of OneByteString. | 1346 // R1: Instance of OneByteString. |
1353 // R2: String length, untagged integer. | 1347 // R2: String length, untagged integer. |
1354 // R3: Loop counter, untagged integer. | 1348 // R3: Loop counter, untagged integer. |
1355 // R6: String data. | 1349 // R6: String data. |
1356 // R0: Hash code, untagged integer. | 1350 // R0: Hash code, untagged integer. |
1357 | 1351 |
1358 Label loop; | 1352 Label loop; |
1359 // Add to hash code (hash_ is uint32): | 1353 // Add to hash code (hash_ is uint32): |
1360 // hash_ += ch; | 1354 // hash_ += ch; |
1361 // hash_ += hash_ << 10; | 1355 // hash_ += hash_ << 10; |
1362 // hash_ ^= hash_ >> 6; | 1356 // hash_ ^= hash_ >> 6; |
1363 // Get one character (ch). | 1357 // Get one character (ch). |
1364 __ Bind(&loop); | 1358 __ Bind(&loop); |
1365 __ ldrb(R7, Address(R6, 0)); | 1359 __ ldrb(R7, Address(R6, 0)); |
1366 // R7: ch. | 1360 // R7: ch. |
1367 __ add(R3, R3, ShifterOperand(1)); | 1361 __ add(R3, R3, Operand(1)); |
1368 __ add(R6, R6, ShifterOperand(1)); | 1362 __ add(R6, R6, Operand(1)); |
1369 __ add(R0, R0, ShifterOperand(R7)); | 1363 __ add(R0, R0, Operand(R7)); |
1370 __ add(R0, R0, ShifterOperand(R0, LSL, 10)); | 1364 __ add(R0, R0, Operand(R0, LSL, 10)); |
1371 __ eor(R0, R0, ShifterOperand(R0, LSR, 6)); | 1365 __ eor(R0, R0, Operand(R0, LSR, 6)); |
1372 __ cmp(R3, ShifterOperand(R2)); | 1366 __ cmp(R3, Operand(R2)); |
1373 __ b(&loop, NE); | 1367 __ b(&loop, NE); |
1374 | 1368 |
1375 // Finalize. | 1369 // Finalize. |
1376 // hash_ += hash_ << 3; | 1370 // hash_ += hash_ << 3; |
1377 // hash_ ^= hash_ >> 11; | 1371 // hash_ ^= hash_ >> 11; |
1378 // hash_ += hash_ << 15; | 1372 // hash_ += hash_ << 15; |
1379 __ add(R0, R0, ShifterOperand(R0, LSL, 3)); | 1373 __ add(R0, R0, Operand(R0, LSL, 3)); |
1380 __ eor(R0, R0, ShifterOperand(R0, LSR, 11)); | 1374 __ eor(R0, R0, Operand(R0, LSR, 11)); |
1381 __ add(R0, R0, ShifterOperand(R0, LSL, 15)); | 1375 __ add(R0, R0, Operand(R0, LSL, 15)); |
1382 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); | 1376 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); |
1383 __ LoadImmediate(R2, (static_cast<intptr_t>(1) << String::kHashBits) - 1); | 1377 __ LoadImmediate(R2, (static_cast<intptr_t>(1) << String::kHashBits) - 1); |
1384 __ and_(R0, R0, ShifterOperand(R2)); | 1378 __ and_(R0, R0, Operand(R2)); |
1385 __ cmp(R0, ShifterOperand(0)); | 1379 __ cmp(R0, Operand(0)); |
1386 // return hash_ == 0 ? 1 : hash_; | 1380 // return hash_ == 0 ? 1 : hash_; |
1387 __ Bind(&done); | 1381 __ Bind(&done); |
1388 __ mov(R0, ShifterOperand(1), EQ); | 1382 __ mov(R0, Operand(1), EQ); |
1389 __ SmiTag(R0); | 1383 __ SmiTag(R0); |
1390 __ str(R0, FieldAddress(R1, String::hash_offset())); | 1384 __ str(R0, FieldAddress(R1, String::hash_offset())); |
1391 __ Ret(); | 1385 __ Ret(); |
1392 } | 1386 } |
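
[Reviewer note] For reference, the hash computed above is a Jenkins-style one-at-a-time variant, truncated to String::kHashBits, with 0 reserved to mean "not yet computed". A C++ sketch under those assumptions:

    #include <cstdint>

    uint32_t OneByteStringHash(const uint8_t* data, intptr_t length,
                               int hash_bits) {  // pass String::kHashBits
      uint32_t hash = 0;
      for (intptr_t i = 0; i < length; i++) {
        hash += data[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (static_cast<uint32_t>(1) << hash_bits) - 1;
      return (hash == 0) ? 1 : hash;  // 0 is the "unset" sentinel
    }

The empty-string shortcut above lands on the same result: a zero hash finalizes to zero and is then mapped to 1.
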
1393 | 1387 |
1394 | 1388 |
1395 // Allocates one-byte string of length 'end - start'. The content is not | 1389 // Allocates one-byte string of length 'end - start'. The content is not |
1396 // initialized. | 1390 // initialized. |
1397 // 'length-reg' (R2) contains tagged length. | 1391 // 'length-reg' (R2) contains tagged length. |
1398 // Returns new string as tagged pointer in R0. | 1392 // Returns new string as tagged pointer in R0. |
1399 static void TryAllocateOnebyteString(Assembler* assembler, | 1393 static void TryAllocateOnebyteString(Assembler* assembler, |
1400 Label* ok, | 1394 Label* ok, |
1401 Label* failure) { | 1395 Label* failure) { |
1402 const Register length_reg = R2; | 1396 const Register length_reg = R2; |
1403 Label fail; | 1397 Label fail; |
1404 | 1398 |
1405 __ mov(R6, ShifterOperand(length_reg)); // Save the length register. | 1399 __ mov(R6, Operand(length_reg)); // Save the length register. |
1406 __ SmiUntag(length_reg); | 1400 __ SmiUntag(length_reg); |
1407 const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1; | 1401 const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1; |
1408 __ AddImmediate(length_reg, fixed_size); | 1402 __ AddImmediate(length_reg, fixed_size); |
1409 __ bic(length_reg, length_reg, ShifterOperand(kObjectAlignment - 1)); | 1403 __ bic(length_reg, length_reg, Operand(kObjectAlignment - 1)); |
1410 | 1404 |
1411 Isolate* isolate = Isolate::Current(); | 1405 Isolate* isolate = Isolate::Current(); |
1412 Heap* heap = isolate->heap(); | 1406 Heap* heap = isolate->heap(); |
1413 | 1407 |
1414 __ LoadImmediate(R3, heap->TopAddress()); | 1408 __ LoadImmediate(R3, heap->TopAddress()); |
1415 __ ldr(R0, Address(R3, 0)); | 1409 __ ldr(R0, Address(R3, 0)); |
1416 | 1410 |
1417 // length_reg: allocation size. | 1411 // length_reg: allocation size. |
1418 __ adds(R1, R0, ShifterOperand(length_reg)); | 1412 __ adds(R1, R0, Operand(length_reg)); |
1419 __ b(&fail, VS); // Fail on overflow. | 1413 __ b(&fail, VS); // Fail on overflow. |
1420 | 1414 |
1421 // Check if the allocation fits into the remaining space. | 1415 // Check if the allocation fits into the remaining space. |
1422 // R0: potential new object start. | 1416 // R0: potential new object start. |
1423 // R1: potential next object start. | 1417 // R1: potential next object start. |
1424 // R2: allocation size. | 1418 // R2: allocation size. |
1425 // R3: heap->TopAddress(). | 1419 // R3: heap->TopAddress(). |
1426 __ LoadImmediate(R7, heap->EndAddress()); | 1420 __ LoadImmediate(R7, heap->EndAddress()); |
1427 __ ldr(R7, Address(R7, 0)); | 1421 __ ldr(R7, Address(R7, 0)); |
1428 __ cmp(R1, ShifterOperand(R7)); | 1422 __ cmp(R1, Operand(R7)); |
1429 __ b(&fail, CS); | 1423 __ b(&fail, CS); |
1430 | 1424 |
1431 // Successfully allocated the object(s), now update top to point to | 1425 // Successfully allocated the object(s), now update top to point to |
1432 // next object start and initialize the object. | 1426 // next object start and initialize the object. |
1433 __ str(R1, Address(R3, 0)); | 1427 __ str(R1, Address(R3, 0)); |
1434 __ AddImmediate(R0, kHeapObjectTag); | 1428 __ AddImmediate(R0, kHeapObjectTag); |
1435 __ UpdateAllocationStatsWithSize(kOneByteStringCid, R2, R3); | 1429 __ UpdateAllocationStatsWithSize(kOneByteStringCid, R2, R3); |
1436 | 1430 |
1437 // Initialize the tags. | 1431 // Initialize the tags. |
1438 // R0: new object start as a tagged pointer. | 1432 // R0: new object start as a tagged pointer. |
1439 // R1: new object end address. | 1433 // R1: new object end address. |
1440 // R2: allocation size. | 1434 // R2: allocation size. |
1441 { | 1435 { |
1442 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 1436 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
1443 const Class& cls = | 1437 const Class& cls = |
1444 Class::Handle(isolate->object_store()->one_byte_string_class()); | 1438 Class::Handle(isolate->object_store()->one_byte_string_class()); |
1445 | 1439 |
1446 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); | 1440 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); |
1447 __ mov(R2, ShifterOperand(R2, LSL, shift), LS); | 1441 __ mov(R2, Operand(R2, LSL, shift), LS); |
1448 __ mov(R2, ShifterOperand(0), HI); | 1442 __ mov(R2, Operand(0), HI); |
1449 | 1443 |
1450 // Get the class index and insert it into the tags. | 1444 // Get the class index and insert it into the tags. |
1451 // R2: size and bit tags. | 1445 // R2: size and bit tags. |
1452 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cls.id())); | 1446 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cls.id())); |
1453 __ orr(R2, R2, ShifterOperand(TMP)); | 1447 __ orr(R2, R2, Operand(TMP)); |
1454 __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags. | 1448 __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags. |
1455 } | 1449 } |
1456 | 1450 |
1457 // Set the length field using the saved length (R6). | 1451 // Set the length field using the saved length (R6). |
1458 __ StoreIntoObjectNoBarrier(R0, | 1452 __ StoreIntoObjectNoBarrier(R0, |
1459 FieldAddress(R0, String::length_offset()), | 1453 FieldAddress(R0, String::length_offset()), |
1460 R6); | 1454 R6); |
1461 // Clear hash. | 1455 // Clear hash. |
1462 __ LoadImmediate(TMP, 0); | 1456 __ LoadImmediate(TMP, 0); |
1463 __ str(TMP, FieldAddress(R0, String::hash_offset())); | 1457 __ str(TMP, FieldAddress(R0, String::hash_offset())); |
1464 __ b(ok); | 1458 __ b(ok); |
1465 | 1459 |
1466 __ Bind(&fail); | 1460 __ Bind(&fail); |
1467 __ b(failure); | 1461 __ b(failure); |
1468 } | 1462 } |
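
[Reviewer note] Two pieces of arithmetic here are worth spelling out: the allocation size (header plus payload, rounded up to kObjectAlignment via bic), and the bump-pointer check (fail on add overflow, b VS, or when the new top reaches the heap end, b CS). A hedged C++ sketch of both; sizeof(RawString) and kObjectAlignment are passed in by the caller:

    #include <cstdint>

    intptr_t OneByteStringAllocationSize(intptr_t untagged_length,
                                         intptr_t header_size,
                                         intptr_t alignment) {
      intptr_t size = untagged_length + header_size + (alignment - 1);
      return size & ~(alignment - 1);  // bic of the low alignment bits
    }

    // Returns false when the allocation cannot be satisfied from new space.
    bool TryBumpAllocate(uintptr_t top, uintptr_t end, uintptr_t size,
                         uintptr_t* new_top) {
      uintptr_t next = top + size;
      if (next < top) return false;   // overflow (adds ... / b VS)
      if (next >= end) return false;  // no room (cmp R1, R7 / b CS)
      *new_top = next;
      return true;
    }
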
1469 | 1463 |
1470 | 1464 |
1471 // Arg0: OneByteString (receiver). | 1465 // Arg0: OneByteString (receiver). |
1472 // Arg1: Start index as Smi. | 1466 // Arg1: Start index as Smi. |
1473 // Arg2: End index as Smi. | 1467 // Arg2: End index as Smi. |
1474 // The indexes must be valid. | 1468 // The indexes must be valid. |
1475 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { | 1469 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { |
1476 const intptr_t kStringOffset = 2 * kWordSize; | 1470 const intptr_t kStringOffset = 2 * kWordSize; |
1477 const intptr_t kStartIndexOffset = 1 * kWordSize; | 1471 const intptr_t kStartIndexOffset = 1 * kWordSize; |
1478 const intptr_t kEndIndexOffset = 0 * kWordSize; | 1472 const intptr_t kEndIndexOffset = 0 * kWordSize; |
1479 Label fall_through, ok; | 1473 Label fall_through, ok; |
1480 | 1474 |
1481 __ ldr(R2, Address(SP, kEndIndexOffset)); | 1475 __ ldr(R2, Address(SP, kEndIndexOffset)); |
1482 __ ldr(TMP, Address(SP, kStartIndexOffset)); | 1476 __ ldr(TMP, Address(SP, kStartIndexOffset)); |
1483 __ orr(R3, R2, ShifterOperand(TMP)); | 1477 __ orr(R3, R2, Operand(TMP)); |
1484 __ tst(R3, ShifterOperand(kSmiTagMask)); | 1478 __ tst(R3, Operand(kSmiTagMask)); |
1485 __ b(&fall_through, NE); // 'start', 'end' not Smi. | 1479 __ b(&fall_through, NE); // 'start', 'end' not Smi. |
1486 | 1480 |
1487 __ sub(R2, R2, ShifterOperand(TMP)); | 1481 __ sub(R2, R2, Operand(TMP)); |
1488 TryAllocateOnebyteString(assembler, &ok, &fall_through); | 1482 TryAllocateOnebyteString(assembler, &ok, &fall_through); |
1489 __ Bind(&ok); | 1483 __ Bind(&ok); |
1490 // R0: new string as tagged pointer. | 1484 // R0: new string as tagged pointer. |
1491 // Copy string. | 1485 // Copy string. |
1492 __ ldr(R3, Address(SP, kStringOffset)); | 1486 __ ldr(R3, Address(SP, kStringOffset)); |
1493 __ ldr(R1, Address(SP, kStartIndexOffset)); | 1487 __ ldr(R1, Address(SP, kStartIndexOffset)); |
1494 __ SmiUntag(R1); | 1488 __ SmiUntag(R1); |
1495 __ add(R3, R3, ShifterOperand(R1)); | 1489 __ add(R3, R3, Operand(R1)); |
1496 // Calculate start address and untag (- 1). | 1490 // Calculate start address and untag (- 1). |
1497 __ AddImmediate(R3, OneByteString::data_offset() - 1); | 1491 __ AddImmediate(R3, OneByteString::data_offset() - 1); |
1498 | 1492 |
1499 // R3: Start address to copy from (untagged). | 1493 // R3: Start address to copy from (untagged). |
1500 // R1: Untagged start index. | 1494 // R1: Untagged start index. |
1501 __ ldr(R2, Address(SP, kEndIndexOffset)); | 1495 __ ldr(R2, Address(SP, kEndIndexOffset)); |
1502 __ SmiUntag(R2); | 1496 __ SmiUntag(R2); |
1503 __ sub(R2, R2, ShifterOperand(R1)); | 1497 __ sub(R2, R2, Operand(R1)); |
1504 | 1498 |
1505 // R3: Start address to copy from (untagged). | 1499 // R3: Start address to copy from (untagged). |
1506 // R2: Untagged number of bytes to copy. | 1500 // R2: Untagged number of bytes to copy. |
1507 // R0: Tagged result string. | 1501 // R0: Tagged result string. |
1508 // R6: Pointer into R3. | 1502 // R6: Pointer into R3. |
1509 // R7: Pointer into R0. | 1503 // R7: Pointer into R0. |
1510 // R1: Scratch register. | 1504 // R1: Scratch register. |
1511 Label loop, done; | 1505 Label loop, done; |
1512 __ cmp(R2, ShifterOperand(0)); | 1506 __ cmp(R2, Operand(0)); |
1513 __ b(&done, LE); | 1507 __ b(&done, LE); |
1514 __ mov(R6, ShifterOperand(R3)); | 1508 __ mov(R6, Operand(R3)); |
1515 __ mov(R7, ShifterOperand(R0)); | 1509 __ mov(R7, Operand(R0)); |
1516 __ Bind(&loop); | 1510 __ Bind(&loop); |
1517 __ ldrb(R1, Address(R6, 0)); | 1511 __ ldrb(R1, Address(R6, 0)); |
1518 __ AddImmediate(R6, 1); | 1512 __ AddImmediate(R6, 1); |
1519 __ sub(R2, R2, ShifterOperand(1)); | 1513 __ sub(R2, R2, Operand(1)); |
1520 __ cmp(R2, ShifterOperand(0)); | 1514 __ cmp(R2, Operand(0)); |
1521 __ strb(R1, FieldAddress(R7, OneByteString::data_offset())); | 1515 __ strb(R1, FieldAddress(R7, OneByteString::data_offset())); |
1522 __ AddImmediate(R7, 1); | 1516 __ AddImmediate(R7, 1); |
1523 __ b(&loop, GT); | 1517 __ b(&loop, GT); |
1524 | 1518 |
1525 __ Bind(&done); | 1519 __ Bind(&done); |
1526 __ Ret(); | 1520 __ Ret(); |
1527 __ Bind(&fall_through); | 1521 __ Bind(&fall_through); |
1528 } | 1522 } |
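
[Reviewer note] Two details above merit a note: the orr/tst pair checks both indices' Smi tags with a single test, and the copy itself is a plain byte-at-a-time loop. A C++ sketch of the copy, with 'src' already advanced to the start index just as R3 is above:

    #include <cstdint>

    void CopyOneByteRange(uint8_t* dst, const uint8_t* src, intptr_t count) {
      while (count-- > 0) {  // sub R2, 1 / b GT
        *dst++ = *src++;     // ldrb / strb
      }
    }
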
1529 | 1523 |
1530 | 1524 |
(...skipping 21 matching lines...)
1552 } | 1546 } |
1553 | 1547 |
1554 | 1548 |
1555 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). | 1549 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). |
1556 void StringEquality(Assembler* assembler, intptr_t string_cid) { | 1550 void StringEquality(Assembler* assembler, intptr_t string_cid) { |
1557 Label fall_through, is_true, is_false, loop; | 1551 Label fall_through, is_true, is_false, loop; |
1558 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. | 1552 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. |
1559 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. | 1553 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. |
1560 | 1554 |
1561 // Are they identical? | 1555 // Are they identical? |
1562 __ cmp(R0, ShifterOperand(R1)); | 1556 __ cmp(R0, Operand(R1)); |
1563 __ b(&is_true, EQ); | 1557 __ b(&is_true, EQ); |
1564 | 1558 |
1565 // Is other a string of the same class (string_cid)? | 1559 // Is other a string of the same class (string_cid)? |
1566 __ tst(R1, ShifterOperand(kSmiTagMask)); | 1560 __ tst(R1, Operand(kSmiTagMask)); |
1567 __ b(&fall_through, EQ); | 1561 __ b(&fall_through, EQ); |
1568 __ CompareClassId(R1, string_cid, R2); | 1562 __ CompareClassId(R1, string_cid, R2); |
1569 __ b(&fall_through, NE); | 1563 __ b(&fall_through, NE); |
1570 | 1564 |
1571 // Have same length? | 1565 // Have same length? |
1572 __ ldr(R2, FieldAddress(R0, String::length_offset())); | 1566 __ ldr(R2, FieldAddress(R0, String::length_offset())); |
1573 __ ldr(R3, FieldAddress(R1, String::length_offset())); | 1567 __ ldr(R3, FieldAddress(R1, String::length_offset())); |
1574 __ cmp(R2, ShifterOperand(R3)); | 1568 __ cmp(R2, Operand(R3)); |
1575 __ b(&is_false, NE); | 1569 __ b(&is_false, NE); |
1576 | 1570 |
1577 // Check contents, no fall-through possible. | 1571 // Check contents, no fall-through possible. |
1578 // TODO(zra): try out other sequences. | 1572 // TODO(zra): try out other sequences. |
1579 ASSERT((string_cid == kOneByteStringCid) || | 1573 ASSERT((string_cid == kOneByteStringCid) || |
1580 (string_cid == kTwoByteStringCid)); | 1574 (string_cid == kTwoByteStringCid)); |
1581 const intptr_t offset = (string_cid == kOneByteStringCid) ? | 1575 const intptr_t offset = (string_cid == kOneByteStringCid) ? |
1582 OneByteString::data_offset() : TwoByteString::data_offset(); | 1576 OneByteString::data_offset() : TwoByteString::data_offset(); |
1583 __ AddImmediate(R0, offset - kHeapObjectTag); | 1577 __ AddImmediate(R0, offset - kHeapObjectTag); |
1584 __ AddImmediate(R1, offset - kHeapObjectTag); | 1578 __ AddImmediate(R1, offset - kHeapObjectTag); |
1585 __ SmiUntag(R2); | 1579 __ SmiUntag(R2); |
1586 __ Bind(&loop); | 1580 __ Bind(&loop); |
1587 __ AddImmediate(R2, -1); | 1581 __ AddImmediate(R2, -1); |
1588 __ cmp(R2, ShifterOperand(0)); | 1582 __ cmp(R2, Operand(0)); |
1589 __ b(&is_true, LT); | 1583 __ b(&is_true, LT); |
1590 if (string_cid == kOneByteStringCid) { | 1584 if (string_cid == kOneByteStringCid) { |
1591 __ ldrb(R3, Address(R0)); | 1585 __ ldrb(R3, Address(R0)); |
1592 __ ldrb(R4, Address(R1)); | 1586 __ ldrb(R4, Address(R1)); |
1593 __ AddImmediate(R0, 1); | 1587 __ AddImmediate(R0, 1); |
1594 __ AddImmediate(R1, 1); | 1588 __ AddImmediate(R1, 1); |
1595 } else if (string_cid == kTwoByteStringCid) { | 1589 } else if (string_cid == kTwoByteStringCid) { |
1596 __ ldrh(R3, Address(R0)); | 1590 __ ldrh(R3, Address(R0)); |
1597 __ ldrh(R4, Address(R1)); | 1591 __ ldrh(R4, Address(R1)); |
1598 __ AddImmediate(R0, 2); | 1592 __ AddImmediate(R0, 2); |
1599 __ AddImmediate(R1, 2); | 1593 __ AddImmediate(R1, 2); |
1600 } else { | 1594 } else { |
1601 UNIMPLEMENTED(); | 1595 UNIMPLEMENTED(); |
1602 } | 1596 } |
1603 __ cmp(R3, ShifterOperand(R4)); | 1597 __ cmp(R3, Operand(R4)); |
1604 __ b(&is_false, NE); | 1598 __ b(&is_false, NE); |
1605 __ b(&loop); | 1599 __ b(&loop); |
1606 | 1600 |
1607 __ Bind(&is_true); | 1601 __ Bind(&is_true); |
1608 __ LoadObject(R0, Bool::True()); | 1602 __ LoadObject(R0, Bool::True()); |
1609 __ Ret(); | 1603 __ Ret(); |
1610 | 1604 |
1611 __ Bind(&is_false); | 1605 __ Bind(&is_false); |
1612 __ LoadObject(R0, Bool::False()); | 1606 __ LoadObject(R0, Bool::False()); |
1613 __ Ret(); | 1607 __ Ret(); |
(...skipping 47 matching lines...)
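
[Reviewer note] Stripped of the register bookkeeping, StringEquality's fast path is: identity, then class and length checks, then an element-wise compare. A hedged C++ sketch of the loop's semantics (the assembly counts down rather than up, but the result is the same):

    #include <cstdint>

    template <typename CodeUnit>  // uint8_t or uint16_t, chosen by string_cid
    bool StringContentsEqual(const CodeUnit* a, const CodeUnit* b,
                             intptr_t length) {
      for (intptr_t i = 0; i < length; i++) {
        if (a[i] != b[i]) return false;  // cmp R3, R4 / b is_false, NE
      }
      return true;
    }
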
1661 Isolate* isolate = Isolate::Current(); | 1655 Isolate* isolate = Isolate::Current(); |
1662 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate)); | 1656 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate)); |
1663 // Set return value to Isolate::current_tag_. | 1657 // Set return value to Isolate::current_tag_. |
1664 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); | 1658 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); |
1665 __ Ret(); | 1659 __ Ret(); |
1666 } | 1660 } |
1667 | 1661 |
1668 } // namespace dart | 1662 } // namespace dart |
1669 | 1663 |
1670 #endif // defined TARGET_ARCH_ARM | 1664 #endif // defined TARGET_ARCH_ARM |