OLD | NEW |
---|---|
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
11 #include "vm/flow_graph_compiler.h" | 11 #include "vm/flow_graph_compiler.h" |
12 #include "vm/object.h" | 12 #include "vm/object.h" |
13 #include "vm/object_store.h" | 13 #include "vm/object_store.h" |
14 #include "vm/symbols.h" | 14 #include "vm/symbols.h" |
15 | 15 |
16 namespace dart { | 16 namespace dart { |
17 | 17 |
 | 18 DECLARE_FLAG(bool, enable_type_checks); |
 | 19 |
18 #define __ assembler-> | 20 #define __ assembler-> |
19 | 21 |
20 | 22 |
21 void Intrinsifier::Array_getLength(Assembler* assembler) { | 23 void Intrinsifier::Array_getLength(Assembler* assembler) { |
22 return; | 24 __ ldr(R0, Address(SP, 0 * kWordSize)); |
25 __ ldr(R0, FieldAddress(R0, Array::length_offset())); | |
26 __ ret(); | |
23 } | 27 } |
24 | 28 |
25 | 29 |
26 void Intrinsifier::ImmutableList_getLength(Assembler* assembler) { | 30 void Intrinsifier::ImmutableList_getLength(Assembler* assembler) { |
27 return; | 31 Array_getLength(assembler); |
28 } | 32 } |
29 | 33 |
30 | 34 |
31 void Intrinsifier::Array_getIndexed(Assembler* assembler) { | 35 void Intrinsifier::Array_getIndexed(Assembler* assembler) { |
32 return; | 36 Label fall_through; |
37 | |
38 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index. | 
39 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array. | 
40 | |
41 __ tsti(R0, kSmiTagMask); | |
42 __ b(&fall_through, NE); // Index is not a Smi, fall through. | 
regis 2014/05/19 20:14:02: period
zra 2014/05/19 21:13:18: Done. | |
43 | |
44 // Range check. | 
45 __ ldr(R6, FieldAddress(R1, Array::length_offset())); | |
46 __ cmp(R0, Operand(R6)); | |
47 __ b(&fall_through, CS); | |
48 | |
49 ASSERT(kSmiTagShift == 1); | |
50 // Array element at R1 + R0*4 + Array::data_offset - 1. | 
51 __ add(R6, R1, Operand(R0, LSL, 2)); | |
52 __ ldr(R0, FieldAddress(R6, Array::data_offset())); | |
53 __ ret(); | |
54 __ Bind(&fall_through); | |
33 } | 55 } |
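
The LSL 2 above relies on Smi tagging: a Smi is the value shifted left by one (kSmiTagShift == 1), so shifting the tagged index left by two more scales it by kWordSize (8), and FieldAddress folds in the -1 that removes kHeapObjectTag. A minimal C++ sketch of that address arithmetic (illustrative, not part of the CL):

    #include <cstdint>

    // Byte offset of element 'tagged_index' from a tagged array pointer,
    // assuming kSmiTagShift == 1, kWordSize == 8, kHeapObjectTag == 1.
    int64_t ElementOffset(int64_t tagged_index, int64_t data_offset) {
      // tagged_index == value << 1, so shifting left 2 more gives value * 8.
      return (tagged_index << 2) + data_offset - 1;  // -1 untags the pointer.
    }
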
34 | 56 |
35 | 57 |
36 void Intrinsifier::ImmutableList_getIndexed(Assembler* assembler) { | 58 void Intrinsifier::ImmutableList_getIndexed(Assembler* assembler) { |
37 return; | 59 Array_getIndexed(assembler); |
38 } | 60 } |
39 | 61 |
40 | 62 |
63 static intptr_t ComputeObjectArrayTypeArgumentsOffset() { | |
64 const Library& core_lib = Library::Handle(Library::CoreLibrary()); | |
65 const Class& cls = Class::Handle( | |
66 core_lib.LookupClassAllowPrivate(Symbols::_List())); | |
67 ASSERT(!cls.IsNull()); | |
68 ASSERT(cls.NumTypeArguments() == 1); | |
69 const intptr_t field_offset = cls.type_arguments_field_offset(); | |
70 ASSERT(field_offset != Class::kNoTypeArguments); | |
71 return field_offset; | |
72 } | |
73 | |
74 | |
75 // Intrinsify only for Smi value and index. Non-smi values need a store buffer | |
76 // update. Array length is always a Smi. | |
41 void Intrinsifier::Array_setIndexed(Assembler* assembler) { | 77 void Intrinsifier::Array_setIndexed(Assembler* assembler) { |
42 return; | 78 Label fall_through; |
79 | |
80 if (FLAG_enable_type_checks) { | |
81 const intptr_t type_args_field_offset = | |
82 ComputeObjectArrayTypeArgumentsOffset(); | |
83 // Inline simple tests (Smi, null); fall through if not positive. | 
84 Label checked_ok; | |
85 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | |
86 | |
87 // Null value is valid for any type. | |
88 __ CompareObject(R2, Object::null_object(), PP); | |
89 __ b(&checked_ok, EQ); | |
90 | |
91 __ ldr(R1, Address(SP, 2 * kWordSize)); // Array. | |
92 __ ldr(R1, FieldAddress(R1, type_args_field_offset)); | |
93 | |
94 // R1: Type arguments of array. | |
95 __ CompareObject(R1, Object::null_object(), PP); | |
96 __ b(&checked_ok, EQ); | |
97 | |
98 // Check if it's dynamic. | |
99 // Get type at index 0. | |
100 __ ldr(R0, FieldAddress(R1, TypeArguments::type_at_offset(0))); | |
101 __ CompareObject(R0, Type::ZoneHandle(Type::DynamicType()), PP); | |
102 __ b(&checked_ok, EQ); | |
103 | |
104 // Check for int and num. | |
105 __ tsti(R2, kSmiTagMask); // Value is Smi? | |
106 __ b(&fall_through, NE); // Non-smi value. | |
107 __ CompareObject(R0, Type::ZoneHandle(Type::IntType()), PP); | |
108 __ b(&checked_ok, EQ); | |
109 __ CompareObject(R0, Type::ZoneHandle(Type::Number()), PP); | |
110 __ b(&fall_through, NE); | |
111 __ Bind(&checked_ok); | |
112 } | |
113 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | |
114 __ tsti(R1, kSmiTagMask); | |
115 // Index not Smi. | |
116 __ b(&fall_through, NE); | |
117 __ ldr(R0, Address(SP, 2 * kWordSize)); // Array. | |
118 | |
119 // Range check. | |
120 __ ldr(R3, FieldAddress(R0, Array::length_offset())); // Array length. | |
121 __ cmp(R1, Operand(R3)); | |
122 // Runtime throws exception. | |
123 __ b(&fall_through, CS); | |
124 | |
125 // Note that R1 is Smi, i.e., times 2. | 
126 ASSERT(kSmiTagShift == 1); | |
127 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | |
128 __ add(R1, R0, Operand(R1, LSL, 2)); // R1 is Smi. | |
129 __ StoreIntoObject(R0, | |
130 FieldAddress(R1, Array::data_offset()), | |
131 R2); | |
132 // Caller is responsible for preserving the value if necessary. | |
133 __ ret(); | |
134 __ Bind(&fall_through); | |
43 } | 135 } |
44 | 136 |
45 | 137 |
46 // Allocate a GrowableObjectArray using the backing array specified. | 138 // Allocate a GrowableObjectArray using the backing array specified. |
47 // On stack: type argument (+1), data (+0). | 139 // On stack: type argument (+1), data (+0). |
48 void Intrinsifier::GrowableList_Allocate(Assembler* assembler) { | 140 void Intrinsifier::GrowableList_Allocate(Assembler* assembler) { |
49 return; | 141 // The newly allocated object is returned in R0. |
142 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; | |
143 const intptr_t kArrayOffset = 0 * kWordSize; | |
144 Label fall_through; | |
145 | |
146 // Compute the size to be allocated; it is based on the array length | 
147 // and is computed as: | 
148 // RoundedAllocationSize(sizeof(RawGrowableObjectArray)). | 
149 intptr_t fixed_size = GrowableObjectArray::InstanceSize(); | |
150 | |
151 Isolate* isolate = Isolate::Current(); | |
152 Heap* heap = isolate->heap(); | |
153 | |
154 __ LoadImmediate(R2, heap->TopAddress(), kNoPP); | |
155 __ ldr(R0, Address(R2, 0)); | |
156 __ AddImmediate(R1, R0, fixed_size, kNoPP); | |
157 | |
158 // Check if the allocation fits into the remaining space. | |
159 // R0: potential new backing array object start. | |
160 // R1: potential next object start. | |
161 __ LoadImmediate(R3, heap->EndAddress(), kNoPP); | |
162 __ ldr(R3, Address(R3, 0)); | |
163 __ cmp(R1, Operand(R3)); | |
164 __ b(&fall_through, CS); | |
165 | |
166 // Successfully allocated the object(s), now update top to point to | |
167 // next object start and initialize the object. | |
168 __ str(R1, Address(R2, 0)); | |
169 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); | |
170 | |
171 // Initialize the tags. | |
172 // R0: new growable array object start as a tagged pointer. | |
173 const Class& cls = Class::Handle( | |
174 isolate->object_store()->growable_object_array_class()); | |
175 uword tags = 0; | |
176 tags = RawObject::SizeTag::update(fixed_size, tags); | |
177 tags = RawObject::ClassIdTag::update(cls.id(), tags); | |
178 __ LoadImmediate(R1, tags, kNoPP); | |
179 __ str(R1, FieldAddress(R0, GrowableObjectArray::tags_offset())); | |
180 | |
181 // Store backing array object in growable array object. | |
182 __ ldr(R1, Address(SP, kArrayOffset)); // Data argument. | |
183 // R0 is new, no barrier needed. | |
184 __ StoreIntoObjectNoBarrier( | |
185 R0, | |
186 FieldAddress(R0, GrowableObjectArray::data_offset()), | |
187 R1); | |
188 | |
189 // R0: new growable array object start as a tagged pointer. | |
190 // Store the type argument field in the growable array object. | |
191 __ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument. | |
192 __ StoreIntoObjectNoBarrier( | |
193 R0, | |
194 FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), | |
195 R1); | |
196 | |
197 // Set the length field in the growable array object to 0. | |
198 __ LoadImmediate(R1, 0, kNoPP); | |
199 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); | |
200 __ UpdateAllocationStats(kGrowableObjectArrayCid, R1, kNoPP); | |
201 __ ret(); // Returns the newly allocated object in R0. | |
202 | |
203 __ Bind(&fall_through); | |
50 } | 204 } |
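
The allocation above is the standard new-space bump-pointer fast path: load top, add the rounded size, bail out to the runtime if the result crosses end, otherwise commit the new top and tag the pointer. A minimal C++ sketch under a hypothetical Heap struct (the real code reads heap->TopAddress()/EndAddress() through registers):

    #include <cstdint>

    struct Heap { uintptr_t top, end; };  // Hypothetical stand-in.

    // Returns a tagged pointer, or 0 to signal "fall through to the runtime".
    uintptr_t TryBumpAllocate(Heap* heap, uintptr_t size) {
      uintptr_t obj = heap->top;
      uintptr_t next = obj + size;
      if (next >= heap->end) return 0;  // Does not fit in new space.
      heap->top = next;                 // Commit: top now points past the object.
      return obj + 1;                   // Add kHeapObjectTag.
    }
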
51 | 205 |
52 | 206 |
53 void Intrinsifier::GrowableList_getLength(Assembler* assembler) { | 207 void Intrinsifier::GrowableList_getLength(Assembler* assembler) { |
54 return; | 208 __ ldr(R0, Address(SP, 0 * kWordSize)); |
209 __ ldr(R0, FieldAddress(R0, GrowableObjectArray::length_offset())); | |
210 __ ret(); | |
55 } | 211 } |
56 | 212 |
57 | 213 |
58 void Intrinsifier::GrowableList_getCapacity(Assembler* assembler) { | 214 void Intrinsifier::GrowableList_getCapacity(Assembler* assembler) { |
59 return; | 215 __ ldr(R0, Address(SP, 0 * kWordSize)); |
216 __ ldr(R0, FieldAddress(R0, GrowableObjectArray::data_offset())); | |
217 __ ldr(R0, FieldAddress(R0, Array::length_offset())); | |
218 __ ret(); | |
60 } | 219 } |
61 | 220 |
62 | 221 |
63 void Intrinsifier::GrowableList_getIndexed(Assembler* assembler) { | 222 void Intrinsifier::GrowableList_getIndexed(Assembler* assembler) { |
64 return; | 223 Label fall_through; |
224 | |
225 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Index. | 
226 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Array. | 
227 | |
228 __ tsti(R0, kSmiTagMask); | |
229 __ b(&fall_through, NE); // Index is not a Smi, fall through. | 
regis 2014/05/19 20:14:02: period
zra 2014/05/19 21:13:18: Done. | |
230 | |
231 // Range check. | 
232 __ ldr(R6, FieldAddress(R1, GrowableObjectArray::length_offset())); | |
233 __ cmp(R0, Operand(R6)); | |
234 __ b(&fall_through, CS); | |
235 | |
236 ASSERT(kSmiTagShift == 1); | |
237 // Array element at R6 + R0 * 4 + Array::data_offset - 1. | 
238 __ ldr(R6, FieldAddress(R1, GrowableObjectArray::data_offset())); // Data. | 
regis 2014/05/19 20:14:02: Data.
zra 2014/05/19 21:13:18: Done. | |
239 __ add(R6, R6, Operand(R0, LSL, 2)); | |
240 __ ldr(R0, FieldAddress(R6, Array::data_offset())); | |
241 __ ret(); | |
242 __ Bind(&fall_through); | |
65 } | 243 } |
66 | 244 |
67 | 245 |
68 // Set value into growable object array at specified index. | 246 // Set value into growable object array at specified index. |
69 // On stack: growable array (+2), index (+1), value (+0). | 247 // On stack: growable array (+2), index (+1), value (+0). |
70 void Intrinsifier::GrowableList_setIndexed(Assembler* assembler) { | 248 void Intrinsifier::GrowableList_setIndexed(Assembler* assembler) { |
71 return; | 249 if (FLAG_enable_type_checks) { |
72 } | 250 return; |
73 | 251 } |
74 | 252 Label fall_through; |
253 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | |
254 __ ldr(R0, Address(SP, 2 * kWordSize)); // GrowableArray. | |
255 __ tsti(R1, kSmiTagMask); | |
256 __ b(&fall_through, NE); // Non-smi index. | |
257 // Range check using _length field. | |
258 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::length_offset())); | |
259 __ cmp(R1, Operand(R2)); | |
260 // Runtime throws exception. | |
261 __ b(&fall_through, CS); | |
262 __ ldr(R0, FieldAddress(R0, GrowableObjectArray::data_offset())); // data. | |
263 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | |
264 // Note that R1 is Smi, i.e., times 2. | 
265 ASSERT(kSmiTagShift == 1); | |
266 __ add(R1, R0, Operand(R1, LSL, 2)); | |
267 __ StoreIntoObject(R0, | |
268 FieldAddress(R1, Array::data_offset()), | |
269 R2); | |
270 __ ret(); | |
271 __ Bind(&fall_through); | |
272 } | |
273 | |
274 | |
75 // Set length of growable object array. The length cannot | 275 // Set length of growable object array. The length cannot |
76 // be greater than the length of the data container. | 276 // be greater than the length of the data container. |
77 // On stack: growable array (+1), length (+0). | 277 // On stack: growable array (+1), length (+0). |
78 void Intrinsifier::GrowableList_setLength(Assembler* assembler) { | 278 void Intrinsifier::GrowableList_setLength(Assembler* assembler) { |
79 return; | 279 Label fall_through; |
280 __ ldr(R0, Address(SP, 1 * kWordSize)); // Growable array. | |
281 __ ldr(R1, Address(SP, 0 * kWordSize)); // Length value. | |
282 __ tsti(R1, kSmiTagMask); // Check for Smi. | |
283 __ b(&fall_through, NE); | |
284 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); | |
285 __ ret(); | |
286 __ Bind(&fall_through); | |
287 // Fall through on non-Smi. | |
80 } | 288 } |
81 | 289 |
82 | 290 |
83 // Set data of growable object array. | 291 // Set data of growable object array. |
84 // On stack: growable array (+1), data (+0). | 292 // On stack: growable array (+1), data (+0). |
85 void Intrinsifier::GrowableList_setData(Assembler* assembler) { | 293 void Intrinsifier::GrowableList_setData(Assembler* assembler) { |
86 return; | 294 if (FLAG_enable_type_checks) { |
87 } | 295 return; |
88 | 296 } |
89 | 297 Label fall_through; |
298 __ ldr(R1, Address(SP, 0 * kWordSize)); // Data. | |
299 // Check that data is an ObjectArray. | |
300 __ tsti(R1, kSmiTagMask); | |
301 __ b(&fall_through, EQ); // Data is Smi. | |
302 __ CompareClassId(R1, kArrayCid, kNoPP); | |
303 __ b(&fall_through, NE); | |
304 __ ldr(R0, Address(SP, 1 * kWordSize)); // Growable array. | |
305 __ StoreIntoObject(R0, | |
306 FieldAddress(R0, GrowableObjectArray::data_offset()), | |
307 R1); | |
308 __ ret(); | |
309 __ Bind(&fall_through); | |
310 } | |
311 | |
312 | |
313 // Add an element to growable array if it doesn't need to grow, otherwise | |
314 // call into regular code. | |
315 // On stack: growable array (+1), value (+0). | |
90 void Intrinsifier::GrowableList_add(Assembler* assembler) { | 316 void Intrinsifier::GrowableList_add(Assembler* assembler) { |
91 return; | 317 // In checked mode we need to type-check the incoming argument. |
318 if (FLAG_enable_type_checks) { | |
319 return; | |
320 } | |
321 Label fall_through; | |
322 // R0: Array. | |
323 __ ldr(R0, Address(SP, 1 * kWordSize)); | |
324 // R1: length. | |
325 __ ldr(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); | |
326 // R2: data. | |
327 __ ldr(R2, FieldAddress(R0, GrowableObjectArray::data_offset())); | |
328 // R3: capacity. | |
329 __ ldr(R3, FieldAddress(R2, Array::length_offset())); | |
330 // Compare length with capacity. | |
331 __ cmp(R1, Operand(R3)); | |
332 __ b(&fall_through, EQ); // Must grow data. | |
333 const int64_t value_one = reinterpret_cast<int64_t>(Smi::New(1)); | |
334 // len = len + 1; | |
335 __ add(R3, R1, Operand(value_one)); | |
336 __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset())); | |
337 __ ldr(R0, Address(SP, 0 * kWordSize)); // Value. | |
338 ASSERT(kSmiTagShift == 1); | |
339 __ add(R1, R2, Operand(R1, LSL, 2)); | |
340 __ StoreIntoObject(R2, | |
341 FieldAddress(R1, Array::data_offset()), | |
342 R0); | |
343 __ LoadObject(R0, Object::null_object(), PP); | |
344 __ ret(); | |
345 __ Bind(&fall_through); | |
92 } | 346 } |
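
The fast path above only appends when length < capacity; growing the backing array is left to the Dart code. Roughly, in C++ terms (illustrative types, not the VM's):

    #include <cstdint>

    // Append 'value' if the backing store has room; false means "must grow".
    bool TryAdd(int64_t* length, int64_t capacity, int64_t* data, int64_t value) {
      if (*length == capacity) return false;  // Fall through to grow.
      data[(*length)++] = value;              // Store, then bump length.
      return true;
    }
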
93 | 347 |
94 | 348 |
95 // Gets the length of a TypedData. | 349 // Gets the length of a TypedData. |
96 void Intrinsifier::TypedData_getLength(Assembler* assembler) { | 350 void Intrinsifier::TypedData_getLength(Assembler* assembler) { |
97 return; | 351 __ ldr(R0, Address(SP, 0 * kWordSize)); |
98 } | 352 __ ldr(R0, FieldAddress(R0, TypedData::length_offset())); |
353 __ ret(); | |
354 } | |
355 | |
356 | |
357 static int GetScaleFactor(intptr_t size) { | |
358 switch (size) { | |
359 case 1: return 0; | |
360 case 2: return 1; | |
361 case 4: return 2; | |
362 case 8: return 3; | |
363 case 16: return 4; | |
364 } | |
365 UNREACHABLE(); | |
366 return -1; | |
367 } | |
368 | |
369 | |
370 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ | |
371 Label fall_through; \ | |
372 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ | |
373 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | |
374 /* Check that length is a positive Smi. */ \ | |
375 /* R2: requested array length argument. */ \ | |
376 __ tsti(R2, kSmiTagMask); \ | |
377 __ b(&fall_through, NE); \ | |
378 __ CompareRegisters(R2, ZR); \ | |
379 __ b(&fall_through, LT); \ | |
380 __ SmiUntag(R2); \ | |
381 /* Check for maximum allowed length. */ \ | |
382 /* R2: untagged array length. */ \ | |
383 __ CompareImmediate(R2, max_len, kNoPP); \ | |
384 __ b(&fall_through, GT); \ | |
385 __ Lsl(R2, R2, scale_shift); \ | |
386 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ | |
387 __ AddImmediate(R2, R2, fixed_size, kNoPP); \ | |
388 __ andi(R2, R2, ~(kObjectAlignment - 1)); \ | |
389 Heap* heap = Isolate::Current()->heap(); \ | |
390 \ | |
391 __ LoadImmediate(R0, heap->TopAddress(), kNoPP); \ | |
392 __ ldr(R0, Address(R0, 0)); \ | |
393 \ | |
394 /* R2: allocation size. */ \ | |
395 __ add(R1, R0, Operand(R2)); \ | |
396 __ b(&fall_through, VS); \ | |
397 \ | |
398 /* Check if the allocation fits into the remaining space. */ \ | |
399 /* R0: potential new object start. */ \ | |
400 /* R1: potential next object start. */ \ | |
401 /* R2: allocation size. */ \ | |
402 __ LoadImmediate(R3, heap->EndAddress(), kNoPP); \ | |
403 __ ldr(R3, Address(R3, 0)); \ | |
404 __ cmp(R1, Operand(R3)); \ | |
405 __ b(&fall_through, CS); \ | |
406 \ | |
407 /* Successfully allocated the object(s), now update top to point to */ \ | |
408 /* next object start and initialize the object. */ \ | |
409 __ LoadImmediate(R3, heap->TopAddress(), kNoPP); \ | |
410 __ str(R1, Address(R3, 0)); \ | |
411 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); \ | |
412 __ UpdateAllocationStatsWithSize(cid, R2, R4, kNoPP); \ | |
413 /* Initialize the tags. */ \ | |
414 /* R0: new object start as a tagged pointer. */ \ | |
415 /* R1: new object end address. */ \ | |
416 /* R2: allocation size. */ \ | |
417 { \ | |
418 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); \ | |
419 __ Lsl(R2, R2, RawObject::kSizeTagPos - kObjectAlignmentLog2); \ | |
420 __ csel(R2, ZR, R2, HI); \ | |
421 \ | |
422 /* Get the class index and insert it into the tags. */ \ | |
423 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid), kNoPP); \ | |
424 __ orr(R2, R2, Operand(TMP)); \ | |
425 __ str(R2, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \ | |
426 } \ | |
427 /* Set the length field. */ \ | |
428 /* R0: new object start as a tagged pointer. */ \ | |
429 /* R1: new object end address. */ \ | |
430 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | |
431 __ StoreIntoObjectNoBarrier(R0, \ | |
432 FieldAddress(R0, type_name::length_offset()), \ | |
433 R2); \ | |
434 /* Initialize all array elements to 0. */ \ | |
435 /* R0: new object start as a tagged pointer. */ \ | |
436 /* R1: new object end address. */ \ | |
437 /* R2: iterator which initially points to the start of the variable */ \ | 
438 /* data area to be initialized. */ \ | 
439 /* R3: scratch register. */ \ | 
440 __ mov(R3, ZR); \ | |
441 __ AddImmediate(R2, R0, sizeof(Raw##type_name) - 1, kNoPP); \ | |
442 Label init_loop, done; \ | |
443 __ Bind(&init_loop); \ | |
444 __ cmp(R2, Operand(R1)); \ | |
445 __ b(&done, CS); \ | |
446 __ str(R3, Address(R2, 0)); \ | |
447 __ add(R2, R2, Operand(kWordSize)); \ | |
448 __ b(&init_loop); \ | |
449 __ Bind(&done); \ | |
450 \ | |
451 __ ret(); \ | |
452 __ Bind(&fall_through); \ | |
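
The size computation in the macro scales the untagged length by the element size and rounds up to kObjectAlignment; in C++ terms it is equivalent to this sketch (parameter names are illustrative):

    #include <cstdint>

    uintptr_t AllocationSize(uintptr_t untagged_len, int scale_shift,
                             uintptr_t header_size, uintptr_t alignment) {
      // fixed_size in the macro is header_size + alignment - 1.
      uintptr_t size = (untagged_len << scale_shift) + header_size + alignment - 1;
      return size & ~(alignment - 1);  // Round up to an alignment boundary.
    }
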
99 | 453 |
100 | 454 |
101 #define TYPED_DATA_ALLOCATOR(clazz) \ | 455 #define TYPED_DATA_ALLOCATOR(clazz) \ |
102 void Intrinsifier::TypedData_##clazz##_new(Assembler* assembler) { \ | 456 void Intrinsifier::TypedData_##clazz##_new(Assembler* assembler) { \ |
103 return; \ | 457 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ |
458 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ | |
459 int shift = GetScaleFactor(size); \ | |
460 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ | |
104 } \ | 461 } \ |
105 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ | 462 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ |
106 return; \ | 463 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ |
464 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ | |
465 int shift = GetScaleFactor(size); \ | |
466 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ | |
107 } | 467 } |
108 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) | 468 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) |
109 #undef TYPED_DATA_ALLOCATOR | 469 #undef TYPED_DATA_ALLOCATOR |
110 | 470 |
111 | 471 |
472 // Loads args from stack into R0 and R1. | 
473 // Tests if they are smis; jumps to label not_smi if not. | 
474 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | |
475 __ ldr(R0, Address(SP, + 0 * kWordSize)); | |
476 __ ldr(R1, Address(SP, + 1 * kWordSize)); | |
477 __ orr(TMP, R0, Operand(R1)); | |
478 __ tsti(TMP, kSmiTagMask); | |
479 __ b(not_smi, NE); | |
480 } | |
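
ORing the two words lets a single tsti check both tags at once: a Smi has its low bit clear, so any set tag bit survives the OR. A one-line C++ equivalent, assuming kSmiTagMask == 1:

    #include <cstdint>

    bool BothAreSmis(int64_t a, int64_t b) {
      return ((a | b) & 1) == 0;  // Low bit set => at least one non-Smi.
    }
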
481 | |
482 | |
112 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { | 483 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { |
113 return; | 484 Label fall_through; |
485 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | |
486 __ adds(R0, R0, Operand(R1)); // Adds. | |
487 __ b(&fall_through, VS); // Fall-through on overflow. | |
488 __ ret(); | |
489 __ Bind(&fall_through); | |
114 } | 490 } |
115 | 491 |
116 | 492 |
117 void Intrinsifier::Integer_add(Assembler* assembler) { | 493 void Intrinsifier::Integer_add(Assembler* assembler) { |
118 return; | 494 Integer_addFromInteger(assembler); |
119 } | 495 } |
120 | 496 |
121 | 497 |
122 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 498 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
123 return; | 499 Label fall_through; |
500 TestBothArgumentsSmis(assembler, &fall_through); | |
501 __ subs(R0, R0, Operand(R1)); // Subtract. | |
502 __ b(&fall_through, VS); // Fall-through on overflow. | |
503 __ ret(); | |
504 __ Bind(&fall_through); | |
124 } | 505 } |
125 | 506 |
126 | 507 |
127 void Intrinsifier::Integer_sub(Assembler* assembler) { | 508 void Intrinsifier::Integer_sub(Assembler* assembler) { |
128 return; | 509 Label fall_through; |
510 TestBothArgumentsSmis(assembler, &fall_through); | |
511 __ subs(R0, R1, Operand(R0)); // Subtract. | |
512 __ b(&fall_through, VS); // Fall-through on overflow. | |
513 __ ret(); | |
514 __ Bind(&fall_through); | |
129 } | 515 } |
130 | 516 |
131 | 517 |
132 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 518 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
133 return; | 519 Label fall_through; |
520 | |
521 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 
522 __ SmiUntag(R0); // Untags R0. We only want the result shifted by one. | 
523 | |
524 __ mul(TMP, R0, R1); | |
525 __ smulh(TMP2, R0, R1); | |
526 // TMP2: result bits 64..127. | 
527 __ cmp(TMP2, Operand(TMP, ASR, 63)); | |
528 __ b(&fall_through, NE); | |
529 __ mov(R0, TMP); | |
530 __ ret(); | |
531 __ Bind(&fall_through); | |
134 } | 532 } |
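
The mul/smulh pair computes the full 128-bit product; it fits in 64 bits exactly when the high half equals the sign extension of the low half (bit 63 replicated). A C++ sketch of the check, using the GCC/Clang __int128 extension:

    #include <cstdint>

    bool MulOverflows64(int64_t a, int64_t b) {
      __int128 full = static_cast<__int128>(a) * b;
      int64_t lo = static_cast<int64_t>(full);        // mul
      int64_t hi = static_cast<int64_t>(full >> 64);  // smulh
      return hi != (lo >> 63);                        // cmp with ASR 63
    }
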
135 | 533 |
136 | 534 |
137 void Intrinsifier::Integer_mul(Assembler* assembler) { | 535 void Intrinsifier::Integer_mul(Assembler* assembler) { |
138 return; | 536 Integer_mulFromInteger(assembler); |
537 } | |
538 | |
539 | |
540 // Optimizations: | |
541 // - result is 0 if: | |
542 // - left is 0 | |
543 // - left equals right | |
544 // - result is left if: | 
545 // - left > 0 && left < right | |
546 // R1: Tagged left (dividend). | |
547 // R0: Tagged right (divisor). | |
548 // Returns with result in R0, OR: | |
549 // R1: Untagged result (remainder). | |
550 static void EmitRemainderOperation(Assembler* assembler) { | |
551 Label return_zero, modulo; | |
552 const Register left = R1; | |
553 const Register right = R0; | |
554 const Register result = R1; | |
555 const Register tmp = R2; | |
556 ASSERT(left == result); | |
557 | |
558 // Check for quick zero results. | |
559 __ CompareRegisters(left, ZR); | |
560 __ b(&return_zero, EQ); | |
561 __ CompareRegisters(left, right); | |
562 __ b(&return_zero, EQ); | |
563 | |
564 // Check if result should be left. | |
565 __ CompareRegisters(left, ZR); | |
566 __ b(&modulo, LT); | |
567 // left is positive. | |
568 __ CompareRegisters(left, right); | |
569 // left is less than right, result is left. | |
570 __ b(&modulo, GT); | |
571 __ mov(R0, left); | |
572 __ ret(); | |
573 | |
574 __ Bind(&return_zero); | |
575 __ mov(R0, ZR); | |
576 __ ret(); | |
577 | |
578 __ Bind(&modulo); | |
579 // result <- left - right * (left / right) | |
580 __ SmiUntag(left); | |
581 __ SmiUntag(right); | |
582 | |
583 __ sdiv(tmp, left, right); | |
584 __ msub(result, right, tmp, left); // result <- left - right * tmp | |
139 } | 585 } |
140 | 586 |
141 | 587 |
142 // Implementation: | 588 // Implementation: |
143 // res = left % right; | 589 // res = left % right; |
144 // if (res < 0) { | 590 // if (res < 0) { |
145 // if (right < 0) { | 591 // if (right < 0) { |
146 // res = res - right; | 592 // res = res - right; |
147 // } else { | 593 // } else { |
148 // res = res + right; | 594 // res = res + right; |
149 // } | 595 // } |
150 // } | 596 // } |
151 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 597 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
152 return; | 598 // Check to see if we have integer division |
599 Label neg_remainder, fall_through; | |
600 __ ldr(R1, Address(SP, + 0 * kWordSize)); | |
601 __ ldr(R0, Address(SP, + 1 * kWordSize)); | |
602 __ orr(TMP, R0, Operand(R1)); | |
603 __ tsti(TMP, kSmiTagMask); | |
604 __ b(&fall_through, NE); | |
605 // R1: Tagged left (dividend). | |
606 // R0: Tagged right (divisor). | |
607 // Check if modulo by zero -> exception thrown in main function. | |
608 __ CompareRegisters(R0, ZR); | |
609 __ b(&fall_through, EQ); | |
610 EmitRemainderOperation(assembler); | |
611 // Untagged right in R0. Untagged remainder result in R1. | |
612 | |
613 __ CompareRegisters(R1, ZR); | |
614 __ b(&neg_remainder, LT); | |
615 __ Lsl(R0, R1, 1); // Tag and move result to R0. | |
616 __ ret(); | |
617 | |
618 __ Bind(&neg_remainder); | |
619 // Result is negative, adjust it. | |
620 __ CompareRegisters(R0, ZR); | |
621 __ sub(TMP, R1, Operand(R0)); | |
622 __ add(TMP2, R1, Operand(R0)); | |
623 __ csel(R0, TMP2, TMP, GE); | |
624 __ SmiTag(R0); | |
625 __ ret(); | |
626 | |
627 __ Bind(&fall_through); | |
153 } | 628 } |
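
The csel at the end folds the two pseudocode branches into one select: subtract right when it is negative, add it otherwise. A C++ sketch of the whole operation (right is nonzero; the zero case was sent to the runtime above):

    #include <cstdint>

    int64_t DartModulo(int64_t left, int64_t right) {
      int64_t res = left % right;             // Remainder, sign of 'left'.
      if (res < 0) {
        res += (right < 0) ? -right : right;  // Make the result non-negative.
      }
      return res;
    }
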
154 | 629 |
155 | 630 |
156 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { | 631 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { |
157 return; | 632 // Check to see if we have integer division |
633 Label fall_through; | |
634 | |
635 TestBothArgumentsSmis(assembler, &fall_through); | |
636 __ CompareRegisters(R0, ZR); | |
637 __ b(&fall_through, EQ); // If b is 0, fall through. | |
638 | |
639 __ SmiUntag(R0); | |
640 __ SmiUntag(R1); | |
641 | |
642 __ sdiv(R0, R1, R0); | |
643 | |
644 // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we | |
645 // cannot tag the result. | |
646 __ CompareImmediate(R0, 0x4000000000000000, kNoPP); | |
647 __ b(&fall_through, EQ); | |
648 __ SmiTag(R0); // Not equal. Okay to tag and return. | |
649 __ ret(); // Return. | |
650 __ Bind(&fall_through); | |
158 } | 651 } |
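
The 0x4000000000000000 compare guards the one quotient that cannot be retagged: the smallest Smi on ARM64 is -2^62, and -2^62 / -1 == 2^62, one past the largest Smi. Sketch:

    #include <cstdint>

    bool QuotientFitsInSmi(int64_t untagged_quotient) {
      // 2^62 == 0x4000000000000000 is the only sdiv result out of Smi range.
      return untagged_quotient != 0x4000000000000000LL;
    }
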
159 | 652 |
160 | 653 |
161 void Intrinsifier::Integer_negate(Assembler* assembler) { | 654 void Intrinsifier::Integer_negate(Assembler* assembler) { |
162 return; | 655 Label fall_through; |
656 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Grab first argument. | |
657 __ tsti(R0, kSmiTagMask); // Test for Smi. | |
658 __ b(&fall_through, NE); | |
659 __ negs(R0, R0); | |
660 __ b(&fall_through, VS); | |
661 __ ret(); | |
662 __ Bind(&fall_through); | |
163 } | 663 } |
164 | 664 |
165 | 665 |
166 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { | 666 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { |
167 return; | 667 Label fall_through; |
668 TestBothArgumentsSmis(assembler, &fall_through); // Check two smis. | 
regis 2014/05/19 20:14:02: Check two smis.
zra 2014/05/19 21:13:18: Done. | |
669 __ and_(R0, R0, Operand(R1)); | |
670 __ ret(); | |
671 __ Bind(&fall_through); | |
168 } | 672 } |
169 | 673 |
170 | 674 |
171 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { | 675 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { |
172 return; | 676 Integer_bitAndFromInteger(assembler); |
173 } | 677 } |
174 | 678 |
175 | 679 |
176 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { | 680 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { |
177 return; | 681 Label fall_through; |
682 TestBothArgumentsSmis(assembler, &fall_through); // Check two smis. | 
regis 2014/05/19 20:14:02: Check two smis.
zra 2014/05/19 21:13:18: Done. | |
683 __ orr(R0, R0, Operand(R1)); | |
684 __ ret(); | |
685 __ Bind(&fall_through); | |
178 } | 686 } |
179 | 687 |
180 | 688 |
181 void Intrinsifier::Integer_bitOr(Assembler* assembler) { | 689 void Intrinsifier::Integer_bitOr(Assembler* assembler) { |
182 return; | 690 Integer_bitOrFromInteger(assembler); |
183 } | 691 } |
184 | 692 |
185 | 693 |
186 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { | 694 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { |
187 return; | 695 Label fall_through; |
696 | |
697 TestBothArgumentsSmis(assembler, &fall_through); // Check two smis. | 
regis 2014/05/19 20:14:02: Check two smis.
zra 2014/05/19 21:13:18: Done. | |
698 __ eor(R0, R0, Operand(R1)); | |
699 __ ret(); | |
700 __ Bind(&fall_through); | |
188 } | 701 } |
189 | 702 |
190 | 703 |
191 void Intrinsifier::Integer_bitXor(Assembler* assembler) { | 704 void Intrinsifier::Integer_bitXor(Assembler* assembler) { |
192 return; | 705 Integer_bitXorFromInteger(assembler); |
193 } | 706 } |
194 | 707 |
195 | 708 |
196 void Intrinsifier::Integer_shl(Assembler* assembler) { | 709 void Intrinsifier::Integer_shl(Assembler* assembler) { |
197 return; | 710 ASSERT(kSmiTagShift == 1); |
711 ASSERT(kSmiTag == 0); | |
712 const Register right = R0; | |
713 const Register left = R1; | |
714 const Register temp = R2; | |
715 const Register result = R0; | |
716 Label fall_through; | |
717 | |
718 TestBothArgumentsSmis(assembler, &fall_through); | |
719 __ CompareImmediate( | |
720 right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits)), PP); | |
721 __ b(&fall_through, CS); | |
722 | |
723 // Left is not a constant. | |
724 // Check if count too large for handling it inlined. | |
725 __ Asr(TMP, right, kSmiTagSize); // SmiUntag right into TMP. | |
726 // Overflow test (preserve left, right, and TMP). | 
727 __ lslv(temp, left, TMP); | |
728 __ asrv(TMP2, temp, TMP); | |
729 __ CompareRegisters(left, TMP2); | |
730 __ b(&fall_through, NE); // Overflow. | |
731 // Shift for the result now that we know there is no overflow. | 
732 __ lslv(result, left, TMP); | |
733 __ ret(); | |
734 __ Bind(&fall_through); | |
735 } | |
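
The overflow test shifts left, shifts back arithmetically, and compares with the original: if any significant bits were lost, the round trip changes the value. In C++ terms (assuming two's-complement wraparound as on ARM64):

    #include <cstdint>

    bool ShlOverflows(int64_t value, int shift) {
      int64_t shifted = value << shift;    // lslv
      return (shifted >> shift) != value;  // asrv, then compare
    }
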
736 | |
737 | |
738 static void CompareIntegers(Assembler* assembler, Condition true_condition) { | |
739 Label fall_through, true_label; | |
740 TestBothArgumentsSmis(assembler, &fall_through); | |
741 // R0 contains the right argument, R1 the left. | |
742 __ CompareRegisters(R1, R0); | |
743 __ LoadObject(R0, Bool::False(), PP); | |
744 __ LoadObject(TMP, Bool::True(), PP); | |
745 __ csel(R0, TMP, R0, true_condition); | |
746 __ ret(); | |
747 __ Bind(&fall_through); | |
198 } | 748 } |
199 | 749 |
200 | 750 |
201 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { | 751 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { |
202 return; | 752 CompareIntegers(assembler, LT); |
203 } | 753 } |
204 | 754 |
205 | 755 |
206 void Intrinsifier::Integer_lessThan(Assembler* assembler) { | 756 void Intrinsifier::Integer_lessThan(Assembler* assembler) { |
207 return; | 757 Integer_greaterThanFromInt(assembler); |
208 } | 758 } |
209 | 759 |
210 | 760 |
211 void Intrinsifier::Integer_greaterThan(Assembler* assembler) { | 761 void Intrinsifier::Integer_greaterThan(Assembler* assembler) { |
212 return; | 762 CompareIntegers(assembler, GT); |
213 } | 763 } |
214 | 764 |
215 | 765 |
216 void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) { | 766 void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) { |
217 return; | 767 CompareIntegers(assembler, LE); |
218 } | 768 } |
219 | 769 |
220 | 770 |
221 void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) { | 771 void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) { |
222 return; | 772 CompareIntegers(assembler, GE); |
223 } | 773 } |
224 | 774 |
225 | 775 |
226 // This is called for Smi, Mint and Bigint receivers. The right argument | 776 // This is called for Smi, Mint and Bigint receivers. The right argument |
227 // can be Smi, Mint, Bigint or double. | 777 // can be Smi, Mint, Bigint or double. |
228 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { | 778 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { |
229 return; | 779 Label fall_through, true_label, check_for_mint; |
780 // For an integer receiver, do the '===' check first. | 
781 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
782 __ ldr(R1, Address(SP, 1 * kWordSize)); | |
783 __ cmp(R0, Operand(R1)); | |
784 __ b(&true_label, EQ); | |
785 | |
786 __ orr(R2, R0, Operand(R1)); | |
787 __ tsti(R2, kSmiTagMask); | |
788 __ b(&check_for_mint, NE); // If R0 or R1 is not a smi do Mint checks. | |
789 | |
790 // Both arguments are smi, '===' is good enough. | |
791 __ LoadObject(R0, Bool::False(), PP); | |
792 __ ret(); | |
793 __ Bind(&true_label); | |
794 __ LoadObject(R0, Bool::True(), PP); | |
795 __ ret(); | |
796 | |
797 // At least one of the arguments was not Smi. | |
798 Label receiver_not_smi; | |
799 __ Bind(&check_for_mint); | |
800 | |
801 __ tsti(R1, kSmiTagMask); // Check receiver. | |
802 __ b(&receiver_not_smi, NE); | |
803 | |
804 // Left (receiver) is Smi, return false if right is not Double. | |
805 // Note that an instance of Mint or Bigint never contains a value that can be | |
806 // represented by Smi. | |
807 | |
808 __ CompareClassId(R0, kDoubleCid, kNoPP); | |
809 __ b(&fall_through, EQ); | |
810 __ LoadObject(R0, Bool::False(), PP); // Smi == Mint -> false. | |
811 __ ret(); | |
812 | |
813 __ Bind(&receiver_not_smi); | |
814 // R1: receiver. | |
815 | |
816 __ CompareClassId(R1, kMintCid, kNoPP); | |
817 __ b(&fall_through, NE); | |
818 // Receiver is Mint, return false if right is Smi. | |
819 __ tsti(R0, kSmiTagMask); | |
820 __ b(&fall_through, NE); | |
821 __ LoadObject(R0, Bool::False(), PP); | |
822 __ ret(); | |
823 // TODO(srdjan): Implement Mint == Mint comparison. | |
824 | |
825 __ Bind(&fall_through); | |
230 } | 826 } |
231 | 827 |
232 | 828 |
233 void Intrinsifier::Integer_equal(Assembler* assembler) { | 829 void Intrinsifier::Integer_equal(Assembler* assembler) { |
234 return; | 830 Integer_equalToInteger(assembler); |
235 } | 831 } |
236 | 832 |
237 | 833 |
238 void Intrinsifier::Integer_sar(Assembler* assembler) { | 834 void Intrinsifier::Integer_sar(Assembler* assembler) { |
239 return; | 835 Label fall_through; |
836 | |
837 TestBothArgumentsSmis(assembler, &fall_through); | |
838 // Shift amount in R0. Value to shift in R1. | |
839 | |
840 // Fall through if shift amount is negative. | |
841 __ SmiUntag(R0); | |
842 __ CompareRegisters(R0, ZR); | |
843 __ b(&fall_through, LT); | |
844 | |
845 // If shift amount is bigger than 63, set to 63. | |
846 __ LoadImmediate(TMP, 0x3F, kNoPP); | |
847 __ CompareRegisters(R0, TMP); | |
848 __ csel(R0, TMP, R0, GT); | |
849 __ SmiUntag(R1); | |
850 __ asrv(R0, R1, R0); | |
851 __ SmiTag(R0); | |
852 __ ret(); | |
853 __ Bind(&fall_through); | |
240 } | 854 } |
241 | 855 |
242 | 856 |
243 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { | 857 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { |
244 return; | 858 __ ldr(R0, Address(SP, 0 * kWordSize)); |
859 __ mvn(R0, R0); | |
860 __ andi(R0, R0, ~kSmiTagMask); // Remove inverted smi-tag. | |
861 __ ret(); | |
245 } | 862 } |
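
The andi after mvn is the tag bookkeeping: for a tagged Smi 2v, ~(2v) == -2v - 1, and clearing the low bit yields 2 * ~v, i.e. the tagged bitwise-not. Sketch, assuming kSmiTagMask == 1:

    #include <cstdint>

    int64_t SmiBitNot(int64_t tagged) {
      return ~tagged & ~int64_t{1};  // mvn, then clear the inverted tag bit.
    }
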
246 | 863 |
247 | 864 |
248 void Intrinsifier::Smi_bitLength(Assembler* assembler) { | 865 void Intrinsifier::Smi_bitLength(Assembler* assembler) { |
249 return; | 866 // TODO(sra): Implement as word-length - CLZ. |
867 } | |
868 | |
869 | |
870 // Check if the last argument is a double, jump to label 'is_smi' if smi | 
871 // (easy to convert to double), otherwise jump to label 'not_double_smi'. | 
872 // Returns the last argument in R0. | 
873 static void TestLastArgumentIsDouble(Assembler* assembler, | |
874 Label* is_smi, | |
875 Label* not_double_smi) { | |
876 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
877 __ tsti(R0, kSmiTagMask); | |
878 __ b(is_smi, EQ); | |
879 __ CompareClassId(R0, kDoubleCid, kNoPP); | |
880 __ b(not_double_smi, NE); | |
881 // Fall through with Double in R0. | |
882 } | |
883 | |
884 | |
885 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown | |
886 // type. Return true or false object in the register R0. Any NaN argument | |
887 // returns false. Any non-double arg1 causes control flow to fall through to the | |
888 // slow case (compiled method body). | |
889 static void CompareDoubles(Assembler* assembler, Condition true_condition) { | |
890 Label fall_through, is_smi, double_op, not_nan; | |
891 | |
892 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | |
893 // Both arguments are double, right operand is in R0. | |
894 | |
895 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP); | |
896 __ Bind(&double_op); | |
897 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. | |
898 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | |
899 | |
900 __ fcmpd(V0, V1); | |
901 __ LoadObject(R0, Bool::False(), PP); | |
902 // Return false if V0 or V1 was NaN before checking true condition. | 
903 __ b(¬_nan, VC); | |
904 __ ret(); | |
905 __ Bind(¬_nan); | |
906 __ LoadObject(TMP, Bool::True(), PP); | |
907 __ csel(R0, TMP, R0, true_condition); | |
908 __ ret(); | |
909 | |
910 __ Bind(&is_smi); // Convert R0 to a double. | |
911 __ SmiUntag(R0); | |
912 __ scvtfd(V1, R0); | |
913 __ b(&double_op); // Then do the comparison. | |
914 __ Bind(&fall_through); | |
250 } | 915 } |
251 | 916 |
252 | 917 |
253 void Intrinsifier::Double_greaterThan(Assembler* assembler) { | 918 void Intrinsifier::Double_greaterThan(Assembler* assembler) { |
254 return; | 919 CompareDoubles(assembler, HI); |
255 } | 920 } |
256 | 921 |
257 | 922 |
258 void Intrinsifier::Double_greaterEqualThan(Assembler* assembler) { | 923 void Intrinsifier::Double_greaterEqualThan(Assembler* assembler) { |
259 return; | 924 CompareDoubles(assembler, CS); |
260 } | 925 } |
261 | 926 |
262 | 927 |
263 void Intrinsifier::Double_lessThan(Assembler* assembler) { | 928 void Intrinsifier::Double_lessThan(Assembler* assembler) { |
264 return; | 929 CompareDoubles(assembler, CC); |
265 } | 930 } |
266 | 931 |
267 | 932 |
268 void Intrinsifier::Double_equal(Assembler* assembler) { | 933 void Intrinsifier::Double_equal(Assembler* assembler) { |
269 return; | 934 CompareDoubles(assembler, EQ); |
270 } | 935 } |
271 | 936 |
272 | 937 |
273 void Intrinsifier::Double_lessEqualThan(Assembler* assembler) { | 938 void Intrinsifier::Double_lessEqualThan(Assembler* assembler) { |
274 return; | 939 CompareDoubles(assembler, LS); |
940 } | |
941 | |
942 | |
943 // Expects left argument to be double (receiver). Right argument is unknown. | |
944 // Both arguments are on stack. | |
945 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { | |
946 Label fall_through; | |
947 | |
948 TestLastArgumentIsDouble(assembler, &fall_through, &fall_through); | |
949 // Both arguments are double, right operand is in R0. | |
950 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP); | |
951 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. | |
952 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | |
953 switch (kind) { | |
954 case Token::kADD: __ faddd(V0, V0, V1); break; | |
955 case Token::kSUB: __ fsubd(V0, V0, V1); break; | |
956 case Token::kMUL: __ fmuld(V0, V0, V1); break; | |
957 case Token::kDIV: __ fdivd(V0, V0, V1); break; | |
958 default: UNREACHABLE(); | |
959 } | |
960 const Class& double_class = Class::Handle( | |
961 Isolate::Current()->object_store()->double_class()); | |
962 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | |
963 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | |
964 __ ret(); | |
965 __ Bind(&fall_through); | |
275 } | 966 } |
276 | 967 |
277 | 968 |
278 void Intrinsifier::Double_add(Assembler* assembler) { | 969 void Intrinsifier::Double_add(Assembler* assembler) { |
279 return; | 970 DoubleArithmeticOperations(assembler, Token::kADD); |
280 } | 971 } |
281 | 972 |
282 | 973 |
283 void Intrinsifier::Double_mul(Assembler* assembler) { | 974 void Intrinsifier::Double_mul(Assembler* assembler) { |
284 return; | 975 DoubleArithmeticOperations(assembler, Token::kMUL); |
285 } | 976 } |
286 | 977 |
287 | 978 |
288 void Intrinsifier::Double_sub(Assembler* assembler) { | 979 void Intrinsifier::Double_sub(Assembler* assembler) { |
289 return; | 980 DoubleArithmeticOperations(assembler, Token::kSUB); |
290 } | 981 } |
291 | 982 |
292 | 983 |
293 void Intrinsifier::Double_div(Assembler* assembler) { | 984 void Intrinsifier::Double_div(Assembler* assembler) { |
294 return; | 985 DoubleArithmeticOperations(assembler, Token::kDIV); |
295 } | 986 } |
296 | 987 |
297 | 988 |
298 // Left is double, right is integer (Bigint, Mint or Smi). | 989 // Left is double, right is integer (Bigint, Mint or Smi). |
299 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { | 990 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { |
300 return; | 991 Label fall_through; |
992 // Only smis allowed. | |
993 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
994 __ tsti(R0, kSmiTagMask); | |
995 __ b(&fall_through, NE); | |
996 // Is Smi. | |
997 __ SmiUntag(R0); | |
998 __ scvtfd(V1, R0); | |
999 __ ldr(R0, Address(SP, 1 * kWordSize)); | |
1000 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | |
1001 __ fmuld(V0, V0, V1); | |
1002 const Class& double_class = Class::Handle( | |
1003 Isolate::Current()->object_store()->double_class()); | |
1004 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | |
1005 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | |
1006 __ ret(); | |
1007 __ Bind(&fall_through); | |
301 } | 1008 } |
302 | 1009 |
303 | 1010 |
304 void Intrinsifier::Double_fromInteger(Assembler* assembler) { | 1011 void Intrinsifier::Double_fromInteger(Assembler* assembler) { |
305 return; | 1012 Label fall_through; |
1013 | |
1014 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
1015 __ tsti(R0, kSmiTagMask); | |
1016 __ b(&fall_through, NE); | |
1017 // Is Smi. | |
1018 __ SmiUntag(R0); | |
1019 __ scvtfd(V0, R0); | |
1020 const Class& double_class = Class::Handle( | |
1021 Isolate::Current()->object_store()->double_class()); | |
1022 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | |
1023 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | |
1024 __ ret(); | |
1025 __ Bind(&fall_through); | |
306 } | 1026 } |
307 | 1027 |
308 | 1028 |
309 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1029 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
310 return; | 1030 Label is_true; |
1031 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
1032 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | |
1033 __ fcmpd(V0, V0); | |
1034 __ LoadObject(TMP, Bool::False(), PP); | |
1035 __ LoadObject(R0, Bool::True(), PP); | |
1036 __ csel(R0, TMP, R0, VC); | |
1037 __ ret(); | |
311 } | 1038 } |
312 | 1039 |
313 | 1040 |
314 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { | 1041 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { |
315 return; | 1042 const Register false_reg = R0; |
1043 const Register true_reg = R2; | |
1044 Label is_false, is_true, is_zero; | |
1045 | |
1046 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
1047 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | |
1048 __ fcmpdz(V0); | |
1049 __ LoadObject(true_reg, Bool::True(), PP); | |
1050 __ LoadObject(false_reg, Bool::False(), PP); | |
1051 __ b(&is_false, VS); // NaN -> false. | |
1052 __ b(&is_zero, EQ); // Check for negative zero. | |
1053 __ b(&is_false, CS); // >= 0 -> false. | |
1054 | |
1055 __ Bind(&is_true); | |
1056 __ mov(R0, true_reg); | |
1057 | |
1058 __ Bind(&is_false); | |
1059 __ ret(); | |
1060 | |
1061 __ Bind(&is_zero); | |
1062 // Check for negative zero by looking at the sign bit. | |
1063 __ fmovrd(R1, V0); | |
1064 __ Lsr(R1, R1, 63); | |
1065 __ tsti(R1, 1); | |
1066 __ csel(R0, true_reg, false_reg, NE); // Sign bit set. | |
1067 __ ret(); | |
316 } | 1068 } |
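
The extra is_zero path exists because fcmpdz reports -0.0 equal to zero, so the sign must be read from the raw bit pattern (fmovrd moves it to an integer register). A C++ equivalent of that subcase:

    #include <cstdint>
    #include <cstring>

    bool IsNegativeZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // fmovrd
      return d == 0.0 && (bits >> 63) != 0;  // Zero, with sign bit set.
    }
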
317 | 1069 |
318 | 1070 |
319 void Intrinsifier::Double_toInt(Assembler* assembler) { | 1071 void Intrinsifier::Double_toInt(Assembler* assembler) { |
320 return; | 1072 Label fall_through; |
1073 | |
1074 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
1075 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | |
1076 | |
1077 // Explicit NaN check, since ARM gives an FPU exception if you try to | |
1078 // convert NaN to an int. | |
1079 __ fcmpd(V0, V0); | |
1080 __ b(&fall_through, VS); | |
1081 | |
1082 __ fcvtzds(R0, V0); | |
1083 // Overflow is signaled with minint. | |
1084 // Check for overflow and that it fits into Smi. | |
1085 __ CompareImmediate(R0, 0xC000000000000000, kNoPP); | |
1086 __ b(&fall_through, MI); | |
1087 __ SmiTag(R0); | |
1088 __ ret(); | |
1089 __ Bind(&fall_through); | |
321 } | 1090 } |
322 | 1091 |
323 | 1092 |
324 void Intrinsifier::Math_sqrt(Assembler* assembler) { | 1093 void Intrinsifier::Math_sqrt(Assembler* assembler) { |
325 return; | 1094 Label fall_through, is_smi, double_op; |
326 } | 1095 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
327 | 1096 // Argument is double and is in R0. |
328 | 1097 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP); |
1098 __ Bind(&double_op); | |
1099 __ fsqrtd(V0, V1); | |
1100 const Class& double_class = Class::Handle( | |
1101 Isolate::Current()->object_store()->double_class()); | |
1102 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | |
1103 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | |
1104 __ ret(); | |
1105 __ Bind(&is_smi); | |
1106 __ SmiUntag(R0); | |
1107 __ scvtfd(V1, R0); | |
1108 __ b(&double_op); | |
1109 __ Bind(&fall_through); | |
1110 } | |
1111 | |
1112 | |
329 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; | 1113 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; |
330 // _state[kSTATE_LO] = state & _MASK_32; | 1114 // _state[kSTATE_LO] = state & _MASK_32; |
331 // _state[kSTATE_HI] = state >> 32; | 1115 // _state[kSTATE_HI] = state >> 32; |
332 void Intrinsifier::Random_nextState(Assembler* assembler) { | 1116 void Intrinsifier::Random_nextState(Assembler* assembler) { |
333 return; | 1117 const Library& math_lib = Library::Handle(Library::MathLibrary()); |
1118 ASSERT(!math_lib.IsNull()); | |
1119 const Class& random_class = Class::Handle( | |
1120 math_lib.LookupClassAllowPrivate(Symbols::_Random())); | |
1121 ASSERT(!random_class.IsNull()); | |
1122 const Field& state_field = Field::ZoneHandle( | |
1123 random_class.LookupInstanceField(Symbols::_state())); | |
1124 ASSERT(!state_field.IsNull()); | |
1125 const Field& random_A_field = Field::ZoneHandle( | |
1126 random_class.LookupStaticField(Symbols::_A())); | |
1127 ASSERT(!random_A_field.IsNull()); | |
1128 ASSERT(random_A_field.is_const()); | |
1129 const Instance& a_value = Instance::Handle(random_A_field.value()); | |
1130 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); | |
1131 | |
1132 __ ldr(R0, Address(SP, 0 * kWordSize)); // Receiver. | |
1133 __ ldr(R1, FieldAddress(R0, state_field.Offset())); // Field '_state'. | |
1134 | |
1135 // Address of _state[0]. | 
1136 const int64_t disp = | |
1137 FlowGraphCompiler::DataOffsetFor(kTypedDataUint32ArrayCid) - | |
1138 kHeapObjectTag; | |
1139 | |
1140 __ LoadImmediate(R0, a_int_value, kNoPP); | |
1141 __ LoadFromOffset(R2, R1, disp, kNoPP); | |
1142 __ Lsr(R3, R2, 32); | |
1143 __ andi(R2, R2, 0xffffffff); | |
1144 __ mul(R2, R0, R2); | |
1145 __ add(R2, R2, Operand(R3)); | |
1146 __ StoreToOffset(R2, R1, disp, kNoPP); | |
1147 __ ret(); | |
334 } | 1148 } |
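
Because both 32-bit halves of the state sit in one 64-bit word, the update is a single load, multiply-add, and store; the new packed word is exactly _A * lo + hi (mod 2^64). Sketch:

    #include <cstdint>

    uint64_t NextState(uint64_t state, uint64_t a) {  // 'a' is the _A constant.
      uint64_t lo = state & 0xffffffffu;  // _state[kSTATE_LO]
      uint64_t hi = state >> 32;          // _state[kSTATE_HI]
      return a * lo + hi;                 // New lo in bits 0..31, hi in 32..63.
    }
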
335 | 1149 |
336 | 1150 |
337 void Intrinsifier::Object_equal(Assembler* assembler) { | 1151 void Intrinsifier::Object_equal(Assembler* assembler) { |
338 return; | 1152 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1153 __ ldr(R1, Address(SP, 1 * kWordSize)); | |
1154 __ cmp(R0, Operand(R1)); | |
1155 __ LoadObject(R0, Bool::False(), PP); | |
1156 __ LoadObject(TMP, Bool::True(), PP); | |
1157 __ csel(R0, TMP, R0, EQ); | |
1158 __ ret(); | |
339 } | 1159 } |
340 | 1160 |
341 | 1161 |
342 void Intrinsifier::String_getHashCode(Assembler* assembler) { | 1162 void Intrinsifier::String_getHashCode(Assembler* assembler) { |
343 return; | 1163 Label fall_through; |
1164 __ ldr(R0, Address(SP, 0 * kWordSize)); | |
1165 __ ldr(R0, FieldAddress(R0, String::hash_offset())); | |
1166 __ CompareRegisters(R0, ZR); | |
1167 __ b(&fall_through, EQ); | |
1168 __ ret(); | |
1169 // Hash not yet computed. | |
1170 __ Bind(&fall_through); | |
344 } | 1171 } |
345 | 1172 |
346 | 1173 |
347 void Intrinsifier::String_getLength(Assembler* assembler) { | 1174 void Intrinsifier::String_getLength(Assembler* assembler) { |
348 return; | 1175 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1176 __ ldr(R0, FieldAddress(R0, String::length_offset())); | |
1177 __ ret(); | |
349 } | 1178 } |
350 | 1179 |
351 | 1180 |
352 void Intrinsifier::String_codeUnitAt(Assembler* assembler) { | 1181 void Intrinsifier::String_codeUnitAt(Assembler* assembler) { |
353 return; | 1182 Label fall_through, try_two_byte_string; |
1183 | |
1184 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. | |
1185 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. | |
1186 __ tsti(R1, kSmiTagMask); | |
1187 __ b(&fall_through, NE); // Index is not a Smi. | |
1188 // Range check. | |
1189 __ ldr(R2, FieldAddress(R0, String::length_offset())); | |
1190 __ cmp(R1, Operand(R2)); | |
1191 __ b(&fall_through, CS); // Runtime throws exception. | |
1192 __ CompareClassId(R0, kOneByteStringCid, kNoPP); | |
1193 __ b(&try_two_byte_string, NE); | |
1194 __ SmiUntag(R1); | |
1195 __ AddImmediate(R0, R0, OneByteString::data_offset() - kHeapObjectTag, kNoPP); | |
1196 __ ldr(R0, Address(R0, R1), kUnsignedByte); | |
1197 __ SmiTag(R0); | |
1198 __ ret(); | |
1199 | |
1200 __ Bind(&try_two_byte_string); | |
1201 __ CompareClassId(R0, kTwoByteStringCid, kNoPP); | |
1202 __ b(&fall_through, NE); | |
1203 ASSERT(kSmiTagShift == 1); | |
1204 __ AddImmediate(R0, R0, TwoByteString::data_offset() - kHeapObjectTag, kNoPP); | |
1205 __ ldr(R0, Address(R0, R1), kUnsignedHalfword); | |
1206 __ SmiTag(R0); | |
1207 __ ret(); | |
1208 | |
1209 __ Bind(&fall_through); | |
354 } | 1210 } |
355 | 1211 |
356 | 1212 |
357 void Intrinsifier::String_getIsEmpty(Assembler* assembler) { | 1213 void Intrinsifier::String_getIsEmpty(Assembler* assembler) { |
358 return; | 1214 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1215 __ ldr(R0, FieldAddress(R0, String::length_offset())); | |
1216 __ cmp(R0, Operand(Smi::RawValue(0))); | |
1217 __ LoadObject(R0, Bool::True(), PP); | |
1218 __ LoadObject(TMP, Bool::False(), PP); | |
1219 __ csel(R0, TMP, R0, NE); | |
1220 __ ret(); | |
359 } | 1221 } |
360 | 1222 |
361 | 1223 |
362 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { | 1224 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { |
363 return; | 1225 Label compute_hash; |
1226 __ ldr(R1, Address(SP, 0 * kWordSize)); // OneByteString object. | |
1227 __ ldr(R0, FieldAddress(R1, String::hash_offset())); | |
1228 __ CompareRegisters(R0, ZR); | |
1229 __ b(&compute_hash, EQ); | |
1230 __ ret(); // Return if already computed. | |
1231 | |
1232 __ Bind(&compute_hash); | |
1233 __ ldr(R2, FieldAddress(R1, String::length_offset())); | |
1234 __ SmiUntag(R2); | |
1235 | |
1236 Label done; | |
1237 // If the string is empty, set the hash to 1, and return. | |
1238 __ CompareRegisters(R2, ZR); | |
1239 __ b(&done, EQ); | |
1240 | |
1241 __ mov(R3, ZR); | |
1242 __ AddImmediate(R6, R1, OneByteString::data_offset() - kHeapObjectTag, kNoPP); | |
1243 // R1: Instance of OneByteString. | |
1244 // R2: String length, untagged integer. | |
1245 // R3: Loop counter, untagged integer. | |
1246 // R6: String data. | |
1247 // R0: Hash code, untagged integer. | |
1248 | |
1249 Label loop; | |
1250 // Add to hash code: (hash_ is uint32) | |
1251 // hash_ += ch; | |
1252 // hash_ += hash_ << 10; | |
1253 // hash_ ^= hash_ >> 6; | |
1254 // Get one characters (ch). | |
1255 __ Bind(&loop); | |
1256 __ ldr(R7, Address(R6, R3), kUnsignedByte); | |
1257 // R7: ch. | |
1258 __ add(R3, R3, Operand(1)); | |
1259 __ addw(R0, R0, Operand(R7)); | |
1260 __ addw(R0, R0, Operand(R0, LSL, 10)); | |
1261 __ eorw(R0, R0, Operand(R0, LSR, 6)); | |
1262 __ cmp(R3, Operand(R2)); | |
1263 __ b(&loop, NE); | |
1264 | |
1265 // Finalize. | |
1266 // hash_ += hash_ << 3; | |
1267 // hash_ ^= hash_ >> 11; | |
1268 // hash_ += hash_ << 15; | |
1269 __ addw(R0, R0, Operand(R0, LSL, 3)); | |
1270 __ eorw(R0, R0, Operand(R0, LSR, 11)); | |
1271 __ addw(R0, R0, Operand(R0, LSL, 15)); | |
1272 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); | |
1273 __ AndImmediate( | |
1274 R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1, kNoPP); | |
1275 __ CompareRegisters(R0, ZR); | |
1276 // return hash_ == 0 ? 1 : hash_; | |
1277 __ Bind(&done); | |
1278 __ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1). | |
regis 2014/05/19 20:14:02: nice! | |
1279 __ SmiTag(R0); | |
1280 __ str(R0, FieldAddress(R1, String::hash_offset())); | |
1281 __ ret(); | |
1282 } | |
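
The loop and finalization implement a Jenkins-style one-at-a-time hash over the string's bytes, masked to String::kHashBits and mapped so that 0 becomes 1 (the csinc). A C++ sketch (hash_bits stands in for String::kHashBits):

    #include <cstddef>
    #include <cstdint>

    uint32_t OneByteStringHash(const uint8_t* data, size_t len, int hash_bits) {
      uint32_t hash = 0;
      for (size_t i = 0; i < len; i++) {
        hash += data[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (uint32_t{1} << hash_bits) - 1;
      return (hash == 0) ? 1 : hash;  // csinc maps a zero hash to 1.
    }
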
1283 | |
1284 | |
1285 // Allocates one-byte string of length 'end - start'. The content is not | |
1286 // initialized. | |
1287 // 'length-reg' (R2) contains tagged length. | |
1288 // Returns new string as tagged pointer in R0. | |
1289 static void TryAllocateOnebyteString(Assembler* assembler, | |
1290 Label* ok, | |
1291 Label* failure) { | |
1292 const Register length_reg = R2; | |
1293 Label fail; | |
1294 | |
1295 __ mov(R6, length_reg); // Save the length register. | |
1296 __ SmiUntag(length_reg); | |
1297 const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1; | |
1298 __ AddImmediate(length_reg, length_reg, fixed_size, kNoPP); | |
1299 __ andi(length_reg, length_reg, ~(kObjectAlignment - 1)); | |
1300 | |
1301 Isolate* isolate = Isolate::Current(); | |
1302 Heap* heap = isolate->heap(); | |
1303 | |
1304 __ LoadImmediate(R3, heap->TopAddress(), kNoPP); | |
1305 __ ldr(R0, Address(R3)); | |
1306 | |
1307 // length_reg: allocation size. | |
1308 __ adds(R1, R0, Operand(length_reg)); | |
1309 __ b(&fail, VS); // Fail on overflow. | |
1310 | |
1311 // Check if the allocation fits into the remaining space. | |
1312 // R0: potential new object start. | |
1313 // R1: potential next object start. | |
1314 // R2: allocation size. | |
1315 // R3: heap->TopAddress(). | 
1316 __ LoadImmediate(R7, heap->EndAddress(), kNoPP); | |
1317 __ ldr(R7, Address(R7)); | |
1318 __ cmp(R1, Operand(R7)); | |
1319 __ b(&fail, CS); | |
1320 | |
1321 // Successfully allocated the object(s), now update top to point to | |
1322 // next object start and initialize the object. | |
1323 __ str(R1, Address(R3)); | |
1324 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); | |
1325 __ UpdateAllocationStatsWithSize(kOneByteStringCid, R2, R3, kNoPP); | |
1326 | |
1327 // Initialize the tags. | |
1328 // R0: new object start as a tagged pointer. | |
1329 // R1: new object end address. | |
1330 // R2: allocation size. | |
1331 { | |
1332 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | |
1333 const Class& cls = | |
1334 Class::Handle(isolate->object_store()->one_byte_string_class()); | |
1335 | |
1336 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); | |
1337 __ Lsl(R2, R2, shift); | |
1338 __ csel(R2, R2, ZR, LS); | |
1339 | |
1340 // Get the class index and insert it into the tags. | |
1341 // R2: size and bit tags. | |
1342 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cls.id()), kNoPP); | |
1343 __ orr(R2, R2, Operand(TMP)); | |
1344 __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags. | |
1345 } | |
1346 | |
1347 // Set the length field using the saved length (R6). | |
1348 __ StoreIntoObjectNoBarrier(R0, | |
1349 FieldAddress(R0, String::length_offset()), | |
1350 R6); | |
1351 // Clear hash. | |
1352 __ mov(TMP, ZR); | |
1353 __ str(TMP, FieldAddress(R0, String::hash_offset())); | |
1354 __ b(ok); | |
1355 | |
1356 __ Bind(&fail); | |
1357 __ b(failure); | |
364 } | 1358 } |
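A note on the fast path above: it rounds the requested size up to the object alignment, bump-allocates against the heap's top/end words, and bails to the runtime on overflow or exhaustion. A hedged C++ sketch of that logic; the constants and names here are illustrative assumptions, not the VM's actual values:

#include <cstdint>

typedef uintptr_t uword;                      // Mirrors the VM's uword.
static const intptr_t kObjectAlignment = 16;  // Assumed for illustration.
static const uword kHeapObjectTag = 1;        // Assumed for illustration.

// fixed_size plays the role of sizeof(RawString) + kObjectAlignment - 1.
// Returns a tagged pointer on success, 0 when the caller should fall
// through to the runtime (matching the 'fail' label above).
uword TryAllocateBytes(uword* top, uword end, intptr_t payload_size,
                       intptr_t fixed_size) {
  intptr_t size = (payload_size + fixed_size) & ~(kObjectAlignment - 1);
  uword start = *top;
  uword next = start + size;
  if (next < start) return 0;  // Overflow: the 'adds ... b(&fail, VS)' check.
  if (next >= end) return 0;   // Does not fit: the 'cmp ... b(&fail, CS)' check.
  *top = next;                 // Publish the new allocation top.
  return start + kHeapObjectTag;
}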
365 | 1359 |
366 | 1360 |
367 // Arg0: OneByteString (receiver). | 1361 // Arg0: OneByteString (receiver). |
368 // Arg1: Start index as Smi. | 1362 // Arg1: Start index as Smi. |
369 // Arg2: End index as Smi. | 1363 // Arg2: End index as Smi. |
370 // The indexes must be valid. | 1364 // The indexes must be valid. |
371 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { | 1365 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { |
372 return; | 1366 const intptr_t kStringOffset = 2 * kWordSize; |
1367 const intptr_t kStartIndexOffset = 1 * kWordSize; | |
1368 const intptr_t kEndIndexOffset = 0 * kWordSize; | |
1369 Label fall_through, ok; | |
1370 | |
1371 __ ldr(R2, Address(SP, kEndIndexOffset)); | |
1372 __ ldr(TMP, Address(SP, kStartIndexOffset)); | |
1373 __ orr(R3, R2, Operand(TMP)); | |
1374 __ tsti(R3, kSmiTagMask); | |
1375 __ b(&fall_through, NE); // 'start', 'end' not Smi. | |
1376 | |
1377 __ sub(R2, R2, Operand(TMP)); | |
1378 TryAllocateOnebyteString(assembler, &ok, &fall_through); | |
1379 __ Bind(&ok); | |
1380 // R0: new string as tagged pointer. | |
1381 // Copy string. | |
1382 __ ldr(R3, Address(SP, kStringOffset)); | |
1383 __ ldr(R1, Address(SP, kStartIndexOffset)); | |
1384 __ SmiUntag(R1); | |
1385 __ add(R3, R3, Operand(R1)); | |
1386 // Calculate the start address and untag (-1 removes the heap object tag). | |
1387 __ AddImmediate(R3, R3, OneByteString::data_offset() - 1, kNoPP); | |
1388 | |
1389 // R3: Start address to copy from (untagged). | |
1390 // R1: Untagged start index. | |
1391 __ ldr(R2, Address(SP, kEndIndexOffset)); | |
1392 __ SmiUntag(R2); | |
1393 __ sub(R2, R2, Operand(R1)); | |
1394 | |
1395 // R3: Start address to copy from (untagged). | |
1396 // R2: Untagged number of bytes to copy. | |
1397 // R0: Tagged result string. | |
1398 // R6: Pointer into R3. | |
1399 // R7: Pointer into R0. | |
1400 // R1: Scratch register. | |
1401 Label loop, done; | |
1402 __ cmp(R2, Operand(0)); | |
1403 __ b(&done, LE); | |
1404 __ mov(R6, R3); | |
1405 __ mov(R7, R0); | |
1406 __ Bind(&loop); | |
1407 __ ldr(R1, Address(R6), kUnsignedByte); | |
1408 __ AddImmediate(R6, R6, 1, kNoPP); | |
1409 __ sub(R2, R2, Operand(1)); | |
1410 __ cmp(R2, Operand(0)); | |
1411 __ str(R1, FieldAddress(R7, OneByteString::data_offset()), kUnsignedByte); | |
1412 __ AddImmediate(R7, R7, 1, kNoPP); | |
1413 __ b(&loop, GT); | |
1414 | |
1415 __ Bind(&done); | |
1416 __ ret(); | |
1417 __ Bind(&fall_through); | |
373 } | 1418 } |
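For reference, once the allocation succeeds the loop above is a plain byte copy of [start, end) from the source string's data into the fresh string, with the bounds assumed valid per the contract stated above. A C++ sketch (names are illustrative):

#include <cstdint>

// dst points at the new string's data; src at the source string's data.
static void CopyOneByteRange(uint8_t* dst, const uint8_t* src,
                             intptr_t start, intptr_t end) {
  for (intptr_t i = start; i < end; i++) {
    *dst++ = src[i];
  }
}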
374 | 1419 |
375 | 1420 |
376 void Intrinsifier::OneByteString_setAt(Assembler* assembler) { | 1421 void Intrinsifier::OneByteString_setAt(Assembler* assembler) { |
377 return; | 1422 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
1423 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | |
1424 __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString. | |
1425 __ SmiUntag(R1); | |
1426 __ SmiUntag(R2); | |
1427 __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag, kNoPP); | |
1428 __ str(R2, Address(R3, R1), kUnsignedByte); | |
1429 __ ret(); | |
378 } | 1430 } |
379 | 1431 |
380 | 1432 |
381 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { | 1433 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { |
382 return; | 1434 Label fall_through, ok; |
1435 | |
1436 __ ldr(R2, Address(SP, 0 * kWordSize)); // Length. | |
1437 TryAllocateOnebyteString(assembler, &ok, &fall_through); | |
1438 | |
1439 __ Bind(&ok); | |
1440 __ ret(); | |
1441 | |
1442 __ Bind(&fall_through); | |
383 } | 1443 } |
384 | 1444 |
385 | 1445 |
386 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). | 1446 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). |
387 void StringEquality(Assembler* assembler, intptr_t string_cid) { | 1447 void StringEquality(Assembler* assembler, intptr_t string_cid) { |
388 return; | 1448 Label fall_through, is_true, is_false, loop; |
1449 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. | |
1450 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. | |
1451 | |
1452 // Are they identical? | |
1453 __ cmp(R0, Operand(R1)); | |
1454 __ b(&is_true, EQ); | |
1455 | |
1456 // Is other a string with the same cid? | |
1457 __ tsti(R1, kSmiTagMask); | |
1458 __ b(&fall_through, EQ); | |
1459 __ CompareClassId(R1, string_cid, kNoPP); | |
1460 __ b(&fall_through, NE); | |
1461 | |
1462 // Have same length? | |
1463 __ ldr(R2, FieldAddress(R0, String::length_offset())); | |
1464 __ ldr(R3, FieldAddress(R1, String::length_offset())); | |
1465 __ cmp(R2, Operand(R3)); | |
1466 __ b(&is_false, NE); | |
1467 | |
1468 // Check contents, no fall-through possible. | |
1469 // TODO(zra): try out other sequences. | |
1470 ASSERT((string_cid == kOneByteStringCid) || | |
1471 (string_cid == kTwoByteStringCid)); | |
1472 const intptr_t offset = (string_cid == kOneByteStringCid) ? | |
1473 OneByteString::data_offset() : TwoByteString::data_offset(); | |
1474 __ AddImmediate(R0, R0, offset - kHeapObjectTag, kNoPP); | |
1475 __ AddImmediate(R1, R1, offset - kHeapObjectTag, kNoPP); | |
1476 __ SmiUntag(R2); | |
1477 __ Bind(&loop); | |
1478 __ AddImmediate(R2, R2, -1, kNoPP); | |
1479 __ CompareRegisters(R2, ZR); | |
1480 __ b(&is_true, LT); | |
1481 if (string_cid == kOneByteStringCid) { | |
1482 __ ldr(R3, Address(R0), kUnsignedByte); | |
1483 __ ldr(R4, Address(R1), kUnsignedByte); | |
1484 __ AddImmediate(R0, R0, 1, kNoPP); | |
1485 __ AddImmediate(R1, R1, 1, kNoPP); | |
1486 } else if (string_cid == kTwoByteStringCid) { | |
1487 __ ldr(R3, Address(R0), kUnsignedHalfword); | |
1488 __ ldr(R4, Address(R1), kUnsignedHalfword); | |
1489 __ AddImmediate(R0, R0, 2, kNoPP); | |
1490 __ AddImmediate(R1, R1, 2, kNoPP); | |
1491 } else { | |
1492 UNIMPLEMENTED(); | |
1493 } | |
1494 __ cmp(R3, Operand(R4)); | |
1495 __ b(&is_false, NE); | |
1496 __ b(&loop); | |
1497 | |
1498 __ Bind(&is_true); | |
1499 __ LoadObject(R0, Bool::True(), PP); | |
1500 __ ret(); | |
1501 | |
1502 __ Bind(&is_false); | |
1503 __ LoadObject(R0, Bool::False(), PP); | |
1504 __ ret(); | |
1505 | |
1506 __ Bind(&fall_through); | |
389 } | 1507 } |
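The comparison above short-circuits on identity and then on length before the character loop; the Smi and wrong-class cases fall through to Dart code and are not modeled here. A C++ sketch of the same decision sequence, where CharT would be uint8_t for one-byte strings and uint16_t for two-byte strings (this template is illustrative, not VM code):

#include <cstdint>

template <typename CharT>
bool StringEquals(const CharT* a, intptr_t a_len,
                  const CharT* b, intptr_t b_len) {
  if (a == b) return true;           // Identical objects.
  if (a_len != b_len) return false;  // Different lengths.
  for (intptr_t i = 0; i < a_len; i++) {
    if (a[i] != b[i]) return false;  // First mismatching character.
  }
  return true;
}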
390 | 1508 |
391 | 1509 |
392 void Intrinsifier::OneByteString_equality(Assembler* assembler) { | 1510 void Intrinsifier::OneByteString_equality(Assembler* assembler) { |
393 return; | 1511 StringEquality(assembler, kOneByteStringCid); |
394 } | 1512 } |
395 | 1513 |
396 | 1514 |
397 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { | 1515 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { |
398 return; | 1516 StringEquality(assembler, kTwoByteStringCid); |
399 } | 1517 } |
400 | 1518 |
401 | 1519 |
1520 // On stack: user tag (+0). | |
402 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | 1521 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { |
403 return; | 1522 // R1: Isolate. |
1523 Isolate* isolate = Isolate::Current(); | |
1524 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate), kNoPP); | |
1525 // R0: Current user tag. | |
1526 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); | |
1527 // R2: UserTag. | |
1528 __ ldr(R2, Address(SP, + 0 * kWordSize)); | |
1529 // Set Isolate::current_tag_. | |
1530 __ str(R2, Address(R1, Isolate::current_tag_offset())); | |
1531 // R2: UserTag's tag. | |
1532 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset())); | |
1533 // Set Isolate::user_tag_. | |
1534 __ str(R2, Address(R1, Isolate::user_tag_offset())); | |
1535 __ ret(); | |
1536 } | |
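In plain terms, makeCurrent returns the old current tag, then stores the new tag into Isolate::current_tag_ and its integer tag into Isolate::user_tag_. A sketch with purely illustrative struct shapes; the real Isolate/UserTag layouts live in the VM headers:

#include <cstdint>

struct UserTagShape { uintptr_t tag; };  // Illustrative shape only.
struct IsolateShape {                    // Illustrative shape only.
  UserTagShape* current_tag;
  uintptr_t user_tag;
};

UserTagShape* MakeCurrent(IsolateShape* isolate, UserTagShape* new_tag) {
  UserTagShape* old_tag = isolate->current_tag;  // R0: returned to the caller.
  isolate->current_tag = new_tag;                // str to current_tag_offset().
  isolate->user_tag = new_tag->tag;              // str to user_tag_offset().
  return old_tag;
}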
1537 | |
1538 | |
1539 void Intrinsifier::UserTag_defaultTag(Assembler* assembler) { | |
1540 Isolate* isolate = Isolate::Current(); | |
1541 // Set return value to default tag address. | |
1542 __ LoadImmediate(R0, | |
1543 reinterpret_cast<uword>(isolate->object_store()) + | |
1544 ObjectStore::default_tag_offset(), kNoPP); | |
1545 __ ldr(R0, Address(R0)); | |
1546 __ ret(); | |
404 } | 1547 } |
405 | 1548 |
406 | 1549 |
407 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { | 1550 // R1: Isolate address. |
408 return; | 1551 // R1: Default tag address. |
409 } | 1552 Isolate* isolate = Isolate::Current(); |
410 | 1553 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate), kNoPP); |
411 | 1554 // Set return value to Isolate::current_tag_. |
412 void Intrinsifier::UserTag_defaultTag(Assembler* assembler) { | 1555 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); |
413 return; | 1556 __ ret(); |
414 } | 1557 } |
415 | 1558 |
416 } // namespace dart | 1559 } // namespace dart |
417 | 1560 |
418 #endif // defined TARGET_ARCH_ARM64 | 1561 #endif // defined TARGET_ARCH_ARM64 |