Chromium Code Reviews
Unified Diff: runtime/vm/intrinsifier_arm64.cc

Issue 2481873005: clang-format runtime/vm (Closed)
Patch Set: Merge (created 4 years, 1 month ago)
 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM64.
 #if defined(TARGET_ARCH_ARM64)

 #include "vm/intrinsifier.h"

 #include "vm/assembler.h"
(...skipping 11 matching lines...)
 // R4: Arguments descriptor
 // LR: Return address
 // The R4 register can be destroyed only if there is no slow-path, i.e.
 // if the intrinsified method always executes a return.
 // The FP register should not be modified, because it is used by the profiler.
 // The PP and THR registers (see constants_arm64.h) must be preserved.

 #define __ assembler->


-intptr_t Intrinsifier::ParameterSlotFromSp() { return -1; }
+intptr_t Intrinsifier::ParameterSlotFromSp() {
+  return -1;
+}


 static bool IsABIPreservedRegister(Register reg) {
   return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
 }


 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
   ASSERT(IsABIPreservedRegister(CODE_REG));
   ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG));
(...skipping 33 matching lines...)

   // Range check.
   __ ldr(R3, FieldAddress(R0, Array::length_offset()));  // Array length.
   __ cmp(R1, Operand(R3));
   // Runtime throws exception.
   __ b(&fall_through, CS);

   // Note that R1 is Smi, i.e, times 2.
   ASSERT(kSmiTagShift == 1);
   __ ldr(R2, Address(SP, 0 * kWordSize));  // Value.
   __ add(R1, R0, Operand(R1, LSL, 2));  // R1 is Smi.
-  __ StoreIntoObject(R0,
-                     FieldAddress(R1, Array::data_offset()),
-                     R2);
+  __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2);
   // Caller is responsible for preserving the value if necessary.
   __ ret();
   __ Bind(&fall_through);
 }


 // Allocate a GrowableObjectArray using the backing array specified.
 // On stack: type argument (+1), data (+0).
 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) {
   // The newly allocated object is returned in R0.
   const intptr_t kTypeArgumentsOffset = 1 * kWordSize;
   const intptr_t kArrayOffset = 0 * kWordSize;
   Label fall_through;

   // Try allocating in new space.
   const Class& cls = Class::Handle(
       Isolate::Current()->object_store()->growable_object_array_class());
   __ TryAllocate(cls, &fall_through, R0, R1);

   // Store backing array object in growable array object.
   __ ldr(R1, Address(SP, kArrayOffset));  // Data argument.
   // R0 is new, no barrier needed.
   __ StoreIntoObjectNoBarrier(
-      R0,
-      FieldAddress(R0, GrowableObjectArray::data_offset()),
-      R1);
+      R0, FieldAddress(R0, GrowableObjectArray::data_offset()), R1);

   // R0: new growable array object start as a tagged pointer.
   // Store the type argument field in the growable array object.
   __ ldr(R1, Address(SP, kTypeArgumentsOffset));  // Type argument.
   __ StoreIntoObjectNoBarrier(
-      R0,
-      FieldAddress(R0, GrowableObjectArray::type_arguments_offset()),
-      R1);
+      R0, FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), R1);

   // Set the length field in the growable array object to 0.
   __ LoadImmediate(R1, 0);
   __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset()));
   __ ret();  // Returns the newly allocated object in R0.

   __ Bind(&fall_through);
 }


(...skipping 17 matching lines...)
   // Compare length with capacity.
   __ cmp(R1, Operand(R3));
   __ b(&fall_through, EQ);  // Must grow data.
   const int64_t value_one = reinterpret_cast<int64_t>(Smi::New(1));
   // len = len + 1;
   __ add(R3, R1, Operand(value_one));
   __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset()));
   __ ldr(R0, Address(SP, 0 * kWordSize));  // Value.
   ASSERT(kSmiTagShift == 1);
   __ add(R1, R2, Operand(R1, LSL, 2));
-  __ StoreIntoObject(R2,
-                     FieldAddress(R1, Array::data_offset()),
-                     R0);
+  __ StoreIntoObject(R2, FieldAddress(R1, Array::data_offset()), R0);
   __ LoadObject(R0, Object::null_object());
   __ ret();
   __ Bind(&fall_through);
 }


 static int GetScaleFactor(intptr_t size) {
   switch (size) {
-    case 1: return 0;
-    case 2: return 1;
-    case 4: return 2;
-    case 8: return 3;
-    case 16: return 4;
+    case 1:
+      return 0;
+    case 2:
+      return 1;
+    case 4:
+      return 2;
+    case 8:
+      return 3;
+    case 16:
+      return 4;
   }
   UNREACHABLE();
   return -1;
 }
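For context, the shift returned here is applied as length << shift to turn an element count into a payload size in bytes; a tiny standalone check (the switch duplicated only for illustration):

#include <cassert>

static int ScaleFactor(int size) {  // Mirrors GetScaleFactor above.
  switch (size) {
    case 1: return 0;
    case 2: return 1;
    case 4: return 2;
    case 8: return 3;
    case 16: return 4;
  }
  return -1;
}

int main() {
  assert((16 << ScaleFactor(8)) == 128);  // 16 Float64 elements = 128 bytes.
  return 0;
}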


 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \
   Label fall_through; \
   const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \
   NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &fall_through)); \
   __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
   /* Check that length is a positive Smi. */ \
   /* R2: requested array length argument. */ \
   __ tsti(R2, Immediate(kSmiTagMask)); \
   __ b(&fall_through, NE); \
   __ CompareRegisters(R2, ZR); \
   __ b(&fall_through, LT); \
   __ SmiUntag(R2); \
   /* Check for maximum allowed length. */ \
   /* R2: untagged array length. */ \
   __ CompareImmediate(R2, max_len); \
   __ b(&fall_through, GT); \
   __ LslImmediate(R2, R2, scale_shift); \
   const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \
   __ AddImmediate(R2, R2, fixed_size); \
   __ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); \
   Heap::Space space = Heap::kNew; \
   __ ldr(R3, Address(THR, Thread::heap_offset())); \
   __ ldr(R0, Address(R3, Heap::TopOffset(space))); \
   \
   /* R2: allocation size. */ \
   __ adds(R1, R0, Operand(R2)); \
   __ b(&fall_through, CS); /* Fail on unsigned overflow. */ \
   \
   /* Check if the allocation fits into the remaining space. */ \
   /* R0: potential new object start. */ \
   /* R1: potential next object start. */ \
   /* R2: allocation size. */ \
   /* R3: heap. */ \
   __ ldr(R6, Address(R3, Heap::EndOffset(space))); \
   __ cmp(R1, Operand(R6)); \
   __ b(&fall_through, CS); \
   \
   /* Successfully allocated the object(s), now update top to point to */ \
   /* next object start and initialize the object. */ \
   __ str(R1, Address(R3, Heap::TopOffset(space))); \
   __ AddImmediate(R0, R0, kHeapObjectTag); \
   NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space)); \
   /* Initialize the tags. */ \
   /* R0: new object start as a tagged pointer. */ \
   /* R1: new object end address. */ \
   /* R2: allocation size. */ \
   { \
     __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \
     __ LslImmediate(R2, R2, RawObject::kSizeTagPos - kObjectAlignmentLog2); \
     __ csel(R2, ZR, R2, HI); \
   \
     /* Get the class index and insert it into the tags. */ \
     __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \
     __ orr(R2, R2, Operand(TMP)); \
     __ str(R2, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \
   } \
   /* Set the length field. */ \
   /* R0: new object start as a tagged pointer. */ \
   /* R1: new object end address. */ \
   __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
-  __ StoreIntoObjectNoBarrier(R0, \
-                              FieldAddress(R0, type_name::length_offset()), \
-                              R2); \
+  __ StoreIntoObjectNoBarrier( \
+      R0, FieldAddress(R0, type_name::length_offset()), R2); \
   /* Initialize all array elements to 0. */ \
   /* R0: new object start as a tagged pointer. */ \
   /* R1: new object end address. */ \
   /* R2: iterator which initially points to the start of the variable */ \
   /* R3: scratch register. */ \
   /* data area to be initialized. */ \
   __ mov(R3, ZR); \
   __ AddImmediate(R2, R0, sizeof(Raw##type_name) - 1); \
   Label init_loop, done; \
   __ Bind(&init_loop); \
   __ cmp(R2, Operand(R1)); \
   __ b(&done, CS); \
   __ str(R3, Address(R2, 0)); \
   __ add(R2, R2, Operand(kWordSize)); \
   __ b(&init_loop); \
   __ Bind(&done); \
   \
   __ ret(); \
-  __ Bind(&fall_through); \
+  __ Bind(&fall_through);


 #define TYPED_DATA_ALLOCATOR(clazz) \
   void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \
     intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \
     intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \
     int shift = GetScaleFactor(size); \
     TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \
   }
 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
 #undef TYPED_DATA_ALLOCATOR
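The macro's fast path is a classic bump-pointer allocation: round the size up to the object alignment, advance the heap's top pointer, and bail out to the runtime if the region overflows. A minimal sketch under hypothetical names (Heap and TryAllocateRaw here are illustrations, not the VM API):

#include <cstdint>

struct Heap {
  uintptr_t top;  // Next free address.
  uintptr_t end;  // End of the new-space region.
};

const uintptr_t kObjectAlignment = 16;

bool TryAllocateRaw(Heap* heap, uintptr_t size, uintptr_t* result) {
  // Matches AddImmediate(alignment - 1) then andi(~(alignment - 1)) above.
  size = (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
  uintptr_t start = heap->top;
  uintptr_t next = start + size;
  if (next < start || next > heap->end) return false;  // Overflow or full.
  heap->top = next;  // The str to Heap::TopOffset(space).
  *result = start;   // Untagged; the real code then adds kHeapObjectTag.
  return true;
}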


 // Loads args from stack into R0 and R1
 // Tests if they are smis, jumps to label not_smi if not.
 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
-  __ ldr(R0, Address(SP, + 0 * kWordSize));
-  __ ldr(R1, Address(SP, + 1 * kWordSize));
+  __ ldr(R0, Address(SP, +0 * kWordSize));
+  __ ldr(R1, Address(SP, +1 * kWordSize));
   __ orr(TMP, R0, Operand(R1));
   __ tsti(TMP, Immediate(kSmiTagMask));
   __ b(not_smi, NE);
 }


 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
   Label fall_through;
   TestBothArgumentsSmis(assembler, &fall_through);  // Checks two smis.
   __ adds(R0, R0, Operand(R1));  // Adds.
   __ b(&fall_through, VS);  // Fall-through on overflow.
   __ ret();
   __ Bind(&fall_through);
 }
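Both helpers above lean on the Smi tag being the zero low bit: OR-ing the two operands and testing bit 0 validates both tags at once, and two tagged Smis can be added directly because (a << 1) + (b << 1) == (a + b) << 1. A sketch using the GCC/Clang overflow builtin:

#include <cstdint>

bool BothSmis(int64_t a, int64_t b) {
  return ((a | b) & 1) == 0;  // kSmiTagMask == 1, kSmiTag == 0.
}

// Mirrors the adds/b(VS) pair: succeed only when the sum stays in range.
bool SmiAdd(int64_t a, int64_t b, int64_t* result) {
  if (!BothSmis(a, b)) return false;
  return !__builtin_add_overflow(a, b, result);
}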


 void Intrinsifier::Integer_add(Assembler* assembler) {
   Integer_addFromInteger(assembler);
 }


 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
   Label fall_through;
   TestBothArgumentsSmis(assembler, &fall_through);
   __ subs(R0, R0, Operand(R1));  // Subtract.
   __ b(&fall_through, VS);  // Fall-through on overflow.
   __ ret();
   __ Bind(&fall_through);
 }


 void Intrinsifier::Integer_sub(Assembler* assembler) {
   Label fall_through;
   TestBothArgumentsSmis(assembler, &fall_through);
   __ subs(R0, R1, Operand(R0));  // Subtract.
   __ b(&fall_through, VS);  // Fall-through on overflow.
   __ ret();
   __ Bind(&fall_through);
 }


 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
   Label fall_through;

   TestBothArgumentsSmis(assembler, &fall_through);  // checks two smis
   __ SmiUntag(R0);  // Untags R6. We only want result shifted by one.
(...skipping 68 matching lines...)
 // if (res < 0) {
 //   if (right < 0) {
 //     res = res - right;
 //   } else {
 //     res = res + right;
 //   }
 // }
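Restating the pseudo code as runnable C++: the hardware (like C's %) produces a remainder whose sign follows the dividend, so a negative remainder is shifted into [0, |right|) by adding or subtracting right:

#include <cassert>
#include <cstdint>

int64_t Modulo(int64_t left, int64_t right) {
  int64_t res = left % right;  // Remainder; sign follows 'left'.
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;
  }
  return res;
}

int main() {
  assert(Modulo(-5, 3) == 1);
  assert(Modulo(-5, -3) == 1);
  assert(Modulo(5, 3) == 2);
  return 0;
}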
 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) {
   // Check to see if we have integer division
   Label neg_remainder, fall_through;
-  __ ldr(R1, Address(SP, + 0 * kWordSize));
-  __ ldr(R0, Address(SP, + 1 * kWordSize));
+  __ ldr(R1, Address(SP, +0 * kWordSize));
+  __ ldr(R0, Address(SP, +1 * kWordSize));
   __ orr(TMP, R0, Operand(R1));
   __ tsti(TMP, Immediate(kSmiTagMask));
   __ b(&fall_through, NE);
   // R1: Tagged left (dividend).
   // R0: Tagged right (divisor).
   // Check if modulo by zero -> exception thrown in main function.
   __ CompareRegisters(R0, ZR);
   __ b(&fall_through, EQ);
   EmitRemainderOperation(assembler);
   // Untagged right in R0. Untagged remainder result in R1.
(...skipping 27 matching lines...)
   __ SmiUntag(R0);
   __ SmiUntag(R1);

   __ sdiv(R0, R1, R0);

   // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
   // cannot tag the result.
   __ CompareImmediate(R0, 0x4000000000000000);
   __ b(&fall_through, EQ);
   __ SmiTag(R0);  // Not equal. Okay to tag and return.
   __ ret();  // Return.
   __ Bind(&fall_through);
 }
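Why 0x4000000000000000 is the one value checked: a 64-bit Smi holds [-2^62, 2^62 - 1], and MIN_SMI / -1 == 2^62 is the single quotient of two Smis that does not fit back into a Smi, so tagging it would overflow. A short demonstration:

#include <cstdint>

int main() {
  const int64_t kMinSmi = -(INT64_C(1) << 62);
  const int64_t kMaxSmi = (INT64_C(1) << 62) - 1;
  int64_t quotient = kMinSmi / -1;    // 2^62 == 0x4000000000000000.
  return quotient > kMaxSmi ? 0 : 1;  // Exceeds the Smi range.
}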


 void Intrinsifier::Integer_negate(Assembler* assembler) {
   Label fall_through;
-  __ ldr(R0, Address(SP, + 0 * kWordSize));  // Grab first argument.
+  __ ldr(R0, Address(SP, +0 * kWordSize));  // Grab first argument.
   __ tsti(R0, Immediate(kSmiTagMask));  // Test for Smi.
   __ b(&fall_through, NE);
   __ negs(R0, R0);
   __ b(&fall_through, VS);
   __ ret();
   __ Bind(&fall_through);
 }


 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) {
   Label fall_through;
(...skipping 41 matching lines...)
 void Intrinsifier::Integer_shl(Assembler* assembler) {
   ASSERT(kSmiTagShift == 1);
   ASSERT(kSmiTag == 0);
   const Register right = R0;
   const Register left = R1;
   const Register temp = R2;
   const Register result = R0;
   Label fall_through;

   TestBothArgumentsSmis(assembler, &fall_through);
-  __ CompareImmediate(
-      right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
+  __ CompareImmediate(right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
   __ b(&fall_through, CS);

   // Left is not a constant.
   // Check if count too large for handling it inlined.
   __ SmiUntag(TMP, right);  // SmiUntag right into TMP.
   // Overflow test (preserve left, right, and TMP);
   __ lslv(temp, left, TMP);
   __ asrv(TMP2, temp, TMP);
   __ CompareRegisters(left, TMP2);
   __ b(&fall_through, NE);  // Overflow.
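The lslv/asrv/cmp triple above is the standard lossless-shift test: a left shift overflows exactly when arithmetically shifting the result back fails to recover the input. In C++ terms:

#include <cstdint>

bool ShlOverflows(int64_t left, int n) {
  // Shift as unsigned to avoid signed-overflow UB, then shift back signed.
  int64_t shifted = static_cast<int64_t>(static_cast<uint64_t>(left) << n);
  return (shifted >> n) != left;
}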
(...skipping 268 matching lines...)
   // R7 = &digits[a_used rounded up to even number].
   __ add(R7, R3, Operand(R4, LSL, 3));

   // R8 = &digits[a_used rounded up to even number].
   __ add(R8, R3, Operand(R2, LSL, 3));

   __ adds(R0, R0, Operand(0));  // carry flag = 0
   Label add_loop;
   __ Bind(&add_loop);
   // Loop (a_used+1)/2 times, a_used > 0.
-  __ ldr(R0, Address(R3, 2*Bigint::kBytesPerDigit, Address::PostIndex));
-  __ ldr(R1, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R0, Address(R3, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R1, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ adcs(R0, R0, R1);
   __ sub(R9, R3, Operand(R7));  // Does not affect carry flag.
-  __ str(R0, Address(R6, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R6, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ cbnz(&add_loop, R9);  // Does not affect carry flag.

   Label last_carry;
   __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
   __ cbz(&last_carry, R9);  // If used - a_used == 0.

   Label carry_loop;
   __ Bind(&carry_loop);
   // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
-  __ ldr(R0, Address(R3, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R0, Address(R3, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ adcs(R0, R0, ZR);
   __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
-  __ str(R0, Address(R6, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R6, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ cbnz(&carry_loop, R9);

   __ Bind(&last_carry);
   Label done;
   __ b(&done, CC);
   __ LoadImmediate(R0, 1);
   __ str(R0, Address(R6, 0));

   __ Bind(&done);
   // Returning Object::null() is not required, since this method is private.
(...skipping 28 matching lines...)
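The absAdd loop above walks the digit arrays two 32-bit digits (one 64-bit word) at a time, chaining the carry through adcs. A sketch of the same algorithm using GCC/Clang's __int128 in place of the carry flag, treating each 64-bit pair as one chunk:

#include <cstdint>

void AbsAdd(const uint64_t* digits, int used,
            const uint64_t* a_digits, int a_used, uint64_t* r_digits) {
  unsigned __int128 carry = 0;
  for (int i = 0; i < a_used; ++i) {
    carry += (unsigned __int128)digits[i] + a_digits[i];
    r_digits[i] = (uint64_t)carry;
    carry >>= 64;
  }
  for (int i = a_used; i < used; ++i) {  // used - a_used remaining chunks.
    carry += digits[i];
    r_digits[i] = (uint64_t)carry;
    carry >>= 64;
  }
  r_digits[used] = (uint64_t)carry;  // Final carry digit (0 or 1).
}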
   // R7 = &digits[a_used rounded up to even number].
   __ add(R7, R3, Operand(R4, LSL, 3));

   // R8 = &digits[a_used rounded up to even number].
   __ add(R8, R3, Operand(R2, LSL, 3));

   __ subs(R0, R0, Operand(0));  // carry flag = 1
   Label sub_loop;
   __ Bind(&sub_loop);
   // Loop (a_used+1)/2 times, a_used > 0.
-  __ ldr(R0, Address(R3, 2*Bigint::kBytesPerDigit, Address::PostIndex));
-  __ ldr(R1, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R0, Address(R3, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R1, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ sbcs(R0, R0, R1);
   __ sub(R9, R3, Operand(R7));  // Does not affect carry flag.
-  __ str(R0, Address(R6, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R6, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ cbnz(&sub_loop, R9);  // Does not affect carry flag.

   Label done;
   __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
   __ cbz(&done, R9);  // If used - a_used == 0.

   Label carry_loop;
   __ Bind(&carry_loop);
   // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
-  __ ldr(R0, Address(R3, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R0, Address(R3, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ sbcs(R0, R0, ZR);
   __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
-  __ str(R0, Address(R6, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R6, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ cbnz(&carry_loop, R9);

   __ Bind(&done);
   // Returning Object::null() is not required, since this method is private.
   __ ret();
 }


 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
   // Pseudo code:
(...skipping 57 matching lines...)
   Label muladd_loop;
   __ Bind(&muladd_loop);
   // x: R3
   // mip: R4
   // ajp: R5
   // c: R1
   // n: R6
   // t: R7:R8 (not live at loop entry)

   // uint64_t mi = *mip++
-  __ ldr(R2, Address(R4, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R2, Address(R4, 2 * Bigint::kBytesPerDigit, Address::PostIndex));

   // uint64_t aj = *ajp
   __ ldr(R0, Address(R5, 0));

   // uint128_t t = x*mi + aj + c
   __ mul(R7, R2, R3);  // R7 = low64(R2*R3).
   __ umulh(R8, R2, R3);  // R8 = high64(R2*R3), t = R8:R7 = x*mi.
   __ adds(R7, R7, Operand(R0));
   __ adc(R8, R8, ZR);  // t += aj.
   __ adds(R0, R7, Operand(R1));  // t += c, R0 = low64(t).
   __ adc(R1, R8, ZR);  // c = R1 = high64(t).

   // *ajp++ = low64(t) = R0
-  __ str(R0, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex));

   // while (--n > 0)
   __ subs(R6, R6, Operand(1));  // --n
   __ b(&muladd_loop, NE);

   __ tst(R1, Operand(R1));
   __ b(&done, EQ);

   // *ajp++ += c
   __ ldr(R0, Address(R5, 0));
   __ adds(R0, R0, Operand(R1));
-  __ str(R0, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&done, CC);

   Label propagate_carry_loop;
   __ Bind(&propagate_carry_loop);
   __ ldr(R0, Address(R5, 0));
   __ adds(R0, R0, Operand(1));
-  __ str(R0, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ b(&propagate_carry_loop, CS);

   __ Bind(&done);
   __ LoadImmediate(R0, Smi::RawValue(2));  // Two digits processed.
   __ ret();
 }
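The mul/umulh pair computes the full 64x64 -> 128-bit product that the loop accumulates; with __int128 (GCC/Clang) the body t = x*mi + aj + c collapses to a few lines:

#include <cstdint>

void MulAddStep(uint64_t x, const uint64_t* mip, uint64_t* ajp, int n,
                uint64_t c) {
  while (n-- > 0) {
    unsigned __int128 t = (unsigned __int128)x * *mip++ + *ajp + c;
    *ajp++ = (uint64_t)t;     // low64(t)
    c = (uint64_t)(t >> 64);  // high64(t)
  }
  while (c != 0) {  // Propagate the final carry; buffer assumed big enough.
    unsigned __int128 t = (unsigned __int128)*ajp + c;
    *ajp++ = (uint64_t)t;
    c = (uint64_t)(t >> 64);
  }
}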


 void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
   // Pseudo code:
(...skipping 23 matching lines...)
   // }

   // R4 = xip = &x_digits[i >> 1]
   // R2 = i as Smi, R3 = x_digits
   __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset));
   __ add(R3, R3, Operand(R2, LSL, 1));
   __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag));

   // R3 = x = *xip++, return if x == 0
   Label x_zero;
-  __ ldr(R3, Address(R4, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R3, Address(R4, 2 * Bigint::kBytesPerDigit, Address::PostIndex));
   __ tst(R3, Operand(R3));
   __ b(&x_zero, EQ);

   // R5 = ajp = &a_digits[i]
   __ ldr(R1, Address(SP, 1 * kWordSize));  // a_digits
   __ add(R1, R1, Operand(R2, LSL, 2));  // j == 2*i, i is Smi.
   __ add(R5, R1, Operand(TypedData::data_offset() - kHeapObjectTag));

   // R6:R1 = t = x*x + *ajp
   __ ldr(R0, Address(R5, 0));
   __ mul(R1, R3, R3);  // R1 = low64(R3*R3).
   __ umulh(R6, R3, R3);  // R6 = high64(R3*R3).
   __ adds(R1, R1, Operand(R0));  // R6:R1 += *ajp.
   __ adc(R6, R6, ZR);  // R6 = low64(c) = high64(t).
   __ mov(R7, ZR);  // R7 = high64(c) = 0.

   // *ajp++ = low64(t) = R1
-  __ str(R1, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R1, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex));

   // int n = (used - i + 1)/2 - 1
   __ ldr(R0, Address(SP, 0 * kWordSize));  // used is Smi
   __ sub(R8, R0, Operand(R2));
   __ add(R8, R8, Operand(2));
   __ movn(R0, Immediate(1), 0);  // R0 = ~1 = -2.
   __ adds(R8, R0, Operand(R8, ASR, 2));  // while (--n >= 0)

   Label loop, done;
   __ b(&done, MI);

   __ Bind(&loop);
   // x: R3
   // xip: R4
   // ajp: R5
   // c: R7:R6
   // t: R2:R1:R0 (not live at loop entry)
   // n: R8

   // uint64_t xi = *xip++
-  __ ldr(R2, Address(R4, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ ldr(R2, Address(R4, 2 * Bigint::kBytesPerDigit, Address::PostIndex));

   // uint192_t t = R2:R1:R0 = 2*x*xi + aj + c
   __ mul(R0, R2, R3);  // R0 = low64(R2*R3) = low64(x*xi).
   __ umulh(R1, R2, R3);  // R1 = high64(R2*R3) = high64(x*xi).
   __ adds(R0, R0, Operand(R0));
   __ adcs(R1, R1, R1);
   __ adc(R2, ZR, ZR);  // R2:R1:R0 = R1:R0 + R1:R0 = 2*x*xi.
   __ adds(R0, R0, Operand(R6));
   __ adcs(R1, R1, R7);
   __ adc(R2, R2, ZR);  // R2:R1:R0 += c.
   __ ldr(R7, Address(R5, 0));  // R7 = aj = *ajp.
   __ adds(R0, R0, Operand(R7));
   __ adcs(R6, R1, ZR);
   __ adc(R7, R2, ZR);  // R7:R6:R0 = 2*x*xi + aj + c.

   // *ajp++ = low64(t) = R0
-  __ str(R0, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex));
+  __ str(R0, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex));

   // while (--n >= 0)
   __ subs(R8, R8, Operand(1));  // --n
   __ b(&loop, PL);

   __ Bind(&done);
   // uint64_t aj = *ajp
   __ ldr(R0, Address(R5, 0));

   // uint128_t t = aj + c
(...skipping 70 matching lines...)

   // R0 = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
   __ movn(R0, Immediate(0), 0);

   // Return qd if dh == yt
   Label return_qd;
   __ cmp(R2, Operand(R3));
   __ b(&return_qd, EQ);

   // R1 = dl = digits[(i >> 1) - 3 .. (i >> 1) - 2]
-  __ ldr(R1,
-      FieldAddress(R1, TypedData::data_offset() - 3*Bigint::kBytesPerDigit));
+  __ ldr(R1, FieldAddress(
+                 R1, TypedData::data_offset() - 3 * Bigint::kBytesPerDigit));

   // R5 = yth = yt >> 32
   __ orr(R5, ZR, Operand(R3, LSR, 32));

   // R6 = qh = dh / yth
   __ udiv(R6, R2, R5);

   // R8:R7 = ph:pl = yt*qh
   __ mul(R7, R3, R6);
   __ umulh(R8, R3, R6);
(...skipping 77 matching lines...)

   // --ql
   __ sub(R6, R6, Operand(1));

   __ Bind(&ql_ok);
   // qd |= ql;
   __ orr(R0, R0, Operand(R6));

   __ Bind(&return_qd);
   // args[2..3] = qd
-  __ str(R0,
-      FieldAddress(R4, TypedData::data_offset() + 2*Bigint::kBytesPerDigit));
+  __ str(R0, FieldAddress(
+                 R4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit));

   __ LoadImmediate(R0, Smi::RawValue(2));  // Two digits processed.
   __ ret();
 }


 void Intrinsifier::Montgomery_mulMod(Assembler* assembler) {
   // Pseudo code:
   // static int _mulMod(Uint32List args, Uint32List digits, int i) {
   //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
   //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
   //   uint128_t t = rho*d;
   //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
   //   return 2;
   // }

   // R4 = args
   __ ldr(R4, Address(SP, 2 * kWordSize));  // args

   // R3 = rho = args[2..3]
-  __ ldr(R3,
-      FieldAddress(R4, TypedData::data_offset() + 2*Bigint::kBytesPerDigit));
+  __ ldr(R3, FieldAddress(
+                 R4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit));

   // R2 = digits[i >> 1 .. (i >> 1) + 1]
   // R0 = i as Smi, R1 = digits
   __ ldp(R0, R1, Address(SP, 0 * kWordSize, Address::PairOffset));
   __ add(R1, R1, Operand(R0, LSL, 1));
   __ ldr(R2, FieldAddress(R1, TypedData::data_offset()));

   // R0 = rho*d mod DIGIT_BASE
   __ mul(R0, R2, R3);  // R0 = low64(R2*R3).

   // args[4 .. 5] = R0
-  __ str(R0,
-      FieldAddress(R4, TypedData::data_offset() + 4*Bigint::kBytesPerDigit));
+  __ str(R0, FieldAddress(
+                 R4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit));

   __ LoadImmediate(R0, Smi::RawValue(2));  // Two digits processed.
   __ ret();
 }
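The pseudo code maps onto one 64-bit multiply because DIGIT_BASE is 2^32, so t mod DIGIT_BASE^2 is simply the product truncated to 64 bits, which is all that mul (as opposed to mul plus umulh) returns:

#include <cstdint>

uint64_t MulMod(uint64_t rho, uint64_t d) {
  return rho * d;  // Wraps mod 2^64 == DIGIT_BASE^2; no high half needed.
}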


 // Check if the last argument is a double, jump to label 'is_smi' if smi
 // (easy to convert to double), otherwise jump to label 'not_double_smi',
 // Returns the last argument in R0.
 static void TestLastArgumentIsDouble(Assembler* assembler,
(...skipping 71 matching lines...)
 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) {
   Label fall_through, is_smi, double_op;

   TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
   // Both arguments are double, right operand is in R0.
   __ LoadDFieldFromOffset(V1, R0, Double::value_offset());
   __ Bind(&double_op);
   __ ldr(R0, Address(SP, 1 * kWordSize));  // Left argument.
   __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
   switch (kind) {
-    case Token::kADD: __ faddd(V0, V0, V1); break;
-    case Token::kSUB: __ fsubd(V0, V0, V1); break;
-    case Token::kMUL: __ fmuld(V0, V0, V1); break;
-    case Token::kDIV: __ fdivd(V0, V0, V1); break;
-    default: UNREACHABLE();
+    case Token::kADD:
+      __ faddd(V0, V0, V1);
+      break;
+    case Token::kSUB:
+      __ fsubd(V0, V0, V1);
+      break;
+    case Token::kMUL:
+      __ fmuld(V0, V0, V1);
+      break;
+    case Token::kDIV:
+      __ fdivd(V0, V0, V1);
+      break;
+    default:
+      UNREACHABLE();
   }
-  const Class& double_class = Class::Handle(
-      Isolate::Current()->object_store()->double_class());
+  const Class& double_class =
+      Class::Handle(Isolate::Current()->object_store()->double_class());
   __ TryAllocate(double_class, &fall_through, R0, R1);
   __ StoreDFieldToOffset(V0, R0, Double::value_offset());
   __ ret();

   __ Bind(&is_smi);  // Convert R0 to a double.
   __ SmiUntag(R0);
   __ scvtfdx(V1, R0);
   __ b(&double_op);

   __ Bind(&fall_through);
(...skipping 26 matching lines...)
   // Only smis allowed.
   __ ldr(R0, Address(SP, 0 * kWordSize));
   __ tsti(R0, Immediate(kSmiTagMask));
   __ b(&fall_through, NE);
   // Is Smi.
   __ SmiUntag(R0);
   __ scvtfdx(V1, R0);
   __ ldr(R0, Address(SP, 1 * kWordSize));
   __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
   __ fmuld(V0, V0, V1);
-  const Class& double_class = Class::Handle(
-      Isolate::Current()->object_store()->double_class());
+  const Class& double_class =
+      Class::Handle(Isolate::Current()->object_store()->double_class());
   __ TryAllocate(double_class, &fall_through, R0, R1);
   __ StoreDFieldToOffset(V0, R0, Double::value_offset());
   __ ret();
   __ Bind(&fall_through);
 }


 void Intrinsifier::DoubleFromInteger(Assembler* assembler) {
   Label fall_through;

   __ ldr(R0, Address(SP, 0 * kWordSize));
   __ tsti(R0, Immediate(kSmiTagMask));
   __ b(&fall_through, NE);
   // Is Smi.
   __ SmiUntag(R0);
   __ scvtfdx(V0, R0);
-  const Class& double_class = Class::Handle(
-      Isolate::Current()->object_store()->double_class());
+  const Class& double_class =
+      Class::Handle(Isolate::Current()->object_store()->double_class());
   __ TryAllocate(double_class, &fall_through, R0, R1);
   __ StoreDFieldToOffset(V0, R0, Double::value_offset());
   __ ret();
   __ Bind(&fall_through);
 }


 void Intrinsifier::Double_getIsNaN(Assembler* assembler) {
   __ ldr(R0, Address(SP, 0 * kWordSize));
   __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
(...skipping 23 matching lines...)
   const Register false_reg = R0;
   const Register true_reg = R2;
   Label is_false, is_true, is_zero;

   __ ldr(R0, Address(SP, 0 * kWordSize));
   __ LoadDFieldFromOffset(V0, R0, Double::value_offset());
   __ fcmpdz(V0);
   __ LoadObject(true_reg, Bool::True());
   __ LoadObject(false_reg, Bool::False());
   __ b(&is_false, VS);  // NaN -> false.
   __ b(&is_zero, EQ);  // Check for negative zero.
   __ b(&is_false, CS);  // >= 0 -> false.

   __ Bind(&is_true);
   __ mov(R0, true_reg);

   __ Bind(&is_false);
   __ ret();

   __ Bind(&is_zero);
   // Check for negative zero by looking at the sign bit.
(...skipping 27 matching lines...)
 }
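The sign-bit check taken on the EQ branch above is necessary because floating-point comparison treats -0.0 and 0.0 as equal; only the raw sign bit tells them apart:

#include <cmath>
#include <cstdio>

int main() {
  double z = -0.0;
  printf("%d %d\n", z == 0.0, std::signbit(z));  // Prints: 1 1
  return 0;
}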


 void Intrinsifier::MathSqrt(Assembler* assembler) {
   Label fall_through, is_smi, double_op;
   TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
   // Argument is double and is in R0.
   __ LoadDFieldFromOffset(V1, R0, Double::value_offset());
   __ Bind(&double_op);
   __ fsqrtd(V0, V1);
-  const Class& double_class = Class::Handle(
-      Isolate::Current()->object_store()->double_class());
+  const Class& double_class =
+      Class::Handle(Isolate::Current()->object_store()->double_class());
   __ TryAllocate(double_class, &fall_through, R0, R1);
   __ StoreDFieldToOffset(V0, R0, Double::value_offset());
   __ ret();
   __ Bind(&is_smi);
   __ SmiUntag(R0);
   __ scvtfdx(V1, R0);
   __ b(&double_op);
   __ Bind(&fall_through);
 }


 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
 // _state[kSTATE_LO] = state & _MASK_32;
 // _state[kSTATE_HI] = state >> 32;
 void Intrinsifier::Random_nextState(Assembler* assembler) {
   const Library& math_lib = Library::Handle(Library::MathLibrary());
   ASSERT(!math_lib.IsNull());
-  const Class& random_class = Class::Handle(
-      math_lib.LookupClassAllowPrivate(Symbols::_Random()));
+  const Class& random_class =
+      Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
   ASSERT(!random_class.IsNull());
   const Field& state_field = Field::ZoneHandle(
       random_class.LookupInstanceFieldAllowPrivate(Symbols::_state()));
   ASSERT(!state_field.IsNull());
   const Field& random_A_field = Field::ZoneHandle(
       random_class.LookupStaticFieldAllowPrivate(Symbols::_A()));
   ASSERT(!random_A_field.IsNull());
   ASSERT(random_A_field.is_const());
   const Instance& a_value = Instance::Handle(random_A_field.StaticValue());
   const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value();
(...skipping 56 matching lines...)
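The quoted state update is a 64-bit multiply-with-carry step: multiply the low half by the constant _A (its concrete value lives in the Dart library and is not shown here), add the high half, then split the 64-bit result back into two 32-bit halves. In plain C++:

#include <cstdint>

void NextState(uint32_t state[2], uint64_t a /* _A */) {
  uint64_t s = a * state[0] + state[1];  // Wraps mod 2^64 (_MASK_64).
  state[0] = (uint32_t)s;                // state & _MASK_32
  state[1] = (uint32_t)(s >> 32);        // state >> 32
}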

 static void JumpIfNotInteger(Assembler* assembler,
                              Register cid,
                              Register tmp,
                              Label* target) {
   RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target);
 }


 static void JumpIfString(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
-  RangeCheck(assembler,
-             cid,
-             tmp,
-             kOneByteStringCid,
-             kExternalTwoByteStringCid,
-             kIfInRange,
-             target);
+  RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
+             kIfInRange, target);
 }


 static void JumpIfNotString(Assembler* assembler,
                             Register cid,
                             Register tmp,
                             Label* target) {
-  RangeCheck(assembler,
-             cid,
-             tmp,
-             kOneByteStringCid,
-             kExternalTwoByteStringCid,
-             kIfNotInRange,
-             target);
+  RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
+             kIfNotInRange, target);
 }
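All three helpers reduce to a single range check, which works because the string class ids are laid out contiguously between kOneByteStringCid and kExternalTwoByteStringCid. A sketch of the usual branch-free form (illustrative, not the RangeCheck implementation):

#include <cstdint>

bool CidInRange(intptr_t cid, intptr_t low, intptr_t high) {
  // One unsigned compare: cid - low wraps to a huge value when cid < low.
  return (uintptr_t)(cid - low) <= (uintptr_t)(high - low);
}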
1715 1711
1716 1712
1717 // Return type quickly for simple types (not parameterized and not signature). 1713 // Return type quickly for simple types (not parameterized and not signature).
1718 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { 1714 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
1719 Label fall_through, use_canonical_type, not_double, not_integer; 1715 Label fall_through, use_canonical_type, not_double, not_integer;
1720 __ ldr(R0, Address(SP, 0 * kWordSize)); 1716 __ ldr(R0, Address(SP, 0 * kWordSize));
1721 __ LoadClassIdMayBeSmi(R1, R0); 1717 __ LoadClassIdMayBeSmi(R1, R0);
1722 1718
1723 __ CompareImmediate(R1, kClosureCid); 1719 __ CompareImmediate(R1, kClosureCid);
(...skipping 184 matching lines...)
1908 1904
1909 __ tsti(R1, Immediate(kSmiTagMask)); 1905 __ tsti(R1, Immediate(kSmiTagMask));
1910 __ b(&fall_through, NE); // 'start' is not a Smi. 1906 __ b(&fall_through, NE); // 'start' is not a Smi.
1911 1907
1912 __ CompareClassId(R2, kOneByteStringCid); 1908 __ CompareClassId(R2, kOneByteStringCid);
1913 __ b(&fall_through, NE); 1909 __ b(&fall_through, NE);
1914 1910
1915 __ CompareClassId(R0, kOneByteStringCid); 1911 __ CompareClassId(R0, kOneByteStringCid);
1916 __ b(&fall_through, NE); 1912 __ b(&fall_through, NE);
1917 1913
1918 GenerateSubstringMatchesSpecialization(assembler, 1914 GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
1919 kOneByteStringCid, 1915 kOneByteStringCid, &return_true,
1920 kOneByteStringCid,
1921 &return_true,
1922 &return_false); 1916 &return_false);
1923 1917
1924 __ Bind(&try_two_byte); 1918 __ Bind(&try_two_byte);
1925 __ CompareClassId(R0, kTwoByteStringCid); 1919 __ CompareClassId(R0, kTwoByteStringCid);
1926 __ b(&fall_through, NE); 1920 __ b(&fall_through, NE);
1927 1921
1928 GenerateSubstringMatchesSpecialization(assembler, 1922 GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
1929 kTwoByteStringCid, 1923 kOneByteStringCid, &return_true,
1930 kOneByteStringCid,
1931 &return_true,
1932 &return_false); 1924 &return_false);
1933 1925
1934 __ Bind(&return_true); 1926 __ Bind(&return_true);
1935 __ LoadObject(R0, Bool::True()); 1927 __ LoadObject(R0, Bool::True());
1936 __ ret(); 1928 __ ret();
1937 1929
1938 __ Bind(&return_false); 1930 __ Bind(&return_false);
1939 __ LoadObject(R0, Bool::False()); 1931 __ LoadObject(R0, Bool::False());
1940 __ ret(); 1932 __ ret();
1941 1933
(...skipping 14 matching lines...)
1956 __ b(&fall_through, CS); // Runtime throws exception. 1948 __ b(&fall_through, CS); // Runtime throws exception.
1957 1949
1958 __ CompareClassId(R0, kOneByteStringCid); 1950 __ CompareClassId(R0, kOneByteStringCid);
1959 __ b(&try_two_byte_string, NE); 1951 __ b(&try_two_byte_string, NE);
1960 __ SmiUntag(R1); 1952 __ SmiUntag(R1);
1961 __ AddImmediate(R0, R0, OneByteString::data_offset() - kHeapObjectTag); 1953 __ AddImmediate(R0, R0, OneByteString::data_offset() - kHeapObjectTag);
1962 __ ldr(R1, Address(R0, R1), kUnsignedByte); 1954 __ ldr(R1, Address(R0, R1), kUnsignedByte);
1963 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols); 1955 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols);
1964 __ b(&fall_through, GE); 1956 __ b(&fall_through, GE);
1965 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset())); 1957 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset()));
1966 __ AddImmediate( 1958 __ AddImmediate(R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
1967 R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
1968 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); 1959 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
1969 __ ret(); 1960 __ ret();
1970 1961
1971 __ Bind(&try_two_byte_string); 1962 __ Bind(&try_two_byte_string);
1972 __ CompareClassId(R0, kTwoByteStringCid); 1963 __ CompareClassId(R0, kTwoByteStringCid);
1973 __ b(&fall_through, NE); 1964 __ b(&fall_through, NE);
1974 ASSERT(kSmiTagShift == 1); 1965 ASSERT(kSmiTagShift == 1);
1975 __ AddImmediate(R0, R0, TwoByteString::data_offset() - kHeapObjectTag); 1966 __ AddImmediate(R0, R0, TwoByteString::data_offset() - kHeapObjectTag);
1976 __ ldr(R1, Address(R0, R1), kUnsignedHalfword); 1967 __ ldr(R1, Address(R0, R1), kUnsignedHalfword);
1977 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols); 1968 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols);
1978 __ b(&fall_through, GE); 1969 __ b(&fall_through, GE);
1979 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset())); 1970 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset()));
1980 __ AddImmediate( 1971 __ AddImmediate(R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
1981 R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
1982 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); 1972 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
1983 __ ret(); 1973 __ ret();
1984 1974
1985 __ Bind(&fall_through); 1975 __ Bind(&fall_through);
1986 } 1976 }
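Both arms of the intrinsic above (its entry is elided) implement the same fast path: for a code unit below Symbols::kNumberOfOneCharCodeSymbols, the VM already holds a canonical one-character string in a table reachable from the Thread, so the intrinsic returns that predefined symbol instead of allocating a fresh string. A sketch of the lookup, with the table modeled as a plain array and the function name invented for illustration:

// Fast path for charAt-style intrinsics: reuse a predefined
// one-character symbol when one exists, else signal the slow path.
// The table layout here is an assumption.
#include <cstdint>

struct RawString;  // opaque stand-in for a VM string object

RawString* OneCharFastPath(RawString* const* one_char_symbols,
                           uint16_t code_unit,
                           int num_one_char_symbols) {
  if (code_unit >= num_one_char_symbols) {
    return nullptr;  // fall through to the runtime
  }
  return one_char_symbols[code_unit];
}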
1987 1977
1988 1978
1989 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { 1979 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) {
1990 __ ldr(R0, Address(SP, 0 * kWordSize)); 1980 __ ldr(R0, Address(SP, 0 * kWordSize));
1991 __ ldr(R0, FieldAddress(R0, String::length_offset())); 1981 __ ldr(R0, FieldAddress(R0, String::length_offset()));
(...skipping 47 matching lines...)
2039 __ b(&loop, NE); 2029 __ b(&loop, NE);
2040 2030
2041 // Finalize. 2031 // Finalize.
2042 // hash_ += hash_ << 3; 2032 // hash_ += hash_ << 3;
2043 // hash_ ^= hash_ >> 11; 2033 // hash_ ^= hash_ >> 11;
2044 // hash_ += hash_ << 15; 2034 // hash_ += hash_ << 15;
2045 __ addw(R0, R0, Operand(R0, LSL, 3)); 2035 __ addw(R0, R0, Operand(R0, LSL, 3));
2046 __ eorw(R0, R0, Operand(R0, LSR, 11)); 2036 __ eorw(R0, R0, Operand(R0, LSR, 11));
2047 __ addw(R0, R0, Operand(R0, LSL, 15)); 2037 __ addw(R0, R0, Operand(R0, LSL, 15));
2048 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); 2038 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
2049 __ AndImmediate( 2039 __ AndImmediate(R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1);
2050 R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1);
2051 __ CompareRegisters(R0, ZR); 2040 __ CompareRegisters(R0, ZR);
2052 // return hash_ == 0 ? 1 : hash_; 2041 // return hash_ == 0 ? 1 : hash_;
2053 __ Bind(&done); 2042 __ Bind(&done);
2054 __ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1). 2043 __ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1).
2055 __ SmiTag(R0); 2044 __ SmiTag(R0);
2056 __ str(R0, FieldAddress(R1, String::hash_offset())); 2045 __ str(R0, FieldAddress(R1, String::hash_offset()));
2057 __ ret(); 2046 __ ret();
2058 } 2047 }
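The finalization above is the tail of a Jenkins-style one-at-a-time hash: the three commented lines map one-to-one onto the addw/eorw/addw instructions, the result is masked down to String::kHashBits, and a zero hash is bumped to 1 (via csinc) so that a stored hash of 0 can mean "not yet computed". The same arithmetic in plain C++, with the bit count left as a parameter because the value of kHashBits is not visible in this hunk:

// Finalization steps exactly as in the comments above; 'bits' stands
// in for String::kHashBits, whose value is not shown in this hunk.
#include <cstdint>

uint32_t FinalizeStringHash(uint32_t hash, int bits) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (static_cast<uint32_t>(1) << bits) - 1;
  return (hash == 0) ? 1 : hash;  // 0 is reserved for "no hash yet"
}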
2059 2048
2060 2049
(...skipping 50 matching lines...)
2111 __ csel(R2, R2, ZR, LS); 2100 __ csel(R2, R2, ZR, LS);
2112 2101
2113 // Get the class index and insert it into the tags. 2102 // Get the class index and insert it into the tags.
2114 // R2: size and bit tags. 2103 // R2: size and bit tags.
2115 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); 2104 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
2116 __ orr(R2, R2, Operand(TMP)); 2105 __ orr(R2, R2, Operand(TMP));
2117 __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags. 2106 __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags.
2118 } 2107 }
2119 2108
2120 // Set the length field using the saved length (R6). 2109 // Set the length field using the saved length (R6).
2121 __ StoreIntoObjectNoBarrier(R0, 2110 __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::length_offset()),
2122 FieldAddress(R0, String::length_offset()),
2123 R6); 2111 R6);
2124 // Clear hash. 2112 // Clear hash.
2125 __ mov(TMP, ZR); 2113 __ mov(TMP, ZR);
2126 __ str(TMP, FieldAddress(R0, String::hash_offset())); 2114 __ str(TMP, FieldAddress(R0, String::hash_offset()));
2127 __ b(ok); 2115 __ b(ok);
2128 2116
2129 __ Bind(&fail); 2117 __ Bind(&fail);
2130 __ b(failure); 2118 __ b(failure);
2131 } 2119 }
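The tail of TryAllocateOnebyteString assembles the object header: the size tag is conditionally zeroed (the csel ... LS) when the instance is too large to encode inline, the class id is OR-ed in via RawObject::ClassIdTag::encode, the length is stored without a write barrier (it is a Smi), and the hash field is cleared. A rough model of the tag packing under assumed field widths; the VM's real RawObject bit layout is not shown in this diff:

// Hypothetical header packing. The 8-bit size field and its position
// above the cid field are assumptions chosen for illustration only.
#include <cstdint>

uint64_t MakeHeaderTags(uint64_t size_tag, uint64_t cid) {
  const uint64_t kMaxInlineSize = (1u << 8) - 1;  // assumed field width
  if (size_tag > kMaxInlineSize) {
    size_tag = 0;  // size does not fit; readers consult the heap instead
  }
  return (size_tag << 8) | cid;
}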
2132 2120
2133 2121
2134 // Arg0: OneByteString (receiver). 2122 // Arg0: OneByteString (receiver).
2135 // Arg1: Start index as Smi. 2123 // Arg1: Start index as Smi.
2136 // Arg2: End index as Smi. 2124 // Arg2: End index as Smi.
2137 // The indexes must be valid. 2125 // The indexes must be valid.
2138 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { 2126 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) {
2139 const intptr_t kStringOffset = 2 * kWordSize; 2127 const intptr_t kStringOffset = 2 * kWordSize;
2140 const intptr_t kStartIndexOffset = 1 * kWordSize; 2128 const intptr_t kStartIndexOffset = 1 * kWordSize;
2141 const intptr_t kEndIndexOffset = 0 * kWordSize; 2129 const intptr_t kEndIndexOffset = 0 * kWordSize;
2142 Label fall_through, ok; 2130 Label fall_through, ok;
2143 2131
2144 __ ldr(R2, Address(SP, kEndIndexOffset)); 2132 __ ldr(R2, Address(SP, kEndIndexOffset));
2145 __ ldr(TMP, Address(SP, kStartIndexOffset)); 2133 __ ldr(TMP, Address(SP, kStartIndexOffset));
2146 __ orr(R3, R2, Operand(TMP)); 2134 __ orr(R3, R2, Operand(TMP));
2147 __ tsti(R3, Immediate(kSmiTagMask)); 2135 __ tsti(R3, Immediate(kSmiTagMask));
2148 __ b(&fall_through, NE); // 'start', 'end' not Smi. 2136 __ b(&fall_through, NE); // 'start', 'end' not Smi.
2149 2137
2150 __ sub(R2, R2, Operand(TMP)); 2138 __ sub(R2, R2, Operand(TMP));
2151 TryAllocateOnebyteString(assembler, &ok, &fall_through); 2139 TryAllocateOnebyteString(assembler, &ok, &fall_through);
2152 __ Bind(&ok); 2140 __ Bind(&ok);
2153 // R0: new string as tagged pointer. 2141 // R0: new string as tagged pointer.
2154 // Copy string. 2142 // Copy string.
2155 __ ldr(R3, Address(SP, kStringOffset)); 2143 __ ldr(R3, Address(SP, kStringOffset));
2156 __ ldr(R1, Address(SP, kStartIndexOffset)); 2144 __ ldr(R1, Address(SP, kStartIndexOffset));
(...skipping 78 matching lines...)
2235 // Have same length? 2223 // Have same length?
2236 __ ldr(R2, FieldAddress(R0, String::length_offset())); 2224 __ ldr(R2, FieldAddress(R0, String::length_offset()));
2237 __ ldr(R3, FieldAddress(R1, String::length_offset())); 2225 __ ldr(R3, FieldAddress(R1, String::length_offset()));
2238 __ cmp(R2, Operand(R3)); 2226 __ cmp(R2, Operand(R3));
2239 __ b(&is_false, NE); 2227 __ b(&is_false, NE);
2240 2228
2241 // Check contents, no fall-through possible. 2229 // Check contents, no fall-through possible.
2242 // TODO(zra): try out other sequences. 2230 // TODO(zra): try out other sequences.
2243 ASSERT((string_cid == kOneByteStringCid) || 2231 ASSERT((string_cid == kOneByteStringCid) ||
2244 (string_cid == kTwoByteStringCid)); 2232 (string_cid == kTwoByteStringCid));
2245 const intptr_t offset = (string_cid == kOneByteStringCid) ? 2233 const intptr_t offset = (string_cid == kOneByteStringCid)
2246 OneByteString::data_offset() : TwoByteString::data_offset(); 2234 ? OneByteString::data_offset()
2235 : TwoByteString::data_offset();
2247 __ AddImmediate(R0, R0, offset - kHeapObjectTag); 2236 __ AddImmediate(R0, R0, offset - kHeapObjectTag);
2248 __ AddImmediate(R1, R1, offset - kHeapObjectTag); 2237 __ AddImmediate(R1, R1, offset - kHeapObjectTag);
2249 __ SmiUntag(R2); 2238 __ SmiUntag(R2);
2250 __ Bind(&loop); 2239 __ Bind(&loop);
2251 __ AddImmediate(R2, R2, -1); 2240 __ AddImmediate(R2, R2, -1);
2252 __ CompareRegisters(R2, ZR); 2241 __ CompareRegisters(R2, ZR);
2253 __ b(&is_true, LT); 2242 __ b(&is_true, LT);
2254 if (string_cid == kOneByteStringCid) { 2243 if (string_cid == kOneByteStringCid) {
2255 __ ldr(R3, Address(R0), kUnsignedByte); 2244 __ ldr(R3, Address(R0), kUnsignedByte);
2256 __ ldr(R4, Address(R1), kUnsignedByte); 2245 __ ldr(R4, Address(R1), kUnsignedByte);
(...skipping 65 matching lines...)
2322 } 2311 }
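This equality specialization first short-circuits on length, then walks both strings one element at a time, a byte per step for one-byte strings and a half-word for two-byte strings (the two-byte arm and the loop tail are elided above). The same loop in plain C++, as a sketch with illustrative names:

// Per-element comparison the intrinsic specializes by string cid;
// CharT is uint8_t for one-byte and uint16_t for two-byte strings.
#include <cstddef>
#include <cstdint>

template <typename CharT>
bool StringContentsEqual(const CharT* a, const CharT* b, size_t length) {
  for (size_t i = 0; i < length; i++) {
    if (a[i] != b[i]) {
      return false;  // the &is_false path
    }
  }
  return true;  // counter ran out: the &is_true path
}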
2323 2312
2324 2313
2325 // On stack: user tag (+0). 2314 // On stack: user tag (+0).
2326 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { 2315 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) {
2327 // R1: Isolate. 2316 // R1: Isolate.
2328 __ LoadIsolate(R1); 2317 __ LoadIsolate(R1);
2329 // R0: Current user tag. 2318 // R0: Current user tag.
2330 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); 2319 __ ldr(R0, Address(R1, Isolate::current_tag_offset()));
2331 // R2: UserTag. 2320 // R2: UserTag.
2332 __ ldr(R2, Address(SP, + 0 * kWordSize)); 2321 __ ldr(R2, Address(SP, +0 * kWordSize));
2333 // Set Isolate::current_tag_. 2322 // Set Isolate::current_tag_.
2334 __ str(R2, Address(R1, Isolate::current_tag_offset())); 2323 __ str(R2, Address(R1, Isolate::current_tag_offset()));
2335 // R2: UserTag's tag. 2324 // R2: UserTag's tag.
2336 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset())); 2325 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset()));
2337 // Set Isolate::user_tag_. 2326 // Set Isolate::user_tag_.
2338 __ str(R2, Address(R1, Isolate::user_tag_offset())); 2327 __ str(R2, Address(R1, Isolate::user_tag_offset()));
2339 __ ret(); 2328 __ ret();
2340 } 2329 }
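UserTag_makeCurrent is a swap: it returns the previously current tag object in R0 while installing the new one, and it also mirrors the new tag's integer value into Isolate::user_tag_ so the profiler can sample it without dereferencing the tag object. The same protocol modeled in C++; the struct shapes are assumptions standing in for the VM-internal Isolate and UserTag classes:

// Illustrative model of the tag swap performed above.
#include <cstdint>

struct UserTag { uint64_t tag; };

struct Isolate {
  UserTag* current_tag;  // Isolate::current_tag_
  uint64_t user_tag;     // Isolate::user_tag_, read by the profiler
};

UserTag* MakeCurrent(Isolate* isolate, UserTag* new_tag) {
  UserTag* previous = isolate->current_tag;  // becomes the return value
  isolate->current_tag = new_tag;
  isolate->user_tag = new_tag->tag;          // cache the raw tag value
  return previous;
}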
2341 2330
2342 2331
(...skipping 24 matching lines...)
2367 __ cmp(R0, Operand(0)); 2356 __ cmp(R0, Operand(0));
2368 __ LoadObject(R0, Bool::False()); 2357 __ LoadObject(R0, Bool::False());
2369 __ LoadObject(TMP, Bool::True()); 2358 __ LoadObject(TMP, Bool::True());
2370 __ csel(R0, TMP, R0, NE); 2359 __ csel(R0, TMP, R0, NE);
2371 __ ret(); 2360 __ ret();
2372 } 2361 }
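The epilogue above shows the branchless pattern these intrinsics use to materialize a Dart boolean: compare against zero, preload Bool::False(), load Bool::True() into TMP, and csel on NE. In C++ this collapses to a ternary; the canonical-object stand-ins below are assumptions:

// What the cmp/LoadObject/csel sequence computes, without branches.
#include <cstdint>

struct Bool {};                   // stand-in for the VM's Bool class
static Bool kTrueObj, kFalseObj;  // stand-ins for the canonical pair

static Bool* ToDartBool(int64_t value) {
  return (value != 0) ? &kTrueObj : &kFalseObj;  // csel picks True on NE
}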
2373 2362
2374 } // namespace dart 2363 } // namespace dart
2375 2364
2376 #endif // defined TARGET_ARCH_ARM64 2365 #endif // defined TARGET_ARCH_ARM64