| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | |
| 2 // for details. All rights reserved. Use of this source code is governed by a | |
| 3 // BSD-style license that can be found in the LICENSE file. | |
| 4 | |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. | |
| 6 #if defined(TARGET_ARCH_MIPS) | |
| 7 | |
| 8 #include "vm/intrinsifier.h" | |
| 9 | |
| 10 #include "vm/assembler.h" | |
| 11 #include "vm/dart_entry.h" | |
| 12 #include "vm/flow_graph_compiler.h" | |
| 13 #include "vm/object.h" | |
| 14 #include "vm/object_store.h" | |
| 15 #include "vm/regexp_assembler.h" | |
| 16 #include "vm/symbols.h" | |
| 17 #include "vm/timeline.h" | |
| 18 | |
| 19 namespace dart { | |
| 20 | |
| 21 // When entering intrinsics code: | |
| 22 // S4: Arguments descriptor | |
| 23 // RA: Return address | |
| 24 // The S4 register can be destroyed only if there is no slow-path, i.e. | |
| 25 // if the intrinsified method always executes a return. | |
| 26 // The FP register should not be modified, because it is used by the profiler. | |
| 27 // The PP and THR registers (see constants_mips.h) must be preserved. | |
| 28 | |
| 29 #define __ assembler-> | |
| 30 | |
| 31 | |
// Word offset from SP at which intrinsics find their parameters.
// NOTE(review): presumably -1 means the slot just below SP on MIPS —
// confirm against the other architecture ports.
intptr_t Intrinsifier::ParameterSlotFromSp() {
  return -1;
}
| 35 | |
| 36 | |
| 37 static bool IsABIPreservedRegister(Register reg) { | |
| 38 return ((1 << reg) & kAbiPreservedCpuRegs) != 0; | |
| 39 } | |
| 40 | |
| 41 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { | |
| 42 ASSERT(IsABIPreservedRegister(CODE_REG)); | |
| 43 ASSERT(IsABIPreservedRegister(ARGS_DESC_REG)); | |
| 44 ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP)); | |
| 45 ASSERT(CALLEE_SAVED_TEMP != CODE_REG); | |
| 46 ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG); | |
| 47 | |
| 48 assembler->Comment("IntrinsicCallPrologue"); | |
| 49 assembler->mov(CALLEE_SAVED_TEMP, LRREG); | |
| 50 } | |
| 51 | |
| 52 | |
| 53 void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) { | |
| 54 assembler->Comment("IntrinsicCallEpilogue"); | |
| 55 assembler->mov(LRREG, CALLEE_SAVED_TEMP); | |
| 56 } | |
| 57 | |
| 58 | |
| 59 // Intrinsify only for Smi value and index. Non-smi values need a store buffer | |
| 60 // update. Array length is always a Smi. | |
// Fast path for array[index] = value on fixed-length arrays.
// Falls through to the regular call when the index is not a Smi or is out
// of range; the runtime then performs the checks and throws if needed.
void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) {
  // In checked mode the value needs a type check; leave it to regular code.
  if (Isolate::Current()->type_checks()) {
    return;
  }

  Label fall_through;
  __ lw(T1, Address(SP, 1 * kWordSize)); // Index.
  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
  // Index not Smi.
  __ bne(CMPRES1, ZR, &fall_through);

  __ lw(T0, Address(SP, 2 * kWordSize)); // Array.
  // Range check. The unsigned compare on the tagged values also rejects
  // negative indices: their sign bit makes them huge unsigned numbers.
  __ lw(T3, FieldAddress(T0, Array::length_offset())); // Array length.
  // Runtime throws exception.
  __ BranchUnsignedGreaterEqual(T1, T3, &fall_through);

  // Note that T1 is Smi, i.e, times 2. Shifting left once more scales the
  // tagged index to a byte offset (index * 4 on 32-bit MIPS).
  ASSERT(kSmiTagShift == 1);
  __ lw(T2, Address(SP, 0 * kWordSize)); // Value.
  __ sll(T1, T1, 1); // T1 is Smi.
  __ addu(T1, T0, T1);
  // Store with write barrier: the value may be old while the array is new.
  __ StoreIntoObject(T0, FieldAddress(T1, Array::data_offset()), T2);
  // Caller is responsible for preserving the value if necessary.
  __ Ret();
  __ Bind(&fall_through);
}
| 88 | |
| 89 | |
| 90 // Allocate a GrowableObjectArray using the backing array specified. | |
| 91 // On stack: type argument (+1), data (+0). | |
void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) {
  // The newly allocated object is returned in V0.
  const intptr_t kTypeArgumentsOffset = 1 * kWordSize;
  const intptr_t kArrayOffset = 0 * kWordSize;
  Label fall_through;

  // Try allocating in new space; on failure fall through to regular code.
  const Class& cls = Class::Handle(
      Isolate::Current()->object_store()->growable_object_array_class());
  __ TryAllocate(cls, &fall_through, V0, T1);

  // Store backing array object in growable array object.
  __ lw(T1, Address(SP, kArrayOffset)); // Data argument.
  // V0 is new, no barrier needed.
  __ StoreIntoObjectNoBarrier(
      V0, FieldAddress(V0, GrowableObjectArray::data_offset()), T1);

  // V0: new growable array object start as a tagged pointer.
  // Store the type argument field in the growable array object.
  __ lw(T1, Address(SP, kTypeArgumentsOffset)); // Type argument.
  __ StoreIntoObjectNoBarrier(
      V0, FieldAddress(V0, GrowableObjectArray::type_arguments_offset()), T1);
  // Set the length field in the growable array object to 0.
  // The store executes in the branch delay slot of Ret, so it still runs
  // before control returns to the caller.
  __ Ret(); // Returns the newly allocated object in V0.
  __ delay_slot()->sw(ZR,
                      FieldAddress(V0, GrowableObjectArray::length_offset()));

  __ Bind(&fall_through);
}
| 121 | |
| 122 | |
| 123 // Add an element to growable array if it doesn't need to grow, otherwise | |
| 124 // call into regular code. | |
| 125 // On stack: growable array (+1), value (+0). | |
void Intrinsifier::GrowableArray_add(Assembler* assembler) {
  // In checked mode we need to type-check the incoming argument.
  if (Isolate::Current()->type_checks()) return;
  Label fall_through;
  __ lw(T0, Address(SP, 1 * kWordSize)); // Array.
  __ lw(T1, FieldAddress(T0, GrowableObjectArray::length_offset()));
  // T1: length.
  __ lw(T2, FieldAddress(T0, GrowableObjectArray::data_offset()));
  // T2: data.
  __ lw(T3, FieldAddress(T2, Array::length_offset()));
  // Compare length with capacity.
  // T3: capacity.
  __ beq(T1, T3, &fall_through); // Must grow data.
  // value_one is the tagged Smi 1, added directly to the tagged length.
  const int32_t value_one = reinterpret_cast<int32_t>(Smi::New(1));
  // len = len + 1;
  __ addiu(T3, T1, Immediate(value_one));
  __ sw(T3, FieldAddress(T0, GrowableObjectArray::length_offset()));
  __ lw(T0, Address(SP, 0 * kWordSize)); // Value.
  // Scale the old (tagged) length to a byte offset into the backing store.
  ASSERT(kSmiTagShift == 1);
  __ sll(T1, T1, 1);
  __ addu(T1, T2, T1);
  // Store with write barrier into the backing array.
  __ StoreIntoObject(T2, FieldAddress(T1, Array::data_offset()), T0);
  // add() returns null; move it into V0 in the branch delay slot.
  __ LoadObject(T7, Object::null_object());
  __ Ret();
  __ delay_slot()->mov(V0, T7);
  __ Bind(&fall_through);
}
| 153 | |
| 154 | |
// Shared fast path for allocating a typed-data array of |max_len| elements
// of 2^scale_shift bytes each: validates the length argument, bump-allocates
// in new space, writes tags and length, and zero-fills the payload. Falls
// through (without returning) on any check failure so the caller's regular
// allocation code runs instead. Comments inside the macro use /* */ so the
// line-continuation backslashes stay intact.
#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift)           \
  Label fall_through;                                                          \
  const intptr_t kArrayLengthStackOffset = 0 * kWordSize;                      \
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, T2, &fall_through));             \
  __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */         \
  /* Check that length is a positive Smi. */                                   \
  /* T2: requested array length argument. */                                   \
  __ andi(CMPRES1, T2, Immediate(kSmiTagMask));                                \
  __ bne(CMPRES1, ZR, &fall_through);                                          \
  __ BranchSignedLess(T2, Immediate(0), &fall_through);                        \
  __ SmiUntag(T2);                                                             \
  /* Check for maximum allowed length. */                                      \
  /* T2: untagged array length. */                                             \
  __ BranchSignedGreater(T2, Immediate(max_len), &fall_through);               \
  __ sll(T2, T2, scale_shift);                                                 \
  const intptr_t fixed_size_plus_alignment_padding =                           \
      sizeof(Raw##type_name) + kObjectAlignment - 1;                           \
  __ AddImmediate(T2, fixed_size_plus_alignment_padding);                      \
  __ LoadImmediate(TMP, -kObjectAlignment);                                    \
  __ and_(T2, T2, TMP); /* Round allocation size down to alignment. */         \
  Heap::Space space = Heap::kNew;                                              \
  __ lw(T3, Address(THR, Thread::heap_offset()));                              \
  __ lw(V0, Address(T3, Heap::TopOffset(space)));                              \
                                                                               \
  /* T2: allocation size. */                                                   \
  __ addu(T1, V0, T2);                                                         \
  /* Branch on unsigned overflow. */                                           \
  __ BranchUnsignedLess(T1, V0, &fall_through);                                \
                                                                               \
  /* Check if the allocation fits into the remaining space. */                 \
  /* V0: potential new object start. */                                        \
  /* T1: potential next object start. */                                       \
  /* T2: allocation size. */                                                   \
  /* T3: heap. */                                                              \
  __ lw(T4, Address(T3, Heap::EndOffset(space)));                              \
  __ BranchUnsignedGreaterEqual(T1, T4, &fall_through);                        \
                                                                               \
  /* Successfully allocated the object(s), now update top to point to */       \
  /* next object start and initialize the object. */                           \
  __ sw(T1, Address(T3, Heap::TopOffset(space)));                              \
  __ AddImmediate(V0, kHeapObjectTag);                                         \
  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T4, space));        \
  /* Initialize the tags. */                                                   \
  /* V0: new object start as a tagged pointer. */                              \
  /* T1: new object end address. */                                            \
  /* T2: allocation size. */                                                   \
  {                                                                            \
    Label size_tag_overflow, done;                                             \
    /* Objects too large for the size tag store 0 there instead. */            \
    __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag),   \
                             &size_tag_overflow);                              \
    __ b(&done);                                                               \
    __ delay_slot()->sll(T2, T2,                                               \
                         RawObject::kSizeTagPos - kObjectAlignmentLog2);       \
                                                                               \
    __ Bind(&size_tag_overflow);                                               \
    __ mov(T2, ZR);                                                            \
    __ Bind(&done);                                                            \
                                                                               \
    /* Get the class index and insert it into the tags. */                     \
    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));                 \
    __ or_(T2, T2, TMP);                                                       \
    __ sw(T2, FieldAddress(V0, type_name::tags_offset())); /* Tags. */         \
  }                                                                            \
  /* Set the length field. */                                                  \
  /* V0: new object start as a tagged pointer. */                              \
  /* T1: new object end address. */                                            \
  __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */         \
  __ StoreIntoObjectNoBarrier(                                                 \
      V0, FieldAddress(V0, type_name::length_offset()), T2);                   \
  /* Initialize all array elements to 0. */                                    \
  /* V0: new object start as a tagged pointer. */                              \
  /* T1: new object end address. */                                            \
  /* T2: iterator which initially points to the start of the variable */      \
  /* data area to be initialized. */                                           \
  __ AddImmediate(T2, V0, sizeof(Raw##type_name) - 1);                         \
  Label done, init_loop;                                                       \
  __ Bind(&init_loop);                                                         \
  __ BranchUnsignedGreaterEqual(T2, T1, &done);                                \
  __ sw(ZR, Address(T2, 0));                                                   \
  __ b(&init_loop);                                                            \
  __ delay_slot()->addiu(T2, T2, Immediate(kWordSize));                        \
  __ Bind(&done);                                                              \
                                                                               \
  __ Ret();                                                                    \
  __ Bind(&fall_through);
| 240 | |
| 241 | |
| 242 static int GetScaleFactor(intptr_t size) { | |
| 243 switch (size) { | |
| 244 case 1: | |
| 245 return 0; | |
| 246 case 2: | |
| 247 return 1; | |
| 248 case 4: | |
| 249 return 2; | |
| 250 case 8: | |
| 251 return 3; | |
| 252 case 16: | |
| 253 return 4; | |
| 254 } | |
| 255 UNREACHABLE(); | |
| 256 return -1; | |
| 257 } | |
| 258 | |
| 259 | |
// Stamps out one factory intrinsic per typed-data class: looks up the
// element size and maximum length for the class id, then expands the
// shared TYPED_ARRAY_ALLOCATION fast path above.
#define TYPED_DATA_ALLOCATOR(clazz)                                            \
  void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) {       \
    intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid);     \
    intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid);         \
    int shift = GetScaleFactor(size);                                          \
    TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \
  }
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR
| 269 | |
| 270 | |
| 271 // Loads args from stack into T0 and T1 | |
| 272 // Tests if they are smis, jumps to label not_smi if not. | |
| 273 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | |
| 274 __ lw(T0, Address(SP, 0 * kWordSize)); | |
| 275 __ lw(T1, Address(SP, 1 * kWordSize)); | |
| 276 __ or_(CMPRES1, T0, T1); | |
| 277 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); | |
| 278 __ bne(CMPRES1, ZR, not_smi); | |
| 279 return; | |
| 280 } | |
| 281 | |
| 282 | |
// Smi + Smi fast path. Tagged addition is correct because both operands
// carry the (zero) Smi tag. Falls through on non-Smi args or overflow.
void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through); // Checks two Smis.
  // AdduDetectOverflow leaves a negative value in CMPRES1 on overflow.
  __ AdduDetectOverflow(V0, T0, T1, CMPRES1); // Add.
  __ bltz(CMPRES1, &fall_through); // Fall through on overflow.
  __ Ret(); // Nothing in branch delay slot.
  __ Bind(&fall_through);
}
| 292 | |
| 293 | |
// Same code as the reflective variant; operand order does not matter for +.
void Intrinsifier::Integer_add(Assembler* assembler) {
  Integer_addFromInteger(assembler);
}
| 297 | |
| 298 | |
// Smi - Smi fast path for the reflective call: computes T0 - T1.
// Falls through on non-Smi args or overflow.
void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
  __ SubuDetectOverflow(V0, T0, T1, CMPRES1); // Subtract.
  __ bltz(CMPRES1, &fall_through); // Fall through on overflow.
  __ Ret();
  __ Bind(&fall_through);
}
| 308 | |
| 309 | |
// Smi - Smi fast path: computes T1 - T0 (operands reversed relative to
// Integer_subFromInteger because the receiver/argument order differs).
void Intrinsifier::Integer_sub(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
  __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract.
  __ bltz(CMPRES1, &fall_through); // Fall through on overflow.
  __ Ret(); // Nothing in branch delay slot.
  __ Bind(&fall_through);
}
| 319 | |
| 320 | |
// Smi * Smi fast path. Only one operand is untagged so the 32-bit product
// is already a tagged Smi. Overflow is detected by checking that the high
// word of the 64-bit product is just the sign extension of the low word.
void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through); // checks two smis
  __ SmiUntag(T0); // untags T0. only want result shifted by one

  __ mult(T0, T1); // HI:LO <- T0 * T1.
  __ mflo(V0); // V0 <- LO.
  __ mfhi(T2); // T2 <- HI.
  __ sra(T3, V0, 31); // T3 <- V0 >> 31.
  __ bne(T2, T3, &fall_through); // Fall through on overflow.
  __ Ret();
  __ Bind(&fall_through);
}
| 335 | |
| 336 | |
// Same code as the reflective variant; operand order does not matter for *.
void Intrinsifier::Integer_mul(Assembler* assembler) {
  Integer_mulFromInteger(assembler);
}
| 340 | |
| 341 | |
| 342 // Optimizations: | |
| 343 // - result is 0 if: | |
| 344 // - left is 0 | |
| 345 // - left equals right | |
| 346 // - result is left if | |
| 347 // - left > 0 && left < right | |
| 348 // T1: Tagged left (dividend). | |
| 349 // T0: Tagged right (divisor). | |
| 350 // Returns: | |
| 351 // V0: Untagged fallthrough result (remainder to be adjusted), or | |
| 352 // V0: Tagged return result (remainder). | |
| 353 static void EmitRemainderOperation(Assembler* assembler) { | |
| 354 Label return_zero, modulo; | |
| 355 const Register left = T1; | |
| 356 const Register right = T0; | |
| 357 const Register result = V0; | |
| 358 | |
| 359 __ beq(left, ZR, &return_zero); | |
| 360 __ beq(left, right, &return_zero); | |
| 361 | |
| 362 __ bltz(left, &modulo); | |
| 363 // left is positive. | |
| 364 __ BranchSignedGreaterEqual(left, right, &modulo); | |
| 365 // left is less than right. return left. | |
| 366 __ Ret(); | |
| 367 __ delay_slot()->mov(result, left); | |
| 368 | |
| 369 __ Bind(&return_zero); | |
| 370 __ Ret(); | |
| 371 __ delay_slot()->mov(result, ZR); | |
| 372 | |
| 373 __ Bind(&modulo); | |
| 374 __ SmiUntag(right); | |
| 375 __ SmiUntag(left); | |
| 376 __ div(left, right); // Divide, remainder goes in HI. | |
| 377 __ mfhi(result); // result <- HI. | |
| 378 return; | |
| 379 } | |
| 380 | |
| 381 | |
| 382 // Implementation: | |
| 383 // res = left % right; | |
| 384 // if (res < 0) { | |
| 385 // if (right < 0) { | |
| 386 // res = res - right; | |
| 387 // } else { | |
| 388 // res = res + right; | |
| 389 // } | |
| 390 // } | |
void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) {
  Label fall_through, subtract;
  // Test arguments for smi (single combined tag check, as in
  // TestBothArgumentsSmis, but with left/right in swapped slots).
  __ lw(T1, Address(SP, 0 * kWordSize));
  __ lw(T0, Address(SP, 1 * kWordSize));
  __ or_(CMPRES1, T0, T1);
  __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &fall_through);
  // T1: Tagged left (dividend).
  // T0: Tagged right (divisor).
  // Check if modulo by zero -> exception thrown in main function.
  __ beq(T0, ZR, &fall_through);
  EmitRemainderOperation(assembler);
  // Untagged right in T0. Untagged remainder result in V0.

  // Adjust a negative remainder to be non-negative (see comment above):
  // add |right| when right > 0, subtract it when right < 0.
  Label done;
  __ bgez(V0, &done);
  __ bltz(T0, &subtract);
  __ addu(V0, V0, T0);
  __ Ret();
  __ delay_slot()->SmiTag(V0); // Re-tag in the branch delay slot.

  __ Bind(&subtract);
  __ subu(V0, V0, T0);
  __ Ret();
  __ delay_slot()->SmiTag(V0);

  __ Bind(&done);
  __ Ret();
  __ delay_slot()->SmiTag(V0);

  __ Bind(&fall_through);
}
| 424 | |
| 425 | |
// Smi ~/ Smi fast path. Division by zero and the single untaggable result
// (MIN_SMI ~/ -1 == 2^30, one past the largest Smi) fall through so the
// runtime can handle them.
void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
  __ beq(T0, ZR, &fall_through); // If b is 0, fall through.

  __ SmiUntag(T0);
  __ SmiUntag(T1);
  __ div(T1, T0); // LO <- T1 / T0
  __ mflo(V0); // V0 <- LO
  // Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
  // cannot tag the result.
  __ BranchEqual(V0, Immediate(0x40000000), &fall_through);
  __ Ret();
  __ delay_slot()->SmiTag(V0); // Re-tag in the branch delay slot.
  __ Bind(&fall_through);
}
| 443 | |
| 444 | |
// Unary minus on a Smi receiver: negation works directly on the tagged
// value (0 - tagged == tagged negation). Overflow (negating MIN_SMI)
// falls through to the runtime.
void Intrinsifier::Integer_negate(Assembler* assembler) {
  Label fall_through;

  __ lw(T0, Address(SP, +0 * kWordSize)); // Grabs first argument.
  __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); // Test for Smi.
  __ bne(CMPRES1, ZR, &fall_through); // Fall through if not a Smi.
  __ SubuDetectOverflow(V0, ZR, T0, CMPRES1);
  __ bltz(CMPRES1, &fall_through); // There was overflow.
  __ Ret();
  __ Bind(&fall_through);
}
| 456 | |
| 457 | |
// Smi & Smi: bitwise AND of the tagged values is already the tagged
// result, so no untag/retag is needed; the op runs in the delay slot.
void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis.
  __ Ret();
  __ delay_slot()->and_(V0, T0, T1);
  __ Bind(&fall_through);
}
| 466 | |
| 467 | |
// Same code as the reflective variant; & is commutative.
void Intrinsifier::Integer_bitAnd(Assembler* assembler) {
  Integer_bitAndFromInteger(assembler);
}
| 471 | |
| 472 | |
// Smi | Smi: bitwise OR of the tagged values is already the tagged result.
void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis.
  __ Ret();
  __ delay_slot()->or_(V0, T0, T1);
  __ Bind(&fall_through);
}
| 481 | |
| 482 | |
// Same code as the reflective variant; | is commutative.
void Intrinsifier::Integer_bitOr(Assembler* assembler) {
  Integer_bitOrFromInteger(assembler);
}
| 486 | |
| 487 | |
// Smi ^ Smi: XOR of the tagged values is already the tagged result
// (the zero tag bits cancel).
void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis.
  __ Ret();
  __ delay_slot()->xor_(V0, T0, T1);
  __ Bind(&fall_through);
}
| 496 | |
| 497 | |
// Same code as the reflective variant; ^ is commutative.
void Intrinsifier::Integer_bitXor(Assembler* assembler) {
  Integer_bitXorFromInteger(assembler);
}
| 501 | |
| 502 | |
// Smi << Smi. If the shifted value still fits in a Smi the result is
// returned directly; a non-negative value that overflows is boxed into a
// freshly allocated Mint. Negative overflowing values and shift amounts
// larger than Smi::kBits fall through to the runtime.
void Intrinsifier::Integer_shl(Assembler* assembler) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  Label fall_through, overflow;

  TestBothArgumentsSmis(assembler, &fall_through);
  __ BranchUnsignedGreater(T0, Immediate(Smi::RawValue(Smi::kBits)),
                           &fall_through);
  __ SmiUntag(T0);

  // Check for overflow by shifting left and shifting back arithmetically.
  // If the result is different from the original, there was overflow.
  __ sllv(TMP, T1, T0);
  __ srav(CMPRES1, TMP, T0);
  __ bne(CMPRES1, T1, &overflow);

  // No overflow, result in V0.
  __ Ret();
  __ delay_slot()->sllv(V0, T1, T0);

  __ Bind(&overflow);
  // Arguments are Smi but the shift produced an overflow to Mint.
  __ bltz(T1, &fall_through); // Only handle non-negative values here.
  __ SmiUntag(T1);

  // Pull off high bits that will be shifted off of T1 by making a mask
  // ((1 << T0) - 1), shifting it to the right, masking T1, then shifting back.
  // high bits = (((1 << T0) - 1) << (32 - T0)) & T1) >> (32 - T0)
  // lo bits = T1 << T0
  __ LoadImmediate(T3, 1);
  __ sllv(T3, T3, T0); // T3 <- T3 << T0
  __ addiu(T3, T3, Immediate(-1)); // T3 <- T3 - 1
  __ subu(T4, ZR, T0); // T4 <- -T0
  __ addiu(T4, T4, Immediate(32)); // T4 <- 32 - T0
  __ sllv(T3, T3, T4); // T3 <- T3 << T4
  __ and_(T3, T3, T1); // T3 <- T3 & T1
  __ srlv(T3, T3, T4); // T3 <- T3 >> T4
  // Now T3 has the bits that fall off of T1 on a left shift.
  __ sllv(T0, T1, T0); // T0 gets low bits.

  // Box the 64-bit (T3:T0) result into a new Mint; allocation failure
  // falls through to the runtime.
  const Class& mint_class =
      Class::Handle(Isolate::Current()->object_store()->mint_class());
  __ TryAllocate(mint_class, &fall_through, V0, T1);

  // Store low word, then high word in the branch delay slot of Ret.
  __ sw(T0, FieldAddress(V0, Mint::value_offset()));
  __ Ret();
  __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize));
  __ Bind(&fall_through);
}
| 552 | |
| 553 | |
| 554 static void Get64SmiOrMint(Assembler* assembler, | |
| 555 Register res_hi, | |
| 556 Register res_lo, | |
| 557 Register reg, | |
| 558 Label* not_smi_or_mint) { | |
| 559 Label not_smi, done; | |
| 560 __ andi(CMPRES1, reg, Immediate(kSmiTagMask)); | |
| 561 __ bne(CMPRES1, ZR, ¬_smi); | |
| 562 __ SmiUntag(reg); | |
| 563 | |
| 564 // Sign extend to 64 bit | |
| 565 __ mov(res_lo, reg); | |
| 566 __ b(&done); | |
| 567 __ delay_slot()->sra(res_hi, reg, 31); | |
| 568 | |
| 569 __ Bind(¬_smi); | |
| 570 __ LoadClassId(CMPRES1, reg); | |
| 571 __ BranchNotEqual(CMPRES1, Immediate(kMintCid), not_smi_or_mint); | |
| 572 | |
| 573 // Mint. | |
| 574 __ lw(res_lo, FieldAddress(reg, Mint::value_offset())); | |
| 575 __ lw(res_hi, FieldAddress(reg, Mint::value_offset() + kWordSize)); | |
| 576 __ Bind(&done); | |
| 577 return; | |
| 578 } | |
| 579 | |
| 580 | |
// Emits a comparison of the two stack arguments under |rel_op| (LT, LE,
// GT or GE), returning Bool::True()/False() in V0. Smi pairs are compared
// directly; Smi/Mint mixes are widened to 64 bits and compared as
// (hi signed, lo unsigned) pairs. Non-Smi/Mint arguments fall through.
static void CompareIntegers(Assembler* assembler, RelationOperator rel_op) {
  Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through;
  TestBothArgumentsSmis(assembler, &try_mint_smi);
  // T0 contains the right argument. T1 contains left argument

  switch (rel_op) {
    case LT:
      __ BranchSignedLess(T1, T0, &is_true);
      break;
    case LE:
      __ BranchSignedLessEqual(T1, T0, &is_true);
      break;
    case GT:
      __ BranchSignedGreater(T1, T0, &is_true);
      break;
    case GE:
      __ BranchSignedGreaterEqual(T1, T0, &is_true);
      break;
    default:
      UNREACHABLE();
      break;
  }

  __ Bind(&is_false);
  __ LoadObject(V0, Bool::False());
  __ Ret();
  __ Bind(&is_true);
  __ LoadObject(V0, Bool::True());
  __ Ret();

  __ Bind(&try_mint_smi);
  // Get left as 64 bit integer.
  Get64SmiOrMint(assembler, T3, T2, T1, &fall_through);
  // Get right as 64 bit integer.
  Get64SmiOrMint(assembler, T5, T4, T0, &fall_through);
  // T3: left high.
  // T2: left low.
  // T5: right high.
  // T4: right low.

  // 64-bit comparison: high words decide unless equal, in which case the
  // low words are compared UNSIGNED (they carry no sign of their own).
  switch (rel_op) {
    case LT:
    case LE: {
      // Compare left hi, right high.
      __ BranchSignedGreater(T3, T5, &is_false);
      __ BranchSignedLess(T3, T5, &is_true);
      // Compare left lo, right lo.
      if (rel_op == LT) {
        __ BranchUnsignedGreaterEqual(T2, T4, &is_false);
      } else {
        __ BranchUnsignedGreater(T2, T4, &is_false);
      }
      break;
    }
    case GT:
    case GE: {
      // Compare left hi, right high.
      __ BranchSignedLess(T3, T5, &is_false);
      __ BranchSignedGreater(T3, T5, &is_true);
      // Compare left lo, right lo.
      if (rel_op == GT) {
        __ BranchUnsignedLessEqual(T2, T4, &is_false);
      } else {
        __ BranchUnsignedLess(T2, T4, &is_false);
      }
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
  // Else is true.
  __ b(&is_true);

  __ Bind(&fall_through);
}
| 658 | |
| 659 | |
// NOTE(review): uses LT even though the name says greaterThan — presumably
// the receiver and argument arrive swapped for the *FromInt reflective
// stub (a > b dispatched as b < a); confirm against the other ports.
void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) {
  CompareIntegers(assembler, LT);
}
| 663 | |
| 664 | |
// receiver < argument.
void Intrinsifier::Integer_lessThan(Assembler* assembler) {
  CompareIntegers(assembler, LT);
}
| 668 | |
| 669 | |
// receiver > argument.
void Intrinsifier::Integer_greaterThan(Assembler* assembler) {
  CompareIntegers(assembler, GT);
}
| 673 | |
| 674 | |
// receiver <= argument.
void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) {
  CompareIntegers(assembler, LE);
}
| 678 | |
| 679 | |
// receiver >= argument.
void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) {
  CompareIntegers(assembler, GE);
}
| 683 | |
| 684 | |
| 685 // This is called for Smi, Mint and Bigint receivers. The right argument | |
| 686 // can be Smi, Mint, Bigint or double. | |
void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
  Label fall_through, true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ lw(T1, Address(SP, 1 * kWordSize));
  __ beq(T0, T1, &true_label);

  // Combined Smi tag test on both arguments (see TestBothArgumentsSmis).
  __ or_(T2, T0, T1);
  __ andi(CMPRES1, T2, Immediate(kSmiTagMask));
  // If T0 or T1 is not a smi do Mint checks.
  __ bne(CMPRES1, ZR, &check_for_mint);

  // Both arguments are smi, '===' is good enough.
  __ LoadObject(V0, Bool::False());
  __ Ret();
  __ Bind(&true_label);
  __ LoadObject(V0, Bool::True());
  __ Ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &receiver_not_smi); // Check receiver.

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint or Bigint never contains a value that can be
  // represented by Smi.

  __ LoadClassId(CMPRES1, T0);
  // Smi == Double needs a numeric comparison; leave it to the runtime.
  __ BranchEqual(CMPRES1, Immediate(kDoubleCid), &fall_through);
  __ LoadObject(V0, Bool::False()); // Smi == Mint -> false.
  __ Ret();

  __ Bind(&receiver_not_smi);
  // T1:: receiver.

  __ LoadClassId(CMPRES1, T1);
  __ BranchNotEqual(CMPRES1, Immediate(kMintCid), &fall_through);
  // Receiver is Mint, return false if right is Smi.
  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &fall_through);
  __ LoadObject(V0, Bool::False());
  __ Ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(&fall_through);
}
| 736 | |
| 737 | |
// Same code as the reflective variant; == is symmetric.
void Intrinsifier::Integer_equal(Assembler* assembler) {
  Integer_equalToInteger(assembler);
}
| 741 | |
| 742 | |
// Smi >> Smi (arithmetic shift right). Negative shift amounts fall
// through; shifts >= 31 are clamped to 31, which still yields the correct
// result for an arithmetic right shift (all sign bits).
void Intrinsifier::Integer_sar(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
  // Shift amount in T0. Value to shift in T1.

  __ SmiUntag(T0);
  __ bltz(T0, &fall_through);

  // Clamp the shift amount to 31.
  __ LoadImmediate(T2, 0x1F);
  __ slt(CMPRES1, T2, T0); // CMPRES1 <- 0x1F < T0 ? 1 : 0
  __ movn(T0, T2, CMPRES1); // T0 <- 0x1F < T0 ? 0x1F : T0

  __ SmiUntag(T1);
  __ srav(V0, T1, T0);
  __ Ret();
  __ delay_slot()->SmiTag(V0); // Re-tag in the branch delay slot.
  __ Bind(&fall_through);
}
| 762 | |
| 763 | |
// ~receiver for a Smi. nor with ZR computes the bitwise complement of the
// tagged value; that sets the (zero) tag bit, so subtracting 1 in the
// delay slot clears it again, producing the correctly tagged result.
void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ nor(V0, T0, ZR);
  __ Ret();
  __ delay_slot()->addiu(V0, V0, Immediate(-1)); // Remove inverted smi-tag.
}
| 770 | |
| 771 | |
// Smi.bitLength: number of bits needed to represent the value, computed
// as 32 - clz(v >= 0 ? v : ~v).
void Intrinsifier::Smi_bitLength(Assembler* assembler) {
  __ lw(V0, Address(SP, 0 * kWordSize));
  __ SmiUntag(V0);
  // XOR with sign bit to complement bits if value is negative.
  __ sra(T0, V0, 31);
  __ xor_(V0, V0, T0);
  __ clz(V0, V0); // Count leading zeros of the magnitude.
  __ LoadImmediate(T0, 32);
  __ subu(V0, T0, V0);
  __ Ret();
  __ delay_slot()->SmiTag(V0); // Re-tag in the branch delay slot.
}
| 784 | |
| 785 | |
// Smi & Smi: same code as the integer variant.
void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler) {
  Integer_bitAndFromInteger(assembler);
}
| 789 | |
| 790 | |
void Intrinsifier::Bigint_lsh(Assembler* assembler) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // T2 = x_used, T3 = x_digits, x_used > 0, x_used is Smi.
  __ lw(T2, Address(SP, 2 * kWordSize));
  __ lw(T3, Address(SP, 3 * kWordSize));
  // T4 = r_digits, T5 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ lw(T4, Address(SP, 0 * kWordSize));
  __ lw(T5, Address(SP, 1 * kWordSize));
  __ SmiUntag(T5);
  // T0 = n ~/ _DIGIT_BITS
  __ sra(T0, T5, 5);
  // T6 = &x_digits[0]
  __ addiu(T6, T3, Immediate(TypedData::data_offset() - kHeapObjectTag));
  // V0 = &x_digits[x_used]
  // (tagged x_used * 2 == x_used * 4 bytes, one 32-bit digit each).
  __ sll(T2, T2, 1);
  __ addu(V0, T6, T2);
  // V1 = &r_digits[1]
  __ addiu(V1, T4, Immediate(TypedData::data_offset() - kHeapObjectTag +
                             Bigint::kBytesPerDigit));
  // V1 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
  __ addu(V1, V1, T2);
  __ sll(T1, T0, 2);
  __ addu(V1, V1, T1);
  // T3 = n % _DIGIT_BITS
  __ andi(T3, T5, Immediate(31));
  // T2 = 32 - T3
  __ subu(T2, ZR, T3);
  __ addiu(T2, T2, Immediate(32));
  __ mov(T1, ZR); // T1 accumulates the carry bits from the previous digit.
  // Walk the digits from most- to least-significant, writing each shifted
  // digit combined with the carry from its lower neighbor.
  Label loop;
  __ Bind(&loop);
  __ addiu(V0, V0, Immediate(-Bigint::kBytesPerDigit));
  __ lw(T0, Address(V0, 0));
  __ srlv(AT, T0, T2);
  __ or_(T1, T1, AT);
  __ addiu(V1, V1, Immediate(-Bigint::kBytesPerDigit));
  __ sw(T1, Address(V1, 0));
  __ bne(V0, T6, &loop);
  // The next carry (T0 << T3) is computed in the branch delay slot.
  __ delay_slot()->sllv(T1, T0, T3);
  __ sw(T1, Address(V1, -Bigint::kBytesPerDigit));
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
| 836 | |
| 837 | |
void Intrinsifier::Bigint_rsh(Assembler* assembler) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)
  // (Comment previously said _lsh — copy/paste from the function above.)

  // T2 = x_used, T3 = x_digits, x_used > 0, x_used is Smi.
  __ lw(T2, Address(SP, 2 * kWordSize));
  __ lw(T3, Address(SP, 3 * kWordSize));
  // T4 = r_digits, T5 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ lw(T4, Address(SP, 0 * kWordSize));
  __ lw(T5, Address(SP, 1 * kWordSize));
  __ SmiUntag(T5);
  // T0 = n ~/ _DIGIT_BITS
  __ sra(T0, T5, 5);
  // V1 = &r_digits[0]
  __ addiu(V1, T4, Immediate(TypedData::data_offset() - kHeapObjectTag));
  // V0 = &x_digits[n ~/ _DIGIT_BITS]
  __ addiu(V0, T3, Immediate(TypedData::data_offset() - kHeapObjectTag));
  __ sll(T1, T0, 2);
  __ addu(V0, V0, T1);
  // T6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
  __ sll(T2, T2, 1);
  __ addu(T6, V1, T2);
  __ subu(T6, T6, T1);
  __ addiu(T6, T6, Immediate(-4));
  // T3 = n % _DIGIT_BITS
  __ andi(T3, T5, Immediate(31));
  // T2 = 32 - T3
  __ subu(T2, ZR, T3);
  __ addiu(T2, T2, Immediate(32));
  // T1 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
  __ lw(T1, Address(V0, 0));
  __ addiu(V0, V0, Immediate(Bigint::kBytesPerDigit));
  // Skip the loop entirely when only the final digit remains.
  Label loop_exit;
  __ beq(V1, T6, &loop_exit);
  __ delay_slot()->srlv(T1, T1, T3);
  // Walk the digits from least- to most-significant, combining each
  // shifted digit with the high bits of its upper neighbor.
  Label loop;
  __ Bind(&loop);
  __ lw(T0, Address(V0, 0));
  __ addiu(V0, V0, Immediate(Bigint::kBytesPerDigit));
  __ sllv(AT, T0, T2);
  __ or_(T1, T1, AT);
  __ sw(T1, Address(V1, 0));
  __ addiu(V1, V1, Immediate(Bigint::kBytesPerDigit));
  __ bne(V1, T6, &loop);
  // The next partial digit (T0 >> T3) is computed in the branch delay slot.
  __ delay_slot()->srlv(T1, T0, T3);
  __ Bind(&loop_exit);
  __ sw(T1, Address(V1, 0));
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
| 888 | |
| 889 | |
void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)
  //
  // Adds the a_used least significant digits of a_digits to the used least
  // significant digits of digits, writing used + 1 digits (including the
  // final carry) into r_digits. Loop comments below imply
  // 0 < a_used <= used (both passed as Smis).

  // T2 = used, T3 = digits
  __ lw(T2, Address(SP, 3 * kWordSize));
  __ lw(T3, Address(SP, 4 * kWordSize));
  // T3 = &digits[0]
  __ addiu(T3, T3, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T4 = a_used, T5 = a_digits
  __ lw(T4, Address(SP, 1 * kWordSize));
  __ lw(T5, Address(SP, 2 * kWordSize));
  // T5 = &a_digits[0]
  __ addiu(T5, T5, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T6 = r_digits
  __ lw(T6, Address(SP, 0 * kWordSize));
  // T6 = &r_digits[0]
  __ addiu(T6, T6, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // V0 = &digits[a_used >> 1], a_used is Smi.
  __ sll(V0, T4, 1);
  __ addu(V0, V0, T3);

  // V1 = &digits[used >> 1], used is Smi.
  __ sll(V1, T2, 1);
  __ addu(V1, V1, T3);

  // T2 = carry in = 0.
  __ mov(T2, ZR);
  Label add_loop;
  __ Bind(&add_loop);
  // Loop a_used times, a_used > 0.
  // MIPS has no carry flag; sltu reconstructs the carry of each addition.
  __ lw(T0, Address(T3, 0));  // T0 = x.
  __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit));
  __ lw(T1, Address(T5, 0));  // T1 = y.
  __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit));
  __ addu(T1, T0, T1);  // T1 = x + y.
  __ sltu(T4, T1, T0);  // T4 = carry out of x + y.
  __ addu(T0, T1, T2);  // T0 = x + y + carry in.
  __ sltu(T2, T0, T1);  // T2 = carry out of (x + y) + carry in.
  __ or_(T2, T2, T4);   // T2 = carry out of x + y + carry in.
  __ sw(T0, Address(T6, 0));
  __ bne(T3, V0, &add_loop);
  __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit));

  Label last_carry;
  __ beq(T3, V1, &last_carry);

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, used - a_used > 0.
  // Only the carry propagates through the remaining digits of `digits`.
  __ lw(T0, Address(T3, 0));  // T0 = x.
  __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit));
  __ addu(T1, T0, T2);  // T1 = x + carry in.
  __ sltu(T2, T1, T0);  // T2 = carry out of x + carry in.
  __ sw(T1, Address(T6, 0));
  __ bne(T3, V1, &carry_loop);
  __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit));

  __ Bind(&last_carry);
  // Store the final carry as the most significant result digit.
  __ sw(T2, Address(T6, 0));

  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
| 958 | |
| 959 | |
void Intrinsifier::Bigint_absSub(Assembler* assembler) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)
  //
  // Subtracts the a_used least significant digits of a_digits from the used
  // least significant digits of digits, writing used digits into r_digits.
  // Loop comments below imply 0 < a_used <= used (both passed as Smis).

  // T2 = used, T3 = digits
  __ lw(T2, Address(SP, 3 * kWordSize));
  __ lw(T3, Address(SP, 4 * kWordSize));
  // T3 = &digits[0]
  __ addiu(T3, T3, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T4 = a_used, T5 = a_digits
  __ lw(T4, Address(SP, 1 * kWordSize));
  __ lw(T5, Address(SP, 2 * kWordSize));
  // T5 = &a_digits[0]
  __ addiu(T5, T5, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T6 = r_digits
  __ lw(T6, Address(SP, 0 * kWordSize));
  // T6 = &r_digits[0]
  __ addiu(T6, T6, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // V0 = &digits[a_used >> 1], a_used is Smi.
  __ sll(V0, T4, 1);
  __ addu(V0, V0, T3);

  // V1 = &digits[used >> 1], used is Smi.
  __ sll(V1, T2, 1);
  __ addu(V1, V1, T3);

  // T2 = borrow in = 0.
  __ mov(T2, ZR);
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop a_used times, a_used > 0.
  // MIPS has no borrow flag; sltu reconstructs the borrow of each subtract.
  __ lw(T0, Address(T3, 0));  // T0 = x.
  __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit));
  __ lw(T1, Address(T5, 0));  // T1 = y.
  __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit));
  __ subu(T1, T0, T1);  // T1 = x - y.
  __ sltu(T4, T0, T1);  // T4 = borrow out of x - y.
  __ subu(T0, T1, T2);  // T0 = x - y - borrow in.
  __ sltu(T2, T1, T0);  // T2 = borrow out of (x - y) - borrow in.
  __ or_(T2, T2, T4);   // T2 = borrow out of x - y - borrow in.
  __ sw(T0, Address(T6, 0));
  __ bne(T3, V0, &sub_loop);
  __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit));

  Label done;
  __ beq(T3, V1, &done);

  Label borrow_loop;
  __ Bind(&borrow_loop);
  // Loop used - a_used times, used - a_used > 0.
  // Only the borrow propagates through the remaining digits of `digits`.
  __ lw(T0, Address(T3, 0));  // T0 = x.
  __ addiu(T3, T3, Immediate(Bigint::kBytesPerDigit));
  __ subu(T1, T0, T2);  // T1 = x - borrow in.
  __ sltu(T2, T0, T1);  // T2 = borrow out of x - borrow in.
  __ sw(T1, Address(T6, 0));
  __ bne(T3, V1, &borrow_loop);
  __ delay_slot()->addiu(T6, T6, Immediate(Bigint::kBytesPerDigit));

  __ Bind(&done);
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
| 1026 | |
| 1027 | |
void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
  // Multiply-accumulate step of Bigint multiplication; returns Smi 1 (the
  // number of digits processed per call). Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint32_t x = x_digits[xi >> 1];  // xi is Smi.
  //   if (x == 0 || n == 0) {
  //     return 1;
  //   }
  //   uint32_t* mip = &m_digits[i >> 1];  // i is Smi.
  //   uint32_t* ajp = &a_digits[j >> 1];  // j is Smi.
  //   uint32_t c = 0;
  //   SmiUntag(n);
  //   do {
  //     uint32_t mi = *mip++;
  //     uint32_t aj = *ajp;
  //     uint64_t t = x*mi + aj + c;  // 32-bit * 32-bit -> 64-bit.
  //     *ajp++ = low32(t);
  //     c = high32(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint64_t t = *ajp + c;
  //     *ajp++ = low32(t);
  //     c = high32(t);  // c == 0 or 1.
  //   }
  //   return 1;
  // }

  Label done;
  // T3 = x, no_op if x == 0
  __ lw(T0, Address(SP, 5 * kWordSize));  // T0 = xi as Smi.
  __ lw(T1, Address(SP, 6 * kWordSize));  // T1 = x_digits.
  __ sll(T0, T0, 1);  // Smi xi * 2 more = byte offset into 32-bit digits.
  __ addu(T1, T0, T1);
  __ lw(T3, FieldAddress(T1, TypedData::data_offset()));
  __ beq(T3, ZR, &done);

  // T6 = SmiUntag(n), no_op if n == 0
  __ lw(T6, Address(SP, 0 * kWordSize));
  __ SmiUntag(T6);
  __ beq(T6, ZR, &done);
  __ delay_slot()->addiu(T6, T6, Immediate(-1));  // ... while (n-- > 0).

  // T4 = mip = &m_digits[i >> 1]
  __ lw(T0, Address(SP, 3 * kWordSize));  // T0 = i as Smi.
  __ lw(T1, Address(SP, 4 * kWordSize));  // T1 = m_digits.
  __ sll(T0, T0, 1);
  __ addu(T1, T0, T1);
  __ addiu(T4, T1, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T5 = ajp = &a_digits[j >> 1]
  __ lw(T0, Address(SP, 1 * kWordSize));  // T0 = j as Smi.
  __ lw(T1, Address(SP, 2 * kWordSize));  // T1 = a_digits.
  __ sll(T0, T0, 1);
  __ addu(T1, T0, T1);
  __ addiu(T5, T1, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T1 = c = 0
  __ mov(T1, ZR);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // Register assignments inside the loop:
  // x:   T3
  // mip: T4
  // ajp: T5
  // c:   T1
  // n-1: T6

  // uint32_t mi = *mip++
  __ lw(T2, Address(T4, 0));

  // uint32_t aj = *ajp
  __ lw(T0, Address(T5, 0));

  // uint64_t t = x*mi + aj + c
  __ multu(T2, T3);  // HI:LO = x*mi.
  __ addiu(T4, T4, Immediate(Bigint::kBytesPerDigit));
  __ mflo(V0);
  __ mfhi(V1);
  __ addu(V0, V0, T0);  // V0 = low32(x*mi) + aj.
  __ sltu(T7, V0, T0);  // T7 = carry out of low32(x*mi) + aj.
  __ addu(V1, V1, T7);  // V1:V0 = x*mi + aj.
  __ addu(T0, V0, T1);  // T0 = low32(x*mi + aj) + c.
  __ sltu(T7, T0, T1);  // T7 = carry out of low32(x*mi + aj) + c.
  __ addu(T1, V1, T7);  // T1 = c = high32(x*mi + aj + c).

  // *ajp++ = low32(t) = T0
  __ sw(T0, Address(T5, 0));
  __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit));

  // while (n-- > 0)
  __ bgtz(T6, &muladd_loop);
  __ delay_slot()->addiu(T6, T6, Immediate(-1));  // --n

  __ beq(T1, ZR, &done);

  // *ajp++ += c
  __ lw(T0, Address(T5, 0));
  __ addu(T0, T0, T1);
  __ sltu(T1, T0, T1);  // T1 = carry out; nonzero means keep propagating.
  __ sw(T0, Address(T5, 0));
  __ beq(T1, ZR, &done);
  __ delay_slot()->addiu(T5, T5, Immediate(Bigint::kBytesPerDigit));

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  // Increment successive digits as long as each wraps around to zero.
  __ lw(T0, Address(T5, 0));
  __ addiu(T0, T0, Immediate(1));
  __ sw(T0, Address(T5, 0));
  __ beq(T0, ZR, &propagate_carry_loop);
  __ delay_slot()->addiu(T5, T5, Immediate(Bigint::kBytesPerDigit));

  __ Bind(&done);
  __ addiu(V0, ZR, Immediate(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}
| 1144 | |
| 1145 | |
void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
  // Squaring step of Bigint multiplication; returns Smi 1 (the number of
  // digits processed per call). Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint32_t* xip = &x_digits[i >> 1];  // i is Smi.
  //   uint32_t x = *xip++;
  //   if (x == 0) return 1;
  //   uint32_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint32_t aj = *ajp;
  //   uint64_t t = x*x + aj;
  //   *ajp++ = low32(t);
  //   uint64_t c = high32(t);
  //   int n = ((used - i) >> 1) - 1;  // used and i are Smi.
  //   while (--n >= 0) {
  //     uint32_t xi = *xip++;
  //     uint32_t aj = *ajp;
  //     uint96_t t = 2*x*xi + aj + c;  // 2-bit * 32-bit * 32-bit -> 65-bit.
  //     *ajp++ = low32(t);
  //     c = high64(t);  // 33-bit.
  //   }
  //   uint32_t aj = *ajp;
  //   uint64_t t = aj + c;  // 32-bit + 33-bit -> 34-bit.
  //   *ajp++ = low32(t);
  //   *ajp = high32(t);
  //   return 1;
  // }

  // T4 = xip = &x_digits[i >> 1]
  __ lw(T2, Address(SP, 2 * kWordSize));  // T2 = i as Smi.
  __ lw(T3, Address(SP, 3 * kWordSize));  // T3 = x_digits.
  __ sll(T0, T2, 1);
  __ addu(T3, T0, T3);
  __ addiu(T4, T3, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T3 = x = *xip++, return if x == 0
  Label x_zero;
  __ lw(T3, Address(T4, 0));
  __ beq(T3, ZR, &x_zero);
  __ delay_slot()->addiu(T4, T4, Immediate(Bigint::kBytesPerDigit));

  // T5 = ajp = &a_digits[i]
  __ lw(T1, Address(SP, 1 * kWordSize));  // a_digits
  __ sll(T0, T2, 2);  // j == 2*i, i is Smi.
  __ addu(T1, T0, T1);
  __ addiu(T5, T1, Immediate(TypedData::data_offset() - kHeapObjectTag));

  // T6:T0 = t = x*x + *ajp
  __ lw(T0, Address(T5, 0));  // *ajp.
  __ mthi(ZR);
  __ mtlo(T0);
  __ maddu(T3, T3);  // HI:LO = T3*T3 + *ajp.
  __ mfhi(T6);
  __ mflo(T0);

  // *ajp++ = low32(t) = T0
  __ sw(T0, Address(T5, 0));
  __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit));

  // T6 = low32(c) = high32(t)
  // T7 = high32(c) = 0
  __ mov(T7, ZR);

  // int n = used - i - 1; while (--n >= 0) ...
  __ lw(T0, Address(SP, 0 * kWordSize));  // used is Smi
  __ subu(V0, T0, T2);
  __ SmiUntag(V0);  // V0 = used - i
  // int n = used - i - 2; if (n >= 0) ... while (n-- > 0)
  __ addiu(V0, V0, Immediate(-2));

  Label loop, done;
  __ bltz(V0, &done);

  __ Bind(&loop);
  // Register assignments inside the loop:
  // x:   T3
  // xip: T4
  // ajp: T5
  // c:   T7:T6
  // t:   A2:A1:A0 (not live at loop entry)
  // n:   V0

  // uint32_t xi = *xip++
  __ lw(T2, Address(T4, 0));
  __ addiu(T4, T4, Immediate(Bigint::kBytesPerDigit));

  // uint32_t aj = *ajp
  __ lw(T0, Address(T5, 0));

  // uint96_t t = T7:T6:T0 = 2*x*xi + aj + c
  __ multu(T2, T3);
  __ mfhi(A1);
  __ mflo(A0);  // A1:A0 = x*xi.
  // Shift the 64-bit product left by one into a 65-bit value in A2:A1:A0.
  __ srl(A2, A1, 31);
  __ sll(A1, A1, 1);
  __ srl(T1, A0, 31);
  __ or_(A1, A1, T1);
  __ sll(A0, A0, 1);  // A2:A1:A0 = 2*x*xi.
  __ addu(A0, A0, T0);
  __ sltu(T1, A0, T0);
  __ addu(A1, A1, T1);  // No carry out possible; A2:A1:A0 = 2*x*xi + aj.
  __ addu(T0, A0, T6);
  __ sltu(T1, T0, T6);
  __ addu(T6, A1, T1);  // No carry out; A2:T6:T0 = 2*x*xi + aj + low32(c).
  __ addu(T6, T6, T7);  // No carry out; A2:T6:T0 = 2*x*xi + aj + c.
  __ mov(T7, A2);  // T7:T6:T0 = 2*x*xi + aj + c.

  // *ajp++ = low32(t) = T0
  __ sw(T0, Address(T5, 0));
  __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit));

  // while (n-- > 0)
  __ bgtz(V0, &loop);
  __ delay_slot()->addiu(V0, V0, Immediate(-1));  // --n

  __ Bind(&done);
  // uint32_t aj = *ajp
  __ lw(T0, Address(T5, 0));

  // uint64_t t = aj + c
  __ addu(T6, T6, T0);
  __ sltu(T1, T6, T0);
  __ addu(T7, T7, T1);

  // *ajp = low32(t) = T6
  // *(ajp + 1) = high32(t) = T7
  __ sw(T6, Address(T5, 0));
  __ sw(T7, Address(T5, Bigint::kBytesPerDigit));

  __ Bind(&x_zero);
  __ addiu(V0, ZR, Immediate(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}
| 1277 | |
| 1278 | |
void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) {
  // Not intrinsified on MIPS: there is no unsigned 64-bit / 32-bit divide
  // instruction. No code is emitted, so the method is not intrinsified
  // (presumably the Dart implementation runs instead -- TODO confirm).
}
| 1282 | |
| 1283 | |
void Intrinsifier::Montgomery_mulMod(Assembler* assembler) {
  // Montgomery reduction helper; returns Smi 1 (one digit processed).
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint32_t rho = args[_RHO];  // _RHO == 2.
  //   uint32_t d = digits[i >> 1];  // i is Smi.
  //   uint64_t t = rho*d;
  //   args[_MU] = t mod DIGIT_BASE;  // _MU == 4.
  //   return 1;
  // }

  // T4 = args
  __ lw(T4, Address(SP, 2 * kWordSize));  // args

  // T3 = rho = args[2]
  __ lw(T3, FieldAddress(
      T4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit));

  // T2 = d = digits[i >> 1]
  __ lw(T0, Address(SP, 0 * kWordSize));  // T0 = i as Smi.
  __ lw(T1, Address(SP, 1 * kWordSize));  // T1 = digits.
  __ sll(T0, T0, 1);  // Smi i * 2 more = byte offset into 32-bit digits.
  __ addu(T1, T0, T1);
  __ lw(T2, FieldAddress(T1, TypedData::data_offset()));

  // HI:LO = t = rho*d
  __ multu(T2, T3);

  // args[4] = t mod DIGIT_BASE = low32(t)
  __ mflo(T0);
  __ sw(T0, FieldAddress(
      T4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit));

  __ addiu(V0, ZR, Immediate(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}
| 1319 | |
| 1320 | |
// Checks if the last argument (at SP + 0) is a double. Jumps to label
// 'is_smi' if it is a Smi (easy to convert to double), and to label
// 'not_double_smi' if it is neither a Smi nor a double.
// Falls through with the Double in T0. Clobbers CMPRES1.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
  __ beq(CMPRES1, ZR, is_smi);
  __ LoadClassId(CMPRES1, T0);
  __ BranchNotEqual(CMPRES1, Immediate(kDoubleCid), not_double_smi);
  // Fall through with Double in T0.
}
| 1334 | |
| 1335 | |
| 1336 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown | |
| 1337 // type. Return true or false object in the register V0. Any NaN argument | |
| 1338 // returns false. Any non-double arg1 causes control flow to fall through to the | |
| 1339 // slow case (compiled method body). | |
static void CompareDoubles(Assembler* assembler, RelationOperator rel_op) {
  Label is_smi, double_op, no_NaN, fall_through;
  __ Comment("CompareDoubles Intrinsic");

  TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
  // Both arguments are double, right operand is in T0.
  __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag);
  __ Bind(&double_op);
  __ lw(T0, Address(SP, 1 * kWordSize));  // Left argument.
  __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag);
  // Now, left is in D0, right is in D1.

  __ cund(D0, D1);  // Check for NaN.
  __ bc1f(&no_NaN);
  __ LoadObject(V0, Bool::False());  // Return false if either is NaN.
  __ Ret();
  __ Bind(&no_NaN);

  // Set the FPU condition flag according to the requested relation.
  // GT and GE reuse the LT/LE compares with swapped operands.
  switch (rel_op) {
    case EQ:
      __ ceqd(D0, D1);
      break;
    case LT:
      __ coltd(D0, D1);
      break;
    case LE:
      __ coled(D0, D1);
      break;
    case GT:
      __ coltd(D1, D0);
      break;
    case GE:
      __ coled(D1, D0);
      break;
    default: {
      // Only passing the above conditions to this function.
      UNREACHABLE();
      break;
    }
  }

  Label is_true;
  __ bc1t(&is_true);
  __ LoadObject(V0, Bool::False());
  __ Ret();
  __ Bind(&is_true);
  __ LoadObject(V0, Bool::True());
  __ Ret();


  __ Bind(&is_smi);
  // Right argument is a Smi: convert it to double in D1 and retry.
  __ SmiUntag(T0);
  __ mtc1(T0, STMP1);
  __ b(&double_op);
  __ delay_slot()->cvtdw(D1, STMP1);


  __ Bind(&fall_through);
}
| 1399 | |
| 1400 | |
void Intrinsifier::Double_greaterThan(Assembler* assembler) {
  // receiver > arg; NaN yields false (see CompareDoubles).
  CompareDoubles(assembler, GT);
}
| 1404 | |
| 1405 | |
void Intrinsifier::Double_greaterEqualThan(Assembler* assembler) {
  // receiver >= arg; NaN yields false (see CompareDoubles).
  CompareDoubles(assembler, GE);
}
| 1409 | |
| 1410 | |
void Intrinsifier::Double_lessThan(Assembler* assembler) {
  // receiver < arg; NaN yields false (see CompareDoubles).
  CompareDoubles(assembler, LT);
}
| 1414 | |
| 1415 | |
void Intrinsifier::Double_equal(Assembler* assembler) {
  // receiver == arg; NaN yields false (see CompareDoubles).
  CompareDoubles(assembler, EQ);
}
| 1419 | |
| 1420 | |
void Intrinsifier::Double_lessEqualThan(Assembler* assembler) {
  // receiver <= arg; NaN yields false (see CompareDoubles).
  CompareDoubles(assembler, LE);
}
| 1424 | |
| 1425 | |
| 1426 // Expects left argument to be double (receiver). Right argument is unknown. | |
| 1427 // Both arguments are on stack. | |
// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack. Allocates a new Double for the result and
// falls through to the compiled body if the right argument is neither a
// double nor a Smi, or if allocation fails.
static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) {
  Label fall_through, is_smi, double_op;

  TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
  // Both arguments are double, right operand is in T0.
  // Load the right operand into D1 (register pair F2:F3).
  __ lwc1(F2, FieldAddress(T0, Double::value_offset()));
  __ lwc1(F3, FieldAddress(T0, Double::value_offset() + kWordSize));
  __ Bind(&double_op);
  __ lw(T0, Address(SP, 1 * kWordSize));  // Left argument.
  // Load the left operand into D0 (register pair F0:F1).
  __ lwc1(F0, FieldAddress(T0, Double::value_offset()));
  __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize));
  switch (kind) {
    case Token::kADD:
      __ addd(D0, D0, D1);
      break;
    case Token::kSUB:
      __ subd(D0, D0, D1);
      break;
    case Token::kMUL:
      __ muld(D0, D0, D1);
      break;
    case Token::kDIV:
      __ divd(D0, D0, D1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class =
      Class::Handle(Isolate::Current()->object_store()->double_class());
  __ TryAllocate(double_class, &fall_through, V0, T1);  // Result register.
  __ swc1(F0, FieldAddress(V0, Double::value_offset()));
  __ Ret();
  __ delay_slot()->swc1(F1,
                        FieldAddress(V0, Double::value_offset() + kWordSize));

  __ Bind(&is_smi);
  // Right argument is a Smi: convert it to double in D1 and retry.
  __ SmiUntag(T0);
  __ mtc1(T0, STMP1);
  __ b(&double_op);
  __ delay_slot()->cvtdw(D1, STMP1);

  __ Bind(&fall_through);
}
| 1471 | |
| 1472 | |
void Intrinsifier::Double_add(Assembler* assembler) {
  // receiver + arg; see DoubleArithmeticOperations.
  DoubleArithmeticOperations(assembler, Token::kADD);
}
| 1476 | |
| 1477 | |
void Intrinsifier::Double_mul(Assembler* assembler) {
  // receiver * arg; see DoubleArithmeticOperations.
  DoubleArithmeticOperations(assembler, Token::kMUL);
}
| 1481 | |
| 1482 | |
void Intrinsifier::Double_sub(Assembler* assembler) {
  // receiver - arg; see DoubleArithmeticOperations.
  DoubleArithmeticOperations(assembler, Token::kSUB);
}
| 1486 | |
| 1487 | |
void Intrinsifier::Double_div(Assembler* assembler) {
  // receiver / arg; see DoubleArithmeticOperations.
  DoubleArithmeticOperations(assembler, Token::kDIV);
}
| 1491 | |
| 1492 | |
| 1493 // Left is double right is integer (Bigint, Mint or Smi) | |
void Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
  // Multiplies the double receiver by the integer argument. Only the Smi
  // case is intrinsified; Mint/Bigint arguments fall through to the
  // compiled body, as does a failed Double allocation.
  Label fall_through;
  // Only smis allowed.
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &fall_through);

  // Is Smi. Convert the untagged value into D1.
  __ SmiUntag(T0);
  __ mtc1(T0, F4);
  __ cvtdw(D1, F4);

  // Load the double receiver into D0 (register pair F0:F1).
  __ lw(T0, Address(SP, 1 * kWordSize));
  __ lwc1(F0, FieldAddress(T0, Double::value_offset()));
  __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize));
  __ muld(D0, D0, D1);
  const Class& double_class =
      Class::Handle(Isolate::Current()->object_store()->double_class());
  __ TryAllocate(double_class, &fall_through, V0, T1);  // Result register.
  __ swc1(F0, FieldAddress(V0, Double::value_offset()));
  __ Ret();
  __ delay_slot()->swc1(F1,
                        FieldAddress(V0, Double::value_offset() + kWordSize));
  __ Bind(&fall_through);
}
| 1519 | |
| 1520 | |
void Intrinsifier::DoubleFromInteger(Assembler* assembler) {
  // Converts a Smi argument to a newly allocated Double. Non-Smi arguments
  // and failed allocations fall through to the compiled body.
  Label fall_through;

  __ lw(T0, Address(SP, 0 * kWordSize));
  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &fall_through);

  // Is Smi. Convert the untagged value into D0 (register pair F0:F1).
  __ SmiUntag(T0);
  __ mtc1(T0, F4);
  __ cvtdw(D0, F4);
  const Class& double_class =
      Class::Handle(Isolate::Current()->object_store()->double_class());
  __ TryAllocate(double_class, &fall_through, V0, T1);  // Result register.
  __ swc1(F0, FieldAddress(V0, Double::value_offset()));
  __ Ret();
  __ delay_slot()->swc1(F1,
                        FieldAddress(V0, Double::value_offset() + kWordSize));
  __ Bind(&fall_through);
}
| 1541 | |
| 1542 | |
void Intrinsifier::Double_getIsNaN(Assembler* assembler) {
  // Returns true iff the double receiver is NaN (unordered with itself).
  Label is_true;

  __ lw(T0, Address(SP, 0 * kWordSize));
  __ lwc1(F0, FieldAddress(T0, Double::value_offset()));
  __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize));
  __ cund(D0, D0);  // Condition flag is set iff the receiver is NaN.
  __ bc1t(&is_true);
  __ LoadObject(V0, Bool::False());  // Not NaN -> false.
  __ Ret();
  __ Bind(&is_true);
  __ LoadObject(V0, Bool::True());  // NaN -> true.
  __ Ret();
}
| 1557 | |
| 1558 | |
void Intrinsifier::Double_getIsInfinite(Assembler* assembler) {
  // Returns true iff the double receiver is +/- infinity, checked against
  // the IEEE 754 bit pattern (low word zero, high word 0x7FF00000 after
  // clearing the sign bit).
  Label not_inf;
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ lw(T1, FieldAddress(T0, Double::value_offset()));
  __ lw(T2, FieldAddress(T0, Double::value_offset() + kWordSize));
  // If the low word isn't zero, then it isn't infinity.
  __ bne(T1, ZR, &not_inf);
  // Mask off the sign bit.
  __ AndImmediate(T2, T2, 0x7FFFFFFF);
  // Compare with +infinity.
  __ BranchNotEqual(T2, Immediate(0x7FF00000), &not_inf);

  __ LoadObject(V0, Bool::True());
  __ Ret();

  __ Bind(&not_inf);
  __ LoadObject(V0, Bool::False());
  __ Ret();
}
| 1578 | |
| 1579 | |
| 1580 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { | |
| 1581 Label is_false, is_true, is_zero; | |
| 1582 __ lw(T0, Address(SP, 0 * kWordSize)); | |
| 1583 __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); | |
| 1584 | |
| 1585 __ cund(D0, D0); | |
| 1586 __ bc1t(&is_false); // NaN -> false. | |
| 1587 | |
| 1588 __ LoadImmediate(D1, 0.0); | |
| 1589 __ ceqd(D0, D1); | |
| 1590 __ bc1t(&is_zero); // Check for negative zero. | |
| 1591 | |
| 1592 __ coled(D1, D0); | |
| 1593 __ bc1t(&is_false); // >= 0 -> false. | |
| 1594 | |
| 1595 __ Bind(&is_true); | |
| 1596 __ LoadObject(V0, Bool::True()); | |
| 1597 __ Ret(); | |
| 1598 | |
| 1599 __ Bind(&is_false); | |
| 1600 __ LoadObject(V0, Bool::False()); | |
| 1601 __ Ret(); | |
| 1602 | |
| 1603 __ Bind(&is_zero); | |
| 1604 // Check for negative zero by looking at the sign bit. | |
| 1605 __ mfc1(T0, F1); // Moves bits 32...63 of D0 to T0. | |
| 1606 __ srl(T0, T0, 31); // Get the sign bit down to bit 0 of T0. | |
| 1607 __ andi(CMPRES1, T0, Immediate(1)); // Check if the bit is set. | |
| 1608 __ bne(T0, ZR, &is_true); // Sign bit set. True. | |
| 1609 __ b(&is_false); | |
| 1610 } | |
| 1611 | |
| 1612 | |
void Intrinsifier::DoubleToInteger(Assembler* assembler) {
  // Truncates the double receiver toward zero and returns the result as a
  // Smi. Falls through to the compiled body if the truncated value does not
  // fit in a Smi (including the trunc.w.d overflow case).
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag);

  __ truncwd(F2, D0);
  __ mfc1(V0, F2);

  // Overflow is signaled with minint.
  Label fall_through;
  // Check for overflow and that it fits into Smi.
  // V0 - 0xC0000000 is negative (as a signed 32-bit value) exactly when V0
  // lies outside the Smi range [-0x40000000, 0x3FFFFFFF]; this also catches
  // the minint overflow marker.
  __ LoadImmediate(TMP, 0xC0000000);
  __ subu(CMPRES1, V0, TMP);
  __ bltz(CMPRES1, &fall_through);
  __ Ret();
  __ delay_slot()->SmiTag(V0);
  __ Bind(&fall_through);
}
| 1630 | |
| 1631 | |
void Intrinsifier::MathSqrt(Assembler* assembler) {
  // sqrt of a double or Smi argument, returning a newly allocated Double.
  // Falls through to the compiled body for other argument types or if the
  // allocation fails.
  Label fall_through, is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
  // Argument is double and is in T0.
  __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag);
  __ Bind(&double_op);
  __ sqrtd(D0, D1);
  const Class& double_class =
      Class::Handle(Isolate::Current()->object_store()->double_class());
  __ TryAllocate(double_class, &fall_through, V0, T1);  // Result register.
  __ swc1(F0, FieldAddress(V0, Double::value_offset()));
  __ Ret();
  __ delay_slot()->swc1(F1,
                        FieldAddress(V0, Double::value_offset() + kWordSize));

  __ Bind(&is_smi);
  // Smi argument: convert it to double in D1 and retry.
  __ SmiUntag(T0);
  __ mtc1(T0, F2);
  __ b(&double_op);
  __ delay_slot()->cvtdw(D1, F2);
  __ Bind(&fall_through);
}
| 1654 | |
| 1655 | |
| 1656 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; | |
| 1657 // _state[kSTATE_LO] = state & _MASK_32; | |
| 1658 // _state[kSTATE_HI] = state >> 32; | |
void Intrinsifier::Random_nextState(Assembler* assembler) {
  // Looks up the _Random class, its _state field, and the constant _A at
  // compile time, then emits code that advances the 64-bit generator state
  // stored in the two Uint32List elements of _state.
  const Library& math_lib = Library::Handle(Library::MathLibrary());
  ASSERT(!math_lib.IsNull());
  const Class& random_class =
      Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
  ASSERT(!random_class.IsNull());
  const Field& state_field = Field::ZoneHandle(
      random_class.LookupInstanceFieldAllowPrivate(Symbols::_state()));
  ASSERT(!state_field.IsNull());
  const Field& random_A_field = Field::ZoneHandle(
      random_class.LookupStaticFieldAllowPrivate(Symbols::_A()));
  ASSERT(!random_A_field.IsNull());
  ASSERT(random_A_field.is_const());
  Instance& a_value = Instance::Handle(random_A_field.StaticValue());
  if (a_value.raw() == Object::sentinel().raw() ||
      a_value.raw() == Object::transition_sentinel().raw()) {
    // _A has not been evaluated yet; force evaluation of its initializer.
    random_A_field.EvaluateInitializer();
    a_value = random_A_field.StaticValue();
  }
  const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value();
  // 'a_int_value' is a mask.
  ASSERT(Utils::IsUint(32, a_int_value));
  int32_t a_int32_value = static_cast<int32_t>(a_int_value);

  // Receiver.
  __ lw(T0, Address(SP, 0 * kWordSize));
  // Field '_state'.
  __ lw(T1, FieldAddress(T0, state_field.Offset()));

  // Addresses of _state[0] and _state[1].
  const intptr_t scale = Instance::ElementSizeFor(kTypedDataUint32ArrayCid);
  const intptr_t offset = Instance::DataOffsetFor(kTypedDataUint32ArrayCid);
  const Address& addr_0 = FieldAddress(T1, 0 * scale + offset);
  const Address& addr_1 = FieldAddress(T1, 1 * scale + offset);

  // state = _A * _state[0] + _state[1], computed in HI:LO via maddu.
  __ LoadImmediate(T0, a_int32_value);
  __ lw(T2, addr_0);
  __ lw(T3, addr_1);
  __ mtlo(T3);
  __ mthi(ZR);  // HI:LO <- ZR:T3  Zero extend T3 into HI.
  // 64-bit multiply and accumulate into T6:T3.
  __ maddu(T0, T2);  // HI:LO <- HI:LO + T0 * T2.
  __ mflo(T3);
  __ mfhi(T6);
  // _state[0] = low32(state), _state[1] = high32(state).
  __ sw(T3, addr_0);
  __ sw(T6, addr_1);
  __ Ret();
}
| 1707 | |
| 1708 | |
// Identity comparison of the two stack arguments; returns a Bool in V0.
// Never falls through to the runtime.
void Intrinsifier::ObjectEquals(Assembler* assembler) {
  Label is_true;

  __ lw(T0, Address(SP, 0 * kWordSize));  // Other.
  __ lw(T1, Address(SP, 1 * kWordSize));  // Receiver.
  __ beq(T0, T1, &is_true);
  __ LoadObject(V0, Bool::False());
  __ Ret();
  __ Bind(&is_true);
  __ LoadObject(V0, Bool::True());
  __ Ret();
}
| 1721 | |
| 1722 | |
| 1723 enum RangeCheckCondition { kIfNotInRange, kIfInRange }; | |
| 1724 | |
| 1725 | |
// Branches to 'target' when 'val' is in (kIfInRange) or not in
// (kIfNotInRange) the inclusive range [low, high]. Implemented with a
// single unsigned comparison: after biasing by -low, any value below
// 'low' wraps to a large unsigned number. Clobbers 'tmp'.
static void RangeCheck(Assembler* assembler,
                       Register val,
                       Register tmp,
                       intptr_t low,
                       intptr_t high,
                       RangeCheckCondition cc,
                       Label* target) {
  __ AddImmediate(tmp, val, -low);
  if (cc == kIfInRange) {
    __ BranchUnsignedLessEqual(tmp, Immediate(high - low), target);
  } else {
    ASSERT(cc == kIfNotInRange);
    __ BranchUnsignedGreater(tmp, Immediate(high - low), target);
  }
}
| 1741 | |
| 1742 | |
// Branches to 'target' if 'cid' is an integer class id
// (kSmiCid..kBigintCid). Clobbers 'tmp'.
static void JumpIfInteger(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target);
}
| 1749 | |
| 1750 | |
// Branches to 'target' if 'cid' is not an integer class id
// (outside kSmiCid..kBigintCid). Clobbers 'tmp'.
static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Register tmp,
                             Label* target) {
  RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target);
}
| 1757 | |
| 1758 | |
// Branches to 'target' if 'cid' is a string class id
// (kOneByteStringCid..kExternalTwoByteStringCid). Clobbers 'tmp'.
static void JumpIfString(Assembler* assembler,
                         Register cid,
                         Register tmp,
                         Label* target) {
  RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfInRange, target);
}
| 1766 | |
| 1767 | |
// Branches to 'target' if 'cid' is not a string class id
// (outside kOneByteStringCid..kExternalTwoByteStringCid). Clobbers 'tmp'.
static void JumpIfNotString(Assembler* assembler,
                            Register cid,
                            Register tmp,
                            Label* target) {
  RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfNotInRange, target);
}
| 1775 | |
| 1776 | |
| 1777 // Return type quickly for simple types (not parameterized and not signature). | |
void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
  Label fall_through, use_canonical_type, not_integer, not_double;
  __ lw(T0, Address(SP, 0 * kWordSize));  // Receiver.
  __ LoadClassIdMayBeSmi(T1, T0);

  // Closures are handled in the runtime.
  __ BranchEqual(T1, Immediate(kClosureCid), &fall_through);

  // Non-predefined classes are resolved via their cached canonical type.
  __ BranchUnsignedGreaterEqual(T1, Immediate(kNumPredefinedCids),
                                &use_canonical_type);

  __ BranchNotEqual(T1, Immediate(kDoubleCid), &not_double);
  // Object is a double: return the canonical 'double' type.
  __ LoadIsolate(T1);
  __ LoadFromOffset(T1, T1, Isolate::object_store_offset());
  __ LoadFromOffset(V0, T1, ObjectStore::double_type_offset());
  __ Ret();

  __ Bind(&not_double);
  JumpIfNotInteger(assembler, T1, T2, &not_integer);
  // Object is an integer: return the canonical 'int' type.
  __ LoadIsolate(T1);
  __ LoadFromOffset(T1, T1, Isolate::object_store_offset());
  __ LoadFromOffset(V0, T1, ObjectStore::int_type_offset());
  __ Ret();

  __ Bind(&not_integer);
  JumpIfNotString(assembler, T1, T2, &use_canonical_type);
  // Object is a string: return the canonical 'String' type.
  __ LoadIsolate(T1);
  __ LoadFromOffset(T1, T1, Isolate::object_store_offset());
  __ LoadFromOffset(V0, T1, ObjectStore::string_type_offset());
  __ Ret();

  __ Bind(&use_canonical_type);
  __ LoadClassById(T2, T1);
  // Parameterized classes are handled in the runtime.
  __ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset()));
  __ BranchNotEqual(T1, Immediate(0), &fall_through);

  __ lw(V0, FieldAddress(T2, Class::canonical_type_offset()));
  // No cached canonical type; fall through to the runtime.
  __ BranchEqual(V0, Object::null_object(), &fall_through);
  __ Ret();

  __ Bind(&fall_through);
}
| 1823 | |
| 1824 | |
// Returns true in V0 when the two stack arguments have the same runtime
// type; falls through to the runtime for closures and parameterized
// classes.
void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
  Label fall_through, different_cids, equal, not_equal, not_integer;

  __ lw(T0, Address(SP, 0 * kWordSize));
  __ LoadClassIdMayBeSmi(T1, T0);

  // Closures are handled in the runtime.
  __ BranchEqual(T1, Immediate(kClosureCid), &fall_through);

  __ lw(T0, Address(SP, 1 * kWordSize));
  __ LoadClassIdMayBeSmi(T2, T0);

  // Check whether class ids match. If class ids don't match objects can still
  // have the same runtime type (e.g. multiple string implementation classes
  // map to a single String type).
  __ BranchNotEqual(T1, T2, &different_cids);

  // Objects have the same class and neither is a closure.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(T2, T1);
  __ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset()));
  __ BranchNotEqual(T1, Immediate(0), &fall_through);

  __ Bind(&equal);
  __ LoadObject(V0, Bool::True());
  __ Ret();

  // Class ids are different. Check if we are comparing runtime types of
  // two strings (with different representations) or two integers.
  __ Bind(&different_cids);
  // User-defined classes with different cids never share a runtime type.
  __ BranchUnsignedGreaterEqual(T1, Immediate(kNumPredefinedCids), &not_equal);

  // Check if both are integers.
  JumpIfNotInteger(assembler, T1, T0, &not_integer);
  JumpIfInteger(assembler, T2, T0, &equal);
  __ b(&not_equal);

  __ Bind(&not_integer);
  // Check if both are strings.
  JumpIfNotString(assembler, T1, T0, &not_equal);
  JumpIfString(assembler, T2, T0, &equal);

  // Neither strings nor integers and have different class ids.
  __ Bind(&not_equal);
  __ LoadObject(V0, Bool::False());
  __ Ret();

  __ Bind(&fall_through);
}
| 1875 | |
| 1876 | |
// Returns the receiver string's cached hash code (already a tagged Smi),
// or falls through to the runtime when the cached hash is still 0
// (i.e. not yet computed).
void Intrinsifier::String_getHashCode(Assembler* assembler) {
  Label fall_through;
  __ lw(T0, Address(SP, 0 * kWordSize));  // Receiver.
  __ lw(V0, FieldAddress(T0, String::hash_offset()));
  __ beq(V0, ZR, &fall_through);
  __ Ret();
  __ Bind(&fall_through);  // Hash not yet computed.
}
| 1885 | |
| 1886 | |
// Emits a substring-match loop specialized for the given receiver/other
// string representations (one-byte or two-byte each).
// On entry:
//   A0: receiver string (class id must be 'receiver_cid')
//   A1: start index, tagged Smi
//   A2: other string (class id must be 'other_cid')
// Branches to 'return_true' / 'return_false' with the result; never
// returns normally. Clobbers A0-A2, T0-T4.
void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(A1);
  __ lw(T1, FieldAddress(A0, String::length_offset()));  // this.length
  __ SmiUntag(T1);
  __ lw(T2, FieldAddress(A2, String::length_offset()));  // other.length
  __ SmiUntag(T2);

  // if (other.length == 0) return true;
  __ beq(T2, ZR, return_true);

  // if (start < 0) return false;
  __ bltz(A1, return_false);

  // if (start + other.length > this.length) return false;
  __ addu(T0, A1, T2);
  __ BranchSignedGreater(T0, T1, return_false);

  // Point A0 at this.codeUnitAt(start): add the data offset plus the
  // start index scaled by the character width (1 or 2 bytes).
  if (receiver_cid == kOneByteStringCid) {
    __ AddImmediate(A0, A0, OneByteString::data_offset() - kHeapObjectTag);
    __ addu(A0, A0, A1);
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ AddImmediate(A0, A0, TwoByteString::data_offset() - kHeapObjectTag);
    __ addu(A0, A0, A1);
    __ addu(A0, A0, A1);  // Add A1 twice: two bytes per code unit.
  }
  // Point A2 at other.codeUnitAt(0).
  if (other_cid == kOneByteStringCid) {
    __ AddImmediate(A2, A2, OneByteString::data_offset() - kHeapObjectTag);
  } else {
    ASSERT(other_cid == kTwoByteStringCid);
    __ AddImmediate(A2, A2, TwoByteString::data_offset() - kHeapObjectTag);
  }

  // i = 0
  __ LoadImmediate(T0, 0);

  // do
  Label loop;
  __ Bind(&loop);

  if (receiver_cid == kOneByteStringCid) {
    __ lbu(T3, Address(A0, 0));  // this.codeUnitAt(i + start)
  } else {
    __ lhu(T3, Address(A0, 0));  // this.codeUnitAt(i + start)
  }
  if (other_cid == kOneByteStringCid) {
    __ lbu(T4, Address(A2, 0));  // other.codeUnitAt(i)
  } else {
    __ lhu(T4, Address(A2, 0));  // other.codeUnitAt(i)
  }
  __ bne(T3, T4, return_false);

  // i++, while (i < len)
  __ AddImmediate(T0, T0, 1);
  __ AddImmediate(A0, A0, receiver_cid == kOneByteStringCid ? 1 : 2);
  __ AddImmediate(A2, A2, other_cid == kOneByteStringCid ? 1 : 2);
  __ BranchSignedLess(T0, T2, &loop);

  __ b(return_true);
}
| 1951 | |
| 1952 | |
| 1953 // bool _substringMatches(int start, String other) | |
| 1954 // This intrinsic handles a OneByteString or TwoByteString receiver with a | |
| 1955 // OneByteString other. | |
void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) {
  Label fall_through, return_true, return_false, try_two_byte;
  __ lw(A0, Address(SP, 2 * kWordSize));  // this
  __ lw(A1, Address(SP, 1 * kWordSize));  // start
  __ lw(A2, Address(SP, 0 * kWordSize));  // other

  __ andi(CMPRES1, A1, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &fall_through);  // 'start' is not a Smi.

  // Only a OneByteString 'other' is intrinsified; anything else goes to
  // the runtime.
  __ LoadClassId(CMPRES1, A2);
  __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &fall_through);

  __ LoadClassId(CMPRES1, A0);
  __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &try_two_byte);

  // Receiver is a OneByteString.
  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ LoadClassId(CMPRES1, A0);
  __ BranchNotEqual(CMPRES1, Immediate(kTwoByteStringCid), &fall_through);

  // Receiver is a TwoByteString.
  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(V0, Bool::True());
  __ Ret();

  __ Bind(&return_false);
  __ LoadObject(V0, Bool::False());
  __ Ret();

  __ Bind(&fall_through);
}
| 1993 | |
| 1994 | |
// Returns this[index] as a one-character string, but only when the result
// is one of the predefined one-char-code symbols; otherwise (or on any
// failed check) falls through to the runtime.
void Intrinsifier::StringBaseCharAt(Assembler* assembler) {
  Label fall_through, try_two_byte_string;

  __ lw(T1, Address(SP, 0 * kWordSize));  // Index.
  __ lw(T0, Address(SP, 1 * kWordSize));  // String.

  // Checks.
  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &fall_through);  // Index is not a Smi.
  __ lw(T2, FieldAddress(T0, String::length_offset()));  // Range check.
  // Runtime throws exception.
  __ BranchUnsignedGreaterEqual(T1, T2, &fall_through);
  __ LoadClassId(CMPRES1, T0);  // Class ID check.
  __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid),
                    &try_two_byte_string);

  // Grab byte and return.
  __ SmiUntag(T1);
  __ addu(T2, T0, T1);
  __ lbu(T2, FieldAddress(T2, OneByteString::data_offset()));
  // Only code units with a predefined symbol are handled here.
  __ BranchUnsignedGreaterEqual(
      T2, Immediate(Symbols::kNumberOfOneCharCodeSymbols), &fall_through);
  // Index into the predefined-symbols table: V0 = table + code * kWordSize.
  __ lw(V0, Address(THR, Thread::predefined_symbols_address_offset()));
  __ AddImmediate(V0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
  __ sll(T2, T2, 2);
  __ addu(T2, T2, V0);
  __ Ret();
  __ delay_slot()->lw(V0, Address(T2));

  __ Bind(&try_two_byte_string);
  __ BranchNotEqual(CMPRES1, Immediate(kTwoByteStringCid), &fall_through);
  // T1 is still a tagged Smi: with a tag shift of 1 its value is
  // index * 2, which is exactly the byte offset of a two-byte code unit.
  ASSERT(kSmiTagShift == 1);
  __ addu(T2, T0, T1);
  __ lhu(T2, FieldAddress(T2, TwoByteString::data_offset()));
  __ BranchUnsignedGreaterEqual(
      T2, Immediate(Symbols::kNumberOfOneCharCodeSymbols), &fall_through);
  __ lw(V0, Address(THR, Thread::predefined_symbols_address_offset()));
  __ AddImmediate(V0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
  __ sll(T2, T2, 2);
  __ addu(T2, T2, V0);
  __ Ret();
  __ delay_slot()->lw(V0, Address(T2));

  __ Bind(&fall_through);
}
| 2040 | |
| 2041 | |
// Returns true iff the receiver string's length is zero; never falls
// through to the runtime.
void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) {
  Label is_true;

  __ lw(T0, Address(SP, 0 * kWordSize));
  __ lw(T0, FieldAddress(T0, String::length_offset()));

  // Length is a tagged Smi; zero tagged is still zero.
  __ beq(T0, ZR, &is_true);
  __ LoadObject(V0, Bool::False());
  __ Ret();
  __ Bind(&is_true);
  __ LoadObject(V0, Bool::True());
  __ Ret();
}
| 2055 | |
| 2056 | |
// Computes, caches, and returns the receiver OneByteString's hash code
// (Jenkins-style one-at-a-time hash). Returns the cached value directly
// when it is already non-zero.
void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) {
  Label no_hash;

  __ lw(T1, Address(SP, 0 * kWordSize));
  __ lw(V0, FieldAddress(T1, String::hash_offset()));
  __ beq(V0, ZR, &no_hash);
  __ Ret();  // Return if already computed.
  __ Bind(&no_hash);

  __ lw(T2, FieldAddress(T1, String::length_offset()));

  Label done;
  // If the string is empty, set the hash to 1, and return.
  __ BranchEqual(T2, Immediate(Smi::RawValue(0)), &done);
  __ delay_slot()->mov(V0, ZR);  // Hash accumulator starts at 0.

  __ SmiUntag(T2);
  __ AddImmediate(T3, T1, OneByteString::data_offset() - kHeapObjectTag);
  __ addu(T4, T3, T2);
  // V0: Hash code, untagged integer.
  // T1: Instance of OneByteString.
  // T2: String length, untagged integer.
  // T3: String data start.
  // T4: String data end.

  Label loop;
  // Add to hash code: (hash_ is uint32)
  // hash_ += ch;
  // hash_ += hash_ << 10;
  // hash_ ^= hash_ >> 6;
  // Get one characters (ch).
  __ Bind(&loop);
  __ lbu(T5, Address(T3));
  // T5: ch.
  __ addiu(T3, T3, Immediate(1));
  __ addu(V0, V0, T5);
  __ sll(T6, V0, 10);
  __ addu(V0, V0, T6);
  __ srl(T6, V0, 6);
  // Loop until T3 reaches the end of the data; the xor step executes in
  // the branch delay slot.
  __ bne(T3, T4, &loop);
  __ delay_slot()->xor_(V0, V0, T6);

  // Finalize.
  // hash_ += hash_ << 3;
  // hash_ ^= hash_ >> 11;
  // hash_ += hash_ << 15;
  __ sll(T6, V0, 3);
  __ addu(V0, V0, T6);
  __ srl(T6, V0, 11);
  __ xor_(V0, V0, T6);
  __ sll(T6, V0, 15);
  __ addu(V0, V0, T6);
  // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1);
  __ LoadImmediate(T6, (static_cast<intptr_t>(1) << String::kHashBits) - 1);
  __ and_(V0, V0, T6);
  __ Bind(&done);

  // A zero hash would look uncached, so map it to 1.
  __ LoadImmediate(T2, 1);
  __ movz(V0, T2, V0);  // If V0 is 0, set to 1.
  __ SmiTag(V0);

  // Cache the tagged hash in the string's hash field on the way out.
  __ Ret();
  __ delay_slot()->sw(V0, FieldAddress(T1, String::hash_offset()));
}
| 2121 | |
| 2122 | |
| 2123 // Allocates one-byte string of length 'end - start'. The content is not | |
| 2124 // initialized. | |
| 2125 // 'length-reg' (T2) contains tagged length. | |
| 2126 // Returns new string as tagged pointer in V0. | |
// Allocates one-byte string of length 'end - start'. The content is not
// initialized.
// 'length-reg' (T2) contains tagged length.
// Returns new string as tagged pointer in V0; branches to 'failure' if
// the allocation cannot be done inline, otherwise to 'ok'.
static void TryAllocateOnebyteString(Assembler* assembler,
                                     Label* ok,
                                     Label* failure) {
  const Register length_reg = T2;
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(kOneByteStringCid, V0, failure));
  __ mov(T6, length_reg);  // Save the length register.
  // TODO(koda): Protect against negative length and overflow here.
  __ SmiUntag(length_reg);
  // Round the total size (header + data) up to the object alignment.
  const intptr_t fixed_size_plus_alignment_padding =
      sizeof(RawString) + kObjectAlignment - 1;
  __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
  __ LoadImmediate(TMP, ~(kObjectAlignment - 1));
  __ and_(length_reg, length_reg, TMP);

  const intptr_t cid = kOneByteStringCid;
  Heap::Space space = Heap::kNew;
  __ lw(T3, Address(THR, Thread::heap_offset()));
  __ lw(V0, Address(T3, Heap::TopOffset(space)));

  // length_reg: allocation size.
  __ addu(T1, V0, length_reg);
  __ BranchUnsignedLess(T1, V0, failure);  // Fail on unsigned overflow.

  // Check if the allocation fits into the remaining space.
  // V0: potential new object start.
  // T1: potential next object start.
  // T2: allocation size.
  // T3: heap.
  __ lw(T4, Address(T3, Heap::EndOffset(space)));
  __ BranchUnsignedGreaterEqual(T1, T4, failure);

  // Successfully allocated the object(s), now update top to point to
  // next object start and initialize the object.
  __ sw(T1, Address(T3, Heap::TopOffset(space)));
  __ AddImmediate(V0, kHeapObjectTag);

  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T3, space));

  // Initialize the tags.
  // V0: new object start as a tagged pointer.
  // T1: new object end address.
  // T2: allocation size.
  {
    Label overflow, done;
    const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;

    // Sizes too large for the size-tag field are encoded as 0.
    __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag),
                             &overflow);
    __ b(&done);
    __ delay_slot()->sll(T2, T2, shift);
    __ Bind(&overflow);
    __ mov(T2, ZR);
    __ Bind(&done);

    // Get the class index and insert it into the tags.
    // T2: size and bit tags.
    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
    __ or_(T2, T2, TMP);
    __ sw(T2, FieldAddress(V0, String::tags_offset()));  // Store tags.
  }

  // Set the length field using the saved length (T6).
  __ StoreIntoObjectNoBarrier(V0, FieldAddress(V0, String::length_offset()),
                              T6);
  // Clear hash.
  __ b(ok);
  __ delay_slot()->sw(ZR, FieldAddress(V0, String::hash_offset()));
}
| 2195 | |
| 2196 | |
| 2197 // Arg0: OneByteString (receiver). | |
| 2198 // Arg1: Start index as Smi. | |
| 2199 // Arg2: End index as Smi. | |
| 2200 // The indexes must be valid. | |
void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) {
  const intptr_t kStringOffset = 2 * kWordSize;
  const intptr_t kStartIndexOffset = 1 * kWordSize;
  const intptr_t kEndIndexOffset = 0 * kWordSize;
  Label fall_through, ok;

  // Both indexes must be Smis; OR them together and test the tag once.
  __ lw(T2, Address(SP, kEndIndexOffset));
  __ lw(TMP, Address(SP, kStartIndexOffset));
  __ or_(CMPRES1, T2, TMP);
  __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, &fall_through);  // 'start', 'end' not Smi.

  // T2 = tagged length of the result (end - start).
  __ subu(T2, T2, TMP);
  TryAllocateOnebyteString(assembler, &ok, &fall_through);
  __ Bind(&ok);
  // V0: new string as tagged pointer.
  // Copy string.
  __ lw(T3, Address(SP, kStringOffset));
  __ lw(T1, Address(SP, kStartIndexOffset));
  __ SmiUntag(T1);
  __ addu(T3, T3, T1);
  // -1 compensates for the heap-object tag on T3.
  __ AddImmediate(T3, OneByteString::data_offset() - 1);

  // T3: Start address to copy from (untagged).
  // T1: Untagged start index.
  __ lw(T2, Address(SP, kEndIndexOffset));
  __ SmiUntag(T2);
  __ subu(T2, T2, T1);

  // T3: Start address to copy from (untagged).
  // T2: Untagged number of bytes to copy.
  // V0: Tagged result string.
  // T6: Pointer into T3 (source).
  // T7: Pointer into V0 (destination).
  // T1: Scratch register.
  Label loop, done;
  __ beq(T2, ZR, &done);
  __ mov(T6, T3);
  __ mov(T7, V0);

  // Byte-by-byte copy loop.
  __ Bind(&loop);
  __ lbu(T1, Address(T6, 0));
  __ AddImmediate(T6, 1);
  __ addiu(T2, T2, Immediate(-1));
  __ sb(T1, FieldAddress(T7, OneByteString::data_offset()));
  __ bgtz(T2, &loop);
  __ delay_slot()->addiu(T7, T7, Immediate(1));

  __ Bind(&done);
  __ Ret();
  __ Bind(&fall_through);
}
| 2253 | |
| 2254 | |
// Stores a byte value into a OneByteString: this[index] = value.
// Index and value are assumed to be valid (checked by the caller).
void Intrinsifier::OneByteStringSetAt(Assembler* assembler) {
  __ lw(T2, Address(SP, 0 * kWordSize));  // Value.
  __ lw(T1, Address(SP, 1 * kWordSize));  // Index.
  __ lw(T0, Address(SP, 2 * kWordSize));  // OneByteString.
  __ SmiUntag(T1);
  __ SmiUntag(T2);
  __ addu(T3, T0, T1);
  // Store the byte in the return's delay slot.
  __ Ret();
  __ delay_slot()->sb(T2, FieldAddress(T3, OneByteString::data_offset()));
}
| 2265 | |
| 2266 | |
// Allocates a OneByteString of the given (tagged Smi) length with
// uninitialized contents; falls through to the runtime when the inline
// allocation fails.
void Intrinsifier::OneByteString_allocate(Assembler* assembler) {
  Label fall_through, ok;

  __ lw(T2, Address(SP, 0 * kWordSize));  // Length.
  TryAllocateOnebyteString(assembler, &ok, &fall_through);

  __ Bind(&ok);
  __ Ret();

  __ Bind(&fall_through);
}
| 2278 | |
| 2279 | |
| 2280 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). | |
| 2281 static void StringEquality(Assembler* assembler, intptr_t string_cid) { | |
| 2282 Label fall_through, is_true, is_false, loop; | |
| 2283 __ lw(T0, Address(SP, 1 * kWordSize)); // This. | |
| 2284 __ lw(T1, Address(SP, 0 * kWordSize)); // Other. | |
| 2285 | |
| 2286 // Are identical? | |
| 2287 __ beq(T0, T1, &is_true); | |
| 2288 | |
| 2289 // Is other OneByteString? | |
| 2290 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); | |
| 2291 __ beq(CMPRES1, ZR, &fall_through); // Other is Smi. | |
| 2292 __ LoadClassId(CMPRES1, T1); // Class ID check. | |
| 2293 __ BranchNotEqual(CMPRES1, Immediate(string_cid), &fall_through); | |
| 2294 | |
| 2295 // Have same length? | |
| 2296 __ lw(T2, FieldAddress(T0, String::length_offset())); | |
| 2297 __ lw(T3, FieldAddress(T1, String::length_offset())); | |
| 2298 __ bne(T2, T3, &is_false); | |
| 2299 | |
| 2300 // Check contents, no fall-through possible. | |
| 2301 ASSERT((string_cid == kOneByteStringCid) || | |
| 2302 (string_cid == kTwoByteStringCid)); | |
| 2303 __ SmiUntag(T2); | |
| 2304 __ Bind(&loop); | |
| 2305 __ AddImmediate(T2, -1); | |
| 2306 __ BranchSignedLess(T2, Immediate(0), &is_true); | |
| 2307 if (string_cid == kOneByteStringCid) { | |
| 2308 __ lbu(V0, FieldAddress(T0, OneByteString::data_offset())); | |
| 2309 __ lbu(V1, FieldAddress(T1, OneByteString::data_offset())); | |
| 2310 __ AddImmediate(T0, 1); | |
| 2311 __ AddImmediate(T1, 1); | |
| 2312 } else if (string_cid == kTwoByteStringCid) { | |
| 2313 __ lhu(V0, FieldAddress(T0, OneByteString::data_offset())); | |
| 2314 __ lhu(V1, FieldAddress(T1, OneByteString::data_offset())); | |
| 2315 __ AddImmediate(T0, 2); | |
| 2316 __ AddImmediate(T1, 2); | |
| 2317 } else { | |
| 2318 UNIMPLEMENTED(); | |
| 2319 } | |
| 2320 __ bne(V0, V1, &is_false); | |
| 2321 __ b(&loop); | |
| 2322 | |
| 2323 __ Bind(&is_false); | |
| 2324 __ LoadObject(V0, Bool::False()); | |
| 2325 __ Ret(); | |
| 2326 __ Bind(&is_true); | |
| 2327 __ LoadObject(V0, Bool::True()); | |
| 2328 __ Ret(); | |
| 2329 | |
| 2330 __ Bind(&fall_through); | |
| 2331 } | |
| 2332 | |
| 2333 | |
// Content equality for two OneByteStrings.
void Intrinsifier::OneByteString_equality(Assembler* assembler) {
  StringEquality(assembler, kOneByteStringCid);
}
| 2337 | |
| 2338 | |
// Content equality for two TwoByteStrings.
void Intrinsifier::TwoByteString_equality(Assembler* assembler) {
  StringEquality(assembler, kTwoByteStringCid);
}
| 2342 | |
| 2343 | |
// Tail-calls the regexp matcher function specialized for the subject
// string's representation. Not emitted when irregexp interpretation is
// enabled (no compiled matchers exist in that mode).
void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
                                                bool sticky) {
  if (FLAG_interpret_irregexp) return;

  static const intptr_t kRegExpParamOffset = 2 * kWordSize;
  static const intptr_t kStringParamOffset = 1 * kWordSize;
  // start_index smi is located at 0.

  // Incoming registers:
  // T0: Function. (Will be reloaded with the specialized matcher function.)
  // S4: Arguments descriptor. (Will be preserved.)
  // S5: Unknown. (Must be GC safe on tail call.)

  // Load the specialized function pointer into T0. Leverage the fact the
  // string CIDs as well as stored function pointers are in sequence.
  __ lw(T1, Address(SP, kRegExpParamOffset));
  __ lw(T3, Address(SP, kStringParamOffset));
  __ LoadClassId(T2, T3);
  // Scale (cid - kOneByteStringCid) by the word size to index into the
  // RegExp's per-representation function table.
  __ AddImmediate(T2, -kOneByteStringCid);
  __ sll(T2, T2, kWordSizeLog2);
  __ addu(T2, T2, T1);
  __ lw(T0,
        FieldAddress(T2, RegExp::function_offset(kOneByteStringCid, sticky)));

  // Registers are now set up for the lazy compile stub. It expects the function
  // in T0, the argument descriptor in S4, and IC-Data in S5.
  __ mov(S5, ZR);

  // Tail-call the function.
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T3, FieldAddress(T0, Function::entry_point_offset()));
  __ jr(T3);
}
| 2377 | |
| 2378 | |
| 2379 // On stack: user tag (+0). | |
| 2380 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | |
| 2381 // T1: Isolate. | |
| 2382 __ LoadIsolate(T1); | |
| 2383 // V0: Current user tag. | |
| 2384 __ lw(V0, Address(T1, Isolate::current_tag_offset())); | |
| 2385 // T2: UserTag. | |
| 2386 __ lw(T2, Address(SP, +0 * kWordSize)); | |
| 2387 // Set Isolate::current_tag_. | |
| 2388 __ sw(T2, Address(T1, Isolate::current_tag_offset())); | |
| 2389 // T2: UserTag's tag. | |
| 2390 __ lw(T2, FieldAddress(T2, UserTag::tag_offset())); | |
| 2391 // Set Isolate::user_tag_. | |
| 2392 __ sw(T2, Address(T1, Isolate::user_tag_offset())); | |
| 2393 __ Ret(); | |
| 2394 __ delay_slot()->sw(T2, Address(T1, Isolate::user_tag_offset())); | |
| 2395 } | |
| 2396 | |
| 2397 | |
// Returns the isolate's default tag in V0.
void Intrinsifier::UserTag_defaultTag(Assembler* assembler) {
  __ LoadIsolate(V0);
  // Load the tag in the return's delay slot.
  __ Ret();
  __ delay_slot()->lw(V0, Address(V0, Isolate::default_tag_offset()));
}
| 2403 | |
| 2404 | |
// Returns the isolate's current tag in V0.
void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) {
  __ LoadIsolate(V0);
  // Load the tag in the return's delay slot.
  __ Ret();
  __ delay_slot()->lw(V0, Address(V0, Isolate::current_tag_offset()));
}
| 2410 | |
| 2411 | |
// Returns whether the Dart timeline stream is currently enabled; always
// false when timeline support is compiled out.
void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler) {
  if (!FLAG_support_timeline) {
    __ LoadObject(V0, Bool::False());
    __ Ret();
    return;
  }
  // Load TimelineStream*.
  __ lw(V0, Address(THR, Thread::dart_stream_offset()));
  // Load uintptr_t from TimelineStream*.
  __ lw(T0, Address(V0, TimelineStream::enabled_offset()));
  // Branchlessly select True/False based on the enabled flag.
  __ LoadObject(V0, Bool::True());
  __ LoadObject(V1, Bool::False());
  __ Ret();
  __ delay_slot()->movz(V0, V1, T0);  // V0 = (T0 == 0) ? V1 : V0.
}
| 2427 | |
| 2428 | |
// Clears the current thread's async stack trace field and returns null.
void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler) {
  __ LoadObject(V0, Object::null_object());
  __ sw(V0, Address(THR, Thread::async_stack_trace_offset()));
  __ Ret();
}
| 2434 | |
| 2435 | |
| 2436 void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler) { | |
| 2437 __ lw(V0, Address(THR, Thread::async_stack_trace_offset())); | |
| 2438 __ LoadObject(V0, Object::null_object()); | |
| 2439 __ Ret(); | |
| 2440 } | |
| 2441 | |
| 2442 } // namespace dart | |
| 2443 | |
| 2444 #endif // defined TARGET_ARCH_MIPS | |
| OLD | NEW |