OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 22 matching lines...) |
33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
34 #include "codegen.h" | 34 #include "codegen.h" |
35 #include "regexp-macro-assembler.h" | 35 #include "regexp-macro-assembler.h" |
36 | 36 |
37 namespace v8 { | 37 namespace v8 { |
38 namespace internal { | 38 namespace internal { |
39 | 39 |
40 | 40 |
41 #define __ ACCESS_MASM(masm) | 41 #define __ ACCESS_MASM(masm) |
42 | 42 |
| 43 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 44 Label* slow, |
| 45 Condition cc, |
| 46 bool never_nan_nan); |
| 47 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 48 Register lhs, |
| 49 Register rhs, |
| 50 Label* rhs_not_nan, |
| 51 Label* slow, |
| 52 bool strict); |
| 53 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); |
| 54 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 55 Register lhs, |
| 56 Register rhs); |
| 57 |
| 58 |
| 59 // Check if the operand is a heap number. |
| 60 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, |
| 61 Register scratch1, Register scratch2, |
| 62 Label* not_a_heap_number) { |
| 63 __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); |
| 64 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex); |
| 65 __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2)); |
| 66 } |
| 67 |
43 | 68 |
44 void ToNumberStub::Generate(MacroAssembler* masm) { | 69 void ToNumberStub::Generate(MacroAssembler* masm) { |
45 UNIMPLEMENTED_MIPS(); | 70 // The ToNumber stub takes one argument in a0. |
| 71 Label check_heap_number, call_builtin; |
| 72 __ JumpIfNotSmi(a0, &check_heap_number); |
| 73 __ mov(v0, a0); |
| 74 __ Ret(); |
| 75 |
| 76 __ bind(&check_heap_number); |
| 77 EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin); |
| 78 __ mov(v0, a0); |
| 79 __ Ret(); |
| 80 |
| 81 __ bind(&call_builtin); |
| 82 __ push(a0); |
| 83 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); |
46 } | 84 } |
47 | 85 |
48 | 86 |
49 void FastNewClosureStub::Generate(MacroAssembler* masm) { | 87 void FastNewClosureStub::Generate(MacroAssembler* masm) { |
50 UNIMPLEMENTED_MIPS(); | 88 // Create a new closure from the given function info in new |
| 89 // space. Set the context to the current context in cp. |
| 90 Label gc; |
| 91 |
| 92 // Pop the function info from the stack. |
| 93 __ pop(a3); |
| 94 |
| 95 // Attempt to allocate new JSFunction in new space. |
| 96 __ AllocateInNewSpace(JSFunction::kSize, |
| 97 v0, |
| 98 a1, |
| 99 a2, |
| 100 &gc, |
| 101 TAG_OBJECT); |
| 102 |
| 103 int map_index = strict_mode_ == kStrictMode |
| 104 ? Context::STRICT_MODE_FUNCTION_MAP_INDEX |
| 105 : Context::FUNCTION_MAP_INDEX; |
| 106 |
| 107 // Compute the function map in the current global context and set that |
| 108 // as the map of the allocated object. |
| 109 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 110 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset)); |
| 111 __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index))); |
| 112 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 113 |
| 114 // Initialize the rest of the function. We don't have to update the |
| 115 // write barrier because the allocated object is in new space. |
| 116 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex); |
| 117 __ LoadRoot(a2, Heap::kTheHoleValueRootIndex); |
| 118 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); |
| 119 __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
| 120 __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 121 __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset)); |
| 122 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset)); |
| 123 __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset)); |
| 124 __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset)); |
| 125 __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset)); |
| 126 |
| 127 // Initialize the code pointer in the function to be the one |
| 128 // found in the shared function info object. |
| 129 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset)); |
| 130 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 131 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset)); |
| 132 |
| 133 // Return result. The argument function info has been popped already. |
| 134 __ Ret(); |
| 135 |
| 136 // Create a new closure through the slower runtime call. |
| 137 __ bind(&gc); |
| 138 __ LoadRoot(t0, Heap::kFalseValueRootIndex); |
| 139 __ Push(cp, a3, t0); |
| 140 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); |
51 } | 141 } |
52 | 142 |
53 | 143 |
54 void FastNewContextStub::Generate(MacroAssembler* masm) { | 144 void FastNewContextStub::Generate(MacroAssembler* masm) { |
55 UNIMPLEMENTED_MIPS(); | 145 // Try to allocate the context in new space. |
| 146 Label gc; |
| 147 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
| 148 |
| 149 // Attempt to allocate the context in new space. |
| 150 __ AllocateInNewSpace(FixedArray::SizeFor(length), |
| 151 v0, |
| 152 a1, |
| 153 a2, |
| 154 &gc, |
| 155 TAG_OBJECT); |
| 156 |
| 157 // Load the function from the stack. |
| 158 __ lw(a3, MemOperand(sp, 0)); |
| 159 |
| 160 // Set up the object header. |
| 161 __ LoadRoot(a2, Heap::kContextMapRootIndex); |
| 162 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 163 __ li(a2, Operand(Smi::FromInt(length))); |
| 164 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); |
| 165 |
| 166 // Set up the fixed slots. |
| 167 __ li(a1, Operand(Smi::FromInt(0))); |
| 168 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX))); |
| 169 __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX))); |
| 170 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 171 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX))); |
| 172 |
| 173 // Copy the global object from the surrounding context. |
| 174 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 175 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 176 |
| 177 // Initialize the rest of the slots to undefined. |
| 178 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); |
| 179 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { |
| 180 __ sw(a1, MemOperand(v0, Context::SlotOffset(i))); |
| 181 } |
| 182 |
| 183 // Remove the on-stack argument and return. |
| 184 __ mov(cp, v0); |
| 185 __ Pop(); |
| 186 __ Ret(); |
| 187 |
| 188 // Need to collect. Call into runtime system. |
| 189 __ bind(&gc); |
| 190 __ TailCallRuntime(Runtime::kNewContext, 1, 1); |
56 } | 191 } |
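Note on the allocation size above: the stub requests FixedArray::SizeFor(slots_ + Context::MIN_CONTEXT_SLOTS) bytes, i.e. the fixed-array header plus one pointer per slot. The first MIN_CONTEXT_SLOTS entries (closure, fcontext, previous, extension, global) are filled explicitly and every remaining slot is padded with undefined, so for example slots_ == 2 yields an array of MIN_CONTEXT_SLOTS + 2 tagged pointers, the last two holding undefined.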
57 | 192 |
58 | 193 |
59 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { | 194 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { |
60 UNIMPLEMENTED_MIPS(); | 195 // Stack layout on entry: |
61 } | 196 // [sp]: constant elements. |
62 | 197 // [sp + kPointerSize]: literal index. |
63 | 198 // [sp + (2 * kPointerSize)]: literals array. |
| 199 |
| 200 // All sizes here are multiples of kPointerSize. |
| 201 int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; |
| 202 int size = JSArray::kSize + elements_size; |
| 203 |
| 204 // Load boilerplate object into a3 and check if we need to create a |
| 205 // boilerplate. |
| 206 Label slow_case; |
| 207 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); |
| 208 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); |
| 209 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 210 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); |
| 211 __ Addu(t0, a3, t0); |
| 212 __ lw(a3, MemOperand(t0)); |
| 213 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); |
| 214 __ Branch(&slow_case, eq, a3, Operand(t1)); |
| 215 |
| 216 if (FLAG_debug_code) { |
| 217 const char* message; |
| 218 Heap::RootListIndex expected_map_index; |
| 219 if (mode_ == CLONE_ELEMENTS) { |
| 220 message = "Expected (writable) fixed array"; |
| 221 expected_map_index = Heap::kFixedArrayMapRootIndex; |
| 222 } else { |
| 223 ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); |
| 224 message = "Expected copy-on-write fixed array"; |
| 225 expected_map_index = Heap::kFixedCOWArrayMapRootIndex; |
| 226 } |
| 227 __ push(a3); |
| 228 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); |
| 229 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset)); |
| 230 __ LoadRoot(at, expected_map_index); |
| 231 __ Assert(eq, message, a3, Operand(at)); |
| 232 __ pop(a3); |
| 233 } |
| 234 |
| 235 // Allocate both the JS array and the elements array in one big |
| 236 // allocation. This avoids multiple limit checks. |
| 237 // Return new object in v0. |
| 238 __ AllocateInNewSpace(size, |
| 239 v0, |
| 240 a1, |
| 241 a2, |
| 242 &slow_case, |
| 243 TAG_OBJECT); |
| 244 |
| 245 // Copy the JS array part. |
| 246 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { |
| 247 if ((i != JSArray::kElementsOffset) || (length_ == 0)) { |
| 248 __ lw(a1, FieldMemOperand(a3, i)); |
| 249 __ sw(a1, FieldMemOperand(v0, i)); |
| 250 } |
| 251 } |
| 252 |
| 253 if (length_ > 0) { |
| 254 // Get hold of the elements array of the boilerplate and setup the |
| 255 // elements pointer in the resulting object. |
| 256 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); |
| 257 __ Addu(a2, v0, Operand(JSArray::kSize)); |
| 258 __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset)); |
| 259 |
| 260 // Copy the elements array. |
| 261 __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize); |
| 262 } |
| 263 |
| 264 // Return and remove the on-stack parameters. |
| 265 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 266 __ Ret(); |
| 267 |
| 268 __ bind(&slow_case); |
| 269 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); |
| 270 } |
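A quick size check for the combined allocation above: with length_ == 3 the stub allocates JSArray::kSize + FixedArray::SizeFor(3) bytes in a single request, with the elements store laid out immediately after the array header; that is why the elements pointer can simply be set to v0 + JSArray::kSize before CopyFields runs. With length_ == 0 the elements_size term is 0, only JSArray::kSize is allocated, and the boilerplate's elements field (the empty fixed array) is copied verbatim by the field-copy loop.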
| 271 |
| 272 |
64 // Takes a Smi and converts to an IEEE 64 bit floating point value in two | 273 // Takes a Smi and converts to an IEEE 64 bit floating point value in two |
65 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and | 274 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and |
66 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a | 275 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a |
67 // scratch register. Destroys the source register. No GC occurs during this | 276 // scratch register. Destroys the source register. No GC occurs during this |
68 // stub so you don't have to set up the frame. | 277 // stub so you don't have to set up the frame. |
69 class ConvertToDoubleStub : public CodeStub { | 278 class ConvertToDoubleStub : public CodeStub { |
70 public: | 279 public: |
71 ConvertToDoubleStub(Register result_reg_1, | 280 ConvertToDoubleStub(Register result_reg_1, |
72 Register result_reg_2, | 281 Register result_reg_2, |
73 Register source_reg, | 282 Register source_reg, |
(...skipping 26 matching lines...) |
100 | 309 |
101 const char* GetName() { return "ConvertToDoubleStub"; } | 310 const char* GetName() { return "ConvertToDoubleStub"; } |
102 | 311 |
103 #ifdef DEBUG | 312 #ifdef DEBUG |
104 void Print() { PrintF("ConvertToDoubleStub\n"); } | 313 void Print() { PrintF("ConvertToDoubleStub\n"); } |
105 #endif | 314 #endif |
106 }; | 315 }; |
107 | 316 |
108 | 317 |
109 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | 318 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
110 UNIMPLEMENTED_MIPS(); | 319 #ifndef BIG_ENDIAN_FLOATING_POINT |
| 320 Register exponent = result1_; |
| 321 Register mantissa = result2_; |
| 322 #else |
| 323 Register exponent = result2_; |
| 324 Register mantissa = result1_; |
| 325 #endif |
| 326 Label not_special; |
| 327 // Convert from Smi to integer. |
| 328 __ sra(source_, source_, kSmiTagSize); |
| 329 // Move sign bit from source to destination. This works because the sign bit |
| 330 // in the exponent word of the double has the same position and polarity as |
| 331 // the 2's complement sign bit in a Smi. |
| 332 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| 333 __ And(exponent, source_, Operand(HeapNumber::kSignMask)); |
| 334 // Subtract from 0 if source was negative. |
| 335 __ subu(at, zero_reg, source_); |
| 336 __ movn(source_, at, exponent); |
| 337 |
| 338 // We have -1, 0 or 1, which we treat specially. Register source_ contains |
| 339 // absolute value: it is either equal to 1 (special case of -1 and 1), |
| 340 // greater than 1 (not a special case) or less than 1 (special case of 0). |
| 341 __ Branch(¬_special, gt, source_, Operand(1)); |
| 342 |
| 343 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). |
| 344 static const uint32_t exponent_word_for_1 = |
| 345 HeapNumber::kExponentBias << HeapNumber::kExponentShift; |
| 346 // Safe to use 'at' as dest reg here. |
| 347 __ Or(at, exponent, Operand(exponent_word_for_1)); |
| 348 __ movn(exponent, at, source_); // Write exp when source not 0. |
| 349 // 1, 0 and -1 all have 0 for the second word. |
| 350 __ mov(mantissa, zero_reg); |
| 351 __ Ret(); |
| 352 |
| 353 __ bind(¬_special); |
| 354 // Count leading zeros. |
| 355 // Gets the wrong answer for 0, but we already checked for that case above. |
| 356 __ clz(zeros_, source_); |
| 357 // Compute exponent and or it into the exponent register. |
| 358 // We use mantissa as a scratch register here. |
| 359 __ li(mantissa, Operand(31 + HeapNumber::kExponentBias)); |
| 360 __ subu(mantissa, mantissa, zeros_); |
| 361 __ sll(mantissa, mantissa, HeapNumber::kExponentShift); |
| 362 __ Or(exponent, exponent, mantissa); |
| 363 |
| 364 // Shift up the source chopping the top bit off. |
| 365 __ Addu(zeros_, zeros_, Operand(1)); |
| 366 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. |
| 367 __ sllv(source_, source_, zeros_); |
| 368 // Compute lower part of fraction (last 12 bits). |
| 369 __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord); |
| 370 // And the top (top 20 bits). |
| 371 __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord); |
| 372 __ or_(exponent, exponent, source_); |
| 373 |
| 374 __ Ret(); |
111 } | 375 } |
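For readers checking the bit manipulation above, here is the same Smi-to-double conversion done on the host in a minimal C++ sketch (illustrative only; the stub of course produces these words without hardware float support):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      int32_t smi_value = 5;                        // untagged Smi payload
      double d = static_cast<double>(smi_value);    // what the stub computes in software
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);  // sign, 11 exponent bits, top 20 mantissa bits
      uint32_t mantissa_word = static_cast<uint32_t>(bits);        // low 32 mantissa bits
      // 5 = 1.25 * 2^2, so the biased exponent is 1023 + 2 = 1025 and the
      // words come out as 0x40140000 / 0x00000000.
      std::printf("%08x %08x\n", exponent_word, mantissa_word);
      return 0;
    }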
112 | 376 |
113 | 377 |
114 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 378 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
115 FloatingPointHelper::Destination destination, | 379 FloatingPointHelper::Destination destination, |
116 Register scratch1, | 380 Register scratch1, |
117 Register scratch2) { | 381 Register scratch2) { |
118 UNIMPLEMENTED_MIPS(); | 382 if (CpuFeatures::IsSupported(FPU)) { |
| 383 CpuFeatures::Scope scope(FPU); |
| 384 __ sra(scratch1, a0, kSmiTagSize); |
| 385 __ mtc1(scratch1, f14); |
| 386 __ cvt_d_w(f14, f14); |
| 387 __ sra(scratch1, a1, kSmiTagSize); |
| 388 __ mtc1(scratch1, f12); |
| 389 __ cvt_d_w(f12, f12); |
| 390 if (destination == kCoreRegisters) { |
| 391 __ mfc1(a2, f14); |
| 392 __ mfc1(a3, f15); |
| 393 |
| 394 __ mfc1(a0, f12); |
| 395 __ mfc1(a1, f13); |
| 396 } |
| 397 } else { |
| 398 ASSERT(destination == kCoreRegisters); |
| 399 // Write Smi from a0 to a3 and a2 in double format. |
| 400 __ mov(scratch1, a0); |
| 401 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); |
| 402 __ push(ra); |
| 403 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
| 404 // Write Smi from a1 to a1 and a0 in double format. |
| 405 __ mov(scratch1, a1); |
| 406 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); |
| 407 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 408 __ pop(ra); |
| 409 } |
119 } | 410 } |
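A worked example for the sra/mtc1/cvt_d_w sequence above, assuming the usual 32-bit Smi encoding (tag bit 0 == 0, kSmiTagSize == 1): the integer 7 is stored as the word 7 << 1 = 14 (0x0000000E); `sra scratch1, a0, kSmiTagSize` recovers 7, and mtc1 followed by cvt_d_w turns that into 7.0 in the target double register, from which the core-register pair is filled with mfc1 when the caller asked for kCoreRegisters.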
120 | 411 |
121 | 412 |
122 void FloatingPointHelper::LoadOperands( | 413 void FloatingPointHelper::LoadOperands( |
123 MacroAssembler* masm, | 414 MacroAssembler* masm, |
124 FloatingPointHelper::Destination destination, | 415 FloatingPointHelper::Destination destination, |
125 Register heap_number_map, | 416 Register heap_number_map, |
126 Register scratch1, | 417 Register scratch1, |
127 Register scratch2, | 418 Register scratch2, |
128 Label* slow) { | 419 Label* slow) { |
129 UNIMPLEMENTED_MIPS(); | 420 |
| 421 // Load right operand (a0) to f14 or a2/a3. |
| 422 LoadNumber(masm, destination, |
| 423 a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow); |
| 424 |
| 425 // Load left operand (a1) to f12 or a0/a1. |
| 426 LoadNumber(masm, destination, |
| 427 a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow); |
130 } | 428 } |
131 | 429 |
132 | 430 |
133 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | 431 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
134 Destination destination, | 432 Destination destination, |
135 Register object, | 433 Register object, |
136 FPURegister dst, | 434 FPURegister dst, |
137 Register dst1, | 435 Register dst1, |
138 Register dst2, | 436 Register dst2, |
139 Register heap_number_map, | 437 Register heap_number_map, |
140 Register scratch1, | 438 Register scratch1, |
141 Register scratch2, | 439 Register scratch2, |
142 Label* not_number) { | 440 Label* not_number) { |
143 UNIMPLEMENTED_MIPS(); | 441 if (FLAG_debug_code) { |
| 442 __ AbortIfNotRootValue(heap_number_map, |
| 443 Heap::kHeapNumberMapRootIndex, |
| 444 "HeapNumberMap register clobbered."); |
| 445 } |
| 446 |
| 447 Label is_smi, done; |
| 448 |
| 449 __ JumpIfSmi(object, &is_smi); |
| 450 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| 451 |
| 452 // Handle loading a double from a heap number. |
| 453 if (CpuFeatures::IsSupported(FPU) && |
| 454 destination == kFPURegisters) { |
| 455 CpuFeatures::Scope scope(FPU); |
| 456 // Load the double from tagged HeapNumber to double register. |
| 457 |
| 458 // ARM uses a workaround here because of the unaligned HeapNumber |
| 459 // kValueOffset. On MIPS this workaround is built into ldc1 so there's no |
| 460 // point in generating even more instructions. |
| 461 __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 462 } else { |
| 463 ASSERT(destination == kCoreRegisters); |
| 464 // Load the double from heap number to dst1 and dst2 in double format. |
| 465 __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 466 __ lw(dst2, FieldMemOperand(object, |
| 467 HeapNumber::kValueOffset + kPointerSize)); |
| 468 } |
| 469 __ Branch(&done); |
| 470 |
| 471 // Handle loading a double from a smi. |
| 472 __ bind(&is_smi); |
| 473 if (CpuFeatures::IsSupported(FPU)) { |
| 474 CpuFeatures::Scope scope(FPU); |
| 475 // Convert smi to double using FPU instructions. |
| 476 __ SmiUntag(scratch1, object); |
| 477 __ mtc1(scratch1, dst); |
| 478 __ cvt_d_w(dst, dst); |
| 479 if (destination == kCoreRegisters) { |
| 480 // Load the converted smi to dst1 and dst2 in double format. |
| 481 __ mfc1(dst1, dst); |
| 482 __ mfc1(dst2, FPURegister::from_code(dst.code() + 1)); |
| 483 } |
| 484 } else { |
| 485 ASSERT(destination == kCoreRegisters); |
| 486 // Write the smi to dst1 and dst2 in double format. |
| 487 __ mov(scratch1, object); |
| 488 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); |
| 489 __ push(ra); |
| 490 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 491 __ pop(ra); |
| 492 } |
| 493 |
| 494 __ bind(&done); |
144 } | 495 } |
145 | 496 |
146 | 497 |
147 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | 498 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
148 Register object, | 499 Register object, |
149 Register dst, | 500 Register dst, |
150 Register heap_number_map, | 501 Register heap_number_map, |
151 Register scratch1, | 502 Register scratch1, |
152 Register scratch2, | 503 Register scratch2, |
153 Register scratch3, | 504 Register scratch3, |
154 FPURegister double_scratch, | 505 FPURegister double_scratch, |
155 Label* not_number) { | 506 Label* not_number) { |
156 UNIMPLEMENTED_MIPS(); | 507 if (FLAG_debug_code) { |
| 508 __ AbortIfNotRootValue(heap_number_map, |
| 509 Heap::kHeapNumberMapRootIndex, |
| 510 "HeapNumberMap register clobbered."); |
| 511 } |
| 512 Label is_smi; |
| 513 Label done; |
| 514 Label not_in_int32_range; |
| 515 |
| 516 __ JumpIfSmi(object, &is_smi); |
| 517 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); |
| 518 __ Branch(not_number, ne, scratch1, Operand(heap_number_map)); |
| 519 __ ConvertToInt32(object, |
| 520 dst, |
| 521 scratch1, |
| 522 scratch2, |
| 523 double_scratch, |
| 524 ¬_in_int32_range); |
| 525 __ jmp(&done); |
| 526 |
| 527 __ bind(¬_in_int32_range); |
| 528 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 529 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
| 530 |
| 531 __ EmitOutOfInt32RangeTruncate(dst, |
| 532 scratch1, |
| 533 scratch2, |
| 534 scratch3); |
| 535 |
| 536 __ jmp(&done); |
| 537 |
| 538 __ bind(&is_smi); |
| 539 __ SmiUntag(dst, object); |
| 540 __ bind(&done); |
157 } | 541 } |
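Assuming the out-of-range path is meant to implement the ECMAScript ToInt32 truncation (truncate toward zero, then wrap modulo 2^32 into the signed range), a worked example of what the helper should produce: the heap number -5.9 truncates to -5, while 2147483653.7 truncates to 2147483653, which wraps to 2147483653 - 2^32 = -2147483643.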
158 | 542 |
159 | 543 |
160 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, | 544 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, |
161 Register int_scratch, | 545 Register int_scratch, |
162 Destination destination, | 546 Destination destination, |
163 FPURegister double_dst, | 547 FPURegister double_dst, |
164 Register dst1, | 548 Register dst1, |
165 Register dst2, | 549 Register dst2, |
166 Register scratch2, | 550 Register scratch2, |
167 FPURegister single_scratch) { | 551 FPURegister single_scratch) { |
168 UNIMPLEMENTED_MIPS(); | 552 ASSERT(!int_scratch.is(scratch2)); |
| 553 |
| 554 Label done; |
| 555 |
| 556 if (CpuFeatures::IsSupported(FPU)) { |
| 557 CpuFeatures::Scope scope(FPU); |
| 558 __ mtc1(int_scratch, single_scratch); |
| 559 __ cvt_d_w(double_dst, single_scratch); |
| 560 if (destination == kCoreRegisters) { |
| 561 __ mfc1(dst1, double_dst); |
| 562 __ mfc1(dst2, FPURegister::from_code(double_dst.code() + 1)); |
| 563 } |
| 564 } else { |
| 565 Label fewer_than_20_useful_bits; |
| 566 // Expected output: |
| 567 // | dst2 | dst1 | |
| 568 // | s | exp | mantissa | |
| 569 |
| 570 // Check for zero. |
| 571 __ mov(dst2, int_scratch); |
| 572 __ mov(dst1, int_scratch); |
| 573 __ Branch(&done, eq, int_scratch, Operand(zero_reg)); |
| 574 |
| 575 // Preload the sign of the value. |
| 576 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask)); |
| 577 // Get the absolute value of the object (as an unsigned integer). |
| 578 Label skip_sub; |
| 579 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg)); |
| 580 __ Subu(int_scratch, zero_reg, int_scratch); |
| 581 __ bind(&skip_sub); |
| 582 |
| 583 // Get mantissa[51:20]. |
| 584 |
| 585 // Get the position of the first set bit. |
| 586 __ clz(dst1, int_scratch); |
| 587 __ li(scratch2, 31); |
| 588 __ Subu(dst1, scratch2, dst1); |
| 589 |
| 590 // Set the exponent. |
| 591 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias)); |
| 592 __ Ins(dst2, scratch2, |
| 593 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| 594 |
| 595 // Clear the first non-null bit (the implicit leading 1 of the mantissa). |
| 596 __ li(scratch2, Operand(1)); |
| 597 __ sllv(scratch2, scratch2, dst1); |
| 598 __ li(at, -1); |
| 599 __ Xor(scratch2, scratch2, at); |
| 600 __ And(int_scratch, int_scratch, scratch2); |
| 601 |
| 602 // Get the number of bits to set in the lower part of the mantissa. |
| 603 __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
| 604 __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg)); |
| 605 // Set the higher 20 bits of the mantissa. |
| 606 __ srlv(at, int_scratch, scratch2); |
| 607 __ or_(dst2, dst2, at); |
| 608 __ li(at, 32); |
| 609 __ subu(scratch2, at, scratch2); |
| 610 __ sllv(dst1, int_scratch, scratch2); |
| 611 __ Branch(&done); |
| 612 |
| 613 __ bind(&fewer_than_20_useful_bits); |
| 614 __ li(at, HeapNumber::kMantissaBitsInTopWord); |
| 615 __ subu(scratch2, at, dst1); |
| 616 __ sllv(scratch2, int_scratch, scratch2); |
| 617 __ Or(dst2, dst2, scratch2); |
| 618 // Set dst1 to 0. |
| 619 __ mov(dst1, zero_reg); |
| 620 } |
| 621 __ bind(&done); |
169 } | 622 } |
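A rough host-side C++ model of the non-FPU path above, to make the clz-based construction easier to follow (the name IntToDoubleWords is invented for this sketch and __builtin_clz is the GCC/Clang builtin standing in for the clz instruction; this is not the V8 implementation):

    #include <cstdint>

    void IntToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
      if (value == 0) { *hi = 0; *lo = 0; return; }       // zero is handled up front
      uint32_t sign = static_cast<uint32_t>(value) & 0x80000000u;
      uint32_t abs = sign ? 0u - static_cast<uint32_t>(value) : static_cast<uint32_t>(value);
      int msb = 31 - __builtin_clz(abs);                  // position of the leading 1
      uint32_t exponent = static_cast<uint32_t>(msb) + 1023u;  // biased exponent
      abs &= ~(1u << msb);                                // drop the implicit leading 1
      uint64_t mantissa = static_cast<uint64_t>(abs) << (52 - msb);  // left-align the rest
      *hi = sign | (exponent << 20) | static_cast<uint32_t>(mantissa >> 32);
      *lo = static_cast<uint32_t>(mantissa);
    }

For value = 5 this yields 0x40140000 / 0x00000000, matching the ConvertToDoubleStub example earlier.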
170 | 623 |
171 | 624 |
172 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | 625 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
173 Register object, | 626 Register object, |
174 Destination destination, | 627 Destination destination, |
175 FPURegister double_dst, | 628 FPURegister double_dst, |
176 Register dst1, | 629 Register dst1, |
177 Register dst2, | 630 Register dst2, |
178 Register heap_number_map, | 631 Register heap_number_map, |
179 Register scratch1, | 632 Register scratch1, |
180 Register scratch2, | 633 Register scratch2, |
181 FPURegister single_scratch, | 634 FPURegister single_scratch, |
182 Label* not_int32) { | 635 Label* not_int32) { |
183 UNIMPLEMENTED_MIPS(); | 636 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
| 637 ASSERT(!scratch1.is(scratch2)); |
| 638 ASSERT(!heap_number_map.is(object) && |
| 639 !heap_number_map.is(scratch1) && |
| 640 !heap_number_map.is(scratch2)); |
| 641 |
| 642 Label done, obj_is_not_smi; |
| 643 |
| 644 __ JumpIfNotSmi(object, &obj_is_not_smi); |
| 645 __ SmiUntag(scratch1, object); |
| 646 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, |
| 647 scratch2, single_scratch); |
| 648 __ Branch(&done); |
| 649 |
| 650 __ bind(&obj_is_not_smi); |
| 651 if (FLAG_debug_code) { |
| 652 __ AbortIfNotRootValue(heap_number_map, |
| 653 Heap::kHeapNumberMapRootIndex, |
| 654 "HeapNumberMap register clobbered."); |
| 655 } |
| 656 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
| 657 |
| 658 // Load the number. |
| 659 if (CpuFeatures::IsSupported(FPU)) { |
| 660 CpuFeatures::Scope scope(FPU); |
| 661 // Load the double value. |
| 662 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 663 |
| 664 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). |
| 665 // On MIPS a lot of things cannot be implemented the same way so right |
| 666 // now it makes a lot more sense to just do things manually. |
| 667 |
| 668 // Save FCSR. |
| 669 __ cfc1(scratch1, FCSR); |
| 670 // Disable FPU exceptions. |
| 671 __ ctc1(zero_reg, FCSR); |
| 672 __ trunc_w_d(single_scratch, double_dst); |
| 673 // Retrieve FCSR. |
| 674 __ cfc1(scratch2, FCSR); |
| 675 // Restore FCSR. |
| 676 __ ctc1(scratch1, FCSR); |
| 677 |
| 678 // Check for inexact conversion. |
| 679 __ srl(scratch2, scratch2, kFCSRFlagShift); |
| 680 __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit)); |
| 681 |
| 682 // Jump to not_int32 if the operation did not succeed. |
| 683 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); |
| 684 |
| 685 if (destination == kCoreRegisters) { |
| 686 __ mfc1(dst1, double_dst); |
| 687 __ mfc1(dst2, FPURegister::from_code(double_dst.code() + 1)); |
| 688 } |
| 689 |
| 690 } else { |
| 691 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
| 692 // Load the double value in the destination registers. |
| 693 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 694 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
| 695 |
| 696 // Check for 0 and -0. |
| 697 __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask)); |
| 698 __ Or(scratch1, scratch1, Operand(dst2)); |
| 699 __ Branch(&done, eq, scratch1, Operand(zero_reg)); |
| 700 |
| 701 // Check that the value can be exactly represented by a 32-bit integer. |
| 702 // Jump to not_int32 if that's not the case. |
| 703 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); |
| 704 |
| 705 // dst1 and dst2 were trashed. Reload the double value. |
| 706 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 707 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
| 708 } |
| 709 |
| 710 __ bind(&done); |
184 } | 711 } |
185 | 712 |
186 | 713 |
187 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | 714 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
188 Register object, | 715 Register object, |
189 Register dst, | 716 Register dst, |
190 Register heap_number_map, | 717 Register heap_number_map, |
191 Register scratch1, | 718 Register scratch1, |
192 Register scratch2, | 719 Register scratch2, |
193 Register scratch3, | 720 Register scratch3, |
194 FPURegister double_scratch, | 721 FPURegister double_scratch, |
195 Label* not_int32) { | 722 Label* not_int32) { |
196 UNIMPLEMENTED_MIPS(); | 723 ASSERT(!dst.is(object)); |
| 724 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
| 725 ASSERT(!scratch1.is(scratch2) && |
| 726 !scratch1.is(scratch3) && |
| 727 !scratch2.is(scratch3)); |
| 728 |
| 729 Label done; |
| 730 |
| 731 // Untag the object into the destination register. |
| 732 __ SmiUntag(dst, object); |
| 733 // Just return if the object is a smi. |
| 734 __ JumpIfSmi(object, &done); |
| 735 |
| 736 if (FLAG_debug_code) { |
| 737 __ AbortIfNotRootValue(heap_number_map, |
| 738 Heap::kHeapNumberMapRootIndex, |
| 739 "HeapNumberMap register clobbered."); |
| 740 } |
| 741 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
| 742 |
| 743 // Object is a heap number. |
| 744 // Convert the floating point value to a 32-bit integer. |
| 745 if (CpuFeatures::IsSupported(FPU)) { |
| 746 CpuFeatures::Scope scope(FPU); |
| 747 // Load the double value. |
| 748 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 749 |
| 750 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). |
| 751 // On MIPS a lot of things cannot be implemented the same way so right |
| 752 // now it makes a lot more sense to just do things manually. |
| 753 |
| 754 // Save FCSR. |
| 755 __ cfc1(scratch1, FCSR); |
| 756 // Disable FPU exceptions. |
| 757 __ ctc1(zero_reg, FCSR); |
| 758 __ trunc_w_d(double_scratch, double_scratch); |
| 759 // Retrieve FCSR. |
| 760 __ cfc1(scratch2, FCSR); |
| 761 // Restore FCSR. |
| 762 __ ctc1(scratch1, FCSR); |
| 763 |
| 764 // Check for inexact conversion. |
| 765 __ srl(scratch2, scratch2, kFCSRFlagShift); |
| 766 __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit)); |
| 767 |
| 768 // Jump to not_int32 if the operation did not succeed. |
| 769 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); |
| 770 // Get the result in the destination register. |
| 771 __ mfc1(dst, double_scratch); |
| 772 |
| 773 } else { |
| 774 // Load the double value in the destination registers. |
| 775 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 776 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
| 777 |
| 778 // Check for 0 and -0. |
| 779 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask)); |
| 780 __ Or(dst, scratch2, Operand(dst)); |
| 781 __ Branch(&done, eq, dst, Operand(zero_reg)); |
| 782 |
| 783 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); |
| 784 |
| 785 // Registers state after DoubleIs32BitInteger. |
| 786 // dst: mantissa[51:20]. |
| 787 // scratch2: 1 |
| 788 |
| 789 // Shift back the higher bits of the mantissa. |
| 790 __ srlv(dst, dst, scratch3); |
| 791 // Set the implicit first bit. |
| 792 __ li(at, 32); |
| 793 __ subu(scratch3, at, scratch3); |
| 794 __ sllv(scratch2, scratch2, scratch3); |
| 795 __ Or(dst, dst, scratch2); |
| 796 // Set the sign. |
| 797 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
| 798 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 799 Label skip_sub; |
| 800 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg)); |
| 801 __ Subu(dst, zero_reg, dst); |
| 802 __ bind(&skip_sub); |
| 803 } |
| 804 |
| 805 __ bind(&done); |
197 } | 806 } |
198 | 807 |
199 | 808 |
200 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, | 809 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, |
201 Register src1, | 810 Register src1, |
202 Register src2, | 811 Register src2, |
203 Register dst, | 812 Register dst, |
204 Register scratch, | 813 Register scratch, |
205 Label* not_int32) { | 814 Label* not_int32) { |
206 UNIMPLEMENTED_MIPS(); | 815 // Get exponent alone in scratch. |
| 816 __ Ext(scratch, |
| 817 src1, |
| 818 HeapNumber::kExponentShift, |
| 819 HeapNumber::kExponentBits); |
| 820 |
| 821 // Subtract the bias from the exponent. |
| 822 __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias)); |
| 823 |
| 824 // src1: higher (exponent) part of the double value. |
| 825 // src2: lower (mantissa) part of the double value. |
| 826 // scratch: unbiased exponent. |
| 827 |
| 828 // Fast cases. Check for obvious non 32-bit integer values. |
| 829 // Negative exponent cannot yield 32-bit integers. |
| 830 __ Branch(not_int32, lt, scratch, Operand(zero_reg)); |
| 831 // Exponent greater than 31 cannot yield 32-bit integers. |
| 832 // Also, a positive value with an exponent equal to 31 is outside of the |
| 833 // signed 32-bit integer range. |
| 834 // Another way to put it is that if (exponent - signbit) > 30 then the |
| 835 // number cannot be represented as an int32. |
| 836 Register tmp = dst; |
| 837 __ srl(at, src1, 31); |
| 838 __ subu(tmp, scratch, at); |
| 839 __ Branch(not_int32, gt, tmp, Operand(30)); |
| 840 // The value is not a 32-bit integer if bits [21:0] of the mantissa are not null. |
| 841 __ And(tmp, src2, 0x3fffff); |
| 842 __ Branch(not_int32, ne, tmp, Operand(zero_reg)); |
| 843 |
| 844 // Otherwise the exponent needs to be big enough to shift left all the |
| 845 // non-zero bits. So we need the (30 - exponent) last bits of the |
| 846 // 31 higher bits of the mantissa to be null. |
| 847 // Because bits [21:0] are null, we can check instead that the |
| 848 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. |
| 849 |
| 850 // Get the 32 higher bits of the mantissa in dst. |
| 851 __ Ext(dst, |
| 852 src2, |
| 853 HeapNumber::kMantissaBitsInTopWord, |
| 854 32 - HeapNumber::kMantissaBitsInTopWord); |
| 855 __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord); |
| 856 __ or_(dst, dst, at); |
| 857 |
| 858 // Create the mask and test the lower bits (of the higher bits). |
| 859 __ li(at, 32); |
| 860 __ subu(scratch, at, scratch); |
| 861 __ li(src2, 1); |
| 862 __ sllv(src1, src2, scratch); |
| 863 __ Subu(src1, src1, Operand(1)); |
| 864 __ And(src1, dst, src1); |
| 865 __ Branch(not_int32, ne, src1, Operand(zero_reg)); |
207 } | 866 } |
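Stated directly, the property these checks establish is "this double is exactly representable as a signed 32-bit integer". A compact host-side C++ expression of that property (illustrative; it captures the intent of the checks rather than a line-by-line transliteration of the assembly, and assumes the caller has already dealt with 0 and -0 as the surrounding code does):

    #include <cstdint>

    bool DoubleWordsAreInt32(uint32_t hi, uint32_t lo) {
      uint32_t sign = hi >> 31;
      int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;     // unbiased exponent
      if (exponent < 0) return false;                                  // |value| < 1
      if (exponent - static_cast<int>(sign) > 30) return false;       // outside int32 range
      // Exactness: every mantissa bit below bit (52 - exponent) must be zero.
      uint64_t mantissa = (static_cast<uint64_t>(hi & 0xFFFFFu) << 32) | lo;
      uint64_t low_mask = (static_cast<uint64_t>(1) << (52 - exponent)) - 1;
      return (mantissa & low_mask) == 0;
    }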
208 | 867 |
209 | 868 |
210 void FloatingPointHelper::CallCCodeForDoubleOperation( | 869 void FloatingPointHelper::CallCCodeForDoubleOperation( |
211 MacroAssembler* masm, | 870 MacroAssembler* masm, |
212 Token::Value op, | 871 Token::Value op, |
213 Register heap_number_result, | 872 Register heap_number_result, |
214 Register scratch) { | 873 Register scratch) { |
215 UNIMPLEMENTED_MIPS(); | 874 // Using core registers: |
| 875 // a0: Left value (least significant part of mantissa). |
| 876 // a1: Left value (sign, exponent, top of mantissa). |
| 877 // a2: Right value (least significant part of mantissa). |
| 878 // a3: Right value (sign, exponent, top of mantissa). |
| 879 |
| 880 // Assert that heap_number_result is saved. |
| 881 // We currently always use s0 to pass it. |
| 882 ASSERT(heap_number_result.is(s0)); |
| 883 |
| 884 // Push the current return address before the C call. |
| 885 __ push(ra); |
| 886 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. |
| 887 if (!IsMipsSoftFloatABI) { |
| 888 CpuFeatures::Scope scope(FPU); |
| 889 // We are not using MIPS FPU instructions, and the parameters for the |
| 890 // runtime function call are prepared in a0-a3 registers, but the function |
| 891 // we are calling is compiled with the hard-float flag and expects the |
| 892 // hard-float ABI (parameters in f12/f14 registers). We need to copy the |
| 893 // parameters from the a0-a3 registers to the f12/f14 register pairs. |
| 894 __ mtc1(a0, f12); |
| 895 __ mtc1(a1, f13); |
| 896 __ mtc1(a2, f14); |
| 897 __ mtc1(a3, f15); |
| 898 } |
| 899 // Call C routine that may not cause GC or other trouble. |
| 900 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), |
| 901 4); |
| 902 // Store answer in the overwritable heap number. |
| 903 if (!IsMipsSoftFloatABI) { |
| 904 CpuFeatures::Scope scope(FPU); |
| 905 // Double returned in register f0. |
| 906 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| 907 } else { |
| 908 // Double returned in registers v0 and v1. |
| 909 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); |
| 910 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); |
| 911 } |
| 912 // Place heap_number_result in v0 and return to the pushed return address. |
| 913 __ mov(v0, heap_number_result); |
| 914 __ pop(ra); |
| 915 __ Ret(); |
216 } | 916 } |
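For context on the two store paths above: the external reference is assumed here to resolve to a C function taking two doubles and returning one (roughly double fp_op(double, double); the exact signature is not shown in this patch). Under the soft-float ABI the arguments travel in a0-a3 and the result comes back in v0/v1, hence the two sw instructions; under the hard-float ABI the arguments must sit in f12/f14 and the result comes back in f0, hence the mtc1 copies before the call and the single sdc1 afterwards.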
217 | 917 |
218 | 918 |
219 // See comment for class; this does NOT work for int32s that are in Smi range. | 919 // See comment for class; this does NOT work for int32s that are in Smi range. |
220 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { | 920 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
221 UNIMPLEMENTED_MIPS(); | 921 Label max_negative_int; |
| 922 // the_int_ has the answer which is a signed int32 but not a Smi. |
| 923 // We test for the special value that has a different exponent. |
| 924 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| 925 // Test sign, and save for later conditionals. |
| 926 __ And(sign_, the_int_, Operand(0x80000000u)); |
| 927 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u)); |
| 928 |
| 929 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. |
| 930 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (before biasing). |
| 931 uint32_t non_smi_exponent = |
| 932 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
| 933 __ li(scratch_, Operand(non_smi_exponent)); |
| 934 // Set the sign bit in scratch_ if the value was negative. |
| 935 __ or_(scratch_, scratch_, sign_); |
| 936 // Subtract from 0 if the value was negative. |
| 937 __ subu(at, zero_reg, the_int_); |
| 938 __ movn(the_int_, at, sign_); |
| 939 // We should be masking the implicit first digit of the mantissa away here, |
| 940 // but it just ends up combining harmlessly with the last digit of the |
| 941 // exponent that happens to be 1. The sign bit is 0 so we shift right by 10 |
| 942 // to get the most significant 1 to hit the last bit of the 12-bit sign and exponent field. |
| 943 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
| 944 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| 945 __ srl(at, the_int_, shift_distance); |
| 946 __ or_(scratch_, scratch_, at); |
| 947 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
| 948 HeapNumber::kExponentOffset)); |
| 949 __ sll(scratch_, the_int_, 32 - shift_distance); |
| 950 __ sw(scratch_, FieldMemOperand(the_heap_number_, |
| 951 HeapNumber::kMantissaOffset)); |
| 952 __ Ret(); |
| 953 |
| 954 __ bind(&max_negative_int); |
| 955 // The max negative int32 is stored as a positive number in the mantissa of |
| 956 // a double because it uses a sign bit instead of using two's complement. |
| 957 // The actual mantissa bits stored are all 0 because the implicit most |
| 958 // significant 1 bit is not stored. |
| 959 non_smi_exponent += 1 << HeapNumber::kExponentShift; |
| 960 __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent)); |
| 961 __ sw(scratch_, |
| 962 FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); |
| 963 __ mov(scratch_, zero_reg); |
| 964 __ sw(scratch_, |
| 965 FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); |
| 966 __ Ret(); |
| 967 } |
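A worked example of the "combining harmlessly" trick in the comment above, assuming the usual constants (kNonMantissaBitsInTopWord == 12, so shift_distance == 10): for the non-Smi int32 0x50000000 (1342177280 = 1.25 * 2^30), non_smi_exponent is (1023 + 30) << 20 = 0x41D00000, whose lowest exponent bit is already 1; 0x50000000 >> 10 = 0x00140000, and or-ing the two gives 0x41D40000 for the exponent word, the value's implicit leading 1 landing exactly on that already-set bit. The mantissa word is 0x50000000 << 22 = 0, so the stored pair 0x41D40000 / 0x00000000 is precisely the IEEE-754 encoding of 1342177280.0.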
| 968 |
| 969 |
| 970 // Handle the case where the lhs and rhs are the same object. |
| 971 // Equality is almost reflexive (everything but NaN), so this is a test |
| 972 // for "identity and not NaN". |
| 973 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 974 Label* slow, |
| 975 Condition cc, |
| 976 bool never_nan_nan) { |
| 977 Label not_identical; |
| 978 Label heap_number, return_equal; |
| 979 Register exp_mask_reg = t5; |
| 980 |
| 981 __ Branch(¬_identical, ne, a0, Operand(a1)); |
| 982 |
| 983 // The two objects are identical. If we know that one of them isn't NaN then |
| 984 // we now know they test equal. |
| 985 if (cc != eq || !never_nan_nan) { |
| 986 __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); |
| 987 |
| 988 // Test for NaN. Sadly, we can't just compare to factory->nan_value(), |
| 989 // so we do the second best thing - test it ourselves. |
| 990 // They are both equal and they are not both Smis so neither of them is a |
| 991 // Smi. If it's not a heap number, then return equal. |
| 992 if (cc == less || cc == greater) { |
| 993 __ GetObjectType(a0, t4, t4); |
| 994 __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE)); |
| 995 } else { |
| 996 __ GetObjectType(a0, t4, t4); |
| 997 __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE)); |
| 998 // Comparing JS objects with <=, >= is complicated. |
| 999 if (cc != eq) { |
| 1000 __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1001 // Normally here we fall through to return_equal, but undefined is |
| 1002 // special: (undefined == undefined) == true, but |
| 1003 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| 1004 if (cc == less_equal || cc == greater_equal) { |
| 1005 __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); |
| 1006 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); |
| 1007 __ Branch(&return_equal, ne, a0, Operand(t2)); |
| 1008 if (cc == le) { |
| 1009 // undefined <= undefined should fail. |
| 1010 __ li(v0, Operand(GREATER)); |
| 1011 } else { |
| 1012 // undefined >= undefined should fail. |
| 1013 __ li(v0, Operand(LESS)); |
| 1014 } |
| 1015 __ Ret(); |
| 1016 } |
| 1017 } |
| 1018 } |
| 1019 } |
| 1020 |
| 1021 __ bind(&return_equal); |
| 1022 if (cc == less) { |
| 1023 __ li(v0, Operand(GREATER)); // Things aren't less than themselves. |
| 1024 } else if (cc == greater) { |
| 1025 __ li(v0, Operand(LESS)); // Things aren't greater than themselves. |
| 1026 } else { |
| 1027 __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves. |
| 1028 } |
| 1029 __ Ret(); |
| 1030 |
| 1031 if (cc != eq || !never_nan_nan) { |
| 1032 // For less and greater we don't have to check for NaN since the result of |
| 1033 // x < x is false regardless. For the others here is some code to check |
| 1034 // for NaN. |
| 1035 if (cc != lt && cc != gt) { |
| 1036 __ bind(&heap_number); |
| 1037 // It is a heap number, so return non-equal if it's NaN and equal if it's |
| 1038 // not NaN. |
| 1039 |
| 1040 // The representation of NaN values has all exponent bits (52..62) set, |
| 1041 // and not all mantissa bits (0..51) clear. |
| 1042 // Read top bits of double representation (second word of value). |
| 1043 __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
| 1044 // Test that exponent bits are all set. |
| 1045 __ And(t3, t2, Operand(exp_mask_reg)); |
| 1046 // If all bits not set (ne cond), then not a NaN, objects are equal. |
| 1047 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg)); |
| 1048 |
| 1049 // Shift out flag and all exponent bits, retaining only mantissa. |
| 1050 __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord); |
| 1051 // Or with all low-bits of mantissa. |
| 1052 __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
| 1053 __ Or(v0, t3, Operand(t2)); |
| 1054 // For equal we already have the right value in v0: Return zero (equal) |
| 1055 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| 1056 // not (it's a NaN). For <= and >= we need to load v0 with the failing |
| 1057 // value if it's a NaN. |
| 1058 if (cc != eq) { |
| 1059 // All-zero means Infinity means equal. |
| 1060 __ Ret(eq, v0, Operand(zero_reg)); |
| 1061 if (cc == le) { |
| 1062 __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. |
| 1063 } else { |
| 1064 __ li(v0, Operand(LESS)); // NaN >= NaN should fail. |
| 1065 } |
| 1066 } |
| 1067 __ Ret(); |
| 1068 } |
| 1069 // No fall through here. |
| 1070 } |
| 1071 |
| 1072 __ bind(¬_identical); |
| 1073 } |
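The NaN test above, restated as a small host-side C++ sketch of the same bit logic (illustrative only):

    #include <cstdint>

    // NaN: all eleven exponent bits set and at least one mantissa bit set.
    // An all-ones exponent with an all-zero mantissa is an Infinity instead.
    bool WordsAreNaN(uint32_t exponent_word, uint32_t mantissa_word) {
      const uint32_t kExponentMask = 0x7FF00000u;
      if ((exponent_word & kExponentMask) != kExponentMask) return false;
      return ((exponent_word & 0x000FFFFFu) | mantissa_word) != 0;
    }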
| 1074 |
| 1075 |
| 1076 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 1077 Register lhs, |
| 1078 Register rhs, |
| 1079 Label* both_loaded_as_doubles, |
| 1080 Label* slow, |
| 1081 bool strict) { |
| 1082 ASSERT((lhs.is(a0) && rhs.is(a1)) || |
| 1083 (lhs.is(a1) && rhs.is(a0))); |
| 1084 |
| 1085 Label lhs_is_smi; |
| 1086 __ And(t0, lhs, Operand(kSmiTagMask)); |
| 1087 __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg)); |
| 1088 // Rhs is a Smi. |
| 1089 // Check whether the non-smi is a heap number. |
| 1090 __ GetObjectType(lhs, t4, t4); |
| 1091 if (strict) { |
| 1092 // If lhs was not a number and rhs was a Smi then strict equality cannot |
| 1093 // succeed. Return non-equal (lhs is already not zero). |
| 1094 __ mov(v0, lhs); |
| 1095 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE)); |
| 1096 } else { |
| 1097 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 1098 // the runtime. |
| 1099 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
| 1100 } |
| 1101 |
| 1102 // Rhs is a smi, lhs is a number. |
| 1103 // Convert smi rhs to double. |
| 1104 if (CpuFeatures::IsSupported(FPU)) { |
| 1105 CpuFeatures::Scope scope(FPU); |
| 1106 __ sra(at, rhs, kSmiTagSize); |
| 1107 __ mtc1(at, f14); |
| 1108 __ cvt_d_w(f14, f14); |
| 1109 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 1110 } else { |
| 1111 // Load lhs to a double in a2, a3. |
| 1112 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); |
| 1113 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 1114 |
| 1115 // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. |
| 1116 __ mov(t6, rhs); |
| 1117 ConvertToDoubleStub stub1(a1, a0, t6, t5); |
| 1118 __ push(ra); |
| 1119 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
| 1120 |
| 1121 __ pop(ra); |
| 1122 } |
| 1123 |
| 1124 // We now have both loaded as doubles. |
| 1125 __ jmp(both_loaded_as_doubles); |
| 1126 |
| 1127 __ bind(&lhs_is_smi); |
| 1128 // Lhs is a Smi. Check whether the non-smi is a heap number. |
| 1129 __ GetObjectType(rhs, t4, t4); |
| 1130 if (strict) { |
| 1131 // If rhs was not a number and lhs was a Smi then strict equality cannot |
| 1132 // succeed. Return non-equal. |
| 1133 __ li(v0, Operand(1)); |
| 1134 __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE)); |
| 1135 } else { |
| 1136 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 1137 // the runtime. |
| 1138 __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE)); |
| 1139 } |
| 1140 |
| 1141 // Lhs is a smi, rhs is a number. |
| 1142 // Convert smi lhs to double. |
| 1143 if (CpuFeatures::IsSupported(FPU)) { |
| 1144 CpuFeatures::Scope scope(FPU); |
| 1145 __ sra(at, lhs, kSmiTagSize); |
| 1146 __ mtc1(at, f12); |
| 1147 __ cvt_d_w(f12, f12); |
| 1148 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 1149 } else { |
| 1150 // Convert lhs to a double format. t5 is scratch. |
| 1151 __ mov(t6, lhs); |
| 1152 ConvertToDoubleStub stub2(a3, a2, t6, t5); |
| 1153 __ push(ra); |
| 1154 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 1155 __ pop(ra); |
| 1156 // Load rhs to a double in a1, a0. |
| 1157 if (rhs.is(a0)) { |
| 1158 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
| 1159 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 1160 } else { |
| 1161 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 1162 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
| 1163 } |
| 1164 } |
| 1165 // Fall through to both_loaded_as_doubles. |
222 } | 1166 } |
223 | 1167 |
224 | 1168 |
225 void EmitNanCheck(MacroAssembler* masm, Condition cc) { | 1169 void EmitNanCheck(MacroAssembler* masm, Condition cc) { |
226 UNIMPLEMENTED_MIPS(); | 1170 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
227 } | 1171 if (CpuFeatures::IsSupported(FPU)) { |
228 | 1172 CpuFeatures::Scope scope(FPU); |
229 | 1173 // Lhs and rhs are already loaded to f12 and f14 register pairs. |
| 1174 __ mfc1(t0, f14); // f14 has LS 32 bits of rhs. |
| 1175 __ mfc1(t1, f15); // f15 has MS 32 bits of rhs. |
| 1176 __ mfc1(t2, f12); // f12 has LS 32 bits of lhs. |
| 1177 __ mfc1(t3, f13); // f13 has MS 32 bits of lhs. |
| 1178 } else { |
| 1179 // Lhs and rhs are already loaded to GP registers. |
| 1180 __ mov(t0, a0); // a0 has LS 32 bits of rhs. |
| 1181 __ mov(t1, a1); // a1 has MS 32 bits of rhs. |
| 1182 __ mov(t2, a2); // a2 has LS 32 bits of lhs. |
| 1183 __ mov(t3, a3); // a3 has MS 32 bits of lhs. |
| 1184 } |
| 1185 Register rhs_exponent = exp_first ? t0 : t1; |
| 1186 Register lhs_exponent = exp_first ? t2 : t3; |
| 1187 Register rhs_mantissa = exp_first ? t1 : t0; |
| 1188 Register lhs_mantissa = exp_first ? t3 : t2; |
| 1189 Label one_is_nan, neither_is_nan; |
| 1190 Label lhs_not_nan_exp_mask_is_loaded; |
| 1191 |
| 1192 Register exp_mask_reg = t4; |
| 1193 __ li(exp_mask_reg, HeapNumber::kExponentMask); |
| 1194 __ and_(t5, lhs_exponent, exp_mask_reg); |
| 1195 __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg)); |
| 1196 |
| 1197 __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord); |
| 1198 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg)); |
| 1199 |
| 1200 __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg)); |
| 1201 |
| 1202 __ li(exp_mask_reg, HeapNumber::kExponentMask); |
| 1203 __ bind(&lhs_not_nan_exp_mask_is_loaded); |
| 1204 __ and_(t5, rhs_exponent, exp_mask_reg); |
| 1205 |
| 1206 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg)); |
| 1207 |
| 1208 __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord); |
| 1209 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg)); |
| 1210 |
| 1211 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg)); |
| 1212 |
| 1213 __ bind(&one_is_nan); |
| 1214 // NaN comparisons always fail. |
| 1215 // Load whatever we need in v0 to make the comparison fail. |
| 1216 if (cc == lt || cc == le) { |
| 1217 __ li(v0, Operand(GREATER)); |
| 1218 } else { |
| 1219 __ li(v0, Operand(LESS)); |
| 1220 } |
| 1221 __ Ret(); // Return. |
| 1222 |
| 1223 __ bind(&neither_is_nan); |
| 1224 } |
| 1225 |
| 1226 |
| 1227 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { |
| 1228 // f12 and f14 have the two doubles. Neither is a NaN. |
| 1229 // Call a native function to do a comparison between two non-NaNs. |
| 1230 // Call C routine that may not cause GC or other trouble. |
| 1231 // We use a call and return manually because we need the argument slots to |
| 1232 // be freed. |
| 1233 |
| 1234 Label return_result_not_equal, return_result_equal; |
| 1235 if (cc == eq) { |
| 1236 // Doubles are not equal unless they have the same bit pattern. |
| 1237 // Exception: 0 and -0. |
| 1238 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
| 1239 if (CpuFeatures::IsSupported(FPU)) { |
| 1240 CpuFeatures::Scope scope(FPU); |
| 1241 // Lhs and rhs are already loaded to f12 and f14 register pairs. |
| 1242 __ mfc1(t0, f14); // f14 has LS 32 bits of rhs. |
| 1243 __ mfc1(t1, f15); // f15 has MS 32 bits of rhs. |
| 1244 __ mfc1(t2, f12); // f12 has LS 32 bits of lhs. |
| 1245 __ mfc1(t3, f13); // f13 has MS 32 bits of lhs. |
| 1246 } else { |
| 1247 // Lhs and rhs are already loaded to GP registers. |
| 1248 __ mov(t0, a0); // a0 has LS 32 bits of rhs. |
| 1249 __ mov(t1, a1); // a1 has MS 32 bits of rhs. |
| 1250 __ mov(t2, a2); // a2 has LS 32 bits of lhs. |
| 1251 __ mov(t3, a3); // a3 has MS 32 bits of lhs. |
| 1252 } |
| 1253 Register rhs_exponent = exp_first ? t0 : t1; |
| 1254 Register lhs_exponent = exp_first ? t2 : t3; |
| 1255 Register rhs_mantissa = exp_first ? t1 : t0; |
| 1256 Register lhs_mantissa = exp_first ? t3 : t2; |
| 1257 |
| 1258 __ xor_(v0, rhs_mantissa, lhs_mantissa); |
| 1259 __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg)); |
| 1260 |
| 1261 __ subu(v0, rhs_exponent, lhs_exponent); |
| 1262 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg)); |
| 1263 // 0, -0 case. |
| 1264 __ sll(rhs_exponent, rhs_exponent, kSmiTagSize); |
| 1265 __ sll(lhs_exponent, lhs_exponent, kSmiTagSize); |
| 1266 __ or_(t4, rhs_exponent, lhs_exponent); |
| 1267 __ or_(t4, t4, rhs_mantissa); |
| 1268 |
| 1269 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg)); |
| 1270 |
| 1271 __ bind(&return_result_equal); |
| 1272 __ li(v0, Operand(EQUAL)); |
| 1273 __ Ret(); |
| 1274 } |
| 1275 |
| 1276 __ bind(&return_result_not_equal); |
| 1277 |
| 1278 if (!CpuFeatures::IsSupported(FPU)) { |
| 1279 __ push(ra); |
| 1280 __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments. |
| 1281 if (!IsMipsSoftFloatABI) { |
| 1282 // We are not using MIPS FPU instructions, and the parameters for the |
| 1283 // runtime function call are prepared in a0-a3 registers, but the function |
| 1284 // we are calling is compiled with the hard-float flag and expects the |
| 1285 // hard-float ABI (parameters in f12/f14 registers). We need to copy the |
| 1286 // parameters from the a0-a3 registers to the f12/f14 register pairs. |
| 1287 __ mtc1(a0, f12); |
| 1288 __ mtc1(a1, f13); |
| 1289 __ mtc1(a2, f14); |
| 1290 __ mtc1(a3, f15); |
| 1291 } |
| 1292 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4); |
| 1293 __ pop(ra); // Because this function returns int, result is in v0. |
| 1294 __ Ret(); |
| 1295 } else { |
| 1296 CpuFeatures::Scope scope(FPU); |
| 1297 Label equal, less_than; |
| 1298 __ c(EQ, D, f12, f14); |
| 1299 __ bc1t(&equal); |
| 1300 __ nop(); |
| 1301 |
| 1302 __ c(OLT, D, f12, f14); |
| 1303 __ bc1t(&less_than); |
| 1304 __ nop(); |
| 1305 |
| 1306 // Not equal, not less, not NaN, must be greater. |
| 1307 __ li(v0, Operand(GREATER)); |
| 1308 __ Ret(); |
| 1309 |
| 1310 __ bind(&equal); |
| 1311 __ li(v0, Operand(EQUAL)); |
| 1312 __ Ret(); |
| 1313 |
| 1314 __ bind(&less_than); |
| 1315 __ li(v0, Operand(LESS)); |
| 1316 __ Ret(); |
| 1317 } |
| 1318 } |
| 1319 |
| 1320 |
| 1321 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 1322 Register lhs, |
| 1323 Register rhs) { |
| 1324 // If either operand is a JSObject or an oddball value, then they are |
| 1325 // not equal since their pointers are different. |
| 1326 // There is no test for undetectability in strict equality. |
| 1327 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); |
| 1328 Label first_non_object; |
| 1329 // Get the type of the first operand into a2 and compare it with |
| 1330 // FIRST_JS_OBJECT_TYPE. |
| 1331 __ GetObjectType(lhs, a2, a2); |
| 1332 __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1333 |
| 1334 // Return non-zero. |
| 1335 Label return_not_equal; |
| 1336 __ bind(&return_not_equal); |
| 1337 __ li(v0, Operand(1)); |
| 1338 __ Ret(); |
| 1339 |
| 1340 __ bind(&first_non_object); |
| 1341 // Check for oddballs: true, false, null, undefined. |
| 1342 __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE)); |
| 1343 |
| 1344 __ GetObjectType(rhs, a3, a3); |
| 1345 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1346 |
| 1347 // Check for oddballs: true, false, null, undefined. |
| 1348 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE)); |
| 1349 |
| 1350 // Now that we have the types we might as well check for symbol-symbol. |
| 1351 // Ensure that no non-strings have the symbol bit set. |
| 1352 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); |
| 1353 STATIC_ASSERT(kSymbolTag != 0); |
| 1354 __ And(t2, a2, Operand(a3)); |
| 1355 __ And(t0, t2, Operand(kIsSymbolMask)); |
| 1356 __ Branch(&return_not_equal, ne, t0, Operand(zero_reg)); |
| 1357 } |
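
The final check relies on the symbol bit surviving an AND of the two instance types. A minimal sketch of that test, treating the instance-type values and kIsSymbolMask as given inputs:

// Sketch of the symbol-bit test above: the AND of the two instance types
// still has the symbol bit set only if both operands are symbols, in which
// case the (already pointer-compared) operands cannot be equal.
bool BothOperandsAreSymbols(int lhs_instance_type, int rhs_instance_type,
                            int is_symbol_mask) {
  return ((lhs_instance_type & rhs_instance_type) & is_symbol_mask) != 0;
}
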
| 1358 |
| 1359 |
| 1360 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
| 1361 Register lhs, |
| 1362 Register rhs, |
| 1363 Label* both_loaded_as_doubles, |
| 1364 Label* not_heap_numbers, |
| 1365 Label* slow) { |
| 1366 __ GetObjectType(lhs, a3, a2); |
| 1367 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
| 1368 __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 1369 // If first was a heap number & second wasn't, go to slow case. |
| 1370 __ Branch(slow, ne, a3, Operand(a2)); |
| 1371 |
| 1372 // Both are heap numbers. Load them up then jump to the code we have |
| 1373 // for that. |
| 1374 if (CpuFeatures::IsSupported(FPU)) { |
| 1375 CpuFeatures::Scope scope(FPU); |
| 1376 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 1377 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 1378 } else { |
| 1379 __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 1380 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); |
| 1381 if (rhs.is(a0)) { |
| 1382 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
| 1383 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 1384 } else { |
| 1385 __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 1386 __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); |
| 1387 } |
| 1388 } |
| 1389 __ jmp(both_loaded_as_doubles); |
| 1390 } |
| 1391 |
| 1392 |
| 1393 // Fast negative check for symbol-to-symbol equality. |
| 1394 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, |
| 1395 Register lhs, |
| 1396 Register rhs, |
| 1397 Label* possible_strings, |
| 1398 Label* not_both_strings) { |
| 1399 ASSERT((lhs.is(a0) && rhs.is(a1)) || |
| 1400 (lhs.is(a1) && rhs.is(a0))); |
| 1401 |
| 1402 // a2 is object type of lhs. |
| 1403 // Ensure that no non-strings have the symbol bit set. |
| 1404 Label object_test; |
| 1405 STATIC_ASSERT(kSymbolTag != 0); |
| 1406 __ And(at, a2, Operand(kIsNotStringMask)); |
| 1407 __ Branch(&object_test, ne, at, Operand(zero_reg)); |
| 1408 __ And(at, a2, Operand(kIsSymbolMask)); |
| 1409 __ Branch(possible_strings, eq, at, Operand(zero_reg)); |
| 1410 __ GetObjectType(rhs, a3, a3); |
| 1411 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE)); |
| 1412 __ And(at, a3, Operand(kIsSymbolMask)); |
| 1413 __ Branch(possible_strings, eq, at, Operand(zero_reg)); |
| 1414 |
| 1415 // Both are symbols. We already checked they weren't the same pointer |
| 1416 // so they are not equal. |
| 1417 __ li(v0, Operand(1)); // Non-zero indicates not equal. |
| 1418 __ Ret(); |
| 1419 |
| 1420 __ bind(&object_test); |
| 1421 __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1422 __ GetObjectType(rhs, a2, a3); |
| 1423 __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1424 |
| 1425 // If both objects are undetectable, they are equal. Otherwise, they |
| 1426 // are not equal, since they are different objects and an object is not |
| 1427 // equal to undefined. |
| 1428 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 1429 __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset)); |
| 1430 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset)); |
| 1431 __ and_(a0, a2, a3); |
| 1432 __ And(a0, a0, Operand(1 << Map::kIsUndetectable)); |
| 1433 __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable)); |
| 1434 __ Ret(); |
| 1435 } |
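
The undetectable-object tail computes the result arithmetically rather than with branches. A sketch of that computation, assuming the two map bit-field bytes and the Map::kIsUndetectable bit as inputs:

// Sketch of the result computed above: zero (meaning "equal") exactly when
// both maps have the undetectable bit set, non-zero otherwise.
int UndetectableEqualityResult(int lhs_bit_field, int rhs_bit_field,
                               int undetectable_bit_mask) {
  int both_bits = lhs_bit_field & rhs_bit_field & undetectable_bit_mask;
  return both_bits ^ undetectable_bit_mask;
}
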
| 1436 |
| 1437 |
230 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, | 1438 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
231 Register object, | 1439 Register object, |
232 Register result, | 1440 Register result, |
233 Register scratch1, | 1441 Register scratch1, |
234 Register scratch2, | 1442 Register scratch2, |
235 Register scratch3, | 1443 Register scratch3, |
236 bool object_is_smi, | 1444 bool object_is_smi, |
237 Label* not_found) { | 1445 Label* not_found) { |
238 UNIMPLEMENTED_MIPS(); | 1446 // Use of registers. Register result is used as a temporary. |
| 1447 Register number_string_cache = result; |
| 1448 Register mask = scratch3; |
| 1449 |
| 1450 // Load the number string cache. |
| 1451 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
| 1452 |
| 1453 // Make the hash mask from the length of the number string cache. It |
| 1454 // contains two elements (number and string) for each cache entry. |
| 1455 __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); |
| 1456 // Divide length by two (length is a smi). |
| 1457 __ sra(mask, mask, kSmiTagSize + 1); |
| 1458 __ Addu(mask, mask, -1); // Make mask. |
| 1459 |
| 1460 // Calculate the entry in the number string cache. The hash value in the |
| 1461 // number string cache for smis is just the smi value, and the hash for |
| 1462 // doubles is the xor of the upper and lower words. See |
| 1463 // Heap::GetNumberStringCache. |
| 1464 Isolate* isolate = masm->isolate(); |
| 1465 Label is_smi; |
| 1466 Label load_result_from_cache; |
| 1467 if (!object_is_smi) { |
| 1468 __ JumpIfSmi(object, &is_smi); |
| 1469 if (CpuFeatures::IsSupported(FPU)) { |
| 1470 CpuFeatures::Scope scope(FPU); |
| 1471 __ CheckMap(object, |
| 1472 scratch1, |
| 1473 Heap::kHeapNumberMapRootIndex, |
| 1474 not_found, |
| 1475 true); |
| 1476 |
| 1477 STATIC_ASSERT(8 == kDoubleSize); |
| 1478 __ Addu(scratch1, |
| 1479 object, |
| 1480 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
| 1481 __ lw(scratch2, MemOperand(scratch1, kPointerSize)); |
| 1482 __ lw(scratch1, MemOperand(scratch1, 0)); |
| 1483 __ Xor(scratch1, scratch1, Operand(scratch2)); |
| 1484 __ And(scratch1, scratch1, Operand(mask)); |
| 1485 |
| 1486 // Calculate address of entry in string cache: each entry consists |
| 1487 // of two pointer sized fields. |
| 1488 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); |
| 1489 __ Addu(scratch1, number_string_cache, scratch1); |
| 1490 |
| 1491 Register probe = mask; |
| 1492 __ lw(probe, |
| 1493 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| 1494 __ JumpIfSmi(probe, not_found); |
| 1495 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 1496 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); |
| 1497 __ c(EQ, D, f12, f14); |
| 1498 __ bc1t(&load_result_from_cache); |
| 1499 __ nop(); // bc1t() requires explicit fill of branch delay slot. |
| 1500 __ Branch(not_found); |
| 1501 } else { |
| 1502 // Note that there is no cache check for the non-FPU case, even though |
| 1503 // it seems there could be. It may be a tiny missed optimization for |
| 1504 // non-FPU cores. |
| 1505 __ Branch(not_found); |
| 1506 } |
| 1507 } |
| 1508 |
| 1509 __ bind(&is_smi); |
| 1510 Register scratch = scratch1; |
| 1511 __ sra(scratch, object, 1); // Shift away the tag. |
| 1512 __ And(scratch, mask, Operand(scratch)); |
| 1513 |
| 1514 // Calculate address of entry in string cache: each entry consists |
| 1515 // of two pointer sized fields. |
| 1516 __ sll(scratch, scratch, kPointerSizeLog2 + 1); |
| 1517 __ Addu(scratch, number_string_cache, scratch); |
| 1518 |
| 1519 // Check if the entry is the smi we are looking for. |
| 1520 Register probe = mask; |
| 1521 __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
| 1522 __ Branch(not_found, ne, object, Operand(probe)); |
| 1523 |
| 1524 // Get the result from the cache. |
| 1525 __ bind(&load_result_from_cache); |
| 1526 __ lw(result, |
| 1527 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); |
| 1528 |
| 1529 __ IncrementCounter(isolate->counters()->number_to_string_native(), |
| 1530 1, |
| 1531 scratch1, |
| 1532 scratch2); |
239 } | 1533 } |
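
The cache probe hashes a heap number by XOR-ing the two 32-bit halves of its IEEE-754 representation and masking with (number of cache entries) - 1; smis just use the untagged value. A host-side sketch of the double case, under the assumption that it mirrors Heap::GetNumberStringCache:

#include <cstdint>
#include <cstring>

// Sketch of the number-string-cache hash for a heap number: XOR of the
// upper and lower 32-bit words of the double, masked to the cache size.
uint32_t NumberStringCacheHash(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return (lo ^ hi) & mask;  // mask == (number of cache entries) - 1.
}
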
240 | 1534 |
241 | 1535 |
242 void NumberToStringStub::Generate(MacroAssembler* masm) { | 1536 void NumberToStringStub::Generate(MacroAssembler* masm) { |
243 UNIMPLEMENTED_MIPS(); | 1537 Label runtime; |
| 1538 |
| 1539 __ lw(a1, MemOperand(sp, 0)); |
| 1540 |
| 1541 // Generate code to lookup number in the number string cache. |
| 1542 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime); |
| 1543 __ Addu(sp, sp, Operand(1 * kPointerSize)); |
| 1544 __ Ret(); |
| 1545 |
| 1546 __ bind(&runtime); |
| 1547 // Handle number to string in the runtime system if not found in the cache. |
| 1548 __ TailCallRuntime(Runtime::kNumberToString, 1, 1); |
244 } | 1549 } |
245 | 1550 |
246 | 1551 |
247 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared. | 1552 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared. |
248 // On exit, v0 is 0, positive, or negative (smi) to indicate the result | 1553 // On exit, v0 is 0, positive, or negative (smi) to indicate the result |
249 // of the comparison. | 1554 // of the comparison. |
250 void CompareStub::Generate(MacroAssembler* masm) { | 1555 void CompareStub::Generate(MacroAssembler* masm) { |
251 UNIMPLEMENTED_MIPS(); | 1556 Label slow; // Call builtin. |
| 1557 Label not_smis, both_loaded_as_doubles; |
| 1558 |
| 1559 |
| 1560 if (include_smi_compare_) { |
| 1561 Label not_two_smis, smi_done; |
| 1562 __ Or(a2, a1, a0); |
| 1563 __ JumpIfNotSmi(a2, ¬_two_smis); |
| 1564 __ sra(a1, a1, 1); |
| 1565 __ sra(a0, a0, 1); |
| 1566 __ Subu(v0, a1, a0); |
| 1567 __ Ret(); |
| 1568 __ bind(¬_two_smis); |
| 1569 } else if (FLAG_debug_code) { |
| 1570 __ Or(a2, a1, a0); |
| 1571 __ And(a2, a2, kSmiTagMask); |
| 1572 __ Assert(ne, "CompareStub: unexpected smi operands.", |
| 1573 a2, Operand(zero_reg)); |
| 1574 } |
| 1575 |
| 1576 |
| 1577 // NOTICE! This code is only reached after a smi-fast-case check, so |
| 1578 // it is certain that at least one operand isn't a smi. |
| 1579 |
| 1580 // Handle the case where the objects are identical. Either returns the answer |
| 1581 // or goes to slow. Only falls through if the objects were not identical. |
| 1582 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); |
| 1583 |
| 1584 // If either is a Smi (we know that not both are), then they can only |
| 1585 // be strictly equal if the other is a HeapNumber. |
| 1586 STATIC_ASSERT(kSmiTag == 0); |
| 1587 ASSERT_EQ(0, Smi::FromInt(0)); |
| 1588 __ And(t2, lhs_, Operand(rhs_)); |
| 1589 __ JumpIfNotSmi(t2, ¬_smis, t0); |
| 1590 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
| 1591 // 1) Return the answer. |
| 1592 // 2) Go to slow. |
| 1593 // 3) Fall through to both_loaded_as_doubles. |
| 1594 // 4) Jump to rhs_not_nan. |
| 1595 // In cases 3 and 4 we have found out we were dealing with a number-number |
| 1596 // comparison and the numbers have been loaded into f12 and f14 as doubles, |
| 1597 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. |
| 1598 EmitSmiNonsmiComparison(masm, lhs_, rhs_, |
| 1599 &both_loaded_as_doubles, &slow, strict_); |
| 1600 |
| 1601 __ bind(&both_loaded_as_doubles); |
| 1602 // f12, f14 are the double representations of the left hand side |
| 1603 // and the right hand side if we have FPU. Otherwise a2, a3 represent |
| 1604 // left hand side and a0, a1 represent right hand side. |
| 1605 |
| 1606 Isolate* isolate = masm->isolate(); |
| 1607 if (CpuFeatures::IsSupported(FPU)) { |
| 1608 CpuFeatures::Scope scope(FPU); |
| 1609 Label nan; |
| 1610 __ li(t0, Operand(LESS)); |
| 1611 __ li(t1, Operand(GREATER)); |
| 1612 __ li(t2, Operand(EQUAL)); |
| 1613 |
| 1614 // Check if either rhs or lhs is NaN. |
| 1615 __ c(UN, D, f12, f14); |
| 1616 __ bc1t(&nan); |
| 1617 __ nop(); |
| 1618 |
| 1619 // Check if LESS condition is satisfied. If true, move conditionally |
| 1620 // result to v0. |
| 1621 __ c(OLT, D, f12, f14); |
| 1622 __ movt(v0, t0); |
| 1623 // Use the previous check to conditionally store the opposite condition |
| 1624 // (GREATER) into v0. If rhs equals lhs, this will be corrected by the |
| 1625 // next check. |
| 1626 __ movf(v0, t1); |
| 1627 // Check if EQUAL condition is satisfied. If true, move conditionally |
| 1628 // result to v0. |
| 1629 __ c(EQ, D, f12, f14); |
| 1630 __ movt(v0, t2); |
| 1631 |
| 1632 __ Ret(); |
| 1633 |
| 1634 __ bind(&nan); |
| 1635 // NaN comparisons always fail. |
| 1636 // Load whatever we need in v0 to make the comparison fail. |
| 1637 if (cc_ == lt || cc_ == le) { |
| 1638 __ li(v0, Operand(GREATER)); |
| 1639 } else { |
| 1640 __ li(v0, Operand(LESS)); |
| 1641 } |
| 1642 __ Ret(); |
| 1643 } else { |
| 1644 // Checks for NaN in the doubles we have loaded. Can return the answer or |
| 1645 // fall through if neither is a NaN. Also binds rhs_not_nan. |
| 1646 EmitNanCheck(masm, cc_); |
| 1647 |
| 1648 // Compares two doubles that are not NaNs. Returns the answer. |
| 1649 // Never falls through. |
| 1650 EmitTwoNonNanDoubleComparison(masm, cc_); |
| 1651 } |
| 1652 |
| 1653 __ bind(¬_smis); |
| 1654 // At this point we know we are dealing with two different objects, |
| 1655 // and neither of them is a Smi. The objects are in lhs_ and rhs_. |
| 1656 if (strict_) { |
| 1657 // This returns non-equal for some object types, or falls through if it |
| 1658 // was not lucky. |
| 1659 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); |
| 1660 } |
| 1661 |
| 1662 Label check_for_symbols; |
| 1663 Label flat_string_check; |
| 1664 // Check for heap-number-heap-number comparison. Can jump to slow case, |
| 1665 // or load both doubles and jump to the code that handles |
| 1666 // that case. If the inputs are not doubles then jumps to check_for_symbols. |
| 1667 // In this case a2 will contain the type of lhs_. |
| 1668 EmitCheckForTwoHeapNumbers(masm, |
| 1669 lhs_, |
| 1670 rhs_, |
| 1671 &both_loaded_as_doubles, |
| 1672 &check_for_symbols, |
| 1673 &flat_string_check); |
| 1674 |
| 1675 __ bind(&check_for_symbols); |
| 1676 if (cc_ == eq && !strict_) { |
| 1677 // Returns an answer for two symbols or two detectable objects. |
| 1678 // Otherwise jumps to string case or not both strings case. |
| 1679 // Assumes that a2 is the type of lhs_ on entry. |
| 1680 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); |
| 1681 } |
| 1682 |
| 1683 // Check for both being sequential ASCII strings, and inline if that is the |
| 1684 // case. |
| 1685 __ bind(&flat_string_check); |
| 1686 |
| 1687 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow); |
| 1688 |
| 1689 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3); |
| 1690 if (cc_ == eq) { |
| 1691 StringCompareStub::GenerateFlatAsciiStringEquals(masm, |
| 1692 lhs_, |
| 1693 rhs_, |
| 1694 a2, |
| 1695 a3, |
| 1696 t0); |
| 1697 } else { |
| 1698 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, |
| 1699 lhs_, |
| 1700 rhs_, |
| 1701 a2, |
| 1702 a3, |
| 1703 t0, |
| 1704 t1); |
| 1705 } |
| 1706 // Never falls through to here. |
| 1707 |
| 1708 __ bind(&slow); |
| 1709 // Prepare for call to builtin. Push object pointers, a0 (lhs) first, |
| 1710 // a1 (rhs) second. |
| 1711 __ Push(lhs_, rhs_); |
| 1712 // Figure out which native to call and setup the arguments. |
| 1713 Builtins::JavaScript native; |
| 1714 if (cc_ == eq) { |
| 1715 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| 1716 } else { |
| 1717 native = Builtins::COMPARE; |
| 1718 int ncr; // NaN compare result. |
| 1719 if (cc_ == lt || cc_ == le) { |
| 1720 ncr = GREATER; |
| 1721 } else { |
| 1722 ASSERT(cc_ == gt || cc_ == ge); // Remaining cases. |
| 1723 ncr = LESS; |
| 1724 } |
| 1725 __ li(a0, Operand(Smi::FromInt(ncr))); |
| 1726 __ push(a0); |
| 1727 } |
| 1728 |
| 1729 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 1730 // tagged as a small integer. |
| 1731 __ InvokeBuiltin(native, JUMP_FUNCTION); |
252 } | 1732 } |
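
The smi fast case at the top of the stub works because, with one-bit smi tagging, comparing two smis reduces to subtracting the untagged values; the sign of the difference is the comparison result. A sketch, assuming the usual tagging of value x as x << 1:

// Sketch of the smi-smi fast path: untag both operands and subtract; the
// result is negative, zero, or positive, like the stub's v0.
int32_t CompareTaggedSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
  int32_t lhs = lhs_tagged >> 1;  // sra a1, a1, 1
  int32_t rhs = rhs_tagged >> 1;  // sra a0, a0, 1
  return lhs - rhs;               // Cannot overflow: both fit in 31 bits.
}
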
253 | 1733 |
254 | 1734 |
255 // This stub does not handle the inlined cases (Smis, Booleans, undefined). | 1735 // This stub does not handle the inlined cases (Smis, Booleans, undefined). |
256 // The stub returns zero for false, and a non-zero value for true. | 1736 // The stub returns zero for false, and a non-zero value for true. |
257 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1737 void ToBooleanStub::Generate(MacroAssembler* masm) { |
258 UNIMPLEMENTED_MIPS(); | 1738 // This stub uses FPU instructions. |
| 1739 ASSERT(CpuFeatures::IsEnabled(FPU)); |
| 1740 |
| 1741 Label false_result; |
| 1742 Label not_heap_number; |
| 1743 Register scratch0 = t5.is(tos_) ? t3 : t5; |
| 1744 |
| 1745 __ LoadRoot(scratch0, Heap::kNullValueRootIndex); |
| 1746 __ Branch(&false_result, eq, tos_, Operand(scratch0)); |
| 1747 |
| 1748 // HeapNumber => false if +0, -0, or NaN. |
| 1749 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
| 1750 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 1751 __ Branch(¬_heap_number, ne, scratch0, Operand(at)); |
| 1752 |
| 1753 __ Subu(at, tos_, Operand(kHeapObjectTag)); |
| 1754 __ ldc1(f12, MemOperand(at, HeapNumber::kValueOffset)); |
| 1755 __ fcmp(f12, 0.0, UEQ); |
| 1756 |
| 1757 // "tos_" is a register, and contains a non zero value by default. |
| 1758 // Hence we only need to overwrite "tos_" with zero to return false for |
| 1759 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. |
| 1760 __ movt(tos_, zero_reg); |
| 1761 __ Ret(); |
| 1762 |
| 1763 __ bind(¬_heap_number); |
| 1764 |
| 1765 // Check if the value is 'null'. |
| 1766 // 'null' => false. |
| 1767 __ LoadRoot(at, Heap::kNullValueRootIndex); |
| 1768 __ Branch(&false_result, eq, tos_, Operand(at)); |
| 1769 |
| 1770 // It can be an undetectable object. |
| 1771 // Undetectable => false. |
| 1772 __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
| 1773 __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset)); |
| 1774 __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); |
| 1775 __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable)); |
| 1776 |
| 1777 // JavaScript object => true. |
| 1778 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
| 1779 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); |
| 1780 |
| 1781 // "tos_" is a register and contains a non-zero value. |
| 1782 // Hence we implicitly return true if the greater than |
| 1783 // condition is satisfied. |
| 1784 __ Ret(gt, scratch0, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1785 |
| 1786 // Check for string. |
| 1787 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
| 1788 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); |
| 1789 // "tos_" is a register and contains a non-zero value. |
| 1790 // Hence we implicitly return true if the greater than |
| 1791 // condition is satisfied. |
| 1792 __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE)); |
| 1793 |
| 1794 // String value => false iff empty, i.e., length is zero. |
| 1795 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); |
| 1796 // If length is zero, "tos_" contains zero ==> false. |
| 1797 // If length is not zero, "tos_" contains a non-zero value ==> true. |
| 1798 __ Ret(); |
| 1799 |
| 1800 // Return 0 in "tos_" for false. |
| 1801 __ bind(&false_result); |
| 1802 __ mov(tos_, zero_reg); |
| 1803 __ Ret(); |
259 } | 1804 } |
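
For reference, the branch structure above implements the usual JavaScript truthiness rules for the values this stub still has to handle. A rough host-side sketch, where the boolean flags stand in for the map and instance-type checks, and smis, booleans and undefined are assumed to be filtered out before the stub is reached:

// Rough sketch of the semantics: null, undetectable objects, NaN, +/-0 and
// empty strings are false; other heap numbers, strings and objects are true.
bool ToBooleanSlowCases(bool is_null, bool is_heap_number, double number,
                        bool is_undetectable, bool is_string,
                        int string_length) {
  if (is_null) return false;
  if (is_heap_number) return !(number == 0.0 || number != number);  // NaN test.
  if (is_undetectable) return false;
  if (is_string) return string_length != 0;
  return true;  // Remaining heap objects (e.g. JS objects) are true.
}
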
260 | 1805 |
261 | 1806 |
262 Handle<Code> GetTypeRecordingUnaryOpStub(int key, | 1807 Handle<Code> GetTypeRecordingUnaryOpStub(int key, |
263 TRUnaryOpIC::TypeInfo type_info) { | 1808 TRUnaryOpIC::TypeInfo type_info) { |
264 TypeRecordingUnaryOpStub stub(key, type_info); | 1809 TypeRecordingUnaryOpStub stub(key, type_info); |
265 return stub.GetCode(); | 1810 return stub.GetCode(); |
266 } | 1811 } |
267 | 1812 |
268 | 1813 |
269 const char* TypeRecordingUnaryOpStub::GetName() { | 1814 const char* TypeRecordingUnaryOpStub::GetName() { |
270 UNIMPLEMENTED_MIPS(); | 1815 if (name_ != NULL) return name_; |
271 return NULL; | 1816 const int kMaxNameLength = 100; |
| 1817 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |
| 1818 kMaxNameLength); |
| 1819 if (name_ == NULL) return "OOM"; |
| 1820 const char* op_name = Token::Name(op_); |
| 1821 const char* overwrite_name = NULL; // Make g++ happy. |
| 1822 switch (mode_) { |
| 1823 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; |
| 1824 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; |
| 1825 } |
| 1826 |
| 1827 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |
| 1828 "TypeRecordingUnaryOpStub_%s_%s_%s", |
| 1829 op_name, |
| 1830 overwrite_name, |
| 1831 TRUnaryOpIC::GetName(operand_type_)); |
| 1832 return name_; |
272 } | 1833 } |
273 | 1834 |
274 | 1835 |
275 // TODO(svenpanne): Use virtual functions instead of switch. | 1836 // TODO(svenpanne): Use virtual functions instead of switch. |
276 void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) { | 1837 void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) { |
277 UNIMPLEMENTED_MIPS(); | 1838 switch (operand_type_) { |
| 1839 case TRUnaryOpIC::UNINITIALIZED: |
| 1840 GenerateTypeTransition(masm); |
| 1841 break; |
| 1842 case TRUnaryOpIC::SMI: |
| 1843 GenerateSmiStub(masm); |
| 1844 break; |
| 1845 case TRUnaryOpIC::HEAP_NUMBER: |
| 1846 GenerateHeapNumberStub(masm); |
| 1847 break; |
| 1848 case TRUnaryOpIC::GENERIC: |
| 1849 GenerateGenericStub(masm); |
| 1850 break; |
| 1851 } |
278 } | 1852 } |
279 | 1853 |
280 | 1854 |
281 void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 1855 void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
282 UNIMPLEMENTED_MIPS(); | 1856 // Argument is in a0 and v0 at this point, so we can overwrite a0. |
| 1857 // Push this stub's key. Although the operation and the type info are |
| 1858 // encoded into the key, the encoding is opaque, so push them too. |
| 1859 __ li(a2, Operand(Smi::FromInt(MinorKey()))); |
| 1860 __ li(a1, Operand(Smi::FromInt(op_))); |
| 1861 __ li(a0, Operand(Smi::FromInt(operand_type_))); |
| 1862 |
| 1863 __ Push(v0, a2, a1, a0); |
| 1864 |
| 1865 __ TailCallExternalReference( |
| 1866 ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch), |
| 1867 masm->isolate()), |
| 1868 4, |
| 1869 1); |
283 } | 1870 } |
284 | 1871 |
285 | 1872 |
286 // TODO(svenpanne): Use virtual functions instead of switch. | 1873 // TODO(svenpanne): Use virtual functions instead of switch. |
287 void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 1874 void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
288 UNIMPLEMENTED_MIPS(); | 1875 switch (op_) { |
| 1876 case Token::SUB: |
| 1877 GenerateSmiStubSub(masm); |
| 1878 break; |
| 1879 case Token::BIT_NOT: |
| 1880 GenerateSmiStubBitNot(masm); |
| 1881 break; |
| 1882 default: |
| 1883 UNREACHABLE(); |
| 1884 } |
289 } | 1885 } |
290 | 1886 |
291 | 1887 |
292 void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { | 1888 void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { |
293 UNIMPLEMENTED_MIPS(); | 1889 Label non_smi, slow; |
| 1890 GenerateSmiCodeSub(masm, &non_smi, &slow); |
| 1891 __ bind(&non_smi); |
| 1892 __ bind(&slow); |
| 1893 GenerateTypeTransition(masm); |
294 } | 1894 } |
295 | 1895 |
296 | 1896 |
297 void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { | 1897 void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { |
298 UNIMPLEMENTED_MIPS(); | 1898 Label non_smi; |
| 1899 GenerateSmiCodeBitNot(masm, &non_smi); |
| 1900 __ bind(&non_smi); |
| 1901 GenerateTypeTransition(masm); |
299 } | 1902 } |
300 | 1903 |
301 | 1904 |
302 void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, | 1905 void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, |
303 Label* non_smi, | 1906 Label* non_smi, |
304 Label* slow) { | 1907 Label* slow) { |
305 UNIMPLEMENTED_MIPS(); | 1908 __ JumpIfNotSmi(a0, non_smi); |
| 1909 |
| 1910 // The result of negating zero or the smallest negative smi is not a smi. |
| 1911 __ And(t0, a0, ~0x80000000); |
| 1912 __ Branch(slow, eq, t0, Operand(zero_reg)); |
| 1913 |
| 1914 // Return '0 - value'. |
| 1915 __ Subu(v0, zero_reg, a0); |
| 1916 __ Ret(); |
306 } | 1917 } |
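
The guard above masks away the sign bit and goes to the slow case when nothing is left, which catches exactly the two tagged values whose negation is not a smi. A sketch of the same test, assuming one-bit smi tagging:

// Sketch of the guard: negation stays a smi unless the operand is smi 0
// (negating would give -0) or the most negative smi, whose tagged form is
// 0x80000000 and whose negation overflows the smi range.
bool SmiNegationStaysSmi(int32_t tagged) {
  return (static_cast<uint32_t>(tagged) & 0x7fffffffu) != 0;
}
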
307 | 1918 |
308 | 1919 |
309 void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, | 1920 void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, |
310 Label* non_smi) { | 1921 Label* non_smi) { |
311 UNIMPLEMENTED_MIPS(); | 1922 __ JumpIfNotSmi(a0, non_smi); |
| 1923 |
| 1924 // Flip bits and revert inverted smi-tag. |
| 1925 __ Neg(v0, a0); |
| 1926 __ And(v0, v0, ~kSmiTagMask); |
| 1927 __ Ret(); |
312 } | 1928 } |
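
Bitwise NOT can be done directly on the tagged value: flipping every bit of 2*x and then clearing the (now set) tag bit yields the tagged form of ~x. A sketch of that identity, with kSmiTagMask assumed to be 1:

// Sketch of the smi BIT_NOT identity used above.
int32_t SmiBitNot(int32_t tagged) {
  return ~tagged & ~1;  // == 2 * ~(tagged >> 1), i.e. ~x re-tagged.
}
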
313 | 1929 |
314 | 1930 |
315 // TODO(svenpanne): Use virtual functions instead of switch. | 1931 // TODO(svenpanne): Use virtual functions instead of switch. |
316 void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 1932 void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
317 UNIMPLEMENTED_MIPS(); | 1933 switch (op_) { |
| 1934 case Token::SUB: |
| 1935 GenerateHeapNumberStubSub(masm); |
| 1936 break; |
| 1937 case Token::BIT_NOT: |
| 1938 GenerateHeapNumberStubBitNot(masm); |
| 1939 break; |
| 1940 default: |
| 1941 UNREACHABLE(); |
| 1942 } |
318 } | 1943 } |
319 | 1944 |
320 | 1945 |
321 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { | 1946 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { |
322 UNIMPLEMENTED_MIPS(); | 1947 Label non_smi, slow; |
| 1948 GenerateSmiCodeSub(masm, &non_smi, &slow); |
| 1949 __ bind(&non_smi); |
| 1950 GenerateHeapNumberCodeSub(masm, &slow); |
| 1951 __ bind(&slow); |
| 1952 GenerateTypeTransition(masm); |
323 } | 1953 } |
324 | 1954 |
325 | 1955 |
326 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot( | 1956 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot( |
327 MacroAssembler* masm) { | 1957 MacroAssembler* masm) { |
328 UNIMPLEMENTED_MIPS(); | 1958 Label non_smi, slow; |
329 } | 1959 GenerateSmiCodeBitNot(masm, &non_smi); |
330 | 1960 __ bind(&non_smi); |
| 1961 GenerateHeapNumberCodeBitNot(masm, &slow); |
| 1962 __ bind(&slow); |
| 1963 GenerateTypeTransition(masm); |
| 1964 } |
331 | 1965 |
332 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, | 1966 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, |
333 Label* slow) { | 1967 Label* slow) { |
334 UNIMPLEMENTED_MIPS(); | 1968 EmitCheckForHeapNumber(masm, a0, a1, t2, slow); |
| 1969 // a0 is a heap number. Get a new heap number in a1. |
| 1970 if (mode_ == UNARY_OVERWRITE) { |
| 1971 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
| 1972 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. |
| 1973 __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
| 1974 } else { |
| 1975 Label slow_allocate_heapnumber, heapnumber_allocated; |
| 1976 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber); |
| 1977 __ jmp(&heapnumber_allocated); |
| 1978 |
| 1979 __ bind(&slow_allocate_heapnumber); |
| 1980 __ EnterInternalFrame(); |
| 1981 __ push(a0); |
| 1982 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 1983 __ mov(a1, v0); |
| 1984 __ pop(a0); |
| 1985 __ LeaveInternalFrame(); |
| 1986 |
| 1987 __ bind(&heapnumber_allocated); |
| 1988 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
| 1989 __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
| 1990 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset)); |
| 1991 __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. |
| 1992 __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset)); |
| 1993 __ mov(v0, a1); |
| 1994 } |
| 1995 __ Ret(); |
335 } | 1996 } |
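
Negating a heap number only requires flipping the IEEE-754 sign bit of the stored double, which is what the XOR with HeapNumber::kSignMask on the exponent word does. A host-side sketch of the same operation on the whole 64-bit pattern:

#include <cstdint>
#include <cstring>

// Sketch of heap-number negation: flip only the sign bit of the double.
double NegateByFlippingSignBit(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= uint64_t{1} << 63;  // Same bit as kSignMask in the high word.
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}
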
336 | 1997 |
337 | 1998 |
338 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot( | 1999 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot( |
339 MacroAssembler* masm, Label* slow) { | 2000 MacroAssembler* masm, Label* slow) { |
340 UNIMPLEMENTED_MIPS(); | 2001 EmitCheckForHeapNumber(masm, a0, a1, t2, slow); |
| 2002 // Convert the heap number in a0 to an untagged integer in a1. |
| 2003 __ ConvertToInt32(a0, a1, a2, a3, f0, slow); |
| 2004 |
| 2005 // Do the bitwise operation and check if the result fits in a smi. |
| 2006 Label try_float; |
| 2007 __ Neg(a1, a1); |
| 2008 __ Addu(a2, a1, Operand(0x40000000)); |
| 2009 __ Branch(&try_float, lt, a2, Operand(zero_reg)); |
| 2010 |
| 2011 // Tag the result as a smi and we're done. |
| 2012 __ SmiTag(v0, a1); |
| 2013 __ Ret(); |
| 2014 |
| 2015 // Try to store the result in a heap number. |
| 2016 __ bind(&try_float); |
| 2017 if (mode_ == UNARY_NO_OVERWRITE) { |
| 2018 Label slow_allocate_heapnumber, heapnumber_allocated; |
| 2019 __ AllocateHeapNumber(v0, a2, a3, t2, &slow_allocate_heapnumber); |
| 2020 __ jmp(&heapnumber_allocated); |
| 2021 |
| 2022 __ bind(&slow_allocate_heapnumber); |
| 2023 __ EnterInternalFrame(); |
| 2024 __ push(a1); |
| 2025 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 2026 __ pop(a1); |
| 2027 __ LeaveInternalFrame(); |
| 2028 |
| 2029 __ bind(&heapnumber_allocated); |
| 2030 } |
| 2031 |
| 2032 if (CpuFeatures::IsSupported(FPU)) { |
| 2033 // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. |
| 2034 CpuFeatures::Scope scope(FPU); |
| 2035 __ mtc1(a1, f0); |
| 2036 __ cvt_d_w(f0, f0); |
| 2037 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 2038 __ Ret(); |
| 2039 } else { |
| 2040 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not |
| 2041 // have to set up a frame. |
| 2042 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3); |
| 2043 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 2044 } |
341 } | 2045 } |
342 | 2046 |
343 | 2047 |
344 // TODO(svenpanne): Use virtual functions instead of switch. | 2048 // TODO(svenpanne): Use virtual functions instead of switch. |
345 void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { | 2049 void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
346 UNIMPLEMENTED_MIPS(); | 2050 switch (op_) { |
| 2051 case Token::SUB: |
| 2052 GenerateGenericStubSub(masm); |
| 2053 break; |
| 2054 case Token::BIT_NOT: |
| 2055 GenerateGenericStubBitNot(masm); |
| 2056 break; |
| 2057 default: |
| 2058 UNREACHABLE(); |
| 2059 } |
347 } | 2060 } |
348 | 2061 |
349 | 2062 |
350 void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) { | 2063 void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) { |
351 UNIMPLEMENTED_MIPS(); | 2064 Label non_smi, slow; |
| 2065 GenerateSmiCodeSub(masm, &non_smi, &slow); |
| 2066 __ bind(&non_smi); |
| 2067 GenerateHeapNumberCodeSub(masm, &slow); |
| 2068 __ bind(&slow); |
| 2069 GenerateGenericCodeFallback(masm); |
352 } | 2070 } |
353 | 2071 |
354 | 2072 |
355 void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) { | 2073 void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) { |
356 UNIMPLEMENTED_MIPS(); | 2074 Label non_smi, slow; |
| 2075 GenerateSmiCodeBitNot(masm, &non_smi); |
| 2076 __ bind(&non_smi); |
| 2077 GenerateHeapNumberCodeBitNot(masm, &slow); |
| 2078 __ bind(&slow); |
| 2079 GenerateGenericCodeFallback(masm); |
357 } | 2080 } |
358 | 2081 |
359 | 2082 |
360 void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback( | 2083 void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback( |
361 MacroAssembler* masm) { | 2084 MacroAssembler* masm) { |
362 UNIMPLEMENTED_MIPS(); | 2085 // Handle the slow case by jumping to the JavaScript builtin. |
363 } | 2086 __ push(a0); |
364 | 2087 switch (op_) { |
365 | 2088 case Token::SUB: |
| 2089 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); |
| 2090 break; |
| 2091 case Token::BIT_NOT: |
| 2092 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
| 2093 break; |
| 2094 default: |
| 2095 UNREACHABLE(); |
| 2096 } |
| 2097 } |
| 2098 |
| 2099 |
366 Handle<Code> GetTypeRecordingBinaryOpStub(int key, | 2100 Handle<Code> GetTypeRecordingBinaryOpStub(int key, |
367 TRBinaryOpIC::TypeInfo type_info, | 2101 TRBinaryOpIC::TypeInfo type_info, |
368 TRBinaryOpIC::TypeInfo result_type_info) { | 2102 TRBinaryOpIC::TypeInfo result_type_info) { |
369 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); | 2103 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); |
370 return stub.GetCode(); | 2104 return stub.GetCode(); |
371 } | 2105 } |
372 | 2106 |
373 | 2107 |
374 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 2108 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
375 UNIMPLEMENTED_MIPS(); | 2109 Label get_result; |
| 2110 |
| 2111 __ Push(a1, a0); |
| 2112 |
| 2113 __ li(a2, Operand(Smi::FromInt(MinorKey()))); |
| 2114 __ li(a1, Operand(Smi::FromInt(op_))); |
| 2115 __ li(a0, Operand(Smi::FromInt(operands_type_))); |
| 2116 __ Push(a2, a1, a0); |
| 2117 |
| 2118 __ TailCallExternalReference( |
| 2119 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch), |
| 2120 masm->isolate()), |
| 2121 5, |
| 2122 1); |
376 } | 2123 } |
377 | 2124 |
378 | 2125 |
379 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( | 2126 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
380 MacroAssembler* masm) { | 2127 MacroAssembler* masm) { |
381 UNIMPLEMENTED(); | 2128 UNIMPLEMENTED(); |
382 } | 2129 } |
383 | 2130 |
384 | 2131 |
385 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { | 2132 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { |
386 UNIMPLEMENTED_MIPS(); | 2133 switch (operands_type_) { |
| 2134 case TRBinaryOpIC::UNINITIALIZED: |
| 2135 GenerateTypeTransition(masm); |
| 2136 break; |
| 2137 case TRBinaryOpIC::SMI: |
| 2138 GenerateSmiStub(masm); |
| 2139 break; |
| 2140 case TRBinaryOpIC::INT32: |
| 2141 GenerateInt32Stub(masm); |
| 2142 break; |
| 2143 case TRBinaryOpIC::HEAP_NUMBER: |
| 2144 GenerateHeapNumberStub(masm); |
| 2145 break; |
| 2146 case TRBinaryOpIC::ODDBALL: |
| 2147 GenerateOddballStub(masm); |
| 2148 break; |
| 2149 case TRBinaryOpIC::BOTH_STRING: |
| 2150 GenerateBothStringStub(masm); |
| 2151 break; |
| 2152 case TRBinaryOpIC::STRING: |
| 2153 GenerateStringStub(masm); |
| 2154 break; |
| 2155 case TRBinaryOpIC::GENERIC: |
| 2156 GenerateGeneric(masm); |
| 2157 break; |
| 2158 default: |
| 2159 UNREACHABLE(); |
| 2160 } |
387 } | 2161 } |
388 | 2162 |
389 | 2163 |
390 const char* TypeRecordingBinaryOpStub::GetName() { | 2164 const char* TypeRecordingBinaryOpStub::GetName() { |
391 UNIMPLEMENTED_MIPS(); | 2165 if (name_ != NULL) return name_; |
| 2166 const int kMaxNameLength = 100; |
| 2167 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |
| 2168 kMaxNameLength); |
| 2169 if (name_ == NULL) return "OOM"; |
| 2170 const char* op_name = Token::Name(op_); |
| 2171 const char* overwrite_name; |
| 2172 switch (mode_) { |
| 2173 case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
| 2174 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
| 2175 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
| 2176 default: overwrite_name = "UnknownOverwrite"; break; |
| 2177 } |
| 2178 |
| 2179 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |
| 2180 "TypeRecordingBinaryOpStub_%s_%s_%s", |
| 2181 op_name, |
| 2182 overwrite_name, |
| 2183 TRBinaryOpIC::GetName(operands_type_)); |
392 return name_; | 2184 return name_; |
393 } | 2185 } |
394 | 2186 |
395 | 2187 |
396 | 2188 |
397 void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation( | 2189 void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation( |
398 MacroAssembler* masm) { | 2190 MacroAssembler* masm) { |
399 UNIMPLEMENTED_MIPS(); | 2191 Register left = a1; |
| 2192 Register right = a0; |
| 2193 |
| 2194 Register scratch1 = t0; |
| 2195 Register scratch2 = t1; |
| 2196 |
| 2197 ASSERT(right.is(a0)); |
| 2198 STATIC_ASSERT(kSmiTag == 0); |
| 2199 |
| 2200 Label not_smi_result; |
| 2201 switch (op_) { |
| 2202 case Token::ADD: |
| 2203 __ AdduAndCheckForOverflow(v0, left, right, scratch1); |
| 2204 __ RetOnNoOverflow(scratch1); |
| 2205 // No need to revert anything - right and left are intact. |
| 2206 break; |
| 2207 case Token::SUB: |
| 2208 __ SubuAndCheckForOverflow(v0, left, right, scratch1); |
| 2209 __ RetOnNoOverflow(scratch1); |
| 2210 // No need to revert anything - right and left are intact. |
| 2211 break; |
| 2212 case Token::MUL: { |
| 2213 // Remove tag from one of the operands. This way the multiplication result |
| 2214 // will be a smi if it fits the smi range. |
| 2215 __ SmiUntag(scratch1, right); |
| 2216 // Do multiplication. |
| 2217 // lo = lower 32 bits of scratch1 * left. |
| 2218 // hi = higher 32 bits of scratch1 * left. |
| 2219 __ Mult(left, scratch1); |
| 2220 // Check for overflowing the smi range - no overflow if higher 33 bits of |
| 2221 // the result are identical. |
| 2222 __ mflo(scratch1); |
| 2223 __ mfhi(scratch2); |
| 2224 __ sra(scratch1, scratch1, 31); |
| 2225 __ Branch(¬_smi_result, ne, scratch1, Operand(scratch2)); |
| 2226 // Go slow on zero result to handle -0. |
| 2227 __ mflo(v0); |
| 2228 __ Ret(ne, v0, Operand(zero_reg)); |
| 2229 // We need -0 if we were multiplying a negative number with 0 to get 0. |
| 2230 // We know one of them was zero. |
| 2231 __ Addu(scratch2, right, left); |
| 2232 Label skip; |
| 2233 // ARM uses the 'pl' condition, which is 'ge'. |
| 2234 // Negating it results in 'lt'. |
| 2235 __ Branch(&skip, lt, scratch2, Operand(zero_reg)); |
| 2236 ASSERT(Smi::FromInt(0) == 0); |
| 2237 __ mov(v0, zero_reg); |
| 2238 __ Ret(); // Return smi 0 if the non-zero one was positive. |
| 2239 __ bind(&skip); |
| 2240 // We fall through here if we multiplied a negative number with 0, because |
| 2241 // that would mean we should produce -0. |
| 2242 } |
| 2243 break; |
| 2244 case Token::DIV: { |
| 2245 Label done; |
| 2246 __ SmiUntag(scratch2, right); |
| 2247 __ SmiUntag(scratch1, left); |
| 2248 __ Div(scratch1, scratch2); |
| 2249 // A minor optimization: div may be calculated asynchronously, so we check |
| 2250 // for division by zero before getting the result. |
| 2251 __ Branch(¬_smi_result, eq, scratch2, Operand(zero_reg)); |
| 2252 // If the result is 0, we need to make sure the divisor (right) is |
| 2253 // positive, otherwise it is a -0 case. |
| 2254 // Quotient is in 'lo', remainder is in 'hi'. |
| 2255 // Check for no remainder first. |
| 2256 __ mfhi(scratch1); |
| 2257 __ Branch(¬_smi_result, ne, scratch1, Operand(zero_reg)); |
| 2258 __ mflo(scratch1); |
| 2259 __ Branch(&done, ne, scratch1, Operand(zero_reg)); |
| 2260 __ Branch(¬_smi_result, lt, scratch2, Operand(zero_reg)); |
| 2261 __ bind(&done); |
| 2262 // Check that the signed result fits in a Smi. |
| 2263 __ Addu(scratch2, scratch1, Operand(0x40000000)); |
| 2264 __ Branch(¬_smi_result, lt, scratch2, Operand(zero_reg)); |
| 2265 __ SmiTag(v0, scratch1); |
| 2266 __ Ret(); |
| 2267 } |
| 2268 break; |
| 2269 case Token::MOD: { |
| 2270 Label done; |
| 2271 __ SmiUntag(scratch2, right); |
| 2272 __ SmiUntag(scratch1, left); |
| 2273 __ Div(scratch1, scratch2); |
| 2274 // A minor optimization: div may be calculated asynchronously, so we check |
| 2275 // for division by 0 before calling mfhi. |
| 2276 // Check for zero on the right hand side. |
| 2277 __ Branch(¬_smi_result, eq, scratch2, Operand(zero_reg)); |
| 2278 // If the result is 0, we need to make sure the dividend (left) is |
| 2279 // positive (or 0), otherwise it is a -0 case. |
| 2280 // Remainder is in 'hi'. |
| 2281 __ mfhi(scratch2); |
| 2282 __ Branch(&done, ne, scratch2, Operand(zero_reg)); |
| 2283 __ Branch(¬_smi_result, lt, scratch1, Operand(zero_reg)); |
| 2284 __ bind(&done); |
| 2285 // Check that the signed result fits in a Smi. |
| 2286 __ Addu(scratch1, scratch2, Operand(0x40000000)); |
| 2287 __ Branch(¬_smi_result, lt, scratch1, Operand(zero_reg)); |
| 2288 __ SmiTag(v0, scratch2); |
| 2289 __ Ret(); |
| 2290 } |
| 2291 break; |
| 2292 case Token::BIT_OR: |
| 2293 __ Or(v0, left, Operand(right)); |
| 2294 __ Ret(); |
| 2295 break; |
| 2296 case Token::BIT_AND: |
| 2297 __ And(v0, left, Operand(right)); |
| 2298 __ Ret(); |
| 2299 break; |
| 2300 case Token::BIT_XOR: |
| 2301 __ Xor(v0, left, Operand(right)); |
| 2302 __ Ret(); |
| 2303 break; |
| 2304 case Token::SAR: |
| 2305 // Remove tags from right operand. |
| 2306 __ GetLeastBitsFromSmi(scratch1, right, 5); |
| 2307 __ srav(scratch1, left, scratch1); |
| 2308 // Smi tag result. |
| 2309 __ And(v0, scratch1, Operand(~kSmiTagMask)); |
| 2310 __ Ret(); |
| 2311 break; |
| 2312 case Token::SHR: |
| 2313 // Remove tags from operands. We can't do this on a 31 bit number |
| 2314 // because then the 0s get shifted into bit 30 instead of bit 31. |
| 2315 __ SmiUntag(scratch1, left); |
| 2316 __ GetLeastBitsFromSmi(scratch2, right, 5); |
| 2317 __ srlv(v0, scratch1, scratch2); |
| 2318 // Unsigned shift is not allowed to produce a negative number, so |
| 2319 // check the sign bit and the sign bit after Smi tagging. |
| 2320 __ And(scratch1, v0, Operand(0xc0000000)); |
| 2321 __ Branch(¬_smi_result, ne, scratch1, Operand(zero_reg)); |
| 2322 // Smi tag result. |
| 2323 __ SmiTag(v0); |
| 2324 __ Ret(); |
| 2325 break; |
| 2326 case Token::SHL: |
| 2327 // Remove tags from operands. |
| 2328 __ SmiUntag(scratch1, left); |
| 2329 __ GetLeastBitsFromSmi(scratch2, right, 5); |
| 2330 __ sllv(scratch1, scratch1, scratch2); |
| 2331 // Check that the signed result fits in a Smi. |
| 2332 __ Addu(scratch2, scratch1, Operand(0x40000000)); |
| 2333 __ Branch(¬_smi_result, lt, scratch2, Operand(zero_reg)); |
| 2334 __ SmiTag(v0, scratch1); |
| 2335 __ Ret(); |
| 2336 break; |
| 2337 default: |
| 2338 UNREACHABLE(); |
| 2339 } |
| 2340 __ bind(¬_smi_result); |
400 } | 2341 } |
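
Several of the cases above test whether a 32-bit result can be smi-tagged by adding 0x40000000 and branching on a negative result. That works because smis hold 31-bit signed values, so the representable range is [-2^30, 2^30), and adding 2^30 keeps exactly that range non-negative. A sketch:

// Sketch of the "fits in a smi" test used by DIV, MOD, SHR and SHL above.
bool FitsInSmi(int32_t value) {
  // Equivalent to "Addu scratch, value, 0x40000000; branch if negative".
  return value >= -(1 << 30) && value < (1 << 30);
}
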
401 | 2342 |
402 | 2343 |
403 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, | 2344 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
404 bool smi_operands, | 2345 bool smi_operands, |
405 Label* not_numbers, | 2346 Label* not_numbers, |
406 Label* gc_required) { | 2347 Label* gc_required) { |
407 UNIMPLEMENTED_MIPS(); | 2348 Register left = a1; |
| 2349 Register right = a0; |
| 2350 Register scratch1 = t3; |
| 2351 Register scratch2 = t5; |
| 2352 Register scratch3 = t0; |
| 2353 |
| 2354 ASSERT(smi_operands || (not_numbers != NULL)); |
| 2355 if (smi_operands && FLAG_debug_code) { |
| 2356 __ AbortIfNotSmi(left); |
| 2357 __ AbortIfNotSmi(right); |
| 2358 } |
| 2359 |
| 2360 Register heap_number_map = t2; |
| 2361 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2362 |
| 2363 switch (op_) { |
| 2364 case Token::ADD: |
| 2365 case Token::SUB: |
| 2366 case Token::MUL: |
| 2367 case Token::DIV: |
| 2368 case Token::MOD: { |
| 2369 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3 |
| 2370 // depending on whether FPU is available or not. |
| 2371 FloatingPointHelper::Destination destination = |
| 2372 CpuFeatures::IsSupported(FPU) && |
| 2373 op_ != Token::MOD ? |
| 2374 FloatingPointHelper::kFPURegisters : |
| 2375 FloatingPointHelper::kCoreRegisters; |
| 2376 |
| 2377 // Allocate new heap number for result. |
| 2378 Register result = s0; |
| 2379 GenerateHeapResultAllocation( |
| 2380 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
| 2381 |
| 2382 // Load the operands. |
| 2383 if (smi_operands) { |
| 2384 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); |
| 2385 } else { |
| 2386 FloatingPointHelper::LoadOperands(masm, |
| 2387 destination, |
| 2388 heap_number_map, |
| 2389 scratch1, |
| 2390 scratch2, |
| 2391 not_numbers); |
| 2392 } |
| 2393 |
| 2394 // Calculate the result. |
| 2395 if (destination == FloatingPointHelper::kFPURegisters) { |
| 2396 // Using FPU registers: |
| 2397 // f12: Left value. |
| 2398 // f14: Right value. |
| 2399 CpuFeatures::Scope scope(FPU); |
| 2400 switch (op_) { |
| 2401 case Token::ADD: |
| 2402 __ add_d(f10, f12, f14); |
| 2403 break; |
| 2404 case Token::SUB: |
| 2405 __ sub_d(f10, f12, f14); |
| 2406 break; |
| 2407 case Token::MUL: |
| 2408 __ mul_d(f10, f12, f14); |
| 2409 break; |
| 2410 case Token::DIV: |
| 2411 __ div_d(f10, f12, f14); |
| 2412 break; |
| 2413 default: |
| 2414 UNREACHABLE(); |
| 2415 } |
| 2416 |
| 2417 // ARM uses a workaround here because of the unaligned HeapNumber |
| 2418 // kValueOffset. On MIPS this workaround is built into sdc1 so |
| 2419 // there's no point in generating even more instructions. |
| 2420 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 2421 __ mov(v0, result); |
| 2422 __ Ret(); |
| 2423 } else { |
| 2424 // Call the C function to handle the double operation. |
| 2425 FloatingPointHelper::CallCCodeForDoubleOperation(masm, |
| 2426 op_, |
| 2427 result, |
| 2428 scratch1); |
| 2429 if (FLAG_debug_code) { |
| 2430 __ stop("Unreachable code."); |
| 2431 } |
| 2432 } |
| 2433 break; |
| 2434 } |
| 2435 case Token::BIT_OR: |
| 2436 case Token::BIT_XOR: |
| 2437 case Token::BIT_AND: |
| 2438 case Token::SAR: |
| 2439 case Token::SHR: |
| 2440 case Token::SHL: { |
| 2441 if (smi_operands) { |
| 2442 __ SmiUntag(a3, left); |
| 2443 __ SmiUntag(a2, right); |
| 2444 } else { |
| 2445 // Convert operands to 32-bit integers. Right in a2 and left in a3. |
| 2446 FloatingPointHelper::ConvertNumberToInt32(masm, |
| 2447 left, |
| 2448 a3, |
| 2449 heap_number_map, |
| 2450 scratch1, |
| 2451 scratch2, |
| 2452 scratch3, |
| 2453 f0, |
| 2454 not_numbers); |
| 2455 FloatingPointHelper::ConvertNumberToInt32(masm, |
| 2456 right, |
| 2457 a2, |
| 2458 heap_number_map, |
| 2459 scratch1, |
| 2460 scratch2, |
| 2461 scratch3, |
| 2462 f0, |
| 2463 not_numbers); |
| 2464 } |
| 2465 Label result_not_a_smi; |
| 2466 switch (op_) { |
| 2467 case Token::BIT_OR: |
| 2468 __ Or(a2, a3, Operand(a2)); |
| 2469 break; |
| 2470 case Token::BIT_XOR: |
| 2471 __ Xor(a2, a3, Operand(a2)); |
| 2472 break; |
| 2473 case Token::BIT_AND: |
| 2474 __ And(a2, a3, Operand(a2)); |
| 2475 break; |
| 2476 case Token::SAR: |
| 2477 // Use only the 5 least significant bits of the shift count. |
| 2478 __ GetLeastBitsFromInt32(a2, a2, 5); |
| 2479 __ srav(a2, a3, a2); |
| 2480 break; |
| 2481 case Token::SHR: |
| 2482 // Use only the 5 least significant bits of the shift count. |
| 2483 __ GetLeastBitsFromInt32(a2, a2, 5); |
| 2484 __ srlv(a2, a3, a2); |
| 2485 // SHR is special because it is required to produce a positive answer. |
| 2486 // The code below for writing into heap numbers isn't capable of |
| 2487 // writing the register as an unsigned int, so we go to the slow case |
| 2488 // if we hit this case. |
| 2489 if (CpuFeatures::IsSupported(FPU)) { |
| 2490 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg)); |
| 2491 } else { |
| 2492 __ Branch(not_numbers, lt, a2, Operand(zero_reg)); |
| 2493 } |
| 2494 break; |
| 2495 case Token::SHL: |
| 2496 // Use only the 5 least significant bits of the shift count. |
| 2497 __ GetLeastBitsFromInt32(a2, a2, 5); |
| 2498 __ sllv(a2, a3, a2); |
| 2499 break; |
| 2500 default: |
| 2501 UNREACHABLE(); |
| 2502 } |
| 2503 // Check that the *signed* result fits in a smi. |
| 2504 __ Addu(a3, a2, Operand(0x40000000)); |
| 2505 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg)); |
| 2506 __ SmiTag(v0, a2); |
| 2507 __ Ret(); |
| 2508 |
| 2509 // Allocate new heap number for result. |
| 2510 __ bind(&result_not_a_smi); |
| 2511 Register result = t1; |
| 2512 if (smi_operands) { |
| 2513 __ AllocateHeapNumber( |
| 2514 result, scratch1, scratch2, heap_number_map, gc_required); |
| 2515 } else { |
| 2516 GenerateHeapResultAllocation( |
| 2517 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
| 2518 } |
| 2519 |
| 2520 // a2: Answer as signed int32. |
| 2521 // t1: Heap number to write answer into. |
| 2522 |
| 2523 // Nothing can go wrong now, so move the heap number to v0, which is the |
| 2524 // result. |
| 2525 __ mov(v0, t1); |
| 2526 |
| 2527 if (CpuFeatures::IsSupported(FPU)) { |
| 2528 // Convert the int32 in a2 to the heap number in v0. As |
| 2529 // mentioned above SHR needs to always produce a positive result. |
| 2530 CpuFeatures::Scope scope(FPU); |
| 2531 __ mtc1(a2, f0); |
| 2532 if (op_ == Token::SHR) { |
| 2533 __ Cvt_d_uw(f0, f0); |
| 2534 } else { |
| 2535 __ cvt_d_w(f0, f0); |
| 2536 } |
| 2537 // ARM uses a workaround here because of the unaligned HeapNumber |
| 2538 // kValueOffset. On MIPS this workaround is built into sdc1 so |
| 2539 // there's no point in generating even more instructions. |
| 2540 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 2541 __ Ret(); |
| 2542 } else { |
| 2543 // Tail call that writes the int32 in a2 to the heap number in v0, using |
| 2544 // a3 and a0 as scratch. v0 is preserved and returned. |
| 2545 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0); |
| 2546 __ TailCallStub(&stub); |
| 2547 } |
| 2548 break; |
| 2549 } |
| 2550 default: |
| 2551 UNREACHABLE(); |
| 2552 } |
408 } | 2553 } |
409 | 2554 |
410 | 2555 |
411 // Generate the smi code. If the operation on smis is successful this return is | 2556 // Generate the smi code. If the operation on smis is successful this return is |
412 // generated. If the result is not a smi and heap number allocation is not | 2557 // generated. If the result is not a smi and heap number allocation is not |
413 // requested the code falls through. If number allocation is requested but a | 2558 // requested the code falls through. If number allocation is requested but a |
414 // heap number cannot be allocated the code jumps to the label gc_required. | 2559 // heap number cannot be allocated the code jumps to the label gc_required. |
415 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 2560 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
416 Label* use_runtime, | 2561 Label* use_runtime, |
417 Label* gc_required, | 2562 Label* gc_required, |
418 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2563 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
419 UNIMPLEMENTED_MIPS(); | 2564 Label not_smis; |
| 2565 |
| 2566 Register left = a1; |
| 2567 Register right = a0; |
| 2568 Register scratch1 = t3; |
| 2569 Register scratch2 = t5; |
| 2570 |
| 2571 // Perform combined smi check on both operands. |
| 2572 __ Or(scratch1, left, Operand(right)); |
| 2573 STATIC_ASSERT(kSmiTag == 0); |
| 2574 __ JumpIfNotSmi(scratch1, ¬_smis); |
| 2575 |
| 2576 // If the smi-smi operation results in a smi return is generated. |
| 2577 GenerateSmiSmiOperation(masm); |
| 2578 |
| 2579 // If heap number results are possible generate the result in an allocated |
| 2580 // heap number. |
| 2581 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
| 2582 GenerateFPOperation(masm, true, use_runtime, gc_required); |
| 2583 } |
| 2584 __ bind(¬_smis); |
420 } | 2585 } |
421 | 2586 |
422 | 2587 |
423 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2588 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
424 UNIMPLEMENTED_MIPS(); | 2589 Label not_smis, call_runtime; |
| 2590 |
| 2591 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || |
| 2592 result_type_ == TRBinaryOpIC::SMI) { |
| 2593 // Only allow smi results. |
| 2594 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); |
| 2595 } else { |
| 2596 // Allow heap number result and don't make a transition if a heap number |
| 2597 // cannot be allocated. |
| 2598 GenerateSmiCode(masm, |
| 2599 &call_runtime, |
| 2600 &call_runtime, |
| 2601 ALLOW_HEAPNUMBER_RESULTS); |
| 2602 } |
| 2603 |
| 2604 // Code falls through if the result is not returned as either a smi or heap |
| 2605 // number. |
| 2606 GenerateTypeTransition(masm); |
| 2607 |
| 2608 __ bind(&call_runtime); |
| 2609 GenerateCallRuntime(masm); |
425 } | 2610 } |
426 | 2611 |
427 | 2612 |
428 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { | 2613 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { |
429 UNIMPLEMENTED_MIPS(); | 2614 ASSERT(operands_type_ == TRBinaryOpIC::STRING); |
| 2615 // Try to add arguments as strings, otherwise, transition to the generic |
| 2616 // TRBinaryOpIC type. |
| 2617 GenerateAddStrings(masm); |
| 2618 GenerateTypeTransition(masm); |
| 2619 } |
| 2620 |
| 2621 |
| 2622 void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
| 2623 Label call_runtime; |
| 2624 ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING); |
| 2625 ASSERT(op_ == Token::ADD); |
| 2626 // If both arguments are strings, call the string add stub. |
| 2627 // Otherwise, do a transition. |
| 2628 |
| 2629 // Registers containing left and right operands respectively. |
| 2630 Register left = a1; |
| 2631 Register right = a0; |
| 2632 |
| 2633 // Test if left operand is a string. |
| 2634 __ JumpIfSmi(left, &call_runtime); |
| 2635 __ GetObjectType(left, a2, a2); |
| 2636 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); |
| 2637 |
| 2638 // Test if right operand is a string. |
| 2639 __ JumpIfSmi(right, &call_runtime); |
| 2640 __ GetObjectType(right, a2, a2); |
| 2641 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); |
| 2642 |
| 2643 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 2644 GenerateRegisterArgsPush(masm); |
| 2645 __ TailCallStub(&string_add_stub); |
| 2646 |
| 2647 __ bind(&call_runtime); |
| 2648 GenerateTypeTransition(masm); |
430 } | 2649 } |
431 | 2650 |
432 | 2651 |
433 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 2652 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
434 UNIMPLEMENTED_MIPS(); | 2653 ASSERT(operands_type_ == TRBinaryOpIC::INT32); |
| 2654 |
| 2655 Register left = a1; |
| 2656 Register right = a0; |
| 2657 Register scratch1 = t3; |
| 2658 Register scratch2 = t5; |
| 2659 FPURegister double_scratch = f0; |
| 2660 FPURegister single_scratch = f6; |
| 2661 |
| 2662 Register heap_number_result = no_reg; |
| 2663 Register heap_number_map = t2; |
| 2664 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2665 |
| 2666 Label call_runtime; |
| 2667 // Labels for type transition, used for wrong input or output types. |
| 2668 // Both labels are currently bound to the same position. We use two |
| 2669 // different labels to distinguish the cause leading to the type transition. |
| 2670 Label transition; |
| 2671 |
| 2672 // Smi-smi fast case. |
| 2673 Label skip; |
| 2674 __ Or(scratch1, left, right); |
| 2675 __ JumpIfNotSmi(scratch1, &skip); |
| 2676 GenerateSmiSmiOperation(masm); |
| 2677 // Fall through if the result is not a smi. |
| 2678 __ bind(&skip); |
| 2679 |
| 2680 switch (op_) { |
| 2681 case Token::ADD: |
| 2682 case Token::SUB: |
| 2683 case Token::MUL: |
| 2684 case Token::DIV: |
| 2685 case Token::MOD: { |
| 2686 // Load both operands and check that they are 32-bit integers. |
| 2687 // Jump to type transition if they are not. The registers a0 and a1 (right |
| 2688 // and left) are preserved for the runtime call. |
| 2689 FloatingPointHelper::Destination destination = |
| 2690 CpuFeatures::IsSupported(FPU) && |
| 2691 op_ != Token::MOD ? |
| 2692 FloatingPointHelper::kFPURegisters : |
| 2693 FloatingPointHelper::kCoreRegisters; |
| 2694 |
| 2695 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
| 2696 right, |
| 2697 destination, |
| 2698 f14, |
| 2699 a2, |
| 2700 a3, |
| 2701 heap_number_map, |
| 2702 scratch1, |
| 2703 scratch2, |
| 2704 f2, |
| 2705 &transition); |
| 2706 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
| 2707 left, |
| 2708 destination, |
| 2709 f12, |
| 2710 t0, |
| 2711 t1, |
| 2712 heap_number_map, |
| 2713 scratch1, |
| 2714 scratch2, |
| 2715 f2, |
| 2716 &transition); |
| 2717 |
| 2718 if (destination == FloatingPointHelper::kFPURegisters) { |
| 2719 CpuFeatures::Scope scope(FPU); |
| 2720 Label return_heap_number; |
| 2721 switch (op_) { |
| 2722 case Token::ADD: |
| 2723 __ add_d(f10, f12, f14); |
| 2724 break; |
| 2725 case Token::SUB: |
| 2726 __ sub_d(f10, f12, f14); |
| 2727 break; |
| 2728 case Token::MUL: |
| 2729 __ mul_d(f10, f12, f14); |
| 2730 break; |
| 2731 case Token::DIV: |
| 2732 __ div_d(f10, f12, f14); |
| 2733 break; |
| 2734 default: |
| 2735 UNREACHABLE(); |
| 2736 } |
| 2737 |
| 2738 if (op_ != Token::DIV) { |
| 2739 // These operations produce an integer result. |
| 2740 // Try to return a smi if we can. |
| 2741 // Otherwise return a heap number if allowed, or jump to type |
| 2742 // transition. |
| 2743 |
| 2744 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). |
 | 2745 // On MIPS much of that cannot be implemented the same way, so for now |
 | 2746 // it is simpler to do the truncation and the flag checks manually. |
| 2747 |
| 2748 // Save FCSR. |
| 2749 __ cfc1(scratch1, FCSR); |
| 2750 // Disable FPU exceptions. |
| 2751 __ ctc1(zero_reg, FCSR); |
| 2752 __ trunc_w_d(single_scratch, f10); |
| 2753 // Retrieve FCSR. |
| 2754 __ cfc1(scratch2, FCSR); |
| 2755 // Restore FCSR. |
| 2756 __ ctc1(scratch1, FCSR); |
| 2757 |
| 2758 // Check for inexact conversion. |
| 2759 __ srl(scratch2, scratch2, kFCSRFlagShift); |
| 2760 __ And(scratch2, scratch2, kFCSRFlagMask); |
| 2761 |
| 2762 if (result_type_ <= TRBinaryOpIC::INT32) { |
| 2763 // If scratch2 != 0, result does not fit in a 32-bit integer. |
| 2764 __ Branch(&transition, ne, scratch2, Operand(zero_reg)); |
| 2765 } |
| 2766 |
| 2767 // Check if the result fits in a smi. |
| 2768 __ mfc1(scratch1, single_scratch); |
| 2769 __ Addu(scratch2, scratch1, Operand(0x40000000)); |
 | 2770 // If not, try to return a heap number. |
| 2771 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); |
| 2772 // Check for minus zero. Return heap number for minus zero. |
| 2773 Label not_zero; |
 | 2774 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg)); |
| 2775 __ mfc1(scratch2, f11); |
| 2776 __ And(scratch2, scratch2, HeapNumber::kSignMask); |
| 2777 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg)); |
 | 2778 __ bind(&not_zero); |
| 2779 |
| 2780 // Tag the result and return. |
| 2781 __ SmiTag(v0, scratch1); |
| 2782 __ Ret(); |
| 2783 } else { |
| 2784 // DIV just falls through to allocating a heap number. |
| 2785 } |
| 2786 |
 | 2787 if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER |
 | 2788 : TRBinaryOpIC::INT32)) { |
| 2789 __ bind(&return_heap_number); |
| 2790 // We are using FPU registers so s0 is available. |
| 2791 heap_number_result = s0; |
| 2792 GenerateHeapResultAllocation(masm, |
| 2793 heap_number_result, |
| 2794 heap_number_map, |
| 2795 scratch1, |
| 2796 scratch2, |
| 2797 &call_runtime); |
| 2798 __ mov(v0, heap_number_result); |
| 2799 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 2800 __ Ret(); |
| 2801 } |
| 2802 |
| 2803 // A DIV operation expecting an integer result falls through |
| 2804 // to type transition. |
| 2805 |
| 2806 } else { |
| 2807 // We preserved a0 and a1 to be able to call runtime. |
| 2808 // Save the left value on the stack. |
| 2809 __ Push(t1, t0); |
| 2810 |
| 2811 Label pop_and_call_runtime; |
| 2812 |
| 2813 // Allocate a heap number to store the result. |
| 2814 heap_number_result = s0; |
| 2815 GenerateHeapResultAllocation(masm, |
| 2816 heap_number_result, |
| 2817 heap_number_map, |
| 2818 scratch1, |
| 2819 scratch2, |
| 2820 &pop_and_call_runtime); |
| 2821 |
| 2822 // Load the left value from the value saved on the stack. |
| 2823 __ Pop(a1, a0); |
| 2824 |
| 2825 // Call the C function to handle the double operation. |
| 2826 FloatingPointHelper::CallCCodeForDoubleOperation( |
| 2827 masm, op_, heap_number_result, scratch1); |
| 2828 if (FLAG_debug_code) { |
| 2829 __ stop("Unreachable code."); |
| 2830 } |
| 2831 |
| 2832 __ bind(&pop_and_call_runtime); |
| 2833 __ Drop(2); |
| 2834 __ Branch(&call_runtime); |
| 2835 } |
| 2836 |
| 2837 break; |
| 2838 } |
| 2839 |
| 2840 case Token::BIT_OR: |
| 2841 case Token::BIT_XOR: |
| 2842 case Token::BIT_AND: |
| 2843 case Token::SAR: |
| 2844 case Token::SHR: |
| 2845 case Token::SHL: { |
| 2846 Label return_heap_number; |
| 2847 Register scratch3 = t1; |
| 2848 // Convert operands to 32-bit integers. Right in a2 and left in a3. The |
| 2849 // registers a0 and a1 (right and left) are preserved for the runtime |
| 2850 // call. |
| 2851 FloatingPointHelper::LoadNumberAsInt32(masm, |
| 2852 left, |
| 2853 a3, |
| 2854 heap_number_map, |
| 2855 scratch1, |
| 2856 scratch2, |
| 2857 scratch3, |
| 2858 f0, |
| 2859 &transition); |
| 2860 FloatingPointHelper::LoadNumberAsInt32(masm, |
| 2861 right, |
| 2862 a2, |
| 2863 heap_number_map, |
| 2864 scratch1, |
| 2865 scratch2, |
| 2866 scratch3, |
| 2867 f0, |
| 2868 &transition); |
| 2869 |
| 2870 // The ECMA-262 standard specifies that, for shift operations, only the |
| 2871 // 5 least significant bits of the shift value should be used. |
| 2872 switch (op_) { |
| 2873 case Token::BIT_OR: |
| 2874 __ Or(a2, a3, Operand(a2)); |
| 2875 break; |
| 2876 case Token::BIT_XOR: |
| 2877 __ Xor(a2, a3, Operand(a2)); |
| 2878 break; |
| 2879 case Token::BIT_AND: |
| 2880 __ And(a2, a3, Operand(a2)); |
| 2881 break; |
| 2882 case Token::SAR: |
| 2883 __ And(a2, a2, Operand(0x1f)); |
| 2884 __ srav(a2, a3, a2); |
| 2885 break; |
| 2886 case Token::SHR: |
| 2887 __ And(a2, a2, Operand(0x1f)); |
| 2888 __ srlv(a2, a3, a2); |
 | 2889 // SHR is special because it is required to produce a positive answer. |
 | 2890 // We only get a negative-looking result if the shift amount (a2) was 0 |
 | 2891 // and the high bit of the value was set. Such a result cannot be |
 | 2892 // represented as a signed 32-bit integer, so try to return a heap number. |
 | 2893 // The non-FPU code does not support this special case, so jump to the |
 | 2894 // runtime if FPU is not available. |
| 2895 if (CpuFeatures::IsSupported(FPU)) { |
| 2896 __ Branch((result_type_ <= TRBinaryOpIC::INT32) |
| 2897 ? &transition |
| 2898 : &return_heap_number, |
| 2899 lt, |
| 2900 a2, |
| 2901 Operand(zero_reg)); |
| 2902 } else { |
| 2903 __ Branch((result_type_ <= TRBinaryOpIC::INT32) |
| 2904 ? &transition |
| 2905 : &call_runtime, |
| 2906 lt, |
| 2907 a2, |
| 2908 Operand(zero_reg)); |
| 2909 } |
| 2910 break; |
| 2911 case Token::SHL: |
| 2912 __ And(a2, a2, Operand(0x1f)); |
| 2913 __ sllv(a2, a3, a2); |
| 2914 break; |
| 2915 default: |
| 2916 UNREACHABLE(); |
| 2917 } |
| 2918 |
| 2919 // Check if the result fits in a smi. |
| 2920 __ Addu(scratch1, a2, Operand(0x40000000)); |
 | 2921 // If not, try to return a heap number. (We know the result is an int32.) |
| 2922 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); |
| 2923 // Tag the result and return. |
| 2924 __ SmiTag(v0, a2); |
| 2925 __ Ret(); |
| 2926 |
| 2927 __ bind(&return_heap_number); |
| 2928 heap_number_result = t1; |
| 2929 GenerateHeapResultAllocation(masm, |
| 2930 heap_number_result, |
| 2931 heap_number_map, |
| 2932 scratch1, |
| 2933 scratch2, |
| 2934 &call_runtime); |
| 2935 |
| 2936 if (CpuFeatures::IsSupported(FPU)) { |
| 2937 CpuFeatures::Scope scope(FPU); |
| 2938 |
| 2939 if (op_ != Token::SHR) { |
| 2940 // Convert the result to a floating point value. |
| 2941 __ mtc1(a2, double_scratch); |
| 2942 __ cvt_d_w(double_scratch, double_scratch); |
| 2943 } else { |
| 2944 // The result must be interpreted as an unsigned 32-bit integer. |
| 2945 __ mtc1(a2, double_scratch); |
| 2946 __ Cvt_d_uw(double_scratch, double_scratch); |
| 2947 } |
| 2948 |
| 2949 // Store the result. |
| 2950 __ mov(v0, heap_number_result); |
| 2951 __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 2952 __ Ret(); |
| 2953 } else { |
| 2954 // Tail call that writes the int32 in a2 to the heap number in v0, using |
| 2955 // a3 and a1 as scratch. v0 is preserved and returned. |
 | 2956 __ mov(v0, t1); |
| 2957 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1); |
| 2958 __ TailCallStub(&stub); |
| 2959 } |
| 2960 |
| 2961 break; |
| 2962 } |
| 2963 |
| 2964 default: |
| 2965 UNREACHABLE(); |
| 2966 } |
| 2967 |
| 2968 if (transition.is_linked()) { |
| 2969 __ bind(&transition); |
| 2970 GenerateTypeTransition(masm); |
| 2971 } |
| 2972 |
| 2973 __ bind(&call_runtime); |
| 2974 GenerateCallRuntime(masm); |
| 2975 } |
| 2976 |
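The smi range test above (the Addu with 0x40000000 followed by a sign check) and the minus-zero test are compact but easy to misread. A minimal, self-contained C++ sketch of the same arithmetic follows; the function names are hypothetical and chosen only for illustration.

    #include <cstdint>
    #include <cstring>

    // True iff value lies in the 31-bit smi range [-2^30, 2^30 - 1]. Adding
    // 0x40000000 wraps into the negative range exactly when the value does
    // not fit, which is what the Addu/Branch pair in the stub tests.
    bool FitsInSmi(int32_t value) {
      uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
      return static_cast<int32_t>(biased) >= 0;
    }

    // -0.0 truncates to 0 but must be boxed as a heap number to be preserved;
    // the stub detects it by checking the sign bit of the double's high word
    // when the truncated integer is zero.
    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return d == 0.0 && (bits >> 63) != 0;
    }

For example, FitsInSmi(0x3FFFFFFF) is true while FitsInSmi(0x40000000) is false, matching the branch to return_heap_number.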
| 2977 |
| 2978 void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
| 2979 Label call_runtime; |
| 2980 |
| 2981 if (op_ == Token::ADD) { |
| 2982 // Handle string addition here, because it is the only operation |
| 2983 // that does not do a ToNumber conversion on the operands. |
| 2984 GenerateAddStrings(masm); |
| 2985 } |
| 2986 |
| 2987 // Convert oddball arguments to numbers. |
| 2988 Label check, done; |
| 2989 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); |
| 2990 __ Branch(&check, ne, a1, Operand(t0)); |
| 2991 if (Token::IsBitOp(op_)) { |
| 2992 __ li(a1, Operand(Smi::FromInt(0))); |
| 2993 } else { |
| 2994 __ LoadRoot(a1, Heap::kNanValueRootIndex); |
| 2995 } |
| 2996 __ jmp(&done); |
| 2997 __ bind(&check); |
| 2998 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); |
| 2999 __ Branch(&done, ne, a0, Operand(t0)); |
| 3000 if (Token::IsBitOp(op_)) { |
| 3001 __ li(a0, Operand(Smi::FromInt(0))); |
| 3002 } else { |
| 3003 __ LoadRoot(a0, Heap::kNanValueRootIndex); |
| 3004 } |
| 3005 __ bind(&done); |
| 3006 |
| 3007 GenerateHeapNumberStub(masm); |
435 } | 3008 } |
436 | 3009 |
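The substitution above follows the ToNumber semantics for undefined: arithmetic operators see NaN, while the bitwise and shift operators see 0 (since ToInt32(NaN) is 0), so undefined + 1 is NaN but undefined | 1 is 1. A one-line illustrative sketch with a hypothetical helper name:

    #include <cmath>

    // Value substituted for an undefined operand before the heap-number path.
    double UndefinedOperandValue(bool is_bit_op) {
      return is_bit_op ? 0.0 : std::nan("");
    }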
437 | 3010 |
438 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 3011 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
439 UNIMPLEMENTED_MIPS(); | 3012 Label call_runtime; |
| 3013 GenerateFPOperation(masm, false, &call_runtime, &call_runtime); |
| 3014 |
| 3015 __ bind(&call_runtime); |
| 3016 GenerateCallRuntime(masm); |
440 } | 3017 } |
441 | 3018 |
442 | 3019 |
443 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 3020 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
444 UNIMPLEMENTED_MIPS(); | 3021 Label call_runtime, call_string_add_or_runtime; |
| 3022 |
| 3023 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
| 3024 |
| 3025 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); |
| 3026 |
| 3027 __ bind(&call_string_add_or_runtime); |
| 3028 if (op_ == Token::ADD) { |
| 3029 GenerateAddStrings(masm); |
| 3030 } |
| 3031 |
| 3032 __ bind(&call_runtime); |
| 3033 GenerateCallRuntime(masm); |
445 } | 3034 } |
446 | 3035 |
447 | 3036 |
448 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { | 3037 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
449 UNIMPLEMENTED_MIPS(); | 3038 ASSERT(op_ == Token::ADD); |
| 3039 Label left_not_string, call_runtime; |
| 3040 |
| 3041 Register left = a1; |
| 3042 Register right = a0; |
| 3043 |
| 3044 // Check if left argument is a string. |
| 3045 __ JumpIfSmi(left, &left_not_string); |
| 3046 __ GetObjectType(left, a2, a2); |
| 3047 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE)); |
| 3048 |
| 3049 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); |
| 3050 GenerateRegisterArgsPush(masm); |
| 3051 __ TailCallStub(&string_add_left_stub); |
| 3052 |
| 3053 // Left operand is not a string, test right. |
| 3054 __ bind(&left_not_string); |
| 3055 __ JumpIfSmi(right, &call_runtime); |
| 3056 __ GetObjectType(right, a2, a2); |
| 3057 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); |
| 3058 |
| 3059 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); |
| 3060 GenerateRegisterArgsPush(masm); |
| 3061 __ TailCallStub(&string_add_right_stub); |
| 3062 |
| 3063 // At least one argument is not a string. |
| 3064 __ bind(&call_runtime); |
450 } | 3065 } |
451 | 3066 |
452 | 3067 |
453 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { | 3068 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { |
454 UNIMPLEMENTED_MIPS(); | 3069 GenerateRegisterArgsPush(masm); |
455 } | 3070 switch (op_) { |
456 | 3071 case Token::ADD: |
457 | 3072 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
| 3073 break; |
| 3074 case Token::SUB: |
| 3075 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
| 3076 break; |
| 3077 case Token::MUL: |
| 3078 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
| 3079 break; |
| 3080 case Token::DIV: |
| 3081 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
| 3082 break; |
| 3083 case Token::MOD: |
| 3084 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
| 3085 break; |
| 3086 case Token::BIT_OR: |
| 3087 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
| 3088 break; |
| 3089 case Token::BIT_AND: |
| 3090 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
| 3091 break; |
| 3092 case Token::BIT_XOR: |
| 3093 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
| 3094 break; |
| 3095 case Token::SAR: |
| 3096 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
| 3097 break; |
| 3098 case Token::SHR: |
| 3099 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
| 3100 break; |
| 3101 case Token::SHL: |
| 3102 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
| 3103 break; |
| 3104 default: |
| 3105 UNREACHABLE(); |
| 3106 } |
| 3107 } |
| 3108 |
| 3109 |
458 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( | 3110 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( |
459 MacroAssembler* masm, | 3111 MacroAssembler* masm, |
460 Register result, | 3112 Register result, |
461 Register heap_number_map, | 3113 Register heap_number_map, |
462 Register scratch1, | 3114 Register scratch1, |
463 Register scratch2, | 3115 Register scratch2, |
464 Label* gc_required) { | 3116 Label* gc_required) { |
465 UNIMPLEMENTED_MIPS(); | 3117 |
| 3118 // Code below will scratch result if allocation fails. To keep both arguments |
| 3119 // intact for the runtime call result cannot be one of these. |
| 3120 ASSERT(!result.is(a0) && !result.is(a1)); |
| 3121 |
| 3122 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { |
| 3123 Label skip_allocation, allocated; |
| 3124 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0; |
| 3125 // If the overwritable operand is already an object, we skip the |
| 3126 // allocation of a heap number. |
| 3127 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); |
| 3128 // Allocate a heap number for the result. |
| 3129 __ AllocateHeapNumber( |
| 3130 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3131 __ Branch(&allocated); |
| 3132 __ bind(&skip_allocation); |
| 3133 // Use object holding the overwritable operand for result. |
| 3134 __ mov(result, overwritable_operand); |
| 3135 __ bind(&allocated); |
| 3136 } else { |
| 3137 ASSERT(mode_ == NO_OVERWRITE); |
| 3138 __ AllocateHeapNumber( |
| 3139 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3140 } |
466 } | 3141 } |
467 | 3142 |
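A self-contained sketch of the reuse policy implemented above; the types here are illustrative stand-ins rather than V8's:

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
    struct Value { bool is_smi; };  // Stand-in: smi vs. heap object.

    // The result may reuse the overwritable operand, but only when that
    // operand is already a heap object; a smi operand (or NO_OVERWRITE)
    // forces a fresh allocation, which is what can hit the gc_required path.
    bool CanReuseOperand(OverwriteMode mode, Value left, Value right) {
      if (mode == OVERWRITE_LEFT) return !left.is_smi;
      if (mode == OVERWRITE_RIGHT) return !right.is_smi;
      return false;
    }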
468 | 3143 |
469 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 3144 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
470 UNIMPLEMENTED_MIPS(); | 3145 __ Push(a1, a0); |
471 } | 3146 } |
472 | 3147 |
473 | 3148 |
474 | 3149 |
475 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 3150 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
476 UNIMPLEMENTED_MIPS(); | 3151 // Untagged case: double input in f4, double result goes |
| 3152 // into f4. |
| 3153 // Tagged case: tagged input on top of stack and in a0, |
| 3154 // tagged result (heap number) goes into v0. |
| 3155 |
| 3156 Label input_not_smi; |
| 3157 Label loaded; |
| 3158 Label calculate; |
| 3159 Label invalid_cache; |
| 3160 const Register scratch0 = t5; |
| 3161 const Register scratch1 = t3; |
| 3162 const Register cache_entry = a0; |
| 3163 const bool tagged = (argument_type_ == TAGGED); |
| 3164 |
| 3165 if (CpuFeatures::IsSupported(FPU)) { |
| 3166 CpuFeatures::Scope scope(FPU); |
| 3167 |
| 3168 if (tagged) { |
| 3169 // Argument is a number and is on stack and in a0. |
| 3170 // Load argument and check if it is a smi. |
| 3171 __ JumpIfNotSmi(a0, &input_not_smi); |
| 3172 |
| 3173 // Input is a smi. Convert to double and load the low and high words |
| 3174 // of the double into a2, a3. |
| 3175 __ sra(t0, a0, kSmiTagSize); |
| 3176 __ mtc1(t0, f4); |
| 3177 __ cvt_d_w(f4, f4); |
| 3178 __ mfc1(a2, f4); |
| 3179 __ mfc1(a3, f5); |
| 3180 __ Branch(&loaded); |
| 3181 |
| 3182 __ bind(&input_not_smi); |
| 3183 // Check if input is a HeapNumber. |
| 3184 __ CheckMap(a0, |
| 3185 a1, |
| 3186 Heap::kHeapNumberMapRootIndex, |
| 3187 &calculate, |
| 3188 true); |
| 3189 // Input is a HeapNumber. Store the |
| 3190 // low and high words into a2, a3. |
| 3191 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset)); |
| 3192 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4)); |
| 3193 } else { |
| 3194 // Input is untagged double in f4. Output goes to f4. |
| 3195 __ mfc1(a2, f4); |
| 3196 __ mfc1(a3, f5); |
| 3197 } |
| 3198 __ bind(&loaded); |
| 3199 // a2 = low 32 bits of double value. |
| 3200 // a3 = high 32 bits of double value. |
| 3201 // Compute hash (the shifts are arithmetic): |
| 3202 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
| 3203 __ Xor(a1, a2, a3); |
| 3204 __ sra(t0, a1, 16); |
| 3205 __ Xor(a1, a1, t0); |
| 3206 __ sra(t0, a1, 8); |
| 3207 __ Xor(a1, a1, t0); |
| 3208 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
| 3209 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); |
| 3210 |
| 3211 // a2 = low 32 bits of double value. |
| 3212 // a3 = high 32 bits of double value. |
| 3213 // a1 = TranscendentalCache::hash(double value). |
| 3214 __ li(cache_entry, Operand( |
| 3215 ExternalReference::transcendental_cache_array_address( |
| 3216 masm->isolate()))); |
| 3217 // a0 points to cache array. |
| 3218 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof( |
| 3219 Isolate::Current()->transcendental_cache()->caches_[0]))); |
| 3220 // a0 points to the cache for the type type_. |
| 3221 // If NULL, the cache hasn't been initialized yet, so go through runtime. |
| 3222 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg)); |
| 3223 |
| 3224 #ifdef DEBUG |
 | 3225 // Check that the layout of cache elements matches expectations. |
| 3226 { TranscendentalCache::SubCache::Element test_elem[2]; |
| 3227 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); |
| 3228 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); |
| 3229 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); |
| 3230 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
| 3231 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
 | 3232 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t values and a pointer. |
| 3233 CHECK_EQ(0, elem_in0 - elem_start); |
| 3234 CHECK_EQ(kIntSize, elem_in1 - elem_start); |
| 3235 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
| 3236 } |
| 3237 #endif |
| 3238 |
 | 3239 // Find the address of entry a1 in the cache, i.e., &a0[a1 * 12]. |
| 3240 __ sll(t0, a1, 1); |
| 3241 __ Addu(a1, a1, t0); |
| 3242 __ sll(t0, a1, 2); |
| 3243 __ Addu(cache_entry, cache_entry, t0); |
| 3244 |
| 3245 // Check if cache matches: Double value is stored in uint32_t[2] array. |
| 3246 __ lw(t0, MemOperand(cache_entry, 0)); |
| 3247 __ lw(t1, MemOperand(cache_entry, 4)); |
| 3248 __ lw(t2, MemOperand(cache_entry, 8)); |
| 3249 __ Addu(cache_entry, cache_entry, 12); |
| 3250 __ Branch(&calculate, ne, a2, Operand(t0)); |
| 3251 __ Branch(&calculate, ne, a3, Operand(t1)); |
| 3252 // Cache hit. Load result, cleanup and return. |
| 3253 if (tagged) { |
| 3254 // Pop input value from stack and load result into v0. |
| 3255 __ Drop(1); |
| 3256 __ mov(v0, t2); |
| 3257 } else { |
| 3258 // Load result into f4. |
| 3259 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); |
| 3260 } |
| 3261 __ Ret(); |
| 3262 } // if (CpuFeatures::IsSupported(FPU)) |
| 3263 |
| 3264 __ bind(&calculate); |
| 3265 if (tagged) { |
| 3266 __ bind(&invalid_cache); |
| 3267 __ TailCallExternalReference(ExternalReference(RuntimeFunction(), |
| 3268 masm->isolate()), |
| 3269 1, |
| 3270 1); |
| 3271 } else { |
| 3272 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE(); |
| 3273 CpuFeatures::Scope scope(FPU); |
| 3274 |
| 3275 Label no_update; |
| 3276 Label skip_cache; |
| 3277 const Register heap_number_map = t2; |
| 3278 |
| 3279 // Call C function to calculate the result and update the cache. |
| 3280 // Register a0 holds precalculated cache entry address; preserve |
| 3281 // it on the stack and pop it into register cache_entry after the |
| 3282 // call. |
| 3283 __ push(cache_entry); |
| 3284 GenerateCallCFunction(masm, scratch0); |
| 3285 __ GetCFunctionDoubleResult(f4); |
| 3286 |
| 3287 // Try to update the cache. If we cannot allocate a |
| 3288 // heap number, we return the result without updating. |
| 3289 __ pop(cache_entry); |
| 3290 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); |
| 3291 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); |
| 3292 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); |
| 3293 |
| 3294 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize)); |
| 3295 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize)); |
| 3296 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize)); |
| 3297 |
| 3298 __ mov(v0, cache_entry); |
| 3299 __ Ret(); |
| 3300 |
| 3301 __ bind(&invalid_cache); |
| 3302 // The cache is invalid. Call runtime which will recreate the |
| 3303 // cache. |
| 3304 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); |
| 3305 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache); |
| 3306 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset)); |
| 3307 __ EnterInternalFrame(); |
| 3308 __ push(a0); |
| 3309 __ CallRuntime(RuntimeFunction(), 1); |
| 3310 __ LeaveInternalFrame(); |
| 3311 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
| 3312 __ Ret(); |
| 3313 |
| 3314 __ bind(&skip_cache); |
| 3315 // Call C function to calculate the result and answer directly |
| 3316 // without updating the cache. |
| 3317 GenerateCallCFunction(masm, scratch0); |
| 3318 __ GetCFunctionDoubleResult(f4); |
| 3319 __ bind(&no_update); |
| 3320 |
| 3321 // We return the value in f4 without adding it to the cache, but |
| 3322 // we cause a scavenging GC so that future allocations will succeed. |
| 3323 __ EnterInternalFrame(); |
| 3324 |
| 3325 // Allocate an aligned object larger than a HeapNumber. |
| 3326 ASSERT(4 * kPointerSize >= HeapNumber::kSize); |
| 3327 __ li(scratch0, Operand(4 * kPointerSize)); |
| 3328 __ push(scratch0); |
| 3329 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); |
| 3330 __ LeaveInternalFrame(); |
| 3331 __ Ret(); |
| 3332 } |
| 3333 } |
| 3334 |
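The probe sequence above is easier to follow next to a plain C++ rendering of the cache layout and hash. The element layout matches the DEBUG check (12 bytes per entry on 32-bit targets); kCacheSize and the names here are assumptions made only for this sketch:

    #include <cstdint>
    #include <cstring>

    struct Element { uint32_t in[2]; void* output; };  // 12 bytes on 32-bit.
    const int kCacheSize = 512;  // Assumed power of two, as the ASSERT requires.

    int CacheIndex(double input) {
      uint32_t words[2];
      std::memcpy(words, &input, sizeof(words));  // Low word, high word.
      uint32_t h = words[0] ^ words[1];
      h ^= h >> 16;
      h ^= h >> 8;
      // Only the masked low bits matter, so arithmetic vs. logical shifts give
      // the same index. The stub then forms &cache[h] as ((h + 2 * h) << 2),
      // i.e. h * 12.
      return static_cast<int>(h & (kCacheSize - 1));
    }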
| 3335 |
| 3336 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
| 3337 Register scratch) { |
| 3338 __ push(ra); |
| 3339 __ PrepareCallCFunction(2, scratch); |
| 3340 __ mfc1(v0, f4); |
| 3341 __ mfc1(v1, f5); |
| 3342 switch (type_) { |
| 3343 case TranscendentalCache::SIN: |
| 3344 __ CallCFunction( |
| 3345 ExternalReference::math_sin_double_function(masm->isolate()), 2); |
| 3346 break; |
| 3347 case TranscendentalCache::COS: |
| 3348 __ CallCFunction( |
| 3349 ExternalReference::math_cos_double_function(masm->isolate()), 2); |
| 3350 break; |
| 3351 case TranscendentalCache::LOG: |
| 3352 __ CallCFunction( |
| 3353 ExternalReference::math_log_double_function(masm->isolate()), 2); |
| 3354 break; |
| 3355 default: |
| 3356 UNIMPLEMENTED(); |
| 3357 break; |
| 3358 } |
| 3359 __ pop(ra); |
477 } | 3360 } |
478 | 3361 |
479 | 3362 |
480 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | 3363 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { |
481 UNIMPLEMENTED_MIPS(); | 3364 switch (type_) { |
482 return Runtime::kAbort; | 3365 // Add more cases when necessary. |
| 3366 case TranscendentalCache::SIN: return Runtime::kMath_sin; |
| 3367 case TranscendentalCache::COS: return Runtime::kMath_cos; |
| 3368 case TranscendentalCache::LOG: return Runtime::kMath_log; |
| 3369 default: |
| 3370 UNIMPLEMENTED(); |
| 3371 return Runtime::kAbort; |
| 3372 } |
483 } | 3373 } |
484 | 3374 |
485 | 3375 |
486 void StackCheckStub::Generate(MacroAssembler* masm) { | 3376 void StackCheckStub::Generate(MacroAssembler* masm) { |
487 UNIMPLEMENTED_MIPS(); | 3377 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
488 } | 3378 } |
489 | 3379 |
490 | 3380 |
491 void MathPowStub::Generate(MacroAssembler* masm) { | 3381 void MathPowStub::Generate(MacroAssembler* masm) { |
492 UNIMPLEMENTED_MIPS(); | 3382 Label call_runtime; |
493 } | 3383 |
494 | 3384 if (CpuFeatures::IsSupported(FPU)) { |
495 | 3385 CpuFeatures::Scope scope(FPU); |
| 3386 |
| 3387 Label base_not_smi; |
| 3388 Label exponent_not_smi; |
| 3389 Label convert_exponent; |
| 3390 |
| 3391 const Register base = a0; |
| 3392 const Register exponent = a2; |
| 3393 const Register heapnumbermap = t1; |
| 3394 const Register heapnumber = s0; // Callee-saved register. |
| 3395 const Register scratch = t2; |
| 3396 const Register scratch2 = t3; |
| 3397 |
 | 3398 // Allocate FP values in the ABI parameter-passing registers. |
| 3399 const DoubleRegister double_base = f12; |
| 3400 const DoubleRegister double_exponent = f14; |
| 3401 const DoubleRegister double_result = f0; |
| 3402 const DoubleRegister double_scratch = f2; |
| 3403 |
| 3404 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); |
| 3405 __ lw(base, MemOperand(sp, 1 * kPointerSize)); |
| 3406 __ lw(exponent, MemOperand(sp, 0 * kPointerSize)); |
| 3407 |
 | 3408 // Convert base to a double value and store it in double_base (f12). |
| 3409 __ JumpIfNotSmi(base, &base_not_smi); |
| 3410 // Base is a Smi. Untag and convert it. |
| 3411 __ SmiUntag(base); |
| 3412 __ mtc1(base, double_scratch); |
| 3413 __ cvt_d_w(double_base, double_scratch); |
| 3414 __ Branch(&convert_exponent); |
| 3415 |
| 3416 __ bind(&base_not_smi); |
| 3417 __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset)); |
| 3418 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
| 3419 // Base is a heapnumber. Load it into double register. |
| 3420 __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); |
| 3421 |
| 3422 __ bind(&convert_exponent); |
| 3423 __ JumpIfNotSmi(exponent, &exponent_not_smi); |
| 3424 __ SmiUntag(exponent); |
| 3425 |
| 3426 // The base is in a double register and the exponent is |
| 3427 // an untagged smi. Allocate a heap number and call a |
| 3428 // C function for integer exponents. The register containing |
| 3429 // the heap number is callee-saved. |
| 3430 __ AllocateHeapNumber(heapnumber, |
| 3431 scratch, |
| 3432 scratch2, |
| 3433 heapnumbermap, |
| 3434 &call_runtime); |
| 3435 __ push(ra); |
| 3436 __ PrepareCallCFunction(3, scratch); |
| 3437 // ABI (o32) for func(double d, int x): d in f12, x in a2. |
| 3438 ASSERT(double_base.is(f12)); |
| 3439 ASSERT(exponent.is(a2)); |
| 3440 if (IsMipsSoftFloatABI) { |
| 3441 // Simulator case, supports FPU, but with soft-float passing. |
| 3442 __ mfc1(a0, double_base); |
| 3443 __ mfc1(a1, FPURegister::from_code(double_base.code() + 1)); |
| 3444 } |
| 3445 __ CallCFunction( |
| 3446 ExternalReference::power_double_int_function(masm->isolate()), 3); |
| 3447 __ pop(ra); |
| 3448 __ GetCFunctionDoubleResult(double_result); |
| 3449 __ sdc1(double_result, |
| 3450 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
| 3451 __ mov(v0, heapnumber); |
| 3452 __ DropAndRet(2 * kPointerSize); |
| 3453 |
| 3454 __ bind(&exponent_not_smi); |
| 3455 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
| 3456 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
| 3457 // Exponent is a heapnumber. Load it into double register. |
| 3458 __ ldc1(double_exponent, |
| 3459 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
| 3460 |
| 3461 // The base and the exponent are in double registers. |
| 3462 // Allocate a heap number and call a C function for |
| 3463 // double exponents. The register containing |
| 3464 // the heap number is callee-saved. |
| 3465 __ AllocateHeapNumber(heapnumber, |
| 3466 scratch, |
| 3467 scratch2, |
| 3468 heapnumbermap, |
| 3469 &call_runtime); |
| 3470 __ push(ra); |
| 3471 __ PrepareCallCFunction(4, scratch); |
| 3472 // ABI (o32) for func(double a, double b): a in f12, b in f14. |
| 3473 ASSERT(double_base.is(f12)); |
| 3474 ASSERT(double_exponent.is(f14)); |
| 3475 if (IsMipsSoftFloatABI) { |
| 3476 __ mfc1(a0, double_base); |
| 3477 __ mfc1(a1, FPURegister::from_code(double_base.code() + 1)); |
| 3478 __ mfc1(a2, double_exponent); |
| 3479 __ mfc1(a3, FPURegister::from_code(double_exponent.code() + 1)); |
| 3480 } |
| 3481 __ CallCFunction( |
| 3482 ExternalReference::power_double_double_function(masm->isolate()), 4); |
| 3483 __ pop(ra); |
| 3484 __ GetCFunctionDoubleResult(double_result); |
| 3485 __ sdc1(double_result, |
| 3486 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
| 3487 __ mov(v0, heapnumber); |
| 3488 __ DropAndRet(2 * kPointerSize); |
| 3489 } |
| 3490 |
| 3491 __ bind(&call_runtime); |
| 3492 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); |
| 3493 } |
| 3494 |
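The stub reduces to a two-way dispatch on how the exponent is represented. A sketch with simplified stand-in helpers (the real power_double_int external reference uses repeated squaring rather than std::pow):

    #include <cmath>

    double PowerDoubleInt(double base, int exponent) {         // Stand-in.
      return std::pow(base, static_cast<double>(exponent));
    }
    double PowerDoubleDouble(double base, double exponent) {   // Stand-in.
      return std::pow(base, exponent);
    }

    // Smi exponent: untag and call the integer-power helper. Heap-number
    // exponent: call the double-double helper. Anything else goes to the
    // runtime, as does a failed heap-number allocation.
    double MathPowDispatch(double base, bool exponent_is_smi, double exponent) {
      return exponent_is_smi
          ? PowerDoubleInt(base, static_cast<int>(exponent))
          : PowerDoubleDouble(base, exponent);
    }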
| 3495 |
496 bool CEntryStub::NeedsImmovableCode() { | 3496 bool CEntryStub::NeedsImmovableCode() { |
497 return true; | 3497 return true; |
498 } | 3498 } |
499 | 3499 |
500 | 3500 |
501 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | 3501 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { |
502 UNIMPLEMENTED_MIPS(); | 3502 __ Throw(v0); |
503 } | 3503 } |
504 | 3504 |
505 | 3505 |
506 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, | 3506 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, |
507 UncatchableExceptionType type) { | 3507 UncatchableExceptionType type) { |
508 UNIMPLEMENTED_MIPS(); | 3508 __ ThrowUncatchable(type, v0); |
509 } | 3509 } |
510 | 3510 |
511 | 3511 |
512 void CEntryStub::GenerateCore(MacroAssembler* masm, | 3512 void CEntryStub::GenerateCore(MacroAssembler* masm, |
513 Label* throw_normal_exception, | 3513 Label* throw_normal_exception, |
514 Label* throw_termination_exception, | 3514 Label* throw_termination_exception, |
515 Label* throw_out_of_memory_exception, | 3515 Label* throw_out_of_memory_exception, |
516 bool do_gc, | 3516 bool do_gc, |
517 bool always_allocate) { | 3517 bool always_allocate) { |
518 UNIMPLEMENTED_MIPS(); | 3518 // v0: result parameter for PerformGC, if any |
| 3519 // s0: number of arguments including receiver (C callee-saved) |
| 3520 // s1: pointer to the first argument (C callee-saved) |
| 3521 // s2: pointer to builtin function (C callee-saved) |
| 3522 |
| 3523 if (do_gc) { |
| 3524 // Move result passed in v0 into a0 to call PerformGC. |
| 3525 __ mov(a0, v0); |
| 3526 __ PrepareCallCFunction(1, a1); |
| 3527 __ CallCFunction( |
| 3528 ExternalReference::perform_gc_function(masm->isolate()), 1); |
| 3529 } |
| 3530 |
| 3531 ExternalReference scope_depth = |
| 3532 ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); |
| 3533 if (always_allocate) { |
| 3534 __ li(a0, Operand(scope_depth)); |
| 3535 __ lw(a1, MemOperand(a0)); |
| 3536 __ Addu(a1, a1, Operand(1)); |
| 3537 __ sw(a1, MemOperand(a0)); |
| 3538 } |
| 3539 |
| 3540 // Prepare arguments for C routine: a0 = argc, a1 = argv |
| 3541 __ mov(a0, s0); |
| 3542 __ mov(a1, s1); |
| 3543 |
| 3544 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We |
| 3545 // also need to reserve the 4 argument slots on the stack. |
| 3546 |
| 3547 __ AssertStackIsAligned(); |
| 3548 |
| 3549 __ li(a2, Operand(ExternalReference::isolate_address())); |
| 3550 |
| 3551 // From arm version of this function: |
| 3552 // TODO(1242173): To let the GC traverse the return address of the exit |
| 3553 // frames, we need to know where the return address is. Right now, |
| 3554 // we push it on the stack to be able to find it again, but we never |
| 3555 // restore from it in case of changes, which makes it impossible to |
| 3556 // support moving the C entry code stub. This should be fixed, but currently |
| 3557 // this is OK because the CEntryStub gets generated so early in the V8 boot |
| 3558 // sequence that it is not moving ever. |
| 3559 |
| 3560 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); |
| 3561 // This branch-and-link sequence is needed to find the current PC on mips, |
| 3562 // saved to the ra register. |
| 3563 // Use masm-> here instead of the double-underscore macro since extra |
| 3564 // coverage code can interfere with the proper calculation of ra. |
| 3565 Label find_ra; |
| 3566 masm->bal(&find_ra); // bal exposes branch delay slot. |
| 3567 masm->nop(); // Branch delay slot nop. |
| 3568 masm->bind(&find_ra); |
| 3569 |
| 3570 // Adjust the value in ra to point to the correct return location, 2nd |
| 3571 // instruction past the real call into C code (the jalr(t9)), and push it. |
| 3572 // This is the return address of the exit frame. |
| 3573 const int kNumInstructionsToJump = 6; |
| 3574 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize); |
| 3575 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. |
| 3576 masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize); |
| 3577 // Stack is still aligned. |
| 3578 |
| 3579 // Call the C routine. |
| 3580 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. |
| 3581 masm->jalr(t9); |
| 3582 masm->nop(); // Branch delay slot nop. |
| 3583 // Make sure the stored 'ra' points to this position. |
| 3584 ASSERT_EQ(kNumInstructionsToJump, |
| 3585 masm->InstructionsGeneratedSince(&find_ra)); |
| 3586 } |
| 3587 |
| 3588 // Restore stack (remove arg slots). |
| 3589 __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize); |
| 3590 |
| 3591 if (always_allocate) { |
| 3592 // It's okay to clobber a2 and a3 here. v0 & v1 contain result. |
| 3593 __ li(a2, Operand(scope_depth)); |
| 3594 __ lw(a3, MemOperand(a2)); |
| 3595 __ Subu(a3, a3, Operand(1)); |
| 3596 __ sw(a3, MemOperand(a2)); |
| 3597 } |
| 3598 |
| 3599 // Check for failure result. |
| 3600 Label failure_returned; |
| 3601 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); |
| 3602 __ addiu(a2, v0, 1); |
| 3603 __ andi(t0, a2, kFailureTagMask); |
| 3604 __ Branch(&failure_returned, eq, t0, Operand(zero_reg)); |
| 3605 |
| 3606 // Exit C frame and return. |
| 3607 // v0:v1: result |
| 3608 // sp: stack pointer |
| 3609 // fp: frame pointer |
| 3610 __ LeaveExitFrame(save_doubles_, s0); |
| 3611 __ Ret(); |
| 3612 |
| 3613 // Check if we should retry or throw exception. |
| 3614 Label retry; |
| 3615 __ bind(&failure_returned); |
| 3616 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); |
| 3617 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize); |
| 3618 __ Branch(&retry, eq, t0, Operand(zero_reg)); |
| 3619 |
| 3620 // Special handling of out of memory exceptions. |
| 3621 Failure* out_of_memory = Failure::OutOfMemoryException(); |
| 3622 __ Branch(throw_out_of_memory_exception, eq, |
| 3623 v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); |
| 3624 |
| 3625 // Retrieve the pending exception and clear the variable. |
| 3626 __ li(t0, |
| 3627 Operand(ExternalReference::the_hole_value_location(masm->isolate()))); |
| 3628 __ lw(a3, MemOperand(t0)); |
| 3629 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address, |
| 3630 masm->isolate()))); |
| 3631 __ lw(v0, MemOperand(t0)); |
| 3632 __ sw(a3, MemOperand(t0)); |
| 3633 |
| 3634 // Special handling of termination exceptions which are uncatchable |
| 3635 // by javascript code. |
| 3636 __ Branch(throw_termination_exception, eq, |
| 3637 v0, Operand(masm->isolate()->factory()->termination_exception())); |
| 3638 |
| 3639 // Handle normal exception. |
| 3640 __ jmp(throw_normal_exception); |
| 3641 |
| 3642 __ bind(&retry); |
| 3643 // Last failure (v0) will be moved to (a0) for parameter when retrying. |
519 } | 3644 } |
520 | 3645 |
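The addiu/andi pair after the C call is the failure-object test. A sketch with stand-in constants that mirror the tagging scheme the STATIC_ASSERT above relies on:

    #include <cstdint>

    const uint32_t kFailureTag = 3;      // Failure pointers end in 0b11 (assumed here).
    const uint32_t kFailureTagMask = 3;

    // (result + 1) is a multiple of 4 exactly when the low bits carry the
    // failure tag, so one add plus one mask separates failures from smis and
    // ordinary heap objects.
    bool IsFailure(uint32_t result) {
      return ((result + 1) & kFailureTagMask) == 0;
    }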
521 | 3646 |
522 void CEntryStub::Generate(MacroAssembler* masm) { | 3647 void CEntryStub::Generate(MacroAssembler* masm) { |
523 UNIMPLEMENTED_MIPS(); | 3648 // Called from JavaScript; parameters are on stack as if calling JS function |
| 3649 // a0: number of arguments including receiver |
| 3650 // a1: pointer to builtin function |
| 3651 // fp: frame pointer (restored after C call) |
| 3652 // sp: stack pointer (restored as callee's sp after C call) |
| 3653 // cp: current context (C callee-saved) |
| 3654 |
| 3655 // NOTE: Invocations of builtins may return failure objects |
| 3656 // instead of a proper result. The builtin entry handles |
| 3657 // this by performing a garbage collection and retrying the |
| 3658 // builtin once. |
| 3659 |
| 3660 // Compute the argv pointer in a callee-saved register. |
| 3661 __ sll(s1, a0, kPointerSizeLog2); |
| 3662 __ Addu(s1, sp, s1); |
| 3663 __ Subu(s1, s1, Operand(kPointerSize)); |
| 3664 |
| 3665 // Enter the exit frame that transitions from JavaScript to C++. |
| 3666 __ EnterExitFrame(save_doubles_); |
| 3667 |
| 3668 // Setup argc and the builtin function in callee-saved registers. |
| 3669 __ mov(s0, a0); |
| 3670 __ mov(s2, a1); |
| 3671 |
| 3672 // s0: number of arguments (C callee-saved) |
| 3673 // s1: pointer to first argument (C callee-saved) |
| 3674 // s2: pointer to builtin function (C callee-saved) |
| 3675 |
| 3676 Label throw_normal_exception; |
| 3677 Label throw_termination_exception; |
| 3678 Label throw_out_of_memory_exception; |
| 3679 |
| 3680 // Call into the runtime system. |
| 3681 GenerateCore(masm, |
| 3682 &throw_normal_exception, |
| 3683 &throw_termination_exception, |
| 3684 &throw_out_of_memory_exception, |
| 3685 false, |
| 3686 false); |
| 3687 |
| 3688 // Do space-specific GC and retry runtime call. |
| 3689 GenerateCore(masm, |
| 3690 &throw_normal_exception, |
| 3691 &throw_termination_exception, |
| 3692 &throw_out_of_memory_exception, |
| 3693 true, |
| 3694 false); |
| 3695 |
| 3696 // Do full GC and retry runtime call one final time. |
| 3697 Failure* failure = Failure::InternalError(); |
| 3698 __ li(v0, Operand(reinterpret_cast<int32_t>(failure))); |
| 3699 GenerateCore(masm, |
| 3700 &throw_normal_exception, |
| 3701 &throw_termination_exception, |
| 3702 &throw_out_of_memory_exception, |
| 3703 true, |
| 3704 true); |
| 3705 |
| 3706 __ bind(&throw_out_of_memory_exception); |
| 3707 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); |
| 3708 |
| 3709 __ bind(&throw_termination_exception); |
| 3710 GenerateThrowUncatchable(masm, TERMINATION); |
| 3711 |
| 3712 __ bind(&throw_normal_exception); |
| 3713 GenerateThrowTOS(masm); |
524 } | 3714 } |
525 | 3715 |
526 | 3716 |
527 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 3717 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
528 UNIMPLEMENTED_MIPS(); | 3718 Label invoke, exit; |
| 3719 |
| 3720 // Registers: |
| 3721 // a0: entry address |
| 3722 // a1: function |
 | 3723 // a2: receiver |
| 3724 // a3: argc |
| 3725 // |
| 3726 // Stack: |
| 3727 // 4 args slots |
| 3728 // args |
| 3729 |
| 3730 // Save callee saved registers on the stack. |
| 3731 __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit()); |
| 3732 |
| 3733 // Load argv in s0 register. |
| 3734 __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize + |
| 3735 StandardFrameConstants::kCArgsSlotsSize)); |
| 3736 |
| 3737 // We build an EntryFrame. |
| 3738 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
| 3739 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
| 3740 __ li(t2, Operand(Smi::FromInt(marker))); |
| 3741 __ li(t1, Operand(Smi::FromInt(marker))); |
| 3742 __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address, |
| 3743 masm->isolate()))); |
| 3744 __ lw(t0, MemOperand(t0)); |
| 3745 __ Push(t3, t2, t1, t0); |
| 3746 // Setup frame pointer for the frame to be pushed. |
| 3747 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); |
| 3748 |
| 3749 // Registers: |
| 3750 // a0: entry_address |
| 3751 // a1: function |
 | 3752 // a2: receiver_pointer |
| 3753 // a3: argc |
| 3754 // s0: argv |
| 3755 // |
| 3756 // Stack: |
| 3757 // caller fp | |
| 3758 // function slot | entry frame |
| 3759 // context slot | |
| 3760 // bad fp (0xff...f) | |
| 3761 // callee saved registers + ra |
| 3762 // 4 args slots |
| 3763 // args |
| 3764 |
| 3765 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 3766 // If this is the outermost JS call, set js_entry_sp value. |
| 3767 ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, |
| 3768 masm->isolate()); |
| 3769 __ li(t1, Operand(ExternalReference(js_entry_sp))); |
| 3770 __ lw(t2, MemOperand(t1)); |
| 3771 { |
| 3772 Label skip; |
| 3773 __ Branch(&skip, ne, t2, Operand(zero_reg)); |
| 3774 __ sw(fp, MemOperand(t1)); |
| 3775 __ bind(&skip); |
| 3776 } |
| 3777 #endif |
| 3778 |
| 3779 // Call a faked try-block that does the invoke. |
| 3780 __ bal(&invoke); // bal exposes branch delay slot. |
| 3781 __ nop(); // Branch delay slot nop. |
| 3782 |
| 3783 // Caught exception: Store result (exception) in the pending |
| 3784 // exception field in the JSEnv and return a failure sentinel. |
| 3785 // Coming in here the fp will be invalid because the PushTryHandler below |
| 3786 // sets it to 0 to signal the existence of the JSEntry frame. |
| 3787 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address, |
| 3788 masm->isolate()))); |
| 3789 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0. |
| 3790 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); |
| 3791 __ b(&exit); // b exposes branch delay slot. |
| 3792 __ nop(); // Branch delay slot nop. |
| 3793 |
| 3794 // Invoke: Link this frame into the handler chain. |
| 3795 __ bind(&invoke); |
| 3796 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); |
| 3797 // If an exception not caught by another handler occurs, this handler |
| 3798 // returns control to the code after the bal(&invoke) above, which |
| 3799 // restores all kCalleeSaved registers (including cp and fp) to their |
| 3800 // saved values before returning a failure to C. |
| 3801 |
| 3802 // Clear any pending exceptions. |
| 3803 __ li(t0, |
| 3804 Operand(ExternalReference::the_hole_value_location(masm->isolate()))); |
| 3805 __ lw(t1, MemOperand(t0)); |
| 3806 __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address, |
| 3807 masm->isolate()))); |
| 3808 __ sw(t1, MemOperand(t0)); |
| 3809 |
| 3810 // Invoke the function by calling through JS entry trampoline builtin. |
| 3811 // Notice that we cannot store a reference to the trampoline code directly in |
| 3812 // this stub, because runtime stubs are not traversed when doing GC. |
| 3813 |
| 3814 // Registers: |
| 3815 // a0: entry_address |
| 3816 // a1: function |
 | 3817 // a2: receiver_pointer |
| 3818 // a3: argc |
| 3819 // s0: argv |
| 3820 // |
| 3821 // Stack: |
| 3822 // handler frame |
| 3823 // entry frame |
| 3824 // callee saved registers + ra |
| 3825 // 4 args slots |
| 3826 // args |
| 3827 |
| 3828 if (is_construct) { |
| 3829 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, |
| 3830 masm->isolate()); |
| 3831 __ li(t0, Operand(construct_entry)); |
| 3832 } else { |
| 3833 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); |
| 3834 __ li(t0, Operand(entry)); |
| 3835 } |
| 3836 __ lw(t9, MemOperand(t0)); // Deref address. |
| 3837 |
| 3838 // Call JSEntryTrampoline. |
| 3839 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); |
| 3840 __ Call(t9); |
| 3841 |
| 3842 // Unlink this frame from the handler chain. When reading the |
| 3843 // address of the next handler, there is no need to use the address |
| 3844 // displacement since the current stack pointer (sp) points directly |
| 3845 // to the stack handler. |
| 3846 __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset)); |
| 3847 __ li(t0, Operand(ExternalReference(Isolate::k_handler_address, |
| 3848 masm->isolate()))); |
| 3849 __ sw(t1, MemOperand(t0)); |
| 3850 |
| 3851 // This restores sp to its position before PushTryHandler. |
| 3852 __ addiu(sp, sp, StackHandlerConstants::kSize); |
| 3853 |
| 3854 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 3855 // If current FP value is the same as js_entry_sp value, it means that |
| 3856 // the current function is the outermost. |
| 3857 __ li(t1, Operand(ExternalReference(js_entry_sp))); |
| 3858 __ lw(t2, MemOperand(t1)); |
| 3859 { |
| 3860 Label skip; |
| 3861 __ Branch(&skip, ne, fp, Operand(t2)); |
| 3862 __ sw(zero_reg, MemOperand(t1)); |
| 3863 __ bind(&skip); |
| 3864 } |
| 3865 #endif |
| 3866 |
| 3867 __ bind(&exit); // v0 holds result. |
| 3868 // Restore the top frame descriptors from the stack. |
| 3869 __ pop(t1); |
| 3870 __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address, |
| 3871 masm->isolate()))); |
| 3872 __ sw(t1, MemOperand(t0)); |
| 3873 |
| 3874 // Reset the stack to the callee saved registers. |
| 3875 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); |
| 3876 |
| 3877 // Restore callee saved registers from the stack. |
| 3878 __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit()); |
| 3879 // Return. |
| 3880 __ Jump(ra); |
529 } | 3881 } |
530 | 3882 |
531 | 3883 |
532 // Uses registers a0 to t0. Expected input is | 3884 // Uses registers a0 to t0. Expected input is |
533 // object in a0 (or at sp+1*kPointerSize) and function in | 3885 // object in a0 (or at sp+1*kPointerSize) and function in |
534 // a1 (or at sp), depending on whether or not | 3886 // a1 (or at sp), depending on whether or not |
535 // args_in_registers() is true. | 3887 // args_in_registers() is true. |
536 void InstanceofStub::Generate(MacroAssembler* masm) { | 3888 void InstanceofStub::Generate(MacroAssembler* masm) { |
537 UNIMPLEMENTED_MIPS(); | 3889 // Fixed register usage throughout the stub: |
| 3890 const Register object = a0; // Object (lhs). |
| 3891 const Register map = a3; // Map of the object. |
| 3892 const Register function = a1; // Function (rhs). |
| 3893 const Register prototype = t0; // Prototype of the function. |
| 3894 const Register scratch = a2; |
| 3895 Label slow, loop, is_instance, is_not_instance, not_js_object; |
| 3896 if (!HasArgsInRegisters()) { |
| 3897 __ lw(object, MemOperand(sp, 1 * kPointerSize)); |
| 3898 __ lw(function, MemOperand(sp, 0)); |
| 3899 } |
| 3900 |
| 3901 // Check that the left hand is a JS object and load map. |
 | 3902 __ JumpIfSmi(object, &not_js_object); |
 | 3903 __ IsObjectJSObjectType(object, map, scratch, &not_js_object); |
| 3904 |
| 3905 // Look up the function and the map in the instanceof cache. |
| 3906 Label miss; |
| 3907 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex); |
| 3908 __ Branch(&miss, ne, function, Operand(t1)); |
| 3909 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex); |
| 3910 __ Branch(&miss, ne, map, Operand(t1)); |
| 3911 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3912 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 3913 |
| 3914 __ bind(&miss); |
| 3915 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); |
| 3916 |
| 3917 // Check that the function prototype is a JS object. |
| 3918 __ JumpIfSmi(prototype, &slow); |
| 3919 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
| 3920 |
| 3921 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| 3922 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| 3923 |
| 3924 // Register mapping: a3 is object map and t0 is function prototype. |
| 3925 // Get prototype of object into a2. |
| 3926 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
| 3927 |
| 3928 // Loop through the prototype chain looking for the function prototype. |
| 3929 __ bind(&loop); |
| 3930 __ Branch(&is_instance, eq, scratch, Operand(prototype)); |
| 3931 __ LoadRoot(t1, Heap::kNullValueRootIndex); |
| 3932 __ Branch(&is_not_instance, eq, scratch, Operand(t1)); |
| 3933 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 3934 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
| 3935 __ Branch(&loop); |
| 3936 |
| 3937 __ bind(&is_instance); |
| 3938 ASSERT(Smi::FromInt(0) == 0); |
| 3939 __ mov(v0, zero_reg); |
| 3940 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3941 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 3942 |
| 3943 __ bind(&is_not_instance); |
| 3944 __ li(v0, Operand(Smi::FromInt(1))); |
| 3945 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3946 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 3947 |
| 3948 Label object_not_null, object_not_null_or_smi; |
 | 3949 __ bind(&not_js_object); |
 | 3950 // Before the null, smi and string value checks, check that the rhs is a |
 | 3951 // function, because a non-function rhs must throw an exception. |
| 3952 __ JumpIfSmi(function, &slow); |
| 3953 __ GetObjectType(function, map, scratch); |
| 3954 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE)); |
| 3955 |
| 3956 // Null is not instance of anything. |
| 3957 __ Branch(&object_not_null, ne, scratch, |
| 3958 Operand(masm->isolate()->factory()->null_value())); |
| 3959 __ li(v0, Operand(Smi::FromInt(1))); |
| 3960 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 3961 |
| 3962 __ bind(&object_not_null); |
| 3963 // Smi values are not instances of anything. |
| 3964 __ JumpIfNotSmi(object, &object_not_null_or_smi); |
| 3965 __ li(v0, Operand(Smi::FromInt(1))); |
| 3966 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 3967 |
| 3968 __ bind(&object_not_null_or_smi); |
| 3969 // String values are not instances of anything. |
| 3970 __ IsObjectJSStringType(object, scratch, &slow); |
| 3971 __ li(v0, Operand(Smi::FromInt(1))); |
| 3972 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
| 3973 |
| 3974 // Slow-case. Tail call builtin. |
| 3975 __ bind(&slow); |
| 3976 if (HasArgsInRegisters()) { |
| 3977 __ Push(a0, a1); |
| 3978 } |
| 3979 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
538 } | 3980 } |
539 | 3981 |
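The loop above walks the prototype chain of the left-hand side, and the result encoding is inverted: Smi 0 means the object is an instance, Smi 1 means it is not. A self-contained sketch with stand-in types:

    struct HeapObject { HeapObject* prototype; };  // Stand-in for map->prototype.

    // Walk from the object's prototype. Reaching the function's prototype
    // means "is an instance" (0); reaching null means "is not" (1).
    int InstanceOfResult(HeapObject* object_prototype,
                         HeapObject* function_prototype,
                         HeapObject* null_value) {
      for (HeapObject* p = object_prototype; p != null_value; p = p->prototype) {
        if (p == function_prototype) return 0;
      }
      return 1;
    }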
540 | 3982 |
541 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 3983 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
542 UNIMPLEMENTED_MIPS(); | 3984 // The displacement is the offset of the last parameter (if any) |
| 3985 // relative to the frame pointer. |
| 3986 static const int kDisplacement = |
| 3987 StandardFrameConstants::kCallerSPOffset - kPointerSize; |
| 3988 |
 | 3989 // Check that the key is a smi. |
| 3990 Label slow; |
| 3991 __ JumpIfNotSmi(a1, &slow); |
| 3992 |
| 3993 // Check if the calling frame is an arguments adaptor frame. |
| 3994 Label adaptor; |
| 3995 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 3996 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
| 3997 __ Branch(&adaptor, |
| 3998 eq, |
| 3999 a3, |
| 4000 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 4001 |
| 4002 // Check index (a1) against formal parameters count limit passed in |
| 4003 // through register a0. Use unsigned comparison to get negative |
| 4004 // check for free. |
| 4005 __ Branch(&slow, hs, a1, Operand(a0)); |
| 4006 |
| 4007 // Read the argument from the stack and return it. |
| 4008 __ subu(a3, a0, a1); |
| 4009 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); |
| 4010 __ Addu(a3, fp, Operand(t3)); |
| 4011 __ lw(v0, MemOperand(a3, kDisplacement)); |
| 4012 __ Ret(); |
| 4013 |
| 4014 // Arguments adaptor case: Check index (a1) against actual arguments |
| 4015 // limit found in the arguments adaptor frame. Use unsigned |
| 4016 // comparison to get negative check for free. |
| 4017 __ bind(&adaptor); |
| 4018 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 4019 __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); |
| 4020 |
| 4021 // Read the argument from the adaptor frame and return it. |
| 4022 __ subu(a3, a0, a1); |
| 4023 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); |
| 4024 __ Addu(a3, a2, Operand(t3)); |
| 4025 __ lw(v0, MemOperand(a3, kDisplacement)); |
| 4026 __ Ret(); |
| 4027 |
| 4028 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
| 4029 // by calling the runtime system. |
| 4030 __ bind(&slow); |
| 4031 __ push(a1); |
| 4032 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
543 } | 4033 } |
544 | 4034 |
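The subu/sll/Addu sequence computes the element address directly from smi-tagged values. A sketch of the same arithmetic, with constants that are stand-ins for the MIPS32 layout:

    #include <cstdint>

    const int kPointerSizeLog2 = 2;  // 4-byte pointers on MIPS32.
    const int kSmiTagSize = 1;       // Smis carry one tag bit.

    // argc and index stay smi-tagged, so the scale factor is the pointer size
    // divided by the smi multiplier: a shift by (kPointerSizeLog2 - kSmiTagSize).
    uintptr_t ArgumentAddress(uintptr_t frame_base, int32_t argc_smi,
                              int32_t index_smi, int32_t displacement) {
      uint32_t offset = static_cast<uint32_t>(argc_smi - index_smi)
                        << (kPointerSizeLog2 - kSmiTagSize);
      return frame_base + offset + displacement;
    }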
545 | 4035 |
546 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { | 4036 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { |
547 UNIMPLEMENTED_MIPS(); | 4037 // sp[0] : number of parameters |
| 4038 // sp[4] : receiver displacement |
| 4039 // sp[8] : function |
| 4040 |
| 4041 // Check if the calling frame is an arguments adaptor frame. |
| 4042 Label adaptor_frame, try_allocate, runtime; |
| 4043 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 4044 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
| 4045 __ Branch(&adaptor_frame, |
| 4046 eq, |
| 4047 a3, |
| 4048 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 4049 |
| 4050 // Get the length from the frame. |
| 4051 __ lw(a1, MemOperand(sp, 0)); |
| 4052 __ Branch(&try_allocate); |
| 4053 |
| 4054 // Patch the arguments.length and the parameters pointer. |
| 4055 __ bind(&adaptor_frame); |
| 4056 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 4057 __ sw(a1, MemOperand(sp, 0)); |
| 4058 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize); |
| 4059 __ Addu(a3, a2, Operand(at)); |
| 4060 |
| 4061 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); |
| 4062 __ sw(a3, MemOperand(sp, 1 * kPointerSize)); |
| 4063 |
| 4064 // Try the new space allocation. Start out with computing the size |
| 4065 // of the arguments object and the elements array in words. |
| 4066 Label add_arguments_object; |
| 4067 __ bind(&try_allocate); |
| 4068 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg)); |
| 4069 __ srl(a1, a1, kSmiTagSize); |
| 4070 |
| 4071 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize)); |
| 4072 __ bind(&add_arguments_object); |
| 4073 __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize)); |
| 4074 |
| 4075 // Do the allocation of both objects in one go. |
| 4076 __ AllocateInNewSpace( |
| 4077 a1, |
| 4078 v0, |
| 4079 a2, |
| 4080 a3, |
| 4081 &runtime, |
| 4082 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| 4083 |
| 4084 // Get the arguments boilerplate from the current (global) context. |
| 4085 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 4086 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset)); |
| 4087 __ lw(t0, MemOperand(t0, |
| 4088 Context::SlotOffset(GetArgumentsBoilerplateIndex()))); |
| 4089 |
| 4090 // Copy the JS object part. |
| 4091 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize); |
| 4092 |
| 4093 if (type_ == NEW_NON_STRICT) { |
| 4094 // Setup the callee in-object property. |
| 4095 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); |
| 4096 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); |
| 4097 const int kCalleeOffset = JSObject::kHeaderSize + |
| 4098 Heap::kArgumentsCalleeIndex * kPointerSize; |
| 4099 __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); |
| 4100 } |
| 4101 |
| 4102 // Get the length (smi tagged) and set that as an in-object property too. |
| 4103 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
| 4104 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); |
| 4105 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + |
| 4106 Heap::kArgumentsLengthIndex * kPointerSize)); |
| 4107 |
| 4108 Label done; |
| 4109 __ Branch(&done, eq, a1, Operand(zero_reg)); |
| 4110 |
| 4111 // Get the parameters pointer from the stack. |
| 4112 __ lw(a2, MemOperand(sp, 1 * kPointerSize)); |
| 4113 |
| 4114 // Setup the elements pointer in the allocated arguments object and |
| 4115 // initialize the header in the elements fixed array. |
| 4116 __ Addu(t0, v0, Operand(GetArgumentsObjectSize())); |
| 4117 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 4118 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); |
| 4119 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset)); |
| 4120 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset)); |
| 4121 __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop. |
| 4122 |
| 4123 // Copy the fixed array slots. |
| 4124 Label loop; |
| 4125 // Setup t0 to point to the first array slot. |
| 4126 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4127 __ bind(&loop); |
| 4128 // Pre-decrement a2 with kPointerSize on each iteration. |
| 4129 // Pre-decrement in order to skip receiver. |
| 4130 __ Addu(a2, a2, Operand(-kPointerSize)); |
| 4131 __ lw(a3, MemOperand(a2)); |
| 4132 // Post-increment t0 with kPointerSize on each iteration. |
| 4133 __ sw(a3, MemOperand(t0)); |
| 4134 __ Addu(t0, t0, Operand(kPointerSize)); |
| 4135 __ Subu(a1, a1, Operand(1)); |
| 4136 __ Branch(&loop, ne, a1, Operand(zero_reg)); |
| 4137 |
| 4138 // Return and remove the on-stack parameters. |
| 4139 __ bind(&done); |
| 4140 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 4141 __ Ret(); |
| 4142 |
| 4143 // Do the runtime call to allocate the arguments object. |
| 4144 __ bind(&runtime); |
| 4145 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
548 } | 4146 } |
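A note on the size math above: when there are no arguments the elements array is skipped entirely, otherwise the allocation covers the arguments object, the FixedArray header and one word per argument. A minimal C++ sketch of that computation, with made-up word counts standing in for GetArgumentsObjectSize() and FixedArray::kHeaderSize (illustration only, not V8's real constants):

#include <cstdio>

// Hypothetical word counts for illustration; the stub derives the real values
// from GetArgumentsObjectSize() and FixedArray::kHeaderSize.
const int kArgumentsObjectWords = 5;   // boilerplate JSObject + in-object props
const int kFixedArrayHeaderWords = 2;  // map + length

// Number of words requested from AllocateInNewSpace for argc arguments.
int ArgumentsAllocationWords(int argc) {
  if (argc == 0) return kArgumentsObjectWords;  // no elements array needed
  return kArgumentsObjectWords + kFixedArrayHeaderWords + argc;
}

int main() {
  printf("%d %d\n", ArgumentsAllocationWords(0), ArgumentsAllocationWords(3));
}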
549 | 4147 |
550 | 4148 |
551 void RegExpExecStub::Generate(MacroAssembler* masm) { | 4149 void RegExpExecStub::Generate(MacroAssembler* masm) { |
552 UNIMPLEMENTED_MIPS(); | 4150 // Just jump directly to runtime if native RegExp is not selected at compile |
| 4151 // time, or if regexp entry in generated code has been turned off by a |
| 4152 // runtime switch or at compilation. |
| 4153 #ifdef V8_INTERPRETED_REGEXP |
| 4154 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| 4155 #else // V8_INTERPRETED_REGEXP |
| 4156 if (!FLAG_regexp_entry_native) { |
| 4157 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| 4158 return; |
| 4159 } |
| 4160 |
| 4161 // Stack frame on entry. |
| 4162 // sp[0]: last_match_info (expected JSArray) |
| 4163 // sp[4]: previous index |
| 4164 // sp[8]: subject string |
| 4165 // sp[12]: JSRegExp object |
| 4166 |
| 4167 static const int kLastMatchInfoOffset = 0 * kPointerSize; |
| 4168 static const int kPreviousIndexOffset = 1 * kPointerSize; |
| 4169 static const int kSubjectOffset = 2 * kPointerSize; |
| 4170 static const int kJSRegExpOffset = 3 * kPointerSize; |
| 4171 |
| 4172 Label runtime, invoke_regexp; |
| 4173 |
| 4174 // Allocation of registers for this function. These are in callee save |
| 4175 // registers and will be preserved by the call to the native RegExp code, as |
| 4176 // this code is called using the normal C calling convention. When calling |
| 4177 // directly from generated code the native RegExp code will not do a GC and |
| 4178 // therefore the contents of these registers are safe to use after the call. |
| 4179 // MIPS - using s0..s2, since we are not using CEntry Stub. |
| 4180 Register subject = s0; |
| 4181 Register regexp_data = s1; |
| 4182 Register last_match_info_elements = s2; |
| 4183 |
| 4184 // Ensure that a RegExp stack is allocated. |
| 4185 ExternalReference address_of_regexp_stack_memory_address = |
| 4186 ExternalReference::address_of_regexp_stack_memory_address( |
| 4187 masm->isolate()); |
| 4188 ExternalReference address_of_regexp_stack_memory_size = |
| 4189 ExternalReference::address_of_regexp_stack_memory_size(masm->isolate()); |
| 4190 __ li(a0, Operand(address_of_regexp_stack_memory_size)); |
| 4191 __ lw(a0, MemOperand(a0, 0)); |
| 4192 __ Branch(&runtime, eq, a0, Operand(zero_reg)); |
| 4193 |
| 4194 // Check that the first argument is a JSRegExp object. |
| 4195 __ lw(a0, MemOperand(sp, kJSRegExpOffset)); |
| 4196 STATIC_ASSERT(kSmiTag == 0); |
| 4197 __ JumpIfSmi(a0, &runtime); |
| 4198 __ GetObjectType(a0, a1, a1); |
| 4199 __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE)); |
| 4200 |
| 4201 // Check that the RegExp has been compiled (data contains a fixed array). |
| 4202 __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset)); |
| 4203 if (FLAG_debug_code) { |
| 4204 __ And(t0, regexp_data, Operand(kSmiTagMask)); |
| 4205 __ Check(nz, |
| 4206 "Unexpected type for RegExp data, FixedArray expected", |
| 4207 t0, |
| 4208 Operand(zero_reg)); |
| 4209 __ GetObjectType(regexp_data, a0, a0); |
| 4210 __ Check(eq, |
| 4211 "Unexpected type for RegExp data, FixedArray expected", |
| 4212 a0, |
| 4213 Operand(FIXED_ARRAY_TYPE)); |
| 4214 } |
| 4215 |
| 4216 // regexp_data: RegExp data (FixedArray) |
| 4217 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
| 4218 __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
| 4219 __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
| 4220 |
| 4221 // regexp_data: RegExp data (FixedArray) |
| 4222 // Check that the number of captures fit in the static offsets vector buffer. |
| 4223 __ lw(a2, |
| 4224 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
| 4225 // Calculate number of capture registers (number_of_captures + 1) * 2. This |
| 4226 // uses the assumption that smis are 2 * their untagged value. |
| 4227 STATIC_ASSERT(kSmiTag == 0); |
| 4228 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| 4229 __ Addu(a2, a2, Operand(2)); // a2 was a smi. |
| 4230 // Check that the static offsets vector buffer is large enough. |
| 4231 __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); |
| 4232 |
| 4233 // a2: Number of capture registers |
| 4234 // regexp_data: RegExp data (FixedArray) |
| 4235 // Check that the second argument is a string. |
| 4236 __ lw(subject, MemOperand(sp, kSubjectOffset)); |
| 4237 __ JumpIfSmi(subject, &runtime); |
| 4238 __ GetObjectType(subject, a0, a0); |
| 4239 __ And(a0, a0, Operand(kIsNotStringMask)); |
| 4240 STATIC_ASSERT(kStringTag == 0); |
| 4241 __ Branch(&runtime, ne, a0, Operand(zero_reg)); |
| 4242 |
| 4243 // Get the length of the string to r3. |
| 4244 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset)); |
| 4245 |
| 4246 // a2: Number of capture registers |
| 4247 // a3: Length of subject string as a smi |
| 4248 // subject: Subject string |
| 4249 // regexp_data: RegExp data (FixedArray) |
| 4250 // Check that the third argument is a positive smi less than the subject |
| 4251 // string length. A negative value will be greater (unsigned comparison). |
| 4252 __ lw(a0, MemOperand(sp, kPreviousIndexOffset)); |
| 4253 __ And(at, a0, Operand(kSmiTagMask)); |
| 4254 __ Branch(&runtime, ne, at, Operand(zero_reg)); |
| 4255 __ Branch(&runtime, ls, a3, Operand(a0)); |
| 4256 |
| 4257 // a2: Number of capture registers |
| 4258 // subject: Subject string |
| 4259 // regexp_data: RegExp data (FixedArray) |
| 4260 // Check that the fourth object is a JSArray object. |
| 4261 __ lw(a0, MemOperand(sp, kLastMatchInfoOffset)); |
| 4262 __ JumpIfSmi(a0, &runtime); |
| 4263 __ GetObjectType(a0, a1, a1); |
| 4264 __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE)); |
| 4265 // Check that the JSArray is in fast case. |
| 4266 __ lw(last_match_info_elements, |
| 4267 FieldMemOperand(a0, JSArray::kElementsOffset)); |
| 4268 __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
| 4269 __ Branch(&runtime, ne, a0, Operand( |
| 4270 masm->isolate()->factory()->fixed_array_map())); |
| 4271 // Check that the last match info has space for the capture registers and the |
| 4272 // additional information. |
| 4273 __ lw(a0, |
| 4274 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); |
| 4275 __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead)); |
| 4276 __ sra(at, a0, kSmiTagSize); // Untag length for comparison. |
| 4277 __ Branch(&runtime, gt, a2, Operand(at)); |
| 4278 // subject: Subject string |
| 4279 // regexp_data: RegExp data (FixedArray) |
| 4280 // Check the representation and encoding of the subject string. |
| 4281 Label seq_string; |
| 4282 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| 4283 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); |
| 4284 // First check for flat string. |
| 4285 __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask)); |
| 4286 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); |
| 4287 __ Branch(&seq_string, eq, at, Operand(zero_reg)); |
| 4288 |
| 4289 // subject: Subject string |
| 4290 // a0: instance type of Subject string |
| 4291 // regexp_data: RegExp data (FixedArray) |
| 4292 // Check for flat cons string. |
| 4293 // A flat cons string is a cons string where the second part is the empty |
| 4294 // string. In that case the subject string is just the first part of the cons |
| 4295 // string. Also in this case the first part of the cons string is known to be |
| 4296 // a sequential string or an external string. |
| 4297 STATIC_ASSERT(kExternalStringTag != 0); |
| 4298 STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); |
| 4299 __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag)); |
| 4300 __ Branch(&runtime, ne, at, Operand(zero_reg)); |
| 4301 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset)); |
| 4302 __ LoadRoot(a1, Heap::kEmptyStringRootIndex); |
| 4303 __ Branch(&runtime, ne, a0, Operand(a1)); |
| 4304 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); |
| 4305 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| 4306 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); |
| 4307 // Is first part a flat string? |
| 4308 STATIC_ASSERT(kSeqStringTag == 0); |
| 4309 __ And(at, a0, Operand(kStringRepresentationMask)); |
| 4310 __ Branch(&runtime, ne, at, Operand(zero_reg)); |
| 4311 |
| 4312 __ bind(&seq_string); |
| 4313 // subject: Subject string |
| 4314 // regexp_data: RegExp data (FixedArray) |
| 4315 // a0: Instance type of subject string |
| 4316 STATIC_ASSERT(kStringEncodingMask == 4); |
| 4317 STATIC_ASSERT(kAsciiStringTag == 4); |
| 4318 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 4319 // Find the code object based on the assumptions above. |
| 4320 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii. |
| 4321 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); |
| 4322 __ sra(a3, a0, 2); // a3 is 1 for ascii, 0 for UC16 (used below). |
| 4323 __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); |
| 4324 __ movz(t9, t0, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. |
| 4325 |
| 4326 // Check that the irregexp code has been generated for the actual string |
| 4327 // encoding. If it has, the field contains a code object; otherwise it |
| 4328 // contains the hole. |
| 4329 __ GetObjectType(t9, a0, a0); |
| 4330 __ Branch(&runtime, ne, a0, Operand(CODE_TYPE)); |
| 4331 |
| 4332 // a3: encoding of subject string (1 if ASCII, 0 if two_byte); |
| 4333 // t9: code |
| 4334 // subject: Subject string |
| 4335 // regexp_data: RegExp data (FixedArray) |
| 4336 // Load used arguments before starting to push arguments for call to native |
| 4337 // RegExp code to avoid handling changing stack height. |
| 4338 __ lw(a1, MemOperand(sp, kPreviousIndexOffset)); |
| 4339 __ sra(a1, a1, kSmiTagSize); // Untag the Smi. |
| 4340 |
| 4341 // a1: previous index |
| 4342 // a3: encoding of subject string (1 if ASCII, 0 if two_byte); |
| 4343 // t9: code |
| 4344 // subject: Subject string |
| 4345 // regexp_data: RegExp data (FixedArray) |
| 4346 // All checks done. Now push arguments for native regexp code. |
| 4347 __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(), |
| 4348 1, a0, a2); |
| 4349 |
| 4350 // Isolates: note we add an additional parameter here (isolate pointer). |
| 4351 static const int kRegExpExecuteArguments = 8; |
| 4352 static const int kParameterRegisters = 4; |
| 4353 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
| 4354 |
| 4355 // Stack pointer now points to cell where return address is to be written. |
| 4356 // Arguments are before that on the stack or in registers, meaning we |
| 4357 // treat the return address as argument 5. Thus every argument after that |
| 4358 // needs to be shifted back by 1. Since DirectCEntryStub will handle |
| 4359 // allocating space for the c argument slots, we don't need to calculate |
| 4360 // that into the argument positions on the stack. This is how the stack will |
| 4361 // look (sp meaning the value of sp at this moment): |
| 4362 // [sp + 4] - Argument 8 |
| 4363 // [sp + 3] - Argument 7 |
| 4364 // [sp + 2] - Argument 6 |
| 4365 // [sp + 1] - Argument 5 |
| 4366 // [sp + 0] - saved ra |
| 4367 |
| 4368 // Argument 8: Pass current isolate address. |
| 4369 // CFunctionArgumentOperand handles MIPS stack argument slots. |
| 4370 __ li(a0, Operand(ExternalReference::isolate_address())); |
| 4371 __ sw(a0, MemOperand(sp, 4 * kPointerSize)); |
| 4372 |
| 4373 // Argument 7: Indicate that this is a direct call from JavaScript. |
| 4374 __ li(a0, Operand(1)); |
| 4375 __ sw(a0, MemOperand(sp, 3 * kPointerSize)); |
| 4376 |
| 4377 // Argument 6: Start (high end) of backtracking stack memory area. |
| 4378 __ li(a0, Operand(address_of_regexp_stack_memory_address)); |
| 4379 __ lw(a0, MemOperand(a0, 0)); |
| 4380 __ li(a2, Operand(address_of_regexp_stack_memory_size)); |
| 4381 __ lw(a2, MemOperand(a2, 0)); |
| 4382 __ addu(a0, a0, a2); |
| 4383 __ sw(a0, MemOperand(sp, 2 * kPointerSize)); |
| 4384 |
| 4385 // Argument 5: static offsets vector buffer. |
| 4386 __ li(a0, Operand( |
| 4387 ExternalReference::address_of_static_offsets_vector(masm->isolate()))); |
| 4388 __ sw(a0, MemOperand(sp, 1 * kPointerSize)); |
| 4389 |
| 4390 // For arguments 4 and 3 get string length, calculate start of string data |
| 4391 // and calculate the shift of the index (0 for ASCII and 1 for two byte). |
| 4392 __ lw(a0, FieldMemOperand(subject, String::kLengthOffset)); |
| 4393 __ sra(a0, a0, kSmiTagSize); |
| 4394 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
| 4395 __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 4396 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte. |
| 4397 // Argument 4 (a3): End of string data |
| 4398 // Argument 3 (a2): Start of string data |
| 4399 __ sllv(t1, a1, a3); |
| 4400 __ addu(a2, t0, t1); |
| 4401 __ sllv(t1, a0, a3); |
| 4402 __ addu(a3, t0, t1); |
| 4403 |
| 4404 // Argument 2 (a1): Previous index. |
| 4405 // Already there |
| 4406 |
| 4407 // Argument 1 (a0): Subject string. |
| 4408 __ mov(a0, subject); |
| 4409 |
| 4410 // Locate the code entry and call it. |
| 4411 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 4412 DirectCEntryStub stub; |
| 4413 stub.GenerateCall(masm, t9); |
| 4414 |
| 4415 __ LeaveExitFrame(false, no_reg); |
| 4416 |
| 4417 // v0: result |
| 4418 // subject: subject string (callee saved) |
| 4419 // regexp_data: RegExp data (callee saved) |
| 4420 // last_match_info_elements: Last match info elements (callee saved) |
| 4421 |
| 4422 // Check the result. |
| 4423 |
| 4424 Label success; |
| 4425 __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS)); |
| 4426 Label failure; |
| 4427 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE)); |
| 4428 // If not exception it can only be retry. Handle that in the runtime system. |
| 4429 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); |
| 4430 // Result must now be exception. If there is no pending exception, a stack |
| 4431 // overflow (on the backtrack stack) was detected in RegExp code, but the |
| 4432 // exception has not been created yet. Handle that in the runtime system. |
| 4433 // TODO(592): Rerunning the RegExp to get the stack overflow exception. |
| 4434 __ li(a1, Operand( |
| 4435 ExternalReference::the_hole_value_location(masm->isolate()))); |
| 4436 __ lw(a1, MemOperand(a1, 0)); |
| 4437 __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address, |
| 4438 masm->isolate()))); |
| 4439 __ lw(v0, MemOperand(a2, 0)); |
| 4440 __ Branch(&runtime, eq, v0, Operand(a1)); |
| 4441 |
| 4442 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception. |
| 4443 |
| 4444 // Check if the exception is a termination. If so, throw as uncatchable. |
| 4445 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex); |
| 4446 Label termination_exception; |
| 4447 __ Branch(&termination_exception, eq, v0, Operand(a0)); |
| 4448 |
| 4449 __ Throw(a0); // Expects thrown value in v0. |
| 4450 |
| 4451 __ bind(&termination_exception); |
| 4452 __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0. |
| 4453 |
| 4454 __ bind(&failure); |
| 4455 // For failure and exception return null. |
| 4456 __ li(v0, Operand(masm->isolate()->factory()->null_value())); |
| 4457 __ Addu(sp, sp, Operand(4 * kPointerSize)); |
| 4458 __ Ret(); |
| 4459 |
| 4460 // Process the result from the native regexp code. |
| 4461 __ bind(&success); |
| 4462 __ lw(a1, |
| 4463 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
| 4464 // Calculate number of capture registers (number_of_captures + 1) * 2. |
| 4465 STATIC_ASSERT(kSmiTag == 0); |
| 4466 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| 4467 __ Addu(a1, a1, Operand(2)); // a1 was a smi. |
| 4468 |
| 4469 // a1: number of capture registers |
| 4470 // subject: subject string |
| 4471 // Store the capture count. |
| 4472 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi. |
| 4473 __ sw(a2, FieldMemOperand(last_match_info_elements, |
| 4474 RegExpImpl::kLastCaptureCountOffset)); |
| 4475 // Store last subject and last input. |
| 4476 __ mov(a3, last_match_info_elements); // Moved up to reduce latency. |
| 4477 __ sw(subject, |
| 4478 FieldMemOperand(last_match_info_elements, |
| 4479 RegExpImpl::kLastSubjectOffset)); |
| 4480 __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0); |
| 4481 __ sw(subject, |
| 4482 FieldMemOperand(last_match_info_elements, |
| 4483 RegExpImpl::kLastInputOffset)); |
| 4484 __ mov(a3, last_match_info_elements); |
| 4485 __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0); |
| 4486 |
| 4487 // Get the static offsets vector filled by the native regexp code. |
| 4488 ExternalReference address_of_static_offsets_vector = |
| 4489 ExternalReference::address_of_static_offsets_vector(masm->isolate()); |
| 4490 __ li(a2, Operand(address_of_static_offsets_vector)); |
| 4491 |
| 4492 // a1: number of capture registers |
| 4493 // a2: offsets vector |
| 4494 Label next_capture, done; |
| 4495 // Capture register counter starts from number of capture registers and |
| 4496 // counts down until wrapping after zero. |
| 4497 __ Addu(a0, |
| 4498 last_match_info_elements, |
| 4499 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); |
| 4500 __ bind(&next_capture); |
| 4501 __ Subu(a1, a1, Operand(1)); |
| 4502 __ Branch(&done, lt, a1, Operand(zero_reg)); |
| 4503 // Read the value from the static offsets vector buffer. |
| 4504 __ lw(a3, MemOperand(a2, 0)); |
| 4505 __ addiu(a2, a2, kPointerSize); |
| 4506 // Store the smi value in the last match info. |
| 4507 __ sll(a3, a3, kSmiTagSize); // Convert to Smi. |
| 4508 __ sw(a3, MemOperand(a0, 0)); |
| 4509 __ Branch(&next_capture, USE_DELAY_SLOT); |
| 4510 __ addiu(a0, a0, kPointerSize); // In branch delay slot. |
| 4511 |
| 4512 __ bind(&done); |
| 4513 |
| 4514 // Return last match info. |
| 4515 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset)); |
| 4516 __ Addu(sp, sp, Operand(4 * kPointerSize)); |
| 4517 __ Ret(); |
| 4518 |
| 4519 // Do the runtime call to execute the regexp. |
| 4520 __ bind(&runtime); |
| 4521 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| 4522 #endif // V8_INTERPRETED_REGEXP |
553 } | 4523 } |
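One detail worth spelling out from the capture-count handling above: the stub adds 2 to the still smi-tagged count, relying on a smi being exactly twice its untagged value, so the result is already (number_of_captures + 1) * 2 capture registers. A small sketch of that arithmetic under the same 32-bit smi assumptions (helper names are mine, not V8 API):

#include <cassert>

// 32-bit smi encoding assumed by the STATIC_ASSERTs in the stub:
// tag size 1, shift size 0, so a tagged smi holds value << 1.
int SmiTag(int value) { return value << 1; }

// Registers needed: start/end offsets for the whole match plus each group,
// i.e. (number_of_captures + 1) * 2, computed directly on the tagged value.
int CaptureRegisters(int tagged_capture_count) {
  return tagged_capture_count + 2;
}

int main() {
  assert(CaptureRegisters(SmiTag(0)) == 2);  // no groups: whole match only
  assert(CaptureRegisters(SmiTag(3)) == 8);  // three capture groups
  return 0;
}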
554 | 4524 |
555 | 4525 |
556 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { | 4526 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
557 UNIMPLEMENTED_MIPS(); | 4527 const int kMaxInlineLength = 100; |
| 4528 Label slowcase; |
| 4529 Label done; |
| 4530 __ lw(a1, MemOperand(sp, kPointerSize * 2)); |
| 4531 STATIC_ASSERT(kSmiTag == 0); |
| 4532 STATIC_ASSERT(kSmiTagSize == 1); |
| 4533 __ JumpIfNotSmi(a1, &slowcase); |
| 4534 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength))); |
| 4535 // Smi-tagging is equivalent to multiplying by 2. |
| 4536 // Allocate RegExpResult followed by FixedArray with size in a2. |
| 4537 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] |
| 4538 // Elements: [Map][Length][..elements..] |
| 4539 // Size of JSArray with two in-object properties and the header of a |
| 4540 // FixedArray. |
| 4541 int objects_size = |
| 4542 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; |
| 4543 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize); |
| 4544 __ Addu(a2, t1, Operand(objects_size)); |
| 4545 __ AllocateInNewSpace( |
| 4546 a2, // In: Size, in words. |
| 4547 v0, // Out: Start of allocation (tagged). |
| 4548 a3, // Scratch register. |
| 4549 t0, // Scratch register. |
| 4550 &slowcase, |
| 4551 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| 4552 // v0: Start of allocated area, object-tagged. |
| 4553 // a1: Number of elements in array, as smi. |
| 4554 // t1: Number of elements, untagged. |
| 4555 |
| 4556 // Set JSArray map to global.regexp_result_map(). |
| 4557 // Set empty properties FixedArray. |
| 4558 // Set elements to point to FixedArray allocated right after the JSArray. |
| 4559 // Interleave operations for better latency. |
| 4560 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX)); |
| 4561 __ Addu(a3, v0, Operand(JSRegExpResult::kSize)); |
| 4562 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array())); |
| 4563 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset)); |
| 4564 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 4565 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX)); |
| 4566 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
| 4567 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 4568 |
| 4569 // Set input, index and length fields from arguments. |
| 4570 __ lw(a1, MemOperand(sp, kPointerSize * 0)); |
| 4571 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset)); |
| 4572 __ lw(a1, MemOperand(sp, kPointerSize * 1)); |
| 4573 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset)); |
| 4574 __ lw(a1, MemOperand(sp, kPointerSize * 2)); |
| 4575 __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); |
| 4576 |
| 4577 // Fill out the elements FixedArray. |
| 4578 // v0: JSArray, tagged. |
| 4579 // a3: FixedArray, tagged. |
| 4580 // t1: Number of elements in array, untagged. |
| 4581 |
| 4582 // Set map. |
| 4583 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map())); |
| 4584 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset)); |
| 4585 // Set FixedArray length. |
| 4586 __ sll(t2, t1, kSmiTagSize); |
| 4587 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset)); |
| 4588 // Fill contents of fixed-array with the-hole. |
| 4589 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value())); |
| 4590 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4591 // Fill fixed array elements with hole. |
| 4592 // v0: JSArray, tagged. |
| 4593 // a2: the hole. |
| 4594 // a3: Start of elements in FixedArray. |
| 4595 // t1: Number of elements to fill. |
| 4596 Label loop; |
| 4597 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes. |
| 4598 __ addu(t1, t1, a3); // Point past last element to store. |
| 4599 __ bind(&loop); |
| 4600 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem. |
| 4601 __ sw(a2, MemOperand(a3)); |
| 4602 __ Branch(&loop, USE_DELAY_SLOT); |
| 4603 __ addiu(a3, a3, kPointerSize); // In branch delay slot. |
| 4604 |
| 4605 __ bind(&done); |
| 4606 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 4607 __ Ret(); |
| 4608 |
| 4609 __ bind(&slowcase); |
| 4610 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); |
558 } | 4611 } |
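The single AllocateInNewSpace call above sizes the JSRegExpResult and its trailing FixedArray together. A minimal sketch of that size computation in plain C++, using placeholder byte sizes where the stub uses JSRegExpResult::kSize and FixedArray::kHeaderSize:

#include <cstdio>

// Placeholder layout sizes for illustration only.
const int kPointerSize = 4;
const int kJSRegExpResultSize = 7 * kPointerSize;    // JSArray + index + input
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

// Words requested for a result array with num_elements match entries.
int ResultAllocationWords(int num_elements) {
  int objects_size =
      (kJSRegExpResultSize + kFixedArrayHeaderSize) / kPointerSize;
  return num_elements + objects_size;
}

int main() {
  printf("%d words\n", ResultAllocationWords(4));
}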
559 | 4612 |
560 | 4613 |
561 void CallFunctionStub::Generate(MacroAssembler* masm) { | 4614 void CallFunctionStub::Generate(MacroAssembler* masm) { |
562 UNIMPLEMENTED_MIPS(); | 4615 Label slow; |
| 4616 |
| 4617 // If the receiver might be a value (string, number or boolean) check |
| 4618 // for this and box it if it is. |
| 4619 if (ReceiverMightBeValue()) { |
| 4620 // Get the receiver from the stack. |
| 4621 // function, receiver [, arguments] |
| 4622 Label receiver_is_value, receiver_is_js_object; |
| 4623 __ lw(a1, MemOperand(sp, argc_ * kPointerSize)); |
| 4624 |
| 4625 // Check if receiver is a smi (which is a number value). |
| 4626 __ JumpIfSmi(a1, &receiver_is_value); |
| 4627 |
| 4628 // Check if the receiver is a valid JS object. |
| 4629 __ GetObjectType(a1, a2, a2); |
| 4630 __ Branch(&receiver_is_js_object, |
| 4631 ge, |
| 4632 a2, |
| 4633 Operand(FIRST_JS_OBJECT_TYPE)); |
| 4634 |
| 4635 // Call the runtime to box the value. |
| 4636 __ bind(&receiver_is_value); |
| 4637 // We need natives to execute this. |
| 4638 __ EnterInternalFrame(); |
| 4639 __ push(a1); |
| 4640 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
| 4641 __ LeaveInternalFrame(); |
| 4642 __ sw(v0, MemOperand(sp, argc_ * kPointerSize)); |
| 4643 |
| 4644 __ bind(&receiver_is_js_object); |
| 4645 } |
| 4646 |
| 4647 // Get the function to call from the stack. |
| 4648 // function, receiver [, arguments] |
| 4649 __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize)); |
| 4650 |
| 4651 // Check that the function is really a JavaScript function. |
| 4652 // a1: pushed function (to be verified) |
| 4653 __ JumpIfSmi(a1, &slow); |
| 4654 // Get the map of the function object. |
| 4655 __ GetObjectType(a1, a2, a2); |
| 4656 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); |
| 4657 |
| 4658 // Fast-case: Invoke the function now. |
| 4659 // a1: pushed function |
| 4660 ParameterCount actual(argc_); |
| 4661 __ InvokeFunction(a1, actual, JUMP_FUNCTION); |
| 4662 |
| 4663 // Slow-case: Non-function called. |
| 4664 __ bind(&slow); |
| 4665 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
| 4666 // of the original receiver from the call site). |
| 4667 __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); |
| 4668 __ li(a0, Operand(argc_)); // Setup the number of arguments. |
| 4669 __ mov(a2, zero_reg); |
| 4670 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); |
| 4671 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 4672 RelocInfo::CODE_TARGET); |
563 } | 4673 } |
564 | 4674 |
565 | 4675 |
566 // Unfortunately you have to run without snapshots to see most of these | 4676 // Unfortunately you have to run without snapshots to see most of these |
567 // names in the profile since most compare stubs end up in the snapshot. | 4677 // names in the profile since most compare stubs end up in the snapshot. |
568 const char* CompareStub::GetName() { | 4678 const char* CompareStub::GetName() { |
569 UNIMPLEMENTED_MIPS(); | 4679 ASSERT((lhs_.is(a0) && rhs_.is(a1)) || |
| 4680 (lhs_.is(a1) && rhs_.is(a0))); |
| 4681 |
| 4682 if (name_ != NULL) return name_; |
| 4683 const int kMaxNameLength = 100; |
| 4684 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |
| 4685 kMaxNameLength); |
| 4686 if (name_ == NULL) return "OOM"; |
| 4687 |
| 4688 const char* cc_name; |
| 4689 switch (cc_) { |
| 4690 case lt: cc_name = "LT"; break; |
| 4691 case gt: cc_name = "GT"; break; |
| 4692 case le: cc_name = "LE"; break; |
| 4693 case ge: cc_name = "GE"; break; |
| 4694 case eq: cc_name = "EQ"; break; |
| 4695 case ne: cc_name = "NE"; break; |
| 4696 default: cc_name = "UnknownCondition"; break; |
| 4697 } |
| 4698 |
| 4699 const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1"; |
| 4700 const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1"; |
| 4701 |
| 4702 const char* strict_name = ""; |
| 4703 if (strict_ && (cc_ == eq || cc_ == ne)) { |
| 4704 strict_name = "_STRICT"; |
| 4705 } |
| 4706 |
| 4707 const char* never_nan_nan_name = ""; |
| 4708 if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) { |
| 4709 never_nan_nan_name = "_NO_NAN"; |
| 4710 } |
| 4711 |
| 4712 const char* include_number_compare_name = ""; |
| 4713 if (!include_number_compare_) { |
| 4714 include_number_compare_name = "_NO_NUMBER"; |
| 4715 } |
| 4716 |
| 4717 const char* include_smi_compare_name = ""; |
| 4718 if (!include_smi_compare_) { |
| 4719 include_smi_compare_name = "_NO_SMI"; |
| 4720 } |
| 4721 |
| 4722 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |
| 4723 "CompareStub_%s%s%s%s%s%s", |
| 4724 cc_name, |
| 4725 lhs_name, |
| 4726 rhs_name, |
| 4727 strict_name, |
| 4728 never_nan_nan_name, |
| 4729 include_number_compare_name, |
| 4730 include_smi_compare_name); |
570 return name_; | 4731 return name_; |
571 } | 4732 } |
572 | 4733 |
573 | 4734 |
574 int CompareStub::MinorKey() { | 4735 int CompareStub::MinorKey() { |
575 UNIMPLEMENTED_MIPS(); | 4736 // Encode the two parameters in a unique 16 bit value. |
576 return 0; | 4737 ASSERT(static_cast<unsigned>(cc_) < (1 << 14)); |
| 4738 ASSERT((lhs_.is(a0) && rhs_.is(a1)) || |
| 4739 (lhs_.is(a1) && rhs_.is(a0))); |
| 4740 return ConditionField::encode(static_cast<unsigned>(cc_)) |
| 4741 | RegisterField::encode(lhs_.is(a0)) |
| 4742 | StrictField::encode(strict_) |
| 4743 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) |
| 4744 | IncludeSmiCompareField::encode(include_smi_compare_); |
577 } | 4745 } |
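MinorKey() packs the stub's parameters into disjoint bit fields so every distinct configuration maps to its own code object in the stub cache. A rough sketch of the same packing idea with invented field widths (V8 itself uses the BitField<> helpers named above):

#include <cassert>

// Hypothetical layout: condition in the high bits, one bit per flag below it.
unsigned EncodeCompareKey(unsigned condition, bool lhs_is_a0, bool strict,
                          bool never_nan_nan, bool include_smi_compare) {
  assert(condition < (1u << 12));
  return (condition << 4) |
         (lhs_is_a0 ? 1u << 3 : 0u) |
         (strict ? 1u << 2 : 0u) |
         (never_nan_nan ? 1u << 1 : 0u) |
         (include_smi_compare ? 1u << 0 : 0u);
}

int main() {
  // Swapping the register assignment must change the key.
  assert(EncodeCompareKey(1, true, false, false, true) !=
         EncodeCompareKey(1, false, false, false, true));
  return 0;
}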
578 | 4746 |
579 | 4747 |
580 // StringCharCodeAtGenerator. | 4748 // StringCharCodeAtGenerator. |
581 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 4749 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
582 UNIMPLEMENTED_MIPS(); | 4750 Label flat_string; |
| 4751 Label ascii_string; |
| 4752 Label got_char_code; |
| 4753 |
| 4754 ASSERT(!t0.is(scratch_)); |
| 4755 ASSERT(!t0.is(index_)); |
| 4756 ASSERT(!t0.is(result_)); |
| 4757 ASSERT(!t0.is(object_)); |
| 4758 |
| 4759 // If the receiver is a smi trigger the non-string case. |
| 4760 __ JumpIfSmi(object_, receiver_not_string_); |
| 4761 |
| 4762 // Fetch the instance type of the receiver into result register. |
| 4763 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 4764 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 4765 // If the receiver is not a string trigger the non-string case. |
| 4766 __ And(t0, result_, Operand(kIsNotStringMask)); |
| 4767 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg)); |
| 4768 |
| 4769 // If the index is non-smi trigger the non-smi case. |
| 4770 __ JumpIfNotSmi(index_, &index_not_smi_); |
| 4771 |
| 4772 // Put smi-tagged index into scratch register. |
| 4773 __ mov(scratch_, index_); |
| 4774 __ bind(&got_smi_index_); |
| 4775 |
| 4776 // Check for index out of range. |
| 4777 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset)); |
| 4778 __ Branch(index_out_of_range_, ls, t0, Operand(scratch_)); |
| 4779 |
| 4780 // We need special handling for non-flat strings. |
| 4781 STATIC_ASSERT(kSeqStringTag == 0); |
| 4782 __ And(t0, result_, Operand(kStringRepresentationMask)); |
| 4783 __ Branch(&flat_string, eq, t0, Operand(zero_reg)); |
| 4784 |
| 4785 // Handle non-flat strings. |
| 4786 __ And(t0, result_, Operand(kIsConsStringMask)); |
| 4787 __ Branch(&call_runtime_, eq, t0, Operand(zero_reg)); |
| 4788 |
| 4789 // ConsString. |
| 4790 // Check whether the right hand side is the empty string (i.e. if |
| 4791 // this is really a flat string in a cons string). If that is not |
| 4792 // the case we would rather go to the runtime system now to flatten |
| 4793 // the string. |
| 4794 __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset)); |
| 4795 __ LoadRoot(t0, Heap::kEmptyStringRootIndex); |
| 4796 __ Branch(&call_runtime_, ne, result_, Operand(t0)); |
| 4797 |
| 4798 // Get the first of the two strings and load its instance type. |
| 4799 __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); |
| 4800 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 4801 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 4802 // If the first cons component is also non-flat, then go to runtime. |
| 4803 STATIC_ASSERT(kSeqStringTag == 0); |
| 4804 |
| 4805 __ And(t0, result_, Operand(kStringRepresentationMask)); |
| 4806 __ Branch(&call_runtime_, ne, t0, Operand(zero_reg)); |
| 4807 |
| 4808 // Check for 1-byte or 2-byte string. |
| 4809 __ bind(&flat_string); |
| 4810 STATIC_ASSERT(kAsciiStringTag != 0); |
| 4811 __ And(t0, result_, Operand(kStringEncodingMask)); |
| 4812 __ Branch(&ascii_string, ne, t0, Operand(zero_reg)); |
| 4813 |
| 4814 // 2-byte string. |
| 4815 // Load the 2-byte character code into the result register. We can |
| 4816 // add without shifting since the smi tag size is the log2 of the |
| 4817 // number of bytes in a two-byte character. |
| 4818 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); |
| 4819 __ Addu(scratch_, object_, Operand(scratch_)); |
| 4820 __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); |
| 4821 __ Branch(&got_char_code); |
| 4822 |
| 4823 // ASCII string. |
| 4824 // Load the byte into the result register. |
| 4825 __ bind(&ascii_string); |
| 4826 |
| 4827 __ srl(t0, scratch_, kSmiTagSize); |
| 4828 __ Addu(scratch_, object_, t0); |
| 4829 |
| 4830 __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); |
| 4831 |
| 4832 __ bind(&got_char_code); |
| 4833 __ sll(result_, result_, kSmiTagSize); |
| 4834 __ bind(&exit_); |
583 } | 4835 } |
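For orientation, the fast path above amounts to the following control flow: bail out for non-string receivers and non-smi indices, bounds-check, unwrap a flat cons string once, then do a one- or two-byte load. A simplified C++ sketch with plain structs in place of heap objects (types and names are illustrative only):

#include <cstdint>

// Simplified stand-in for the string the generated code inspects.
struct SimpleString {
  bool is_two_byte;   // encoding bit from the instance type
  int length;
  const void* data;   // sequential character data
};

// Mirrors the fast path: bounds check, then a byte or halfword load.
// Cons strings and non-smi indices are handled by the slow path in the stub.
int CharCodeAtFast(const SimpleString& s, int index, bool* out_of_range) {
  if (index < 0 || index >= s.length) {
    *out_of_range = true;
    return -1;
  }
  *out_of_range = false;
  return s.is_two_byte
             ? static_cast<const uint16_t*>(s.data)[index]
             : static_cast<const uint8_t*>(s.data)[index];
}

int main() {
  const char data[] = "hi";
  SimpleString s = {false, 2, data};
  bool oor;
  return CharCodeAtFast(s, 1, &oor) == 'i' ? 0 : 1;
}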
584 | 4836 |
585 | 4837 |
586 void StringCharCodeAtGenerator::GenerateSlow( | 4838 void StringCharCodeAtGenerator::GenerateSlow( |
587 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 4839 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { |
588 UNIMPLEMENTED_MIPS(); | 4840 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); |
| 4841 |
| 4842 // Index is not a smi. |
| 4843 __ bind(&index_not_smi_); |
| 4844 // If index is a heap number, try converting it to an integer. |
| 4845 __ CheckMap(index_, |
| 4846 scratch_, |
| 4847 Heap::kHeapNumberMapRootIndex, |
| 4848 index_not_number_, |
| 4849 true); |
| 4850 call_helper.BeforeCall(masm); |
| 4851 // Consumed by runtime conversion function: |
| 4852 __ Push(object_, index_, index_); |
| 4853 if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
| 4854 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
| 4855 } else { |
| 4856 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
| 4857 // NumberToSmi discards numbers that are not exact integers. |
| 4858 __ CallRuntime(Runtime::kNumberToSmi, 1); |
| 4859 } |
| 4860 |
| 4861 // Save the conversion result before the pop instructions below |
| 4862 // have a chance to overwrite it. |
| 4863 |
| 4864 __ Move(scratch_, v0); |
| 4865 |
| 4866 __ pop(index_); |
| 4867 __ pop(object_); |
| 4868 // Reload the instance type. |
| 4869 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 4870 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 4871 call_helper.AfterCall(masm); |
| 4872 // If index is still not a smi, it must be out of range. |
| 4873 __ JumpIfNotSmi(scratch_, index_out_of_range_); |
| 4874 // Otherwise, return to the fast path. |
| 4875 __ Branch(&got_smi_index_); |
| 4876 |
| 4877 // Call runtime. We get here when the receiver is a string and the |
| 4878 // index is a number, but the code for getting the actual character |
| 4879 // is too complex (e.g., when the string needs to be flattened). |
| 4880 __ bind(&call_runtime_); |
| 4881 call_helper.BeforeCall(masm); |
| 4882 __ Push(object_, index_); |
| 4883 __ CallRuntime(Runtime::kStringCharCodeAt, 2); |
| 4884 |
| 4885 __ Move(result_, v0); |
| 4886 |
| 4887 call_helper.AfterCall(masm); |
| 4888 __ jmp(&exit_); |
| 4889 |
| 4890 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); |
589 } | 4891 } |
590 | 4892 |
591 | 4893 |
592 // ------------------------------------------------------------------------- | 4894 // ------------------------------------------------------------------------- |
593 // StringCharFromCodeGenerator | 4895 // StringCharFromCodeGenerator |
594 | 4896 |
595 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 4897 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
596 UNIMPLEMENTED_MIPS(); | 4898 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
| 4899 |
| 4900 ASSERT(!t0.is(result_)); |
| 4901 ASSERT(!t0.is(code_)); |
| 4902 |
| 4903 STATIC_ASSERT(kSmiTag == 0); |
| 4904 STATIC_ASSERT(kSmiShiftSize == 0); |
| 4905 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); |
| 4906 __ And(t0, |
| 4907 code_, |
| 4908 Operand(kSmiTagMask | |
| 4909 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); |
| 4910 __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); |
| 4911 |
| 4912 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
| 4913 // At this point code register contains smi tagged ASCII char code. |
| 4914 STATIC_ASSERT(kSmiTag == 0); |
| 4915 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize); |
| 4916 __ Addu(result_, result_, t0); |
| 4917 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
| 4918 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); |
| 4919 __ Branch(&slow_case_, eq, result_, Operand(t0)); |
| 4920 __ bind(&exit_); |
597 } | 4921 } |
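The fast path above is just a bounds check on the char code followed by a lookup in the single-character string cache, falling back to the slow case when the cache holds the undefined sentinel. A small sketch of the same shape, with a plain pointer array standing in for the root-list cache:

#include <cstddef>

const int kMaxAsciiCharCode = 127;

// Stand-in for the single-character string cache; null plays the role of the
// undefined sentinel checked by the stub.
const char* g_single_char_cache[kMaxAsciiCharCode + 1] = {nullptr};

// Returns the cached one-character string, or null to signal the slow case.
const char* CharFromCodeFast(int code) {
  if (code < 0 || code > kMaxAsciiCharCode) return nullptr;
  return g_single_char_cache[code];
}

int main() {
  static const char kA[] = "A";
  g_single_char_cache['A'] = kA;
  return CharFromCodeFast('A') == kA ? 0 : 1;
}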
598 | 4922 |
599 | 4923 |
600 void StringCharFromCodeGenerator::GenerateSlow( | 4924 void StringCharFromCodeGenerator::GenerateSlow( |
601 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 4925 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { |
602 UNIMPLEMENTED_MIPS(); | 4926 __ Abort("Unexpected fallthrough to CharFromCode slow case"); |
| 4927 |
| 4928 __ bind(&slow_case_); |
| 4929 call_helper.BeforeCall(masm); |
| 4930 __ push(code_); |
| 4931 __ CallRuntime(Runtime::kCharFromCode, 1); |
| 4932 __ Move(result_, v0); |
| 4933 |
| 4934 call_helper.AfterCall(masm); |
| 4935 __ Branch(&exit_); |
| 4936 |
| 4937 __ Abort("Unexpected fallthrough from CharFromCode slow case"); |
603 } | 4938 } |
604 | 4939 |
605 | 4940 |
606 // ------------------------------------------------------------------------- | 4941 // ------------------------------------------------------------------------- |
607 // StringCharAtGenerator | 4942 // StringCharAtGenerator |
608 | 4943 |
609 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { | 4944 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { |
610 UNIMPLEMENTED_MIPS(); | 4945 char_code_at_generator_.GenerateFast(masm); |
| 4946 char_from_code_generator_.GenerateFast(masm); |
611 } | 4947 } |
612 | 4948 |
613 | 4949 |
614 void StringCharAtGenerator::GenerateSlow( | 4950 void StringCharAtGenerator::GenerateSlow( |
615 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 4951 MacroAssembler* masm, const RuntimeCallHelper& call_helper) { |
616 UNIMPLEMENTED_MIPS(); | 4952 char_code_at_generator_.GenerateSlow(masm, call_helper); |
| 4953 char_from_code_generator_.GenerateSlow(masm, call_helper); |
617 } | 4954 } |
618 | 4955 |
619 | 4956 |
620 class StringHelper : public AllStatic { | 4957 class StringHelper : public AllStatic { |
621 public: | 4958 public: |
622 // Generate code for copying characters using a simple loop. This should only | 4959 // Generate code for copying characters using a simple loop. This should only |
623 // be used in places where the number of characters is small and the | 4960 // be used in places where the number of characters is small and the |
624 // additional setup and checking in GenerateCopyCharactersLong adds too much | 4961 // additional setup and checking in GenerateCopyCharactersLong adds too much |
625 // overhead. Copying of overlapping regions is not supported. | 4962 // overhead. Copying of overlapping regions is not supported. |
626 // Dest register ends at the position after the last character written. | 4963 // Dest register ends at the position after the last character written. |
(...skipping 53 matching lines...) |
680 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); | 5017 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); |
681 }; | 5018 }; |
682 | 5019 |
683 | 5020 |
684 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, | 5021 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, |
685 Register dest, | 5022 Register dest, |
686 Register src, | 5023 Register src, |
687 Register count, | 5024 Register count, |
688 Register scratch, | 5025 Register scratch, |
689 bool ascii) { | 5026 bool ascii) { |
690 UNIMPLEMENTED_MIPS(); | 5027 Label loop; |
| 5028 Label done; |
| 5029 // This loop just copies one character at a time, as it is only used for |
| 5030 // very short strings. |
| 5031 if (!ascii) { |
| 5032 __ addu(count, count, count); |
| 5033 } |
| 5034 __ Branch(&done, eq, count, Operand(zero_reg)); |
| 5035 __ addu(count, dest, count); // Count now points to the last dest byte. |
| 5036 |
| 5037 __ bind(&loop); |
| 5038 __ lbu(scratch, MemOperand(src)); |
| 5039 __ addiu(src, src, 1); |
| 5040 __ sb(scratch, MemOperand(dest)); |
| 5041 __ addiu(dest, dest, 1); |
| 5042 __ Branch(&loop, lt, dest, Operand(count)); |
| 5043 |
| 5044 __ bind(&done); |
691 } | 5045 } |
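The emitted loop is a plain byte copy; for two-byte strings the character count is doubled up front so the same loop moves twice as many bytes. Roughly the following C++, as a sketch of what the generated code does:

#include <cstring>

// count is in characters; two-byte strings copy 2 * count bytes.
void CopyCharacters(char* dest, const char* src, int count, bool ascii) {
  int bytes = ascii ? count : count * 2;
  for (int i = 0; i < bytes; i++) dest[i] = src[i];
}

int main() {
  char dst[8] = {0};
  CopyCharacters(dst, "hello", 5, true);
  return memcmp(dst, "hello", 5);
}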
692 | 5046 |
693 | 5047 |
694 enum CopyCharactersFlags { | 5048 enum CopyCharactersFlags { |
695 COPY_ASCII = 1, | 5049 COPY_ASCII = 1, |
696 DEST_ALWAYS_ALIGNED = 2 | 5050 DEST_ALWAYS_ALIGNED = 2 |
697 }; | 5051 }; |
698 | 5052 |
699 | 5053 |
700 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, | 5054 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, |
701 Register dest, | 5055 Register dest, |
702 Register src, | 5056 Register src, |
703 Register count, | 5057 Register count, |
704 Register scratch1, | 5058 Register scratch1, |
705 Register scratch2, | 5059 Register scratch2, |
706 Register scratch3, | 5060 Register scratch3, |
707 Register scratch4, | 5061 Register scratch4, |
708 Register scratch5, | 5062 Register scratch5, |
709 int flags) { | 5063 int flags) { |
710 UNIMPLEMENTED_MIPS(); | 5064 bool ascii = (flags & COPY_ASCII) != 0; |
| 5065 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; |
| 5066 |
| 5067 if (dest_always_aligned && FLAG_debug_code) { |
| 5068 // Check that destination is actually word aligned if the flag says |
| 5069 // that it is. |
| 5070 __ And(scratch4, dest, Operand(kPointerAlignmentMask)); |
| 5071 __ Check(eq, |
| 5072 "Destination of copy not aligned.", |
| 5073 scratch4, |
| 5074 Operand(zero_reg)); |
| 5075 } |
| 5076 |
| 5077 const int kReadAlignment = 4; |
| 5078 const int kReadAlignmentMask = kReadAlignment - 1; |
| 5079 // Ensure that reading an entire aligned word containing the last character |
| 5080 // of a string will not read outside the allocated area (because we pad up |
| 5081 // to kObjectAlignment). |
| 5082 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); |
| 5083 // Assumes word reads and writes are little endian. |
| 5084 // Nothing to do for zero characters. |
| 5085 Label done; |
| 5086 |
| 5087 if (!ascii) { |
| 5088 __ addu(count, count, count); |
| 5089 } |
| 5090 __ Branch(&done, eq, count, Operand(zero_reg)); |
| 5091 |
| 5092 Label byte_loop; |
| 5093 // Must copy at least eight bytes, otherwise just do it one byte at a time. |
| 5094 __ Subu(scratch1, count, Operand(8)); |
| 5095 __ Addu(count, dest, Operand(count)); |
| 5096 Register limit = count; // Read until src equals this. |
| 5097 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg)); |
| 5098 |
| 5099 if (!dest_always_aligned) { |
| 5100 // Align dest by byte copying. Copies between zero and three bytes. |
| 5101 __ And(scratch4, dest, Operand(kReadAlignmentMask)); |
| 5102 Label dest_aligned; |
| 5103 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg)); |
| 5104 Label aligned_loop; |
| 5105 __ bind(&aligned_loop); |
| 5106 __ lbu(scratch1, MemOperand(src)); |
| 5107 __ addiu(src, src, 1); |
| 5108 __ sb(scratch1, MemOperand(dest)); |
| 5109 __ addiu(dest, dest, 1); |
| 5110 __ addiu(scratch4, scratch4, 1); |
| 5111 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask)); |
| 5112 __ bind(&dest_aligned); |
| 5113 } |
| 5114 |
| 5115 Label simple_loop; |
| 5116 |
| 5117 __ And(scratch4, src, Operand(kReadAlignmentMask)); |
| 5118 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg)); |
| 5119 |
| 5120 // Loop for src/dst that are not aligned the same way. |
| 5121 // This loop uses lwl and lwr instructions. These instructions |
| 5122 // depend on the endianness, and the implementation assumes little-endian. |
| 5123 { |
| 5124 Label loop; |
| 5125 __ bind(&loop); |
| 5126 __ lwr(scratch1, MemOperand(src)); |
| 5127 __ Addu(src, src, Operand(kReadAlignment)); |
| 5128 __ lwl(scratch1, MemOperand(src, -1)); |
| 5129 __ sw(scratch1, MemOperand(dest)); |
| 5130 __ Addu(dest, dest, Operand(kReadAlignment)); |
| 5131 __ Subu(scratch2, limit, dest); |
| 5132 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment)); |
| 5133 } |
| 5134 |
| 5135 __ Branch(&byte_loop); |
| 5136 |
| 5137 // Simple loop. |
| 5138 // Copy words from src to dest, until less than four bytes left. |
| 5139 // Both src and dest are word aligned. |
| 5140 __ bind(&simple_loop); |
| 5141 { |
| 5142 Label loop; |
| 5143 __ bind(&loop); |
| 5144 __ lw(scratch1, MemOperand(src)); |
| 5145 __ Addu(src, src, Operand(kReadAlignment)); |
| 5146 __ sw(scratch1, MemOperand(dest)); |
| 5147 __ Addu(dest, dest, Operand(kReadAlignment)); |
| 5148 __ Subu(scratch2, limit, dest); |
| 5149 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment)); |
| 5150 } |
| 5151 |
| 5152 // Copy bytes from src to dest until dest hits limit. |
| 5153 __ bind(&byte_loop); |
| 5154 // Test if dest has already reached the limit. |
| 5155 __ Branch(&done, ge, dest, Operand(limit)); |
| 5156 __ lbu(scratch1, MemOperand(src)); |
| 5157 __ addiu(src, src, 1); |
| 5158 __ sb(scratch1, MemOperand(dest)); |
| 5159 __ addiu(dest, dest, 1); |
| 5160 __ Branch(&byte_loop); |
| 5161 |
| 5162 __ bind(&done); |
711 } | 5163 } |
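The long copy above is staged: byte-copy until dest is word aligned, then move whole words (with an lwl/lwr pair when src stays misaligned), then finish the tail byte by byte. A portable C++ sketch of the same staging, where memcpy of a word stands in for the unaligned loads:

#include <cstdint>
#include <cstring>

void CopyLong(char* dest, const char* src, size_t bytes) {
  // Stage 1: align the destination.
  while (bytes > 0 && (reinterpret_cast<uintptr_t>(dest) & 3) != 0) {
    *dest++ = *src++;
    --bytes;
  }
  // Stage 2: whole words; memcpy tolerates a misaligned source, much like
  // the lwl/lwr pair in the generated code.
  while (bytes >= 4) {
    uint32_t word;
    memcpy(&word, src, 4);
    memcpy(dest, &word, 4);
    src += 4;
    dest += 4;
    bytes -= 4;
  }
  // Stage 3: tail bytes.
  while (bytes > 0) {
    *dest++ = *src++;
    --bytes;
  }
}

int main() {
  const char src[] = "unaligned source data";
  char dst[32] = {0};
  CopyLong(dst + 1, src + 2, 10);  // deliberately misaligned on both sides
  return memcmp(dst + 1, src + 2, 10);
}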
712 | 5164 |
713 | 5165 |
714 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, | 5166 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, |
715 Register c1, | 5167 Register c1, |
716 Register c2, | 5168 Register c2, |
717 Register scratch1, | 5169 Register scratch1, |
718 Register scratch2, | 5170 Register scratch2, |
719 Register scratch3, | 5171 Register scratch3, |
720 Register scratch4, | 5172 Register scratch4, |
721 Register scratch5, | 5173 Register scratch5, |
722 Label* not_found) { | 5174 Label* not_found) { |
723 UNIMPLEMENTED_MIPS(); | 5175 // Register scratch3 is the general scratch register in this function. |
| 5176 Register scratch = scratch3; |
| 5177 |
| 5178 // Make sure that both characters are not digits, as such strings have a |
| 5179 // different hash algorithm. Don't try to look for these in the symbol table. |
| 5180 Label not_array_index; |
| 5181 __ Subu(scratch, c1, Operand(static_cast<int>('0'))); |
| 5182 __ Branch(¬_array_index, |
| 5183 Ugreater, |
| 5184 scratch, |
| 5185 Operand(static_cast<int>('9' - '0'))); |
| 5186 __ Subu(scratch, c2, Operand(static_cast<int>('0'))); |
| 5187 |
| 5188 // If the check failed, combine both characters into a single halfword. |
| 5189 // This is required by the contract of the method: code at the |
| 5190 // not_found branch expects this combination in c1 register. |
| 5191 Label tmp; |
| 5192 __ sll(scratch1, c2, kBitsPerByte); |
| 5193 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0'))); |
| 5194 __ Or(c1, c1, scratch1); |
| 5195 __ bind(&tmp); |
| 5196 __ Branch(not_found, |
| 5197 Uless_equal, |
| 5198 scratch, |
| 5199 Operand(static_cast<int>('9' - '0'))); |
| 5200 |
| 5201 __ bind(¬_array_index); |
| 5202 // Calculate the two character string hash. |
| 5203 Register hash = scratch1; |
| 5204 StringHelper::GenerateHashInit(masm, hash, c1); |
| 5205 StringHelper::GenerateHashAddCharacter(masm, hash, c2); |
| 5206 StringHelper::GenerateHashGetHash(masm, hash); |
| 5207 |
| 5208 // Collect the two characters in a register. |
| 5209 Register chars = c1; |
| 5210 __ sll(scratch, c2, kBitsPerByte); |
| 5211 __ Or(chars, chars, scratch); |
| 5212 |
| 5213 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
| 5214 // hash: hash of two character string. |
| 5215 |
| 5216 // Load symbol table. |
| 5217 // Load address of first element of the symbol table. |
| 5218 Register symbol_table = c2; |
| 5219 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); |
| 5220 |
| 5221 Register undefined = scratch4; |
| 5222 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 5223 |
| 5224 // Calculate capacity mask from the symbol table capacity. |
| 5225 Register mask = scratch2; |
| 5226 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); |
| 5227 __ sra(mask, mask, 1); |
| 5228 __ Addu(mask, mask, -1); |
| 5229 |
| 5230 // Calculate untagged address of the first element of the symbol table. |
| 5231 Register first_symbol_table_element = symbol_table; |
| 5232 __ Addu(first_symbol_table_element, symbol_table, |
| 5233 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); |
| 5234 |
| 5235 // Registers. |
| 5236 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
| 5237 // hash: hash of two character string |
| 5238 // mask: capacity mask |
| 5239 // first_symbol_table_element: address of the first element of |
| 5240 // the symbol table |
| 5241 // undefined: the undefined object |
| 5242 // scratch: - |
| 5243 |
| 5244 // Perform a number of probes in the symbol table. |
| 5245 static const int kProbes = 4; |
| 5246 Label found_in_symbol_table; |
| 5247 Label next_probe[kProbes]; |
| 5248 Register candidate = scratch5; // Scratch register contains candidate. |
| 5249 for (int i = 0; i < kProbes; i++) { |
| 5250 // Calculate entry in symbol table. |
| 5251 if (i > 0) { |
| 5252 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); |
| 5253 } else { |
| 5254 __ mov(candidate, hash); |
| 5255 } |
| 5256 |
| 5257 __ And(candidate, candidate, Operand(mask)); |
| 5258 |
| 5259 // Load the entry from the symbol table. |
| 5260 STATIC_ASSERT(SymbolTable::kEntrySize == 1); |
| 5261 __ sll(scratch, candidate, kPointerSizeLog2); |
| 5262 __ Addu(scratch, scratch, first_symbol_table_element); |
| 5263 __ lw(candidate, MemOperand(scratch)); |
| 5264 |
| 5265 // If entry is undefined no string with this hash can be found. |
| 5266 Label is_string; |
| 5267 __ GetObjectType(candidate, scratch, scratch); |
| 5268 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE)); |
| 5269 |
| 5270 __ Branch(not_found, eq, undefined, Operand(candidate)); |
| 5271 // Must be null (deleted entry). |
| 5272 if (FLAG_debug_code) { |
| 5273 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 5274 __ Assert(eq, "oddball in symbol table is not undefined or null", |
| 5275 scratch, Operand(candidate)); |
| 5276 } |
| 5277 __ jmp(&next_probe[i]); |
| 5278 |
| 5279 __ bind(&is_string); |
| 5280 |
| 5281 // Check that the candidate is a non-external ASCII string. The instance |
| 5282 // type is still in the scratch register from the GetObjectType |
| 5283 // operation. |
| 5284 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]); |
| 5285 |
| 5286 // If length is not 2 the string is not a candidate. |
| 5287 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset)); |
| 5288 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2))); |
| 5289 |
| 5290 // Check if the two characters match. |
| 5291 // Assumes that word load is little endian. |
| 5292 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); |
| 5293 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch)); |
| 5294 __ bind(&next_probe[i]); |
| 5295 } |
| 5296 |
| 5297 // No matching 2 character string found by probing. |
| 5298 __ jmp(not_found); |
| 5299 |
| 5300 // Scratch register contains result when we fall through to here. |
| 5301 Register result = candidate; |
| 5302 __ bind(&found_in_symbol_table); |
| 5303 __ mov(v0, result); |
724 } | 5304 } |
725 | 5305 |
726 | 5306 |
727 void StringHelper::GenerateHashInit(MacroAssembler* masm, | 5307 void StringHelper::GenerateHashInit(MacroAssembler* masm, |
728 Register hash, | 5308 Register hash, |
729 Register character) { | 5309 Register character) { |
730 UNIMPLEMENTED_MIPS(); | 5310 // hash = character + (character << 10); |
| 5311 __ sll(hash, character, 10); |
| 5312 __ addu(hash, hash, character); |
| 5313 // hash ^= hash >> 6; |
| 5314 __ sra(at, hash, 6); |
| 5315 __ xor_(hash, hash, at); |
731 } | 5316 } |
732 | 5317 |
733 | 5318 |
734 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | 5319 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, |
735 Register hash, | 5320 Register hash, |
736 Register character) { | 5321 Register character) { |
737 UNIMPLEMENTED_MIPS(); | 5322 // hash += character; |
| 5323 __ addu(hash, hash, character); |
| 5324 // hash += hash << 10; |
| 5325 __ sll(at, hash, 10); |
| 5326 __ addu(hash, hash, at); |
| 5327 // hash ^= hash >> 6; |
| 5328 __ sra(at, hash, 6); |
| 5329 __ xor_(hash, hash, at); |
738 } | 5330 } |
739 | 5331 |
740 | 5332 |
741 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 5333 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
742 Register hash) { | 5334 Register hash) { |
743 UNIMPLEMENTED_MIPS(); | 5335 // hash += hash << 3; |
| 5336 __ sll(at, hash, 3); |
| 5337 __ addu(hash, hash, at); |
| 5338 // hash ^= hash >> 11; |
| 5339 __ sra(at, hash, 11); |
| 5340 __ xor_(hash, hash, at); |
| 5341 // hash += hash << 15; |
| 5342 __ sll(at, hash, 15); |
| 5343 __ addu(hash, hash, at); |
| 5344 |
| 5345 // if (hash == 0) hash = 27; |
| 5346 __ ori(at, zero_reg, 27); |
| 5347 __ movz(hash, at, hash); |
744 } | 5348 } |
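Taken together, the three helpers above emit the running string hash: an add-and-shift step per character, a finalization pass, and 27 substituted when the result would otherwise be zero (the movz at the end). A C++ rendering of the same sequence, using unsigned arithmetic where the stub uses sra, so the two agree whenever the intermediate hash keeps its top bit clear:

#include <cstdint>
#include <cstdio>

uint32_t HashInit(uint32_t c) {
  uint32_t hash = c + (c << 10);
  return hash ^ (hash >> 6);
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
  hash += c;
  hash += hash << 10;
  return hash ^ (hash >> 6);
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;  // never hand out a zero hash
}

// Hash of a non-empty ASCII string, character by character.
uint32_t StringHash(const char* s) {
  uint32_t hash = HashInit(static_cast<uint8_t>(*s++));
  while (*s) hash = HashAddCharacter(hash, static_cast<uint8_t>(*s++));
  return HashGetHash(hash);
}

int main() {
  printf("%u\n", StringHash("ab"));
}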
745 | 5349 |
746 | 5350 |
747 void SubStringStub::Generate(MacroAssembler* masm) { | 5351 void SubStringStub::Generate(MacroAssembler* masm) { |
748 UNIMPLEMENTED_MIPS(); | 5352 Label sub_string_runtime; |
749 } | 5353 // Stack frame on entry. |
750 | 5354 // ra: return address |
751 | 5355 // sp[0]: to |
| 5356 // sp[4]: from |
| 5357 // sp[8]: string |
| 5358 |
| 5359 // This stub is called from the native-call %_SubString(...), so |
| 5360 // nothing can be assumed about the arguments. It is tested that: |
| 5361 // "string" is a sequential string, |
| 5362 // both "from" and "to" are smis, and |
| 5363 // 0 <= from <= to <= string.length. |
| 5364 // If any of these assumptions fail, we call the runtime system. |
| 5365 |
| 5366 static const int kToOffset = 0 * kPointerSize; |
| 5367 static const int kFromOffset = 1 * kPointerSize; |
| 5368 static const int kStringOffset = 2 * kPointerSize; |
| 5369 |
| 5370 Register to = t2; |
| 5371 Register from = t3; |
| 5372 |
| 5373 // Check bounds and smi-ness. |
| 5374 __ lw(to, MemOperand(sp, kToOffset)); |
| 5375 __ lw(from, MemOperand(sp, kFromOffset)); |
| 5376 STATIC_ASSERT(kFromOffset == kToOffset + 4); |
| 5377 STATIC_ASSERT(kSmiTag == 0); |
| 5378 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| 5379 |
| 5380 __ JumpIfNotSmi(from, &sub_string_runtime); |
| 5381 __ JumpIfNotSmi(to, &sub_string_runtime); |
| 5382 |
| 5383 __ sra(a3, from, kSmiTagSize); // Remove smi tag. |
| 5384 __ sra(t5, to, kSmiTagSize); // Remove smi tag. |
| 5385 |
| 5386 // a3: from index (untagged smi) |
| 5387 // t5: to index (untagged smi) |
| 5388 |
| 5389 __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0. |
| 5390 |
| 5391 __ subu(a2, t5, a3); |
| 5392 __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to. |
| 5393 |
| 5394 // Special handling of sub-strings of length 1 and 2. One character strings |
| 5395 // are handled in the runtime system (looked up in the single character |
| 5396 // cache). Two character strings are looked for in the symbol cache. |
| 5397 __ Branch(&sub_string_runtime, lt, a2, Operand(2)); |
| 5398 |
| 5399 // Both to and from are smis. |
| 5400 |
| 5401 // a2: result string length |
| 5402 // a3: from index (untagged smi) |
| 5403 // t2: (a.k.a. to): to (smi) |
| 5404 // t3: (a.k.a. from): from offset (smi) |
| 5405 // t5: to index (untagged smi) |
| 5406 |
| 5407 // Make sure first argument is a sequential (or flat) string. |
| 5408 __ lw(t1, MemOperand(sp, kStringOffset)); |
| 5409 __ Branch(&sub_string_runtime, eq, t1, Operand(kSmiTagMask)); |
| 5410 |
| 5411 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset)); |
| 5412 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); |
| 5413 __ And(t4, a1, Operand(kIsNotStringMask)); |
| 5414 |
| 5415 __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg)); |
| 5416 |
| 5417 // a1: instance type |
| 5418 // a2: result string length |
| 5419 // a3: from index (untagged smi) |
| 5420 // t1: string |
| 5421 // t2: (a.k.a. to): to (smi) |
| 5422 // t3: (a.k.a. from): from offset (smi) |
| 5423 // t5: to index (untagged smi) |
| 5424 |
| 5425 Label seq_string; |
| 5426 __ And(t0, a1, Operand(kStringRepresentationMask)); |
| 5427 STATIC_ASSERT(kSeqStringTag < kConsStringTag); |
| 5428 STATIC_ASSERT(kConsStringTag < kExternalStringTag); |
| 5429 |
| 5430 // External strings go to runtime. |
| 5431 __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag)); |
| 5432 |
| 5433 // Sequential strings are handled directly. |
| 5434 __ Branch(&seq_string, lt, t0, Operand(kConsStringTag)); |
| 5435 |
| 5436 // Cons string. Try to recurse (once) on the first substring. |
| 5437 // (This adds a little more generality than necessary to handle flattened |
| 5438 // cons strings, but not much). |
| 5439 __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset)); |
| 5440 __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset)); |
| 5441 __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset)); |
| 5442 STATIC_ASSERT(kSeqStringTag == 0); |
| 5443 // Cons and External strings go to runtime. |
| 5444 __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask)); |
| 5445 |
| 5446 // Definitely a sequential string. |
| 5447 __ bind(&seq_string); |
| 5448 |
| 5449 // a1: instance type |
| 5450 // a2: result string length |
| 5451 // a3: from index (untagged smi) |
| 5452 // t1: string |
| 5453 // t2: (a.k.a. to): to (smi) |
| 5454 // t3: (a.k.a. from): from offset (smi) |
| 5455 // t5: to index (untagged smi) |
| 5456 |
| 5457 __ lw(t0, FieldMemOperand(t1, String::kLengthOffset)); |
| 5458 __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length. |
| 5459 to = no_reg; |
| 5460 |
| 5461 // a1: instance type |
| 5462 // a2: result string length |
| 5463 // a3: from index (untagged smi) |
| 5464 // t1: string |
| 5465 // t3: (a.k.a. from): from offset (smi) |
| 5466 // t5: to index (untagged smi) |
| 5467 |
| 5468 // Check for flat ASCII string. |
| 5469 Label non_ascii_flat; |
| 5470 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 5471 |
| 5472 __ And(t4, a1, Operand(kStringEncodingMask)); |
| 5473 __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg)); |
| 5474 |
| 5475 Label result_longer_than_two; |
| 5476 __ Branch(&result_longer_than_two, gt, a2, Operand(2)); |
| 5477 |
| 5478 // Sub string of length 2 requested. |
| 5479 // Get the two characters forming the sub string. |
| 5480 __ Addu(t1, t1, Operand(a3)); |
| 5481 __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize)); |
| 5482 __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1)); |
| 5483 |
| 5484 // Try to look up the two character string in the symbol table. |
| 5485 Label make_two_character_string; |
| 5486 StringHelper::GenerateTwoCharacterSymbolTableProbe( |
| 5487 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string); |
| 5488 Counters* counters = masm->isolate()->counters(); |
| 5489 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0); |
| 5490 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 5491 __ Ret(); |
| 5492 |
| 5493 |
| 5494 // a2: result string length. |
| 5495 // a3: two characters combined into halfword in little endian byte order. |
| 5496 __ bind(&make_two_character_string); |
| 5497 __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime); |
| 5498 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize)); |
| 5499 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0); |
| 5500 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 5501 __ Ret(); |
| 5502 |
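The length-2 fast path above works because GenerateTwoCharacterSymbolTableProbe leaves the two ASCII characters packed into one halfword in a3, so a single sh store fills the new string body. A sketch of the little-endian packing in C++ (the helper name is made up for illustration):

    #include <cstdint>

    // Pack two ASCII characters so that a little-endian halfword store writes
    // c1 at byte offset 0 and c2 at byte offset 1 of the string body.
    inline uint16_t PackTwoAsciiChars(uint8_t c1, uint8_t c2) {
      return static_cast<uint16_t>(c1 | (c2 << 8));
    }
    // e.g. PackTwoAsciiChars('a', 'b') stored as a halfword yields the bytes "ab".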
| 5503 __ bind(&result_longer_than_two); |
| 5504 |
| 5505 // Allocate the result. |
| 5506 __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime); |
| 5507 |
| 5508 // v0: result string. |
| 5509 // a2: result string length. |
| 5510 // a3: from index (untagged smi) |
| 5511 // t1: string. |
| 5512 // t3: (a.k.a. from): from offset (smi) |
| 5513 // Locate first character of result. |
| 5514 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 5515 // Locate 'from' character of string. |
| 5516 __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 5517 __ Addu(t1, t1, Operand(a3)); |
| 5518 |
| 5519 // v0: result string. |
| 5520 // a1: first character of result string. |
| 5521 // a2: result string length. |
| 5522 // t1: first character of sub string to copy. |
| 5523 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 5524 StringHelper::GenerateCopyCharactersLong( |
| 5525 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED); |
| 5526 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0); |
| 5527 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 5528 __ Ret(); |
| 5529 |
| 5530 __ bind(&non_ascii_flat); |
| 5531 // a2: result string length. |
| 5532 // t1: string. |
| 5533 // t3: (a.k.a. from): from offset (smi) |
| 5534 // The string is a flat two-byte string. |
| 5535 |
| 5536 // Allocate the result. |
| 5537 __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime); |
| 5538 |
| 5539 // v0: result string. |
| 5540 // a2: result string length. |
| 5541 // t1: string. |
| 5542 // Locate first character of result. |
| 5543 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| 5544 // Locate 'from' character of string. |
| 5545 __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| 5546 // As "from" is a smi it is already 2 times the index, which matches the |
| 5547 // size of a two-byte character. |
| 5548 __ Addu(t1, t1, Operand(from)); |
| 5549 from = no_reg; |
| 5550 |
| 5551 // v0: result string. |
| 5552 // a1: first character of result. |
| 5553 // a2: result length. |
| 5554 // t1: first character of string to copy. |
| 5555 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 5556 StringHelper::GenerateCopyCharactersLong( |
| 5557 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED); |
| 5558 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0); |
| 5559 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 5560 __ Ret(); |
| 5561 |
| 5562 // Just jump to runtime to create the sub string. |
| 5563 __ bind(&sub_string_runtime); |
| 5564 __ TailCallRuntime(Runtime::kSubString, 3, 1); |
| 5565 } |
| 5566 |
| 5567 |
| 5568 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, |
| 5569 Register left, |
| 5570 Register right, |
| 5571 Register scratch1, |
| 5572 Register scratch2, |
| 5573 Register scratch3) { |
| 5574 Register length = scratch1; |
| 5575 |
| 5576 // Compare lengths. |
| 5577 Label strings_not_equal, check_zero_length; |
| 5578 __ lw(length, FieldMemOperand(left, String::kLengthOffset)); |
| 5579 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 5580 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); |
| 5581 __ bind(&strings_not_equal); |
| 5582 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); |
| 5583 __ Ret(); |
| 5584 |
| 5585 // Check if the length is zero. |
| 5586 Label compare_chars; |
| 5587 __ bind(&check_zero_length); |
| 5588 STATIC_ASSERT(kSmiTag == 0); |
| 5589 __ Branch(&compare_chars, ne, length, Operand(zero_reg)); |
| 5590 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 5591 __ Ret(); |
| 5592 |
| 5593 // Compare characters. |
| 5594 __ bind(&compare_chars); |
| 5595 |
| 5596 GenerateAsciiCharsCompareLoop(masm, |
| 5597 left, right, length, scratch2, scratch3, v0, |
| 5598 &strings_not_equal); |
| 5599 |
| 5600 // Characters are equal. |
| 5601 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 5602 __ Ret(); |
| 5603 } |
| 5604 |
| 5605 |
752 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 5606 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
753 Register left, | 5607 Register left, |
754 Register right, | 5608 Register right, |
755 Register scratch1, | 5609 Register scratch1, |
756 Register scratch2, | 5610 Register scratch2, |
757 Register scratch3, | 5611 Register scratch3, |
758 Register scratch4) { | 5612 Register scratch4) { |
759 UNIMPLEMENTED_MIPS(); | 5613 Label result_not_equal, compare_lengths; |
| 5614 // Find minimum length and length difference. |
| 5615 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
| 5616 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 5617 __ Subu(scratch3, scratch1, Operand(scratch2)); |
| 5618 Register length_delta = scratch3; |
| 5619 __ slt(scratch4, scratch2, scratch1); |
| 5620 __ movn(scratch1, scratch2, scratch4); |
| 5621 Register min_length = scratch1; |
| 5622 STATIC_ASSERT(kSmiTag == 0); |
| 5623 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); |
| 5624 |
| 5625 // Compare loop. |
| 5626 GenerateAsciiCharsCompareLoop(masm, |
| 5627 left, right, min_length, scratch2, scratch4, v0, |
| 5628 &result_not_equal); |
| 5629 |
| 5630 // Compare lengths - strings up to min-length are equal. |
| 5631 __ bind(&compare_lengths); |
| 5632 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
| 5633 // Use length_delta as result if it's zero. |
| 5634 __ mov(scratch2, length_delta); |
| 5635 __ mov(scratch4, zero_reg); |
| 5636 __ mov(v0, zero_reg); |
| 5637 |
| 5638 __ bind(&result_not_equal); |
| 5639 // Conditionally update the result based either on length_delta or |
| 5640 // the last comparison performed in the loop above. |
| 5641 Label ret; |
| 5642 __ Branch(&ret, eq, scratch2, Operand(scratch4)); |
| 5643 __ li(v0, Operand(Smi::FromInt(GREATER))); |
| 5644 __ Branch(&ret, gt, scratch2, Operand(scratch4)); |
| 5645 __ li(v0, Operand(Smi::FromInt(LESS))); |
| 5646 __ bind(&ret); |
| 5647 __ Ret(); |
| 5648 } |
| 5649 |
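The stub above preloads v0 with 0 (EQUAL) and only overwrites it with GREATER or LESS when the character loop or the length delta finds a difference. The same decision logic in plain C++ (a sketch, with int results standing in for the Smi constants):

    // Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER), mirroring the stub's results.
    int CompareFlatAsciiStrings(const char* left, int left_len,
                                const char* right, int right_len) {
      int min_length = left_len < right_len ? left_len : right_len;
      for (int i = 0; i < min_length; i++) {
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      // All characters up to min_length match, so the length delta decides.
      if (left_len == right_len) return 0;
      return left_len < right_len ? -1 : 1;
    }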
| 5650 |
| 5651 void StringCompareStub::GenerateAsciiCharsCompareLoop( |
| 5652 MacroAssembler* masm, |
| 5653 Register left, |
| 5654 Register right, |
| 5655 Register length, |
| 5656 Register scratch1, |
| 5657 Register scratch2, |
| 5658 Register scratch3, |
| 5659 Label* chars_not_equal) { |
| 5660 // Change index to run from -length to -1 by adding length to string |
| 5661 // start. This means that the loop ends when index reaches zero, which |
| 5662 // doesn't need an additional compare. |
| 5663 __ SmiUntag(length); |
| 5664 __ Addu(scratch1, length, |
| 5665 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 5666 __ Addu(left, left, Operand(scratch1)); |
| 5667 __ Addu(right, right, Operand(scratch1)); |
| 5668 __ Subu(length, zero_reg, length); |
| 5669 Register index = length; // index = -length; |
| 5670 |
| 5671 |
| 5672 // Compare loop. |
| 5673 Label loop; |
| 5674 __ bind(&loop); |
| 5675 __ Addu(scratch3, left, index); |
| 5676 __ lbu(scratch1, MemOperand(scratch3)); |
| 5677 __ Addu(scratch3, right, index); |
| 5678 __ lbu(scratch2, MemOperand(scratch3)); |
| 5679 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2)); |
| 5680 __ Addu(index, index, 1); |
| 5681 __ Branch(&loop, ne, index, Operand(zero_reg)); |
760 } | 5682 } |
761 | 5683 |
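The compare loop avoids an explicit end-of-data test by biasing both string pointers past their last character and letting the index run from -length up to zero. The same idea in C++ (illustrative only):

    // True if the first `length` bytes of `left` and `right` are equal.
    bool AsciiCharsEqual(const unsigned char* left, const unsigned char* right,
                         int length) {
      left += length;    // Point just past the data, like the Addu above.
      right += length;
      for (int index = -length; index != 0; index++) {  // Ends exactly at zero.
        if (left[index] != right[index]) return false;
      }
      return true;
    }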
762 | 5684 |
763 void StringCompareStub::Generate(MacroAssembler* masm) { | 5685 void StringCompareStub::Generate(MacroAssembler* masm) { |
764 UNIMPLEMENTED_MIPS(); | 5686 Label runtime; |
| 5687 |
| 5688 Counters* counters = masm->isolate()->counters(); |
| 5689 |
| 5690 // Stack frame on entry. |
| 5691 // sp[0]: right string |
| 5692 // sp[4]: left string |
| 5693 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left. |
| 5694 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right. |
| 5695 |
| 5696 Label not_same; |
| 5697 __ Branch(¬_same, ne, a0, Operand(a1)); |
| 5698 STATIC_ASSERT(EQUAL == 0); |
| 5699 STATIC_ASSERT(kSmiTag == 0); |
| 5700 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 5701 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2); |
| 5702 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 5703 __ Ret(); |
| 5704 |
| 5705 __ bind(¬_same); |
| 5706 |
| 5707 // Check that both objects are sequential ASCII strings. |
| 5708 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime); |
| 5709 |
| 5710 // Compare flat ASCII strings natively. Remove arguments from stack first. |
| 5711 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); |
| 5712 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 5713 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); |
| 5714 |
| 5715 __ bind(&runtime); |
| 5716 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
765 } | 5717 } |
766 | 5718 |
767 | 5719 |
768 void StringAddStub::Generate(MacroAssembler* masm) { | 5720 void StringAddStub::Generate(MacroAssembler* masm) { |
769 UNIMPLEMENTED_MIPS(); | 5721 Label string_add_runtime, call_builtin; |
| 5722 Builtins::JavaScript builtin_id = Builtins::ADD; |
| 5723 |
| 5724 Counters* counters = masm->isolate()->counters(); |
| 5725 |
| 5726 // Stack on entry: |
| 5727 // sp[0]: second argument (right). |
| 5728 // sp[4]: first argument (left). |
| 5729 |
| 5730 // Load the two arguments. |
| 5731 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument. |
| 5732 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument. |
| 5733 |
| 5734 // Make sure that both arguments are strings if not known in advance. |
| 5735 if (flags_ == NO_STRING_ADD_FLAGS) { |
| 5736 __ JumpIfEitherSmi(a0, a1, &string_add_runtime); |
| 5737 // Load instance types. |
| 5738 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); |
| 5739 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); |
| 5740 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); |
| 5741 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset)); |
| 5742 STATIC_ASSERT(kStringTag == 0); |
| 5743 // If either is not a string, go to runtime. |
| 5744 __ Or(t4, t0, Operand(t1)); |
| 5745 __ And(t4, t4, Operand(kIsNotStringMask)); |
| 5746 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg)); |
| 5747 } else { |
| 5748 // Here at least one of the arguments is definitely a string. |
| 5749 // We convert the one that is not known to be a string. |
| 5750 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { |
| 5751 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); |
| 5752 GenerateConvertArgument( |
| 5753 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin); |
| 5754 builtin_id = Builtins::STRING_ADD_RIGHT; |
| 5755 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { |
| 5756 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); |
| 5757 GenerateConvertArgument( |
| 5758 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin); |
| 5759 builtin_id = Builtins::STRING_ADD_LEFT; |
| 5760 } |
| 5761 } |
| 5762 |
| 5763 // Both arguments are strings. |
| 5764 // a0: first string |
| 5765 // a1: second string |
| 5766 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5767 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5768 { |
| 5769 Label strings_not_empty; |
| 5770 // Check if either of the strings is empty. In that case return the other. |
| 5771 // These tests use a zero-length check on the string length, which is a Smi. |
| 5772 // Assert that Smi::FromInt(0) is really 0. |
| 5773 STATIC_ASSERT(kSmiTag == 0); |
| 5774 ASSERT(Smi::FromInt(0) == 0); |
| 5775 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset)); |
| 5776 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset)); |
| 5777 __ mov(v0, a0); // Assume we'll return first string (from a0). |
| 5778 __ movz(v0, a1, a2); // If first is empty, return second (from a1). |
| 5779 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1. |
| 5780 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1. |
| 5781 __ and_(t4, t4, t5); // Branch if both strings were non-empty. |
| 5782 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg)); |
| 5783 |
| 5784 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); |
| 5785 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 5786 __ Ret(); |
| 5787 |
| 5788 __ bind(&strings_not_empty); |
| 5789 } |
| 5790 |
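The movz in the block above is a branch-free select: v0 becomes a1 exactly when a2 (the first string's smi length) is zero, so whichever operand is empty, the other one is already in the return register. Roughly, in C++ (a sketch of the selection only, with made-up names):

    #include <string>

    // Returns the fast-path result, or nullptr when both strings are non-empty
    // and a real concatenation has to be performed.
    const std::string* StringAddFastPath(const std::string* first,
                                         const std::string* second) {
      const std::string* result = first;      // mov(v0, a0)
      if (first->empty()) result = second;    // movz(v0, a1, a2)
      if (!first->empty() && !second->empty()) return nullptr;  // slow path
      return result;
    }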
| 5791 // Untag both string-lengths. |
| 5792 __ sra(a2, a2, kSmiTagSize); |
| 5793 __ sra(a3, a3, kSmiTagSize); |
| 5794 |
| 5795 // Both strings are non-empty. |
| 5796 // a0: first string |
| 5797 // a1: second string |
| 5798 // a2: length of first string |
| 5799 // a3: length of second string |
| 5800 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5801 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5802 // Look at the length of the result of adding the two strings. |
| 5803 Label string_add_flat_result, longer_than_two; |
| 5804 // Adding two lengths can't overflow. |
| 5805 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); |
| 5806 __ Addu(t2, a2, Operand(a3)); |
| 5807 // Use the symbol table when adding two one-character strings, as it |
| 5808 // helps later optimizations to return a symbol here. |
| 5809 __ Branch(&longer_than_two, ne, t2, Operand(2)); |
| 5810 |
| 5811 // Check that both strings are non-external ASCII strings. |
| 5812 if (flags_ != NO_STRING_ADD_FLAGS) { |
| 5813 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); |
| 5814 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); |
| 5815 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); |
| 5816 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset)); |
| 5817 } |
| 5818 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3, |
| 5819 &string_add_runtime); |
| 5820 |
| 5821 // Get the two characters forming the new string. |
| 5822 __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize)); |
| 5823 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize)); |
| 5824 |
| 5825 // Try to look up the two character string in the symbol table. If it is |
| 5826 // not found, just allocate a new one. |
| 5827 Label make_two_character_string; |
| 5828 StringHelper::GenerateTwoCharacterSymbolTableProbe( |
| 5829 masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string); |
| 5830 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); |
| 5831 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 5832 __ Ret(); |
| 5833 |
| 5834 __ bind(&make_two_character_string); |
| 5835 // The resulting string has length 2 and the first characters of the two |
| 5836 // strings are combined into a single halfword in the a2 register. |
| 5837 // So we can fill the resulting string with a single halfword store |
| 5838 // instruction instead of two loops (assuming the processor is in |
| 5839 // little-endian mode). |
| 5840 __ li(t2, Operand(2)); |
| 5841 __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime); |
| 5842 __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize)); |
| 5843 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); |
| 5844 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 5845 __ Ret(); |
| 5846 |
| 5847 __ bind(&longer_than_two); |
| 5848 // Check if resulting string will be flat. |
| 5849 __ Branch(&string_add_flat_result, lt, t2, |
| 5850 Operand(String::kMinNonFlatLength)); |
| 5851 // Handle exceptionally long strings in the runtime system. |
| 5852 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); |
| 5853 ASSERT(IsPowerOf2(String::kMaxLength + 1)); |
| 5854 // kMaxLength + 1 is representable as a shifted literal; kMaxLength is not. |
| 5855 __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1)); |
| 5856 |
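Below this point the stub picks between two result shapes: results shorter than String::kMinNonFlatLength are built as flat sequential strings by copying characters, while longer results get a cons string that merely records the two halves. Hypothetical layouts, for illustration only (not V8's real object headers):

    struct FlatAsciiStringSketch {   // short result: characters copied inline
      int length;
      char chars[16];
    };
    struct ConsStringSketch {        // long result: no copying at allocation time
      int length;
      const void* first;             // left half of the concatenation
      const void* second;            // right half of the concatenation
    };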
| 5857 // If result is not supposed to be flat, allocate a cons string object. |
| 5858 // If both strings are ASCII the result is an ASCII cons string. |
| 5859 if (flags_ != NO_STRING_ADD_FLAGS) { |
| 5860 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); |
| 5861 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); |
| 5862 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); |
| 5863 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset)); |
| 5864 } |
| 5865 Label non_ascii, allocated, ascii_data; |
| 5866 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 5867 // Branch to non_ascii if either string-encoding field is zero (non-ascii). |
| 5868 __ And(t4, t0, Operand(t1)); |
| 5869 __ And(t4, t4, Operand(kStringEncodingMask)); |
| 5870 __ Branch(&non_ascii, eq, t4, Operand(zero_reg)); |
| 5871 |
| 5872 // Allocate an ASCII cons string. |
| 5873 __ bind(&ascii_data); |
| 5874 __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime); |
| 5875 __ bind(&allocated); |
| 5876 // Fill the fields of the cons string. |
| 5877 __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset)); |
| 5878 __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset)); |
| 5879 __ mov(v0, t3); |
| 5880 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); |
| 5881 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 5882 __ Ret(); |
| 5883 |
| 5884 __ bind(&non_ascii); |
| 5885 // At least one of the strings is two-byte. Check whether it happens |
| 5886 // to contain only ASCII characters. |
| 5887 // t0: first instance type. |
| 5888 // t1: second instance type. |
| 5889 // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set. |
| 5890 __ And(at, t0, Operand(kAsciiDataHintMask)); |
| 5891 __ and_(at, at, t1); |
| 5892 __ Branch(&ascii_data, ne, at, Operand(zero_reg)); |
| 5893 |
| 5894 __ xor_(t0, t0, t1); |
| 5895 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); |
| 5896 __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag)); |
| 5897 __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag)); |
| 5898 |
| 5899 // Allocate a two byte cons string. |
| 5900 __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime); |
| 5901 __ Branch(&allocated); |
| 5902 |
| 5903 // Handle creating a flat result. First check that both strings are |
| 5904 // sequential and that they have the same encoding. |
| 5905 // a0: first string |
| 5906 // a1: second string |
| 5907 // a2: length of first string |
| 5908 // a3: length of second string |
| 5909 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5910 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| 5911 // t2: sum of lengths. |
| 5912 __ bind(&string_add_flat_result); |
| 5913 if (flags_ != NO_STRING_ADD_FLAGS) { |
| 5914 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); |
| 5915 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); |
| 5916 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); |
| 5917 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset)); |
| 5918 } |
| 5919 // Check that both strings are sequential, meaning that we |
| 5920 // branch to runtime if either string tag is non-zero. |
| 5921 STATIC_ASSERT(kSeqStringTag == 0); |
| 5922 __ Or(t4, t0, Operand(t1)); |
| 5923 __ And(t4, t4, Operand(kStringRepresentationMask)); |
| 5924 __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg)); |
| 5925 |
| 5926 // Now check if both strings have the same encoding (ASCII/Two-byte). |
| 5927 // a0: first string |
| 5928 // a1: second string |
| 5929 // a2: length of first string |
| 5930 // a3: length of second string |
| 5931 // t0: first string instance type |
| 5932 // t1: second string instance type |
| 5933 // t2: sum of lengths. |
| 5934 Label non_ascii_string_add_flat_result; |
| 5935 ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test. |
| 5936 __ xor_(t3, t1, t0); |
| 5937 __ And(t3, t3, Operand(kStringEncodingMask)); |
| 5938 __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg)); |
| 5939 // And see if it's ASCII (encoding bit set) or two-byte (encoding bit zero). |
| 5940 __ And(t3, t0, Operand(kStringEncodingMask)); |
| 5941 __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg)); |
| 5942 |
| 5943 // Both strings are sequential ASCII strings. We also know that they are |
| 5944 // short (since the sum of the lengths is less than kMinNonFlatLength). |
| 5945 // t2: length of resulting flat string |
| 5946 __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime); |
| 5947 // Locate first character of result. |
| 5948 __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 5949 // Locate first character of first argument. |
| 5950 __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 5951 // a0: first character of first string. |
| 5952 // a1: second string. |
| 5953 // a2: length of first string. |
| 5954 // a3: length of second string. |
| 5955 // t2: first character of result. |
| 5956 // t3: result string. |
| 5957 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true); |
| 5958 |
| 5959 // Load second argument and locate first character. |
| 5960 __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 5961 // a1: first character of second string. |
| 5962 // a3: length of second string. |
| 5963 // t2: next character of result. |
| 5964 // t3: result string. |
| 5965 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true); |
| 5966 __ mov(v0, t3); |
| 5967 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); |
| 5968 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 5969 __ Ret(); |
| 5970 |
| 5971 __ bind(&non_ascii_string_add_flat_result); |
| 5972 // Both strings are sequential two byte strings. |
| 5973 // a0: first string. |
| 5974 // a1: second string. |
| 5975 // a2: length of first string. |
| 5976 // a3: length of second string. |
| 5977 // t2: sum of length of strings. |
| 5978 __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime); |
| 5979 // a0: first string. |
| 5980 // a1: second string. |
| 5981 // a2: length of first string. |
| 5982 // a3: length of second string. |
| 5983 // t3: result string. |
| 5984 |
| 5985 // Locate first character of result. |
| 5986 __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| 5987 // Locate first character of first argument. |
| 5988 __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| 5989 |
| 5990 // a0: first character of first string. |
| 5991 // a1: second string. |
| 5992 // a2: length of first string. |
| 5993 // a3: length of second string. |
| 5994 // t2: first character of result. |
| 5995 // t3: result string. |
| 5996 StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false); |
| 5997 |
| 5998 // Locate first character of second argument. |
| 5999 __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| 6000 |
| 6001 // a1: first character of second string. |
| 6002 // a3: length of second string. |
| 6003 // t2: next character of result (after copy of first string). |
| 6004 // t3: result string. |
| 6005 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false); |
| 6006 |
| 6007 __ mov(v0, t3); |
| 6008 __ IncrementCounter(counters->string_add_native(), 1, a2, a3); |
| 6009 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
| 6010 __ Ret(); |
| 6011 |
| 6012 // Just jump to runtime to add the two strings. |
| 6013 __ bind(&string_add_runtime); |
| 6014 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
| 6015 |
| 6016 if (call_builtin.is_linked()) { |
| 6017 __ bind(&call_builtin); |
| 6018 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); |
| 6019 } |
| 6020 } |
| 6021 |
| 6022 |
| 6023 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, |
| 6024 int stack_offset, |
| 6025 Register arg, |
| 6026 Register scratch1, |
| 6027 Register scratch2, |
| 6028 Register scratch3, |
| 6029 Register scratch4, |
| 6030 Label* slow) { |
| 6031 // First check if the argument is already a string. |
| 6032 Label not_string, done; |
| 6033 __ JumpIfSmi(arg, ¬_string); |
| 6034 __ GetObjectType(arg, scratch1, scratch1); |
| 6035 __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE)); |
| 6036 |
| 6037 // Check the number to string cache. |
| 6038 Label not_cached; |
| 6039 __ bind(¬_string); |
| 6040 // Puts the cached result into scratch1. |
| 6041 NumberToStringStub::GenerateLookupNumberStringCache(masm, |
| 6042 arg, |
| 6043 scratch1, |
| 6044 scratch2, |
| 6045 scratch3, |
| 6046 scratch4, |
| 6047 false, |
| 6048 ¬_cached); |
| 6049 __ mov(arg, scratch1); |
| 6050 __ sw(arg, MemOperand(sp, stack_offset)); |
| 6051 __ jmp(&done); |
| 6052 |
| 6053 // Check if the argument is a safe string wrapper. |
| 6054 __ bind(¬_cached); |
| 6055 __ JumpIfSmi(arg, slow); |
| 6056 __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1. |
| 6057 __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE)); |
| 6058 __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); |
| 6059 __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf); |
| 6060 __ And(scratch2, scratch2, scratch4); |
| 6061 __ Branch(slow, ne, scratch2, Operand(scratch4)); |
| 6062 __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset)); |
| 6063 __ sw(arg, MemOperand(sp, stack_offset)); |
| 6064 |
| 6065 __ bind(&done); |
770 } | 6066 } |
771 | 6067 |
772 | 6068 |
773 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 6069 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
774 UNIMPLEMENTED_MIPS(); | 6070 ASSERT(state_ == CompareIC::SMIS); |
| 6071 Label miss; |
| 6072 __ Or(a2, a1, a0); |
| 6073 __ JumpIfNotSmi(a2, &miss); |
| 6074 |
| 6075 if (GetCondition() == eq) { |
| 6076 // For equality we do not care about the sign of the result. |
| 6077 __ Subu(v0, a0, a1); |
| 6078 } else { |
| 6079 // Untag before subtracting to avoid handling overflow. |
| 6080 __ SmiUntag(a1); |
| 6081 __ SmiUntag(a0); |
| 6082 __ Subu(v0, a1, a0); |
| 6083 } |
| 6084 __ Ret(); |
| 6085 |
| 6086 __ bind(&miss); |
| 6087 GenerateMiss(masm); |
775 } | 6088 } |
776 | 6089 |
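Why the ordering case untags first: tagged smis use the full 32 bits (value << 1), so subtracting two tagged smis can overflow and produce a result with the wrong sign, while the difference of two untagged 31-bit values always fits in 32 bits. A small C++ illustration, assuming the smi encoding sketched earlier:

    #include <cstdint>

    // Ordering: the difference of two 31-bit values cannot overflow int32_t,
    // so its sign is trustworthy (<0, 0 or >0, like LESS/EQUAL/GREATER).
    int32_t CompareUntaggedSmis(int32_t lhs, int32_t rhs) { return lhs - rhs; }

    // Equality: the tagged difference is enough; only zero/non-zero matters,
    // so wrap-around cannot change the answer.
    bool TaggedSmisEqual(int32_t tagged_lhs, int32_t tagged_rhs) {
      return (static_cast<uint32_t>(tagged_lhs) -
              static_cast<uint32_t>(tagged_rhs)) == 0;
    }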
777 | 6090 |
778 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { | 6091 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
779 UNIMPLEMENTED_MIPS(); | 6092 ASSERT(state_ == CompareIC::HEAP_NUMBERS); |
| 6093 |
| 6094 Label generic_stub; |
| 6095 Label unordered; |
| 6096 Label miss; |
| 6097 __ And(a2, a1, Operand(a0)); |
| 6098 __ JumpIfSmi(a2, &generic_stub); |
| 6099 |
| 6100 __ GetObjectType(a0, a2, a2); |
| 6101 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
| 6102 __ GetObjectType(a1, a2, a2); |
| 6103 __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
| 6104 |
| 6105 // Inline the double comparison and fall back to the general compare |
| 6106 // stub if NaN is involved or FPU is unsupported. |
| 6107 if (CpuFeatures::IsSupported(FPU)) { |
| 6108 CpuFeatures::Scope scope(FPU); |
| 6109 |
| 6110 // Load left and right operand. |
| 6111 __ Subu(a2, a1, Operand(kHeapObjectTag)); |
| 6112 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); |
| 6113 __ Subu(a2, a0, Operand(kHeapObjectTag)); |
| 6114 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); |
| 6115 |
| 6116 Label fpu_eq, fpu_lt, fpu_gt; |
| 6117 // Compare operands (test if unordered). |
| 6118 __ c(UN, D, f0, f2); |
| 6119 // Don't base result on status bits when a NaN is involved. |
| 6120 __ bc1t(&unordered); |
| 6121 __ nop(); |
| 6122 |
| 6123 // Test if equal. |
| 6124 __ c(EQ, D, f0, f2); |
| 6125 __ bc1t(&fpu_eq); |
| 6126 __ nop(); |
| 6127 |
| 6128 // Test if unordered or less (unordered case is already handled). |
| 6129 __ c(ULT, D, f0, f2); |
| 6130 __ bc1t(&fpu_lt); |
| 6131 __ nop(); |
| 6132 |
| 6133 // Otherwise it's greater. |
| 6134 __ bc1f(&fpu_gt); |
| 6135 __ nop(); |
| 6136 |
| 6137 // Return a result of -1, 0, or 1. |
| 6138 __ bind(&fpu_eq); |
| 6139 __ li(v0, Operand(EQUAL)); |
| 6140 __ Ret(); |
| 6141 |
| 6142 __ bind(&fpu_lt); |
| 6143 __ li(v0, Operand(LESS)); |
| 6144 __ Ret(); |
| 6145 |
| 6146 __ bind(&fpu_gt); |
| 6147 __ li(v0, Operand(GREATER)); |
| 6148 __ Ret(); |
| 6149 |
| 6150 __ bind(&unordered); |
| 6151 } |
| 6152 |
| 6153 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); |
| 6154 __ bind(&generic_stub); |
| 6155 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 6156 |
| 6157 __ bind(&miss); |
| 6158 GenerateMiss(masm); |
780 } | 6159 } |
781 | 6160 |
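The FPU sequence above is a three-way compare that must bail out to the generic CompareStub whenever either input is NaN, since NaN is unordered with respect to everything. The same result mapping in C++ (illustrative; the enum values below mirror LESS/EQUAL/GREATER but are local to this sketch):

    #include <cmath>

    enum SketchResult { kLess = -1, kEqual = 0, kGreater = 1, kUnordered = 2 };

    SketchResult CompareDoubles(double lhs, double rhs) {
      if (std::isnan(lhs) || std::isnan(rhs)) return kUnordered;  // c(UN) / bc1t
      if (lhs == rhs) return kEqual;                              // c(EQ)
      if (lhs < rhs) return kLess;                                // c(ULT), NaN excluded
      return kGreater;
    }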
782 | 6161 |
783 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { | 6162 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
784 UNIMPLEMENTED_MIPS(); | 6163 ASSERT(state_ == CompareIC::SYMBOLS); |
785 } | 6164 Label miss; |
| 6165 |
| 6166 // Registers containing left and right operands respectively. |
| 6167 Register left = a1; |
| 6168 Register right = a0; |
| 6169 Register tmp1 = a2; |
| 6170 Register tmp2 = a3; |
| 6171 |
| 6172 // Check that both operands are heap objects. |
| 6173 __ JumpIfEitherSmi(left, right, &miss); |
| 6174 |
| 6175 // Check that both operands are symbols. |
| 6176 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 6177 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 6178 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 6179 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 6180 STATIC_ASSERT(kSymbolTag != 0); |
| 6181 __ And(tmp1, tmp1, Operand(tmp2)); |
| 6182 __ And(tmp1, tmp1, kIsSymbolMask); |
| 6183 __ Branch(&miss, eq, tmp1, Operand(zero_reg)); |
| 6184 // Make sure a0 is non-zero. At this point input operands are |
| 6185 // guaranteed to be non-zero. |
| 6186 ASSERT(right.is(a0)); |
| 6187 STATIC_ASSERT(EQUAL == 0); |
| 6188 STATIC_ASSERT(kSmiTag == 0); |
| 6189 __ mov(v0, right); |
| 6190 // Symbols are compared by identity. |
| 6191 __ Ret(ne, left, Operand(right)); |
| 6192 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 6193 __ Ret(); |
| 6194 |
| 6195 __ bind(&miss); |
| 6196 GenerateMiss(masm); |
| 6197 } |
786 | 6198 |
787 | 6199 |
788 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 6200 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
789 UNIMPLEMENTED_MIPS(); | 6201 ASSERT(state_ == CompareIC::STRINGS); |
| 6202 Label miss; |
| 6203 |
| 6204 // Registers containing left and right operands respectively. |
| 6205 Register left = a1; |
| 6206 Register right = a0; |
| 6207 Register tmp1 = a2; |
| 6208 Register tmp2 = a3; |
| 6209 Register tmp3 = t0; |
| 6210 Register tmp4 = t1; |
| 6211 Register tmp5 = t2; |
| 6212 |
| 6213 // Check that both operands are heap objects. |
| 6214 __ JumpIfEitherSmi(left, right, &miss); |
| 6215 |
| 6216 // Check that both operands are strings. This leaves the instance |
| 6217 // types loaded in tmp1 and tmp2. |
| 6218 __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 6219 __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 6220 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 6221 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 6222 STATIC_ASSERT(kNotStringTag != 0); |
| 6223 __ Or(tmp3, tmp1, tmp2); |
| 6224 __ And(tmp5, tmp3, Operand(kIsNotStringMask)); |
| 6225 __ Branch(&miss, ne, tmp5, Operand(zero_reg)); |
| 6226 |
| 6227 // Fast check for identical strings. |
| 6228 Label left_ne_right; |
| 6229 STATIC_ASSERT(EQUAL == 0); |
| 6230 STATIC_ASSERT(kSmiTag == 0); |
| 6231 __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT); |
| 6232 __ mov(v0, zero_reg); // In the delay slot. |
| 6233 __ Ret(); |
| 6234 __ bind(&left_ne_right); |
| 6235 |
| 6236 // Handle not identical strings. |
| 6237 |
| 6238 // Check that both strings are symbols. If they are, we're done |
| 6239 // because we already know they are not identical. |
| 6240 ASSERT(GetCondition() == eq); |
| 6241 STATIC_ASSERT(kSymbolTag != 0); |
| 6242 __ And(tmp3, tmp1, Operand(tmp2)); |
| 6243 __ And(tmp5, tmp3, Operand(kIsSymbolMask)); |
| 6244 Label is_symbol; |
| 6245 __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT); |
| 6246 __ mov(v0, a0); // In the delay slot. |
| 6247 // Make sure a0 is non-zero. At this point input operands are |
| 6248 // guaranteed to be non-zero. |
| 6249 ASSERT(right.is(a0)); |
| 6250 __ Ret(); |
| 6251 __ bind(&is_symbol); |
| 6252 |
| 6253 // Check that both strings are sequential ASCII. |
| 6254 Label runtime; |
| 6255 __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4, |
| 6256 &runtime); |
| 6257 |
| 6258 // Compare flat ASCII strings. Returns when done. |
| 6259 StringCompareStub::GenerateFlatAsciiStringEquals( |
| 6260 masm, left, right, tmp1, tmp2, tmp3); |
| 6261 |
| 6262 // Handle more complex cases in runtime. |
| 6263 __ bind(&runtime); |
| 6264 __ Push(left, right); |
| 6265 __ TailCallRuntime(Runtime::kStringEquals, 2, 1); |
| 6266 |
| 6267 __ bind(&miss); |
| 6268 GenerateMiss(masm); |
790 } | 6269 } |
791 | 6270 |
792 | 6271 |
793 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 6272 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
794 UNIMPLEMENTED_MIPS(); | 6273 ASSERT(state_ == CompareIC::OBJECTS); |
| 6274 Label miss; |
| 6275 __ And(a2, a1, Operand(a0)); |
| 6276 __ JumpIfSmi(a2, &miss); |
| 6277 |
| 6278 __ GetObjectType(a0, a2, a2); |
| 6279 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 6280 __ GetObjectType(a1, a2, a2); |
| 6281 __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); |
| 6282 |
| 6283 ASSERT(GetCondition() == eq); |
| 6284 __ Subu(v0, a0, Operand(a1)); |
| 6285 __ Ret(); |
| 6286 |
| 6287 __ bind(&miss); |
| 6288 GenerateMiss(masm); |
795 } | 6289 } |
796 | 6290 |
797 | 6291 |
798 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { | 6292 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { |
799 UNIMPLEMENTED_MIPS(); | 6293 __ Push(a1, a0); |
800 } | 6294 __ push(ra); |
801 | 6295 |
802 | 6296 // Call the runtime system in a fresh internal frame. |
| 6297 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), |
| 6298 masm->isolate()); |
| 6299 __ EnterInternalFrame(); |
| 6300 __ Push(a1, a0); |
| 6301 __ li(t0, Operand(Smi::FromInt(op_))); |
| 6302 __ push(t0); |
| 6303 __ CallExternalReference(miss, 3); |
| 6304 __ LeaveInternalFrame(); |
| 6305 // Compute the entry point of the rewritten stub. |
| 6306 __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 6307 // Restore registers. |
| 6308 __ pop(ra); |
| 6309 __ pop(a0); |
| 6310 __ pop(a1); |
| 6311 __ Jump(a2); |
| 6312 } |
| 6313 |
| 6314 void DirectCEntryStub::Generate(MacroAssembler* masm) { |
| 6315 // No need to pop or drop anything; LeaveExitFrame will restore the old |
| 6316 // stack, thus dropping the allocated space for the return value. |
| 6317 // The saved ra is after the reserved stack space for the 4 args. |
| 6318 __ lw(t9, MemOperand(sp, kCArgsSlotsSize)); |
| 6319 |
| 6320 if (FLAG_debug_code && EnableSlowAsserts()) { |
| 6321 // In case of an error the return address may point to a memory area |
| 6322 // filled with kZapValue by the GC. |
| 6323 // Dereference the address and check for this. |
| 6324 __ lw(t0, MemOperand(t9)); |
| 6325 __ Assert(ne, "Received invalid return address.", t0, |
| 6326 Operand(reinterpret_cast<uint32_t>(kZapValue))); |
| 6327 } |
| 6328 __ Jump(t9); |
| 6329 } |
| 6330 |
| 6331 |
| 6332 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 6333 ExternalReference function) { |
| 6334 __ li(t9, Operand(function)); |
| 6335 this->GenerateCall(masm, t9); |
| 6336 } |
| 6337 |
| 6338 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 6339 Register target) { |
| 6340 __ Move(t9, target); |
| 6341 __ AssertStackIsAligned(); |
| 6342 // Allocate space for arg slots. |
| 6343 __ Subu(sp, sp, kCArgsSlotsSize); |
| 6344 |
| 6345 // Block the trampoline pool through the whole function to make sure the |
| 6346 // number of generated instructions is constant. |
| 6347 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); |
| 6348 |
| 6349 // We need to get the current 'pc' value, which is not available on MIPS. |
| 6350 Label find_ra; |
| 6351 masm->bal(&find_ra); // ra = pc + 8. |
| 6352 masm->nop(); // Branch delay slot nop. |
| 6353 masm->bind(&find_ra); |
| 6354 |
| 6355 const int kNumInstructionsToJump = 6; |
| 6356 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize); |
| 6357 // Push return address (accessible to GC through exit frame pc). |
| 6358 // This spot for ra was reserved in EnterExitFrame. |
| 6359 masm->sw(ra, MemOperand(sp, kCArgsSlotsSize)); |
| 6360 masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()), |
| 6361 RelocInfo::CODE_TARGET), true); |
| 6362 // Call the function. |
| 6363 masm->Jump(t9); |
| 6364 // Make sure the stored 'ra' points to this position. |
| 6365 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra)); |
| 6366 } |
| 6367 |
| 6368 |
| 6369 MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( |
| 6370 MacroAssembler* masm, |
| 6371 Label* miss, |
| 6372 Label* done, |
| 6373 Register receiver, |
| 6374 Register properties, |
| 6375 String* name, |
| 6376 Register scratch0) { |
| 6377 // If the names in slots 1 to kProbes - 1 for this hash value all differ |
| 6378 // from the given name and the kProbes-th slot is unused (its name is the |
| 6379 // undefined value), the hash table is guaranteed not to contain the |
| 6380 // property. This holds even if some slots hold deleted properties |
| 6381 // (their names are the null value). |
| 6382 for (int i = 0; i < kInlinedProbes; i++) { |
| 6383 // scratch0 points to properties hash. |
| 6384 // Compute the masked index: (hash + i + i * i) & mask. |
| 6385 Register index = scratch0; |
| 6386 // Capacity is smi 2^n. |
| 6387 __ lw(index, FieldMemOperand(properties, kCapacityOffset)); |
| 6388 __ Subu(index, index, Operand(1)); |
| 6389 __ And(index, index, Operand( |
| 6390 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); |
| 6391 |
| 6392 // Scale the index by multiplying by the entry size. |
| 6393 ASSERT(StringDictionary::kEntrySize == 3); |
| 6394 // index *= 3. |
| 6395 __ mov(at, index); |
| 6396 __ sll(index, index, 1); |
| 6397 __ Addu(index, index, at); |
| 6398 |
| 6399 Register entity_name = scratch0; |
| 6400 // Finding undefined in this slot means the name is not in the dictionary. |
| 6401 ASSERT_EQ(kSmiTagSize, 1); |
| 6402 Register tmp = properties; |
| 6403 |
| 6404 __ sll(scratch0, index, 1); |
| 6405 __ Addu(tmp, properties, scratch0); |
| 6406 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
| 6407 |
| 6408 ASSERT(!tmp.is(entity_name)); |
| 6409 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
| 6410 __ Branch(done, eq, entity_name, Operand(tmp)); |
| 6411 |
| 6412 if (i != kInlinedProbes - 1) { |
| 6413 // Stop if found the property. |
| 6414 __ Branch(miss, eq, entity_name, Operand(Handle<String>(name))); |
| 6415 |
| 6416 // Check if the entry name is not a symbol. |
| 6417 __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); |
| 6418 __ lbu(entity_name, |
| 6419 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); |
| 6420 __ And(scratch0, entity_name, Operand(kIsSymbolMask)); |
| 6421 __ Branch(miss, eq, scratch0, Operand(zero_reg)); |
| 6422 |
| 6423 // Restore the properties. |
| 6424 __ lw(properties, |
| 6425 FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 6426 } |
| 6427 } |
| 6428 |
| 6429 const int spill_mask = |
| 6430 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() | |
| 6431 a2.bit() | a1.bit() | a0.bit()); |
| 6432 |
| 6433 __ MultiPush(spill_mask); |
| 6434 __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 6435 __ li(a1, Operand(Handle<String>(name))); |
| 6436 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); |
| 6437 MaybeObject* result = masm->TryCallStub(&stub); |
| 6438 if (result->IsFailure()) return result; |
| 6439 __ MultiPop(spill_mask); |
| 6440 |
| 6441 __ Branch(done, eq, v0, Operand(zero_reg)); |
| 6442 __ Branch(miss, ne, v0, Operand(zero_reg)); |
| 6443 return result; |
| 6444 } |
| 6445 |
| 6446 |
| 6447 // Probe the string dictionary in the |elements| register. Jump to the |
| 6448 // |done| label if a property with the given name is found. Jump to |
| 6449 // the |miss| label otherwise. |
| 6450 // If lookup was successful |scratch2| will be equal to elements + 4 * index. |
803 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, | 6451 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
804 Label* miss, | 6452 Label* miss, |
805 Label* done, | 6453 Label* done, |
806 Register elements, | 6454 Register elements, |
807 Register name, | 6455 Register name, |
808 Register scratch1, | 6456 Register scratch1, |
809 Register scratch2) { | 6457 Register scratch2) { |
810 UNIMPLEMENTED_MIPS(); | 6458 // Assert that name contains a string. |
| 6459 if (FLAG_debug_code) __ AbortIfNotString(name); |
| 6460 |
| 6461 // Compute the capacity mask. |
| 6462 __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
| 6463 __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int |
| 6464 __ Subu(scratch1, scratch1, Operand(1)); |
| 6465 |
| 6466 // Generate an unrolled loop that performs a few probes before |
| 6467 // giving up. Measurements done on Gmail indicate that 2 probes |
| 6468 // cover ~93% of loads from dictionaries. |
| 6469 for (int i = 0; i < kInlinedProbes; i++) { |
| 6470 // Compute the masked index: (hash + i + i * i) & mask. |
| 6471 __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); |
| 6472 if (i > 0) { |
| 6473 // Add the probe offset (i + i * i) shifted left by String::kHashShift so |
| 6474 // it can be added directly to the raw hash field. The sum hash + i + i * i |
| 6475 // is shifted right and masked by the instructions that follow. |
| 6476 ASSERT(StringDictionary::GetProbeOffset(i) < |
| 6477 1 << (32 - String::kHashFieldOffset)); |
| 6478 __ Addu(scratch2, scratch2, Operand( |
| 6479 StringDictionary::GetProbeOffset(i) << String::kHashShift)); |
| 6480 } |
| 6481 __ srl(scratch2, scratch2, String::kHashShift); |
| 6482 __ And(scratch2, scratch1, scratch2); |
| 6483 |
| 6484 // Scale the index by multiplying by the element size. |
| 6485 ASSERT(StringDictionary::kEntrySize == 3); |
| 6486 // scratch2 = scratch2 * 3. |
| 6487 |
| 6488 __ mov(at, scratch2); |
| 6489 __ sll(scratch2, scratch2, 1); |
| 6490 __ Addu(scratch2, scratch2, at); |
| 6491 |
| 6492 // Check if the key is identical to the name. |
| 6493 __ sll(at, scratch2, 2); |
| 6494 __ Addu(scratch2, elements, at); |
| 6495 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset)); |
| 6496 __ Branch(done, eq, name, Operand(at)); |
| 6497 } |
| 6498 |
| 6499 const int spill_mask = |
| 6500 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | |
| 6501 a3.bit() | a2.bit() | a1.bit() | a0.bit()) & |
| 6502 ~(scratch1.bit() | scratch2.bit()); |
| 6503 |
| 6504 __ MultiPush(spill_mask); |
| 6505 __ Move(a0, elements); |
| 6506 __ Move(a1, name); |
| 6507 StringDictionaryLookupStub stub(POSITIVE_LOOKUP); |
| 6508 __ CallStub(&stub); |
| 6509 __ mov(scratch2, a2); |
| 6510 __ MultiPop(spill_mask); |
| 6511 |
| 6512 __ Branch(done, ne, v0, Operand(zero_reg)); |
| 6513 __ Branch(miss, eq, v0, Operand(zero_reg)); |
811 } | 6514 } |
812 | 6515 |
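The negative and positive lookup helpers above, and the full stub below, share the same open-addressed probe sequence over a power-of-two capacity: probe i inspects entry ((hash + i + i*i) & mask), and the entry index is scaled by 3 with a shift plus an add. A compact C++ sketch of that probing (hypothetical flat layout, not V8's actual StringDictionary; the exact probe offset is defined by StringDictionary::GetProbeOffset):

    #include <cstdint>

    // `entries` holds capacity * 3 slots (key, value, details); capacity is a
    // power of two; keys are compared by pointer identity, like symbols.
    int FindEntry(void* const* entries, uint32_t capacity, uint32_t hash,
                  void* key, void* undefined_sentinel, int max_probes) {
      uint32_t mask = capacity - 1;
      for (uint32_t i = 0; i < static_cast<uint32_t>(max_probes); i++) {
        uint32_t index = (hash + i + i * i) & mask;  // probe, as in the comments
        uint32_t slot = (index << 1) + index;        // index * 3, as sll + Addu
        void* entry_key = entries[slot];
        if (entry_key == undefined_sentinel) return -1;  // empty slot: not present
        if (entry_key == key) return static_cast<int>(slot);
      }
      return -2;  // probe budget exhausted; the stub falls back in this case
    }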
813 | 6516 |
814 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { | 6517 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { |
815 UNIMPLEMENTED_MIPS(); | 6518 // Registers: |
| 6519 // result (v0): holds the result of the lookup. |
| 6520 // dictionary (a0): StringDictionary to probe. |
| 6521 // key (a1): the name to look for. |
| 6522 // index (a2): will hold the index of the entry if the lookup is |
| 6523 // successful. |
| 6524 // Returns: |
| 6525 // result is zero if the lookup failed, non-zero otherwise. |
| 6526 |
| 6527 Register result = v0; |
| 6528 Register dictionary = a0; |
| 6529 Register key = a1; |
| 6530 Register index = a2; |
| 6531 Register mask = a3; |
| 6532 Register hash = t0; |
| 6533 Register undefined = t1; |
| 6534 Register entry_key = t2; |
| 6535 |
| 6536 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; |
| 6537 |
| 6538 __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset)); |
| 6539 __ sra(mask, mask, kSmiTagSize); |
| 6540 __ Subu(mask, mask, Operand(1)); |
| 6541 |
| 6542 __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset)); |
| 6543 |
| 6544 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 6545 |
| 6546 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| 6547 // Compute the masked index: (hash + i + i * i) & mask. |
| 6548 // Capacity is smi 2^n. |
| 6549 if (i > 0) { |
| 6550 // Add the probe offset (i + i * i) shifted left by String::kHashShift so |
| 6551 // it can be added directly to the raw hash field. The sum hash + i + i * i |
| 6552 // is shifted right and masked by the instructions that follow. |
| 6553 ASSERT(StringDictionary::GetProbeOffset(i) < |
| 6554 1 << (32 - String::kHashFieldOffset)); |
| 6555 __ Addu(index, hash, Operand( |
| 6556 StringDictionary::GetProbeOffset(i) << String::kHashShift)); |
| 6557 } else { |
| 6558 __ mov(index, hash); |
| 6559 } |
| 6560 __ srl(index, index, String::kHashShift); |
| 6561 __ And(index, mask, index); |
| 6562 |
| 6563 // Scale the index by multiplying by the entry size. |
| 6564 ASSERT(StringDictionary::kEntrySize == 3); |
| 6565 // index *= 3. |
| 6566 __ mov(at, index); |
| 6567 __ sll(index, index, 1); |
| 6568 __ Addu(index, index, at); |
| 6569 |
| 6570 |
| 6571 ASSERT_EQ(kSmiTagSize, 1); |
| 6572 __ sll(index, index, 2); |
| 6573 __ Addu(index, index, dictionary); |
| 6574 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
| 6575 |
| 6576 // Finding undefined in this slot means the name is not in the dictionary. |
| 6577 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); |
| 6578 |
| 6579 // Stop if found the property. |
| 6580 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); |
| 6581 |
| 6582 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { |
| 6583 // Check if the entry name is not a symbol. |
| 6584 __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); |
| 6585 __ lbu(entry_key, |
| 6586 FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); |
| 6587 __ And(result, entry_key, Operand(kIsSymbolMask)); |
| 6588 __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg)); |
| 6589 } |
| 6590 } |
| 6591 |
| 6592 __ bind(&maybe_in_dictionary); |
| 6593 // If we are doing a negative lookup then probing failure should be |
| 6594 // treated as a lookup success. For a positive lookup, probing failure |
| 6595 // should be treated as a lookup failure. |
| 6596 if (mode_ == POSITIVE_LOOKUP) { |
| 6597 __ mov(result, zero_reg); |
| 6598 __ Ret(); |
| 6599 } |
| 6600 |
| 6601 __ bind(&in_dictionary); |
| 6602 __ li(result, 1); |
| 6603 __ Ret(); |
| 6604 |
| 6605 __ bind(¬_in_dictionary); |
| 6606 __ mov(result, zero_reg); |
| 6607 __ Ret(); |
816 } | 6608 } |
817 | 6609 |
818 | 6610 |
819 #undef __ | 6611 #undef __ |
820 | 6612 |
821 } } // namespace v8::internal | 6613 } } // namespace v8::internal |
822 | 6614 |
823 #endif // V8_TARGET_ARCH_MIPS | 6615 #endif // V8_TARGET_ARCH_MIPS |
824 | 6616 |
OLD | NEW |