OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 4213 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4224 | 4224 |
4225 __ bind(&miss_force_generic); | 4225 __ bind(&miss_force_generic); |
4226 Code* stub = masm->isolate()->builtins()->builtin( | 4226 Code* stub = masm->isolate()->builtins()->builtin( |
4227 Builtins::kKeyedLoadIC_MissForceGeneric); | 4227 Builtins::kKeyedLoadIC_MissForceGeneric); |
4228 __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET); | 4228 __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET); |
4229 } | 4229 } |
4230 | 4230 |
4231 | 4231 |
// Generates the fast-path keyed load stub for FixedDoubleArray elements:
// reads the double at `key` and boxes it into a freshly allocated HeapNumber
// returned in v0. Falls through to the slow builtin on allocation failure and
// to the force-generic miss handler on any guard failure (non-smi key,
// out-of-bounds index, or a hole element).
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss_force_generic, slow_allocate_heapnumber;

  Register key_reg = a0;
  Register receiver_reg = a1;
  Register elements_reg = a2;
  // NOTE: heap_number_reg aliases elements_reg (both a2). This is safe because
  // after the bounds check the element is addressed only through
  // indexed_double_offset (a3), so a2 is free to receive the allocation result.
  Register heap_number_reg = a2;
  Register indexed_double_offset = a3;
  Register scratch = t0;
  Register scratch2 = t1;
  Register scratch3 = t2;
  Register heap_number_map = t3;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  // Get the elements array.
  __ lw(elements_reg,
        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

  // Check that the key is within bounds. Both values are smis, so an
  // unsigned (hs) compare rejects negative and too-large indexes at once.
  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));

  // Load the upper word of the double in the fixed array and test for NaN.
  // key is a smi (value << kSmiTagSize), so shifting by
  // kDoubleSizeLog2 - kSmiTagSize scales the untagged index by kDoubleSize.
  __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
  // The upper 32 bits of the double sit one lower-word past the header
  // (little-endian layout assumed here — TODO confirm for the target).
  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
  // An upper word equal to kHoleNanUpper32 marks the hole sentinel; bail out
  // to the generic IC, which knows how to return undefined for holes.
  __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));

  // Non-NaN. Allocate a new heap number and copy the double value into it.
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
                        heap_number_map, &slow_allocate_heapnumber);

  // Don't need to reload the upper 32 bits of the double, it's already in
  // scratch.
  __ sw(scratch, FieldMemOperand(heap_number_reg,
                                 HeapNumber::kExponentOffset));
  __ lw(scratch, FieldMemOperand(indexed_double_offset,
                                 FixedArray::kHeaderSize));
  __ sw(scratch, FieldMemOperand(heap_number_reg,
                                 HeapNumber::kMantissaOffset));

  // Return the boxed value in v0.
  __ mov(v0, heap_number_reg);
  __ Ret();

  // Allocation failed: defer to the slow keyed-load builtin.
  __ bind(&slow_allocate_heapnumber);
  Handle<Code> slow_ic =
      masm->isolate()->builtins()->KeyedLoadIC_Slow();
  __ Jump(slow_ic, RelocInfo::CODE_TARGET);

  // Guard failure: force the IC into its generic state.
  __ bind(&miss_force_generic);
  Handle<Code> miss_ic =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
}
4236 | 4299 |
4237 | 4300 |
4238 void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, | 4301 void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, |
4239 bool is_js_array) { | 4302 bool is_js_array) { |
4240 // ----------- S t a t e ------------- | 4303 // ----------- S t a t e ------------- |
4241 // -- a0 : value | 4304 // -- a0 : value |
4242 // -- a1 : key | 4305 // -- a1 : key |
4243 // -- a2 : receiver | 4306 // -- a2 : receiver |
4244 // -- ra : return address | 4307 // -- ra : return address |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4294 __ bind(&miss_force_generic); | 4357 __ bind(&miss_force_generic); |
4295 Handle<Code> ic = | 4358 Handle<Code> ic = |
4296 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); | 4359 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); |
4297 __ Jump(ic, RelocInfo::CODE_TARGET); | 4360 __ Jump(ic, RelocInfo::CODE_TARGET); |
4298 } | 4361 } |
4299 | 4362 |
4300 | 4363 |
// Generates the fast-path keyed store stub for FixedDoubleArray elements.
// Accepts either a smi or a HeapNumber value; smis are converted to double
// in place, NaNs are canonicalized so they can never collide with the hole
// bit pattern, and anything else misses to the force-generic handler.
// `is_js_array` selects whether the bounds check reads JSArray::length or
// the backing store's own length.
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
    bool is_js_array) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : scratch
  //  -- t0    : scratch (elements_reg)
  //  -- t1    : scratch (mantissa_reg)
  //  -- t2    : scratch (exponent_reg)
  //  -- t3    : scratch4
  // -----------------------------------
  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;

  Register value_reg = a0;
  Register key_reg = a1;
  Register receiver_reg = a2;
  Register scratch = a3;
  Register elements_reg = t0;
  Register mantissa_reg = t1;
  Register exponent_reg = t2;
  Register scratch4 = t3;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  __ lw(elements_reg,
        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

  // Check that the key is within bounds. For a JSArray the authoritative
  // length lives on the array object itself, not on the backing store.
  if (is_js_array) {
    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
  } else {
    __ lw(scratch,
          FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  }
  // Compare smis, unsigned compare catches both negative and out-of-bound
  // indexes.
  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));

  // Handle smi values specially.
  __ JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number
  __ CheckMap(value_reg,
              scratch,
              masm->isolate()->factory()->heap_number_map(),
              &miss_force_generic,
              DONT_DO_SMI_CHECK);

  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
  // in the exponent.
  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
  __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
  __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));

  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));

  // Store the (mantissa_reg, exponent_reg) word pair into the element slot.
  __ bind(&have_double_value);
  // Scale the untagged key by kDoubleSize (key is a smi, see shift amount).
  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  __ Addu(scratch, elements_reg, Operand(scratch4));
  __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
  // Upper word goes one lower-word past the header, mirroring the load stub.
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ sw(exponent_reg, FieldMemOperand(scratch, offset));
  __ Ret();

  __ bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
  __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));

  __ bind(&is_nan);
  // Load canonical NaN for storing into the double array. Canonicalizing
  // here guarantees a stored NaN can never alias the hole sentinel pattern.
  uint64_t nan_int64 = BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
  __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
  __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
  __ jmp(&have_double_value);

  // Smi path: untag and convert the integer to a double in the slot.
  __ bind(&smi_value);
  __ Addu(scratch, elements_reg,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  __ Addu(scratch, scratch, scratch4);
  // scratch is now effective address of the double element

  // Pick the conversion destination based on hardware FPU availability.
  FloatingPointHelper::Destination destination;
  if (CpuFeatures::IsSupported(FPU)) {
    destination = FloatingPointHelper::kFPURegisters;
  } else {
    destination = FloatingPointHelper::kCoreRegisters;
  }
  __ SmiUntag(value_reg, value_reg);
  FloatingPointHelper::ConvertIntToDouble(
      masm, value_reg, destination,
      f0, mantissa_reg, exponent_reg,  // These are: double_dst, dst1, dst2.
      scratch4, f2);  // These are: scratch2, single_scratch.
  if (destination == FloatingPointHelper::kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // One 64-bit store when the FPU produced the value.
    __ sdc1(f0, MemOperand(scratch, 0));
  } else {
    // Software path: store the two 32-bit halves separately.
    __ sw(mantissa_reg, MemOperand(scratch, 0));
    __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
  }
  __ Ret();

  // Handle store cache miss, replacing the ic with the generic stub.
  __ bind(&miss_force_generic);
  Handle<Code> ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(ic, RelocInfo::CODE_TARGET);
}
4306 | 4482 |
4307 | 4483 |
4308 #undef __ | 4484 #undef __ |
4309 | 4485 |
4310 } } // namespace v8::internal | 4486 } } // namespace v8::internal |
4311 | 4487 |
4312 #endif // V8_TARGET_ARCH_MIPS | 4488 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |