| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 20 matching lines...) |
| 31 | 31 |
| 32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
| 33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
| 34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
| 35 #include "stub-cache.h" | 35 #include "stub-cache.h" |
| 36 | 36 |
| 37 namespace v8 { | 37 namespace v8 { |
| 38 namespace internal { | 38 namespace internal { |
| 39 | 39 |
| 40 | 40 |
| | 41 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( |
| | 42 Isolate* isolate, |
| | 43 CodeStubInterfaceDescriptor* descriptor) { |
| | 44 static Register registers[] = { r3, r2, r1 }; |
| | 45 descriptor->register_param_count_ = 3; |
| | 46 descriptor->register_params_ = registers; |
| | 47 descriptor->stack_parameter_count_ = NULL; |
| | 48 descriptor->deoptimization_handler_ = |
| | 49 Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry; |
| | 50 } |
| | 51 |
| | 52 |
| 41 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( | 53 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( |
| 42 Isolate* isolate, | 54 Isolate* isolate, |
| 43 CodeStubInterfaceDescriptor* descriptor) { | 55 CodeStubInterfaceDescriptor* descriptor) { |
| 44 static Register registers[] = { r3, r2, r1, r0 }; | 56 static Register registers[] = { r3, r2, r1, r0 }; |
| 45 descriptor->register_param_count_ = 4; | 57 descriptor->register_param_count_ = 4; |
| 46 descriptor->register_params_ = registers; | 58 descriptor->register_params_ = registers; |
| 47 descriptor->stack_parameter_count_ = NULL; | 59 descriptor->stack_parameter_count_ = NULL; |
| 48 descriptor->deoptimization_handler_ = | 60 descriptor->deoptimization_handler_ = |
| 49 Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; | 61 Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; |
| 50 } | 62 } |
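Both InitializeInterfaceDescriptor bodies above follow the same pattern: list the registers that carry the stub's arguments, leave the stack parameter count unset, and point the deoptimization handler at the runtime function that implements the slow path. A minimal sketch with simplified stand-in types (the real CodeStubInterfaceDescriptor is declared in code-stubs.h; the field types below are assumptions, only the field names come from the diff):

```cpp
// Stand-in types only; the real declarations live in code-stubs.h.
struct Register { int code; };
typedef const void* Address;

struct CodeStubInterfaceDescriptor {
  int register_param_count_;         // number of register-passed arguments
  Register* register_params_;        // which registers carry them (r3, r2, r1[, r0])
  Register* stack_parameter_count_;  // NULL: the stub takes a fixed argument count
  Address deoptimization_handler_;   // runtime entry taken when the stub misses
};

// Hypothetical stub wiring, mirroring the ARM code above.
void InitializeExampleDescriptor(CodeStubInterfaceDescriptor* descriptor,
                                 Register* registers, int count,
                                 Address runtime_entry) {
  descriptor->register_param_count_ = count;
  descriptor->register_params_ = registers;
  descriptor->stack_parameter_count_ = NULL;
  descriptor->deoptimization_handler_ = runtime_entry;
}
```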
| (...skipping 345 matching lines...) |
| 396 __ mov(cp, r0); | 408 __ mov(cp, r0); |
| 397 __ add(sp, sp, Operand(2 * kPointerSize)); | 409 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 398 __ Ret(); | 410 __ Ret(); |
| 399 | 411 |
| 400 // Need to collect. Call into runtime system. | 412 // Need to collect. Call into runtime system. |
| 401 __ bind(&gc); | 413 __ bind(&gc); |
| 402 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); | 414 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); |
| 403 } | 415 } |
| 404 | 416 |
| 405 | 417 |
| 406 static void GenerateFastCloneShallowArrayCommon( | |
| 407 MacroAssembler* masm, | |
| 408 int length, | |
| 409 FastCloneShallowArrayStub::Mode mode, | |
| 410 AllocationSiteMode allocation_site_mode, | |
| 411 Label* fail) { | |
| 412 // Registers on entry: | |
| 413 // | |
| 414 // r3: boilerplate literal array. | |
| 415 ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS); | |
| 416 | |
| 417 // All sizes here are multiples of kPointerSize. | |
| 418 int elements_size = 0; | |
| 419 if (length > 0) { | |
| 420 elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS | |
| 421 ? FixedDoubleArray::SizeFor(length) | |
| 422 : FixedArray::SizeFor(length); | |
| 423 } | |
| 424 | |
| 425 int size = JSArray::kSize; | |
| 426 int allocation_info_start = size; | |
| 427 if (allocation_site_mode == TRACK_ALLOCATION_SITE) { | |
| 428 size += AllocationSiteInfo::kSize; | |
| 429 } | |
| 430 size += elements_size; | |
| 431 | |
| 432 // Allocate both the JS array and the elements array in one big | |
| 433 // allocation. This avoids multiple limit checks. | |
| 434 AllocationFlags flags = TAG_OBJECT; | |
| 435 if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) { | |
| 436 flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags); | |
| 437 } | |
| 438 __ Allocate(size, r0, r1, r2, fail, flags); | |
| 439 | |
| 440 if (allocation_site_mode == TRACK_ALLOCATION_SITE) { | |
| 441 __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()-> | |
| 442 allocation_site_info_map()))); | |
| 443 __ str(r2, FieldMemOperand(r0, allocation_info_start)); | |
| 444 __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize)); | |
| 445 } | |
| 446 | |
| 447 // Copy the JS array part. | |
| 448 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { | |
| 449 if ((i != JSArray::kElementsOffset) || (length == 0)) { | |
| 450 __ ldr(r1, FieldMemOperand(r3, i)); | |
| 451 __ str(r1, FieldMemOperand(r0, i)); | |
| 452 } | |
| 453 } | |
| 454 | |
| 455 if (length > 0) { | |
| 456 // Get hold of the elements array of the boilerplate and setup the | |
| 457 // elements pointer in the resulting object. | |
| 458 __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); | |
| 459 if (allocation_site_mode == TRACK_ALLOCATION_SITE) { | |
| 460 __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize)); | |
| 461 } else { | |
| 462 __ add(r2, r0, Operand(JSArray::kSize)); | |
| 463 } | |
| 464 __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); | |
| 465 | |
| 466 // Copy the elements array. | |
| 467 ASSERT((elements_size % kPointerSize) == 0); | |
| 468 __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); | |
| 469 } | |
| 470 } | |
| 471 | |
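The removed helper above folds the JSArray header, the optional AllocationSiteInfo, and the elements store into a single allocation so that only one heap-limit check is needed. A host-side sketch of that size computation follows; the constants are illustrative stand-ins, not V8's real layout values, and only the structure of the computation is taken from the original code:

```cpp
#include <cstdio>

int main() {
  const int kPointerSize = 4;                             // 32-bit ARM
  const int kJSArraySize = 4 * kPointerSize;              // assumed header size
  const int kAllocationSiteInfoSize = 2 * kPointerSize;   // assumed
  const int kFixedArrayHeaderSize = 2 * kPointerSize;     // assumed

  int length = 3;                      // number of elements to clone
  bool track_allocation_site = true;   // the TRACK_ALLOCATION_SITE case

  // Elements (FixedArray layout here; a FixedDoubleArray would use 8-byte
  // slots) are placed directly after the header and the optional site info,
  // so one Allocate() call covers the whole object.
  int elements_size = kFixedArrayHeaderSize + length * kPointerSize;
  int size = kJSArraySize;
  if (track_allocation_site) size += kAllocationSiteInfoSize;
  size += elements_size;

  printf("single allocation of %d bytes\n", size);
  return 0;
}
```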
| 472 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { | |
| 473 // Stack layout on entry: | |
| 474 // | |
| 475 // [sp]: constant elements. | |
| 476 // [sp + kPointerSize]: literal index. | |
| 477 // [sp + (2 * kPointerSize)]: literals array. | |
| 478 | |
| 479 // Load boilerplate object into r3 and check if we need to create a | |
| 480 // boilerplate. | |
| 481 Label slow_case; | |
| 482 __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); | |
| 483 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); | |
| 484 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 485 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 486 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | |
| 487 __ b(eq, &slow_case); | |
| 488 | |
| 489 FastCloneShallowArrayStub::Mode mode = mode_; | |
| 490 if (mode == CLONE_ANY_ELEMENTS) { | |
| 491 Label double_elements, check_fast_elements; | |
| 492 __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset)); | |
| 493 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 494 __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex); | |
| 495 __ b(ne, &check_fast_elements); | |
| 496 GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS, | |
| 497 allocation_site_mode_, | |
| 498 &slow_case); | |
| 499 // Return and remove the on-stack parameters. | |
| 500 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 501 __ Ret(); | |
| 502 | |
| 503 __ bind(&check_fast_elements); | |
| 504 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); | |
| 505 __ b(ne, &double_elements); | |
| 506 GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS, | |
| 507 allocation_site_mode_, | |
| 508 &slow_case); | |
| 509 // Return and remove the on-stack parameters. | |
| 510 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 511 __ Ret(); | |
| 512 | |
| 513 __ bind(&double_elements); | |
| 514 mode = CLONE_DOUBLE_ELEMENTS; | |
| 515 // Fall through to generate the code to handle double elements. | |
| 516 } | |
| 517 | |
| 518 if (FLAG_debug_code) { | |
| 519 const char* message; | |
| 520 Heap::RootListIndex expected_map_index; | |
| 521 if (mode == CLONE_ELEMENTS) { | |
| 522 message = "Expected (writable) fixed array"; | |
| 523 expected_map_index = Heap::kFixedArrayMapRootIndex; | |
| 524 } else if (mode == CLONE_DOUBLE_ELEMENTS) { | |
| 525 message = "Expected (writable) fixed double array"; | |
| 526 expected_map_index = Heap::kFixedDoubleArrayMapRootIndex; | |
| 527 } else { | |
| 528 ASSERT(mode == COPY_ON_WRITE_ELEMENTS); | |
| 529 message = "Expected copy-on-write fixed array"; | |
| 530 expected_map_index = Heap::kFixedCOWArrayMapRootIndex; | |
| 531 } | |
| 532 __ push(r3); | |
| 533 __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); | |
| 534 __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); | |
| 535 __ CompareRoot(r3, expected_map_index); | |
| 536 __ Assert(eq, message); | |
| 537 __ pop(r3); | |
| 538 } | |
| 539 | |
| 540 GenerateFastCloneShallowArrayCommon(masm, length_, mode, | |
| 541 allocation_site_mode_, | |
| 542 &slow_case); | |
| 543 | |
| 544 // Return and remove the on-stack parameters. | |
| 545 __ add(sp, sp, Operand(3 * kPointerSize)); | |
| 546 __ Ret(); | |
| 547 | |
| 548 __ bind(&slow_case); | |
| 549 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); | |
| 550 } | |
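The removed Generate() above picks a clone mode by inspecting the map of the boilerplate's elements store: a copy-on-write FixedArray shares its backing store (so the common helper runs with length 0), a plain FixedArray is copied element-wise, and anything else is treated as a FixedDoubleArray. A plain C++ sketch of just that decision, with stand-in identifiers rather than V8 names:

```cpp
enum CloneMode { CLONE_ELEMENTS, CLONE_DOUBLE_ELEMENTS, COPY_ON_WRITE_ELEMENTS };

// The two flags stand for the CompareRoot checks against the
// FixedCOWArray map and the FixedArray map in the stub above.
CloneMode PickCloneMode(bool elements_map_is_cow, bool elements_map_is_fixed) {
  if (elements_map_is_cow) return COPY_ON_WRITE_ELEMENTS;  // share the elements
  if (elements_map_is_fixed) return CLONE_ELEMENTS;        // copy the FixedArray
  return CLONE_DOUBLE_ELEMENTS;                            // copy a FixedDoubleArray
}
```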
| 551 | |
| 552 | |
| 553 // Takes a Smi and converts to an IEEE 64 bit floating point value in two | 418 // Takes a Smi and converts to an IEEE 64 bit floating point value in two |
| 554 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and | 419 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and |
| 555 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a | 420 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a |
| 556 // scratch register. Destroys the source register. No GC occurs during this | 421 // scratch register. Destroys the source register. No GC occurs during this |
| 557 // stub so you don't have to set up the frame. | 422 // stub so you don't have to set up the frame. |
| 558 class ConvertToDoubleStub : public PlatformCodeStub { | 423 class ConvertToDoubleStub : public PlatformCodeStub { |
| 559 public: | 424 public: |
| 560 ConvertToDoubleStub(Register result_reg_1, | 425 ConvertToDoubleStub(Register result_reg_1, |
| 561 Register result_reg_2, | 426 Register result_reg_2, |
| 562 Register source_reg, | 427 Register source_reg, |
| (...skipping 3309 matching lines...) |
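The comment above describes the value layout ConvertToDoubleStub produces: 1 sign bit, 11 biased exponent bits, and 52 fraction bits split 20/32 across two registers. A small host-side C++ check (ordinary code, not stub code; it assumes the host stores doubles and integers with the same endianness, which holds on common hardware) shows the two words for the Smi value 5:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // 5 = 1.01b * 2^2, so the biased exponent is 1023 + 2 = 1025 and the
  // fraction's leading bits are 01: high word 0x40140000, low word 0.
  double d = 5.0;
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign | exponent | top 20 fraction bits
  uint32_t lo = static_cast<uint32_t>(bits);        // remaining 32 fraction bits
  printf("hi=0x%08x lo=0x%08x\n", hi, lo);          // prints hi=0x40140000 lo=0x00000000
  return 0;
}
```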
| 3872 bool CEntryStub::IsPregenerated() { | 3737 bool CEntryStub::IsPregenerated() { |
| 3873 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && | 3738 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && |
| 3874 result_size_ == 1; | 3739 result_size_ == 1; |
| 3875 } | 3740 } |
| 3876 | 3741 |
| 3877 | 3742 |
| 3878 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 3743 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
| 3879 CEntryStub::GenerateAheadOfTime(isolate); | 3744 CEntryStub::GenerateAheadOfTime(isolate); |
| 3880 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3745 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 3881 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3746 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| | 3747 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
| 3882 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3748 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 3883 } | 3749 } |
| 3884 | 3750 |
| 3885 | 3751 |
| 3886 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 3752 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
| 3887 SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) | 3753 SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) |
| 3888 ? kSaveFPRegs | 3754 ? kSaveFPRegs |
| 3889 : kDontSaveFPRegs; | 3755 : kDontSaveFPRegs; |
| 3890 CEntryStub save_doubles(1, mode); | 3756 CEntryStub save_doubles(1, mode); |
| 3891 StoreBufferOverflowStub stub(mode); | 3757 StoreBufferOverflowStub stub(mode); |
| 3892 // These stubs might already be in the snapshot, detect that and don't | 3758 // These stubs might already be in the snapshot, detect that and don't |
| 3893 // regenerate, which would lead to code stub initialization state being messed | 3759 // regenerate, which would lead to code stub initialization state being messed |
| 3894 // up. | 3760 // up. |
| 3895 Code* save_doubles_code; | 3761 Code* save_doubles_code; |
| 3896 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { | 3762 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { |
| 3897 save_doubles_code = *save_doubles.GetCode(isolate); | 3763 save_doubles_code = *save_doubles.GetCode(isolate); |
| 3898 save_doubles_code->set_is_pregenerated(true); | |
| 3899 | |
| 3900 Code* store_buffer_overflow_code = *stub.GetCode(isolate); | |
| 3901 store_buffer_overflow_code->set_is_pregenerated(true); | |
| 3902 } | 3764 } |
| | 3765 Code* store_buffer_overflow_code; |
| | 3766 if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) { |
| | 3767 store_buffer_overflow_code = *stub.GetCode(isolate); |
| | 3768 } |
| | 3769 save_doubles_code->set_is_pregenerated(true); |
| | 3770 store_buffer_overflow_code->set_is_pregenerated(true); |
| 3903 isolate->set_fp_stubs_generated(true); | 3771 isolate->set_fp_stubs_generated(true); |
| 3904 } | 3772 } |
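The rewritten GenerateFPStubs first looks each stub up in the code cache and only generates it when missing, then marks both results as pregenerated; as the comment notes, regenerating a stub that is already in the snapshot would corrupt the stub initialization state. A sketch of that idiom with invented stand-in types (FindCodeInCache/GetCode are modeled on the calls in the diff; everything else is illustrative):

```cpp
struct Code { bool is_pregenerated = false; };

struct Stub {
  Code* cached = nullptr;                 // stands in for the real code cache
  bool FindCodeInCache(Code** out) {      // true if the code already exists
    if (cached != nullptr) { *out = cached; return true; }
    return false;
  }
  Code* GetCode() { return cached = new Code(); }  // generate on demand
};

void EnsurePregenerated(Stub* stub) {
  Code* code;
  if (!stub->FindCodeInCache(&code)) {
    code = stub->GetCode();               // generate only when missing
  }
  code->is_pregenerated = true;           // mark in either case
}

int main() {
  Stub stub;
  EnsurePregenerated(&stub);   // generates the code
  EnsurePregenerated(&stub);   // finds the cached code and just re-marks it
  return 0;
}
```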
| 3905 | 3773 |
| 3906 | 3774 |
| 3907 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | 3775 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { |
| 3908 CEntryStub stub(1, kDontSaveFPRegs); | 3776 CEntryStub stub(1, kDontSaveFPRegs); |
| 3909 Handle<Code> code = stub.GetCode(isolate); | 3777 Handle<Code> code = stub.GetCode(isolate); |
| 3910 code->set_is_pregenerated(true); | 3778 code->set_is_pregenerated(true); |
| 3911 } | 3779 } |
| 3912 | 3780 |
| (...skipping 3742 matching lines...) |
| 7655 address_.is(entry->address) && | 7523 address_.is(entry->address) && |
| 7656 remembered_set_action_ == entry->action && | 7524 remembered_set_action_ == entry->action && |
| 7657 save_fp_regs_mode_ == kDontSaveFPRegs) { | 7525 save_fp_regs_mode_ == kDontSaveFPRegs) { |
| 7658 return true; | 7526 return true; |
| 7659 } | 7527 } |
| 7660 } | 7528 } |
| 7661 return false; | 7529 return false; |
| 7662 } | 7530 } |
| 7663 | 7531 |
| 7664 | 7532 |
| 7665 bool StoreBufferOverflowStub::IsPregenerated() { | |
| 7666 return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); | |
| 7667 } | |
| 7668 | |
| 7669 | |
| 7670 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( | 7533 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( |
| 7671 Isolate* isolate) { | 7534 Isolate* isolate) { |
| 7672 StoreBufferOverflowStub stub1(kDontSaveFPRegs); | 7535 StoreBufferOverflowStub stub1(kDontSaveFPRegs); |
| 7673 stub1.GetCode(isolate)->set_is_pregenerated(true); | 7536 stub1.GetCode(isolate)->set_is_pregenerated(true); |
| 7674 } | 7537 } |
| 7675 | 7538 |
| 7676 | 7539 |
| 7677 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { | 7540 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { |
| 7678 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; | 7541 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; |
| 7679 !entry->object.is(no_reg); | 7542 !entry->object.is(no_reg); |
| (...skipping 268 matching lines...) |
| 7948 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7811 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| 7949 __ StoreNumberToDoubleElements(r0, r3, | 7812 __ StoreNumberToDoubleElements(r0, r3, |
| 7950 // Overwrites all regs after this. | 7813 // Overwrites all regs after this. |
| 7951 r5, r6, r7, r9, r2, | 7814 r5, r6, r7, r9, r2, |
| 7952 &slow_elements); | 7815 &slow_elements); |
| 7953 __ Ret(); | 7816 __ Ret(); |
| 7954 } | 7817 } |
| 7955 | 7818 |
| 7956 | 7819 |
| 7957 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 7820 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| 7958 ASSERT(!Serializer::enabled()); | 7821 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); |
| 7959 bool save_fp_regs = CpuFeatures::IsSupported(VFP2); | |
| 7960 CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs); | |
| 7961 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 7822 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 7962 int parameter_count_offset = | 7823 int parameter_count_offset = |
| 7963 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 7824 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
| 7964 __ ldr(r1, MemOperand(fp, parameter_count_offset)); | 7825 __ ldr(r1, MemOperand(fp, parameter_count_offset)); |
| 7965 if (function_mode_ == JS_FUNCTION_STUB_MODE) { | 7826 if (function_mode_ == JS_FUNCTION_STUB_MODE) { |
| 7966 __ add(r1, r1, Operand(1)); | 7827 __ add(r1, r1, Operand(1)); |
| 7967 } | 7828 } |
| 7968 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 7829 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
| 7969 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); | 7830 __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); |
| 7970 __ add(sp, sp, r1); | 7831 __ add(sp, sp, r1); |
| (...skipping 58 matching lines...) |
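The trampoline above pops the caller's stack arguments by loading the argument count from the frame, adding one in JS_FUNCTION_STUB_MODE (to account for the receiver), and shifting by kPointerSizeLog2 to turn a count into a byte offset for sp. The arithmetic as ordinary host code, with values chosen purely for illustration:

```cpp
#include <cstdio>

int main() {
  const int kPointerSizeLog2 = 2;       // 4-byte pointers on 32-bit ARM
  int caller_args = 3;                  // value the stub loads from the frame
  bool js_function_stub_mode = true;    // assumed for the example
  if (js_function_stub_mode) caller_args += 1;   // include the receiver
  int bytes_popped = caller_args << kPointerSizeLog2;
  printf("sp += %d bytes\n", bytes_popped);      // 16 bytes for 3 args + receiver
  return 0;
}
```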
| 8029 | 7890 |
| 8030 __ Pop(lr, r5, r1); | 7891 __ Pop(lr, r5, r1); |
| 8031 __ Ret(); | 7892 __ Ret(); |
| 8032 } | 7893 } |
| 8033 | 7894 |
| 8034 #undef __ | 7895 #undef __ |
| 8035 | 7896 |
| 8036 } } // namespace v8::internal | 7897 } } // namespace v8::internal |
| 8037 | 7898 |
| 8038 #endif // V8_TARGET_ARCH_ARM | 7899 #endif // V8_TARGET_ARCH_ARM |