Chromium Code Reviews

Diff: src/mips/code-stubs-mips.cc

Issue 13529018: MIPS: Compile FastCloneShallowArrayStub using Crankshaft. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 8 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 21 matching lines...)
 #include "bootstrapper.h"
 #include "code-stubs.h"
 #include "codegen.h"
 #include "regexp-macro-assembler.h"
 #include "stub-cache.h"

 namespace v8 {
 namespace internal {


+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a3, a2, a1 };
+  descriptor->register_param_count_ = 3;
+  descriptor->register_params_ = registers;
+  descriptor->stack_parameter_count_ = NULL;
+  descriptor->deoptimization_handler_ =
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
+
+
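The three registers in this new descriptor correspond to the three stack parameters documented in the hand-written stub removed below (literals array, literal index, constant elements); once Crankshaft compiles the stub, those values arrive in registers rather than on the stack. A minimal sketch of the likely pairing, inferred from the old stub's stack-layout comment (illustrative names only, not V8 types):

    // Hypothetical illustration: the stub's logical parameters and the
    // MIPS registers the descriptor above assigns to them.
    enum CloneParam { kLiteralsArray = 0, kLiteralIndex = 1, kConstantElements = 2 };
    static const char* kParamRegister[] = {
      "a3",  // kLiteralsArray    -- the function's literals array
      "a2",  // kLiteralIndex     -- smi index of this literal
      "a1",  // kConstantElements -- constant elements fixed array
    };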
 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { a3, a2, a1, a0 };
   descriptor->register_param_count_ = 4;
   descriptor->register_params_ = registers;
   descriptor->stack_parameter_count_ = NULL;
   descriptor->deoptimization_handler_ =
       Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
 }
(...skipping 343 matching lines...)
   // Remove the on-stack argument and return.
   __ mov(cp, v0);
   __ DropAndRet(2);

   // Need to collect. Call into runtime system.
   __ bind(&gc);
   __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
 }


-static void GenerateFastCloneShallowArrayCommon(
-    MacroAssembler* masm,
-    int length,
-    FastCloneShallowArrayStub::Mode mode,
-    AllocationSiteMode allocation_site_mode,
-    Label* fail) {
-  // Registers on entry:
-  //   a3: boilerplate literal array.
-  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = 0;
-  if (length > 0) {
-    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length)
-        : FixedArray::SizeFor(length);
-  }
-
-  int size = JSArray::kSize;
-  int allocation_info_start = size;
-  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
-    size += AllocationSiteInfo::kSize;
-  }
-  size += elements_size;
-
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ Allocate(size, v0, a1, a2, fail, TAG_OBJECT);
-
-  if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
-    __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()->
-                                      allocation_site_info_map())));
-    __ sw(a2, FieldMemOperand(v0, allocation_info_start));
-    __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize));
-  }
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length == 0)) {
-      __ lw(a1, FieldMemOperand(a3, i));
-      __ sw(a1, FieldMemOperand(v0, i));
-    }
-  }
-
-  if (length > 0) {
-    // Get hold of the elements array of the boilerplate and set up the
-    // elements pointer in the resulting object.
-    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-    if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
-      __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
-    } else {
-      __ Addu(a2, v0, Operand(JSArray::kSize));
-    }
-    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    ASSERT((elements_size % kPointerSize) == 0);
-    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
-  }
-}
-
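For reference, the one-shot allocation in the removed helper above sizes the block as JSArray::kSize, plus AllocationSiteInfo::kSize when allocation sites are tracked, plus the elements array. A standalone sketch of that arithmetic, assuming 32-bit layout constants (4-byte pointers, 8-byte doubles, two-word object headers; these constants are assumptions, not quoted from V8):

    #include <cstdio>

    const int kPointerSize = 4;
    const int kDoubleSize = 8;
    const int kJSArraySize = 4 * kPointerSize;       // map, properties, elements, length
    const int kFixedArrayHeader = 2 * kPointerSize;  // map, length
    const int kAllocationSiteInfoSize = 2 * kPointerSize;

    // Mirrors the size computation in GenerateFastCloneShallowArrayCommon.
    int CloneAllocationSize(int length, bool double_elements, bool track_site) {
      int elements_size = (length > 0)
          ? kFixedArrayHeader + length * (double_elements ? kDoubleSize : kPointerSize)
          : 0;
      int size = kJSArraySize;
      if (track_site) size += kAllocationSiteInfoSize;  // sits between array and elements
      return size + elements_size;
    }

    int main() {
      // Cloning [1, 2, 3] with site tracking: 16 + 8 + (8 + 3*4) = 44 bytes.
      printf("%d\n", CloneAllocationSize(3, false, true));
      return 0;
    }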
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [sp]: constant elements.
-  // [sp + kPointerSize]: literal index.
-  // [sp + (2 * kPointerSize)]: literals array.
-
-  // Load boilerplate object into a3 and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
-  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
-  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a3, t0);
-  __ lw(a3, MemOperand(t0));
-  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
-  __ Branch(&slow_case, eq, a3, Operand(t1));
-
-  FastCloneShallowArrayStub::Mode mode = mode_;
-  if (mode == CLONE_ANY_ELEMENTS) {
-    Label double_elements, check_fast_elements;
-    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
-    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
-    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
-    GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
-                                        allocation_site_mode_,
-                                        &slow_case);
-    // Return and remove the on-stack parameters.
-    __ DropAndRet(3);
-
-    __ bind(&check_fast_elements);
-    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
-    __ Branch(&double_elements, ne, v0, Operand(t1));
-    GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
-                                        allocation_site_mode_,
-                                        &slow_case);
-    // Return and remove the on-stack parameters.
-    __ DropAndRet(3);
-
-    __ bind(&double_elements);
-    mode = CLONE_DOUBLE_ELEMENTS;
-    // Fall through to generate the code to handle double elements.
-  }
-
-  if (FLAG_debug_code) {
-    const char* message;
-    Heap::RootListIndex expected_map_index;
-    if (mode == CLONE_ELEMENTS) {
-      message = "Expected (writable) fixed array";
-      expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
-    } else {
-      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
-      message = "Expected copy-on-write fixed array";
-      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
-    }
-    __ push(a3);
-    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
-    __ LoadRoot(at, expected_map_index);
-    __ Assert(eq, message, a3, Operand(at));
-    __ pop(a3);
-  }
-
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode,
-                                      allocation_site_mode_,
-                                      &slow_case);
-
-  // Return and remove the on-stack parameters.
-  __ DropAndRet(3);
-
-  __ bind(&slow_case);
-  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
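One detail of the removed fast path worth noting: the literal index is loaded as a smi (small integer tagged with one low zero bit), so the sll by kPointerSizeLog2 - kSmiTagSize above turns it directly into a byte offset into the literals array, fusing the untag and the scale into a single shift. A standalone sketch with the 32-bit constants (1-bit smi tag, 4-byte pointers):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;       // a smi is the value shifted left one bit
    const int kPointerSizeLog2 = 2;  // 4-byte pointers on MIPS32

    // (smi >> kSmiTagSize) * kPointerSize == smi << (kPointerSizeLog2 - kSmiTagSize)
    uint32_t SmiIndexToByteOffset(uint32_t smi) {
      return smi << (kPointerSizeLog2 - kSmiTagSize);
    }

    int main() {
      uint32_t smi_five = 5u << kSmiTagSize;  // smi-tagged index 5
      assert(SmiIndexToByteOffset(smi_five) == 5u * 4u);
      return 0;
    }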
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
 // scratch register. Destroys the source register. No GC occurs during this
 // stub so you don't have to set up the frame.
 class ConvertToDoubleStub : public PlatformCodeStub {
  public:
   ConvertToDoubleStub(Register result_reg_1,
                       Register result_reg_2,
                       Register source_reg,
(...skipping 3387 matching lines...)
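The two-word format in the ConvertToDoubleStub comment above is the standard IEEE 754 binary64 layout viewed as a pair of 32-bit registers: the first word holds the sign bit, the 11 exponent bits, and the top 20 fraction bits; the second word holds the remaining 32 fraction bits. A standalone sketch that prints the split for a small integer:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 5.0;  // e.g. the result of converting Smi 5
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);        // bit-level view of the double

      uint32_t hi = (uint32_t)(bits >> 32);  // sign | exponent | fraction[51:32]
      uint32_t lo = (uint32_t)bits;          // fraction[31:0]

      // 5.0 = 1.25 * 2^2 -> sign 0, exponent 1023 + 2 = 1025, fraction 0x40000_00000000.
      printf("sign=%u exp=%u frac_hi=0x%05x frac_lo=0x%08x\n",
             (unsigned)(hi >> 31), (unsigned)((hi >> 20) & 0x7FF),
             (unsigned)(hi & 0xFFFFF), (unsigned)lo);
      return 0;
    }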
 bool CEntryStub::IsPregenerated() {
   return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
          result_size_ == 1;
 }


 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   CEntryStub::GenerateAheadOfTime(isolate);
   WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
 }


 void CodeStub::GenerateFPStubs(Isolate* isolate) {
   SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
       ? kSaveFPRegs
       : kDontSaveFPRegs;
   CEntryStub save_doubles(1, mode);
   StoreBufferOverflowStub stub(mode);
   // These stubs might already be in the snapshot, detect that and don't
   // regenerate, which would lead to code stub initialization state being messed
   // up.
   Code* save_doubles_code;
   if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
     save_doubles_code = *save_doubles.GetCode(isolate);
-    save_doubles_code->set_is_pregenerated(true);
-
-    Code* store_buffer_overflow_code = *stub.GetCode(isolate);
-    store_buffer_overflow_code->set_is_pregenerated(true);
   }
+  Code* store_buffer_overflow_code;
+  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
+    store_buffer_overflow_code = *stub.GetCode(isolate);
+  }
+  save_doubles_code->set_is_pregenerated(true);
+  store_buffer_overflow_code->set_is_pregenerated(true);
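The rework above makes the two cache checks independent: a stub already present in the snapshot is found via FindCodeInCache and left alone, one that is missing is generated via GetCode, and both are marked pregenerated either way. A minimal sketch of the lookup-or-generate shape, with a hypothetical map standing in for the isolate's stub code cache:

    #include <map>
    #include <string>

    static std::map<std::string, int> cache;  // hypothetical stand-in for the stub cache
    static int next_code = 100;

    int GetOrGenerate(const std::string& key) {
      auto it = cache.find(key);        // FindCodeInCache analogue
      if (it != cache.end()) return it->second;
      return cache[key] = next_code++;  // GetCode analogue: generate and cache
    }

    int main() {
      int a = GetOrGenerate("CEntry/save-doubles");
      int b = GetOrGenerate("StoreBufferOverflow");
      // A second lookup reuses the cached copy instead of regenerating:
      return (GetOrGenerate("CEntry/save-doubles") == a && b != a) ? 0 : 1;
    }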
   isolate->set_fp_stubs_generated(true);
 }


 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
   CEntryStub stub(1, kDontSaveFPRegs);
   Handle<Code> code = stub.GetCode(isolate);
   code->set_is_pregenerated(true);
 }

(...skipping 3820 matching lines...)
         address_.is(entry->address) &&
         remembered_set_action_ == entry->action &&
         save_fp_regs_mode_ == kDontSaveFPRegs) {
       return true;
     }
   }
   return false;
 }


-bool StoreBufferOverflowStub::IsPregenerated() {
-  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
     Isolate* isolate) {
   StoreBufferOverflowStub stub1(kDontSaveFPRegs);
   stub1.GetCode(isolate)->set_is_pregenerated(true);
 }


 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
   for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
(...skipping 270 matching lines...)
   __ StoreNumberToDoubleElements(a0, a3,
                                  // Overwrites all regs after this.
                                  t1, t2, t3, t5, a2,
                                  &slow_elements);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
 }


 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
-  ASSERT(!Serializer::enabled());
-  bool save_fp_regs = CpuFeatures::IsSupported(FPU);
-  CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
   __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ lw(a1, MemOperand(fp, parameter_count_offset));
   if (function_mode_ == JS_FUNCTION_STUB_MODE) {
     __ Addu(a1, a1, Operand(1));
   }
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ sll(a1, a1, kPointerSizeLog2);
   __ Addu(sp, sp, a1);
(...skipping 59 matching lines...)
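After the CEntry call returns, the trampoline above pops the caller's stack parameters by scaling the count into bytes (the sll by kPointerSizeLog2) and bumping sp; in JS_FUNCTION_STUB_MODE one extra slot is added first, presumably for the receiver (my reading, not stated in the diff). The arithmetic as a standalone sketch:

    #include <cassert>

    const int kPointerSizeLog2 = 2;  // 4-byte stack slots on MIPS32

    // Bytes to add to sp after the stub-failure trampoline returns.
    int StackBytesToPop(int parameter_count, bool js_function_stub_mode) {
      if (js_function_stub_mode) parameter_count += 1;  // assumed: receiver slot
      return parameter_count << kPointerSizeLog2;       // parameter_count * 4
    }

    int main() {
      assert(StackBytesToPop(3, false) == 12);
      assert(StackBytesToPop(3, true) == 16);
      return 0;
    }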
   __ Pop(ra, t1, a1);
   __ Ret();
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_MIPS
