Chromium Code Reviews

Unified Diff: src/mips/code-stubs-mips.cc

Issue 1480003002: [runtime] Replace global object link with native context link in all contexts. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Add patch from Orion for interpreter cementation test. Disable obsolete/invalid tests. (Created 5 years ago)
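
For orientation: before this CL, reaching the native context from a context meant hopping through the global object (Context -> JSGlobalObject -> native context); afterwards, every context carries a direct native-context link. A minimal C++ sketch of the two access paths, using illustrative stand-in structs rather than V8's real object layouts (field names mirror the constants in the diff below):

    // Illustrative stand-ins only, not V8's actual classes.
    struct NativeContext;
    struct JSGlobalObject {
      NativeContext* native_context;  // JSGlobalObject::kNativeContextOffset
    };
    struct Context {
      Context* previous;              // Context::PREVIOUS_INDEX
      JSGlobalObject* global_object;  // old link: Context::GLOBAL_OBJECT_INDEX
      NativeContext* native_context;  // new link: Context::NATIVE_CONTEXT_INDEX
    };

    // Old scheme: two dependent loads through the global object.
    NativeContext* GetNativeContextOld(Context* cp) {
      return cp->global_object->native_context;
    }

    // New scheme: a single load, since every context links to its native context.
    NativeContext* GetNativeContextNew(Context* cp) {
      return cp->native_context;
    }

This is why most hunks below shrink a two- or three-instruction load sequence to a single instruction or a single macro.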
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if V8_TARGET_ARCH_MIPS

 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
(...skipping 1680 matching lines...)
   __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);

   // v0 = address of new object(s) (tagged)
   // a2 = argument count (smi-tagged)
   // Get the arguments boilerplate from the current native context into t0.
   const int kNormalOffset =
       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   const int kAliasedOffset =
       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

-  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
+  __ lw(t0, NativeContextMemOperand());
   Label skip2_ne, skip2_eq;
   __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
   __ lw(t0, MemOperand(t0, kNormalOffset));
   __ bind(&skip2_ne);

   __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
   __ lw(t0, MemOperand(t0, kAliasedOffset));
   __ bind(&skip2_eq);

   // v0 = address of new object (tagged)
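
The single lw works because NativeContextMemOperand() addresses the native-context slot of the current context register directly. The helper presumably lands in src/mips/macro-assembler-mips.h (also part of this CL); a plausible sketch of its shape, hedged since that header is not shown here:

    // Sketch, assuming the usual MIPS macro-assembler helpers of this era.
    inline MemOperand ContextMemOperand(Register context, int index) {
      return MemOperand(context, Context::SlotOffset(index));
    }

    inline MemOperand NativeContextMemOperand() {
      // cp holds the current Context; its NATIVE_CONTEXT_INDEX slot points
      // straight at the native context, so one lw replaces the old two.
      return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
    }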
(...skipping 182 matching lines...)

   __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize));
   __ bind(&add_arguments_object);
   __ Addu(t5, t5, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));

   // Do the allocation of both objects in one go.
   __ Allocate(t5, v0, t0, t1, &runtime,
               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Get the arguments boilerplate from the current native context.
-  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
-  __ lw(t0, MemOperand(
-                t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);

   __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
   __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
   __ sw(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   __ sw(t1, FieldMemOperand(v0, JSObject::kElementsOffset));

   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ AssertSmi(a2);
   __ sw(a2,
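
Here three dependent loads (global object, native context, map slot) collapse into one macro. A plausible expansion of LoadNativeContextSlot on MIPS, two loads total (a sketch, not necessarily the exact body added in this CL):

    // Sketch: dst = native_context[index], via the current context in cp.
    void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
      lw(dst, NativeContextMemOperand());      // dst = cp's native context
      lw(dst, ContextMemOperand(dst, index));  // dst = slot `index` of it
    }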
(...skipping 533 matching lines...)

   __ bind(&check_allocation_site);
   // If we came here, we need to see if we are the array function.
   // If we didn't have a matching function, and we didn't find the megamorph
   // sentinel, then we have in the slot either some other function or an
   // AllocationSite.
   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   __ Branch(&miss, ne, feedback_map, Operand(at));

   // Make sure the function is the Array() function
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
   __ Branch(&megamorphic, ne, a1, Operand(t2));
   __ jmp(&done);

   __ bind(&miss);

   // A monomorphic miss (i.e, here the cache is not uninitialized) goes
   // megamorphic.
   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
   __ Branch(&initialize, eq, t2, Operand(at));
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(t2, a2, Operand(t2));
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
   __ jmp(&done);

   // An uninitialized cache is patched with the function.
   __ bind(&initialize);
   // Make sure the function is the Array() function.
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
   __ Branch(&not_array_function, ne, a1, Operand(t2));

   // The target function is the Array constructor,
   // Create an AllocationSite if we don't already have it, store it in the
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
   __ Branch(&done);

   __ bind(&not_array_function);
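
The branches above implement the feedback-slot state machine for call sites: an uninitialized slot is patched with the callee (or an AllocationSite when the callee is Array()), a monomorphic miss goes megamorphic, and megamorphic is terminal. A hypothetical C++ model of those transitions, offered only as a reading aid for the assembly (this is not V8 code):

    enum class Feedback { kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic };

    // Hypothetical model of the transitions in the stub above.
    Feedback OnCallMiss(Feedback current, bool callee_is_array_function) {
      switch (current) {
        case Feedback::kUninitialized:
          // First observation (&initialize): remember the function, or an
          // AllocationSite when the callee is the Array() constructor.
          return callee_is_array_function ? Feedback::kAllocationSite
                                          : Feedback::kMonomorphic;
        case Feedback::kMonomorphic:
        case Feedback::kAllocationSite:
        case Feedback::kMegamorphic:
          // A monomorphic miss goes megamorphic (&megamorphic); the sentinel
          // is immortal and immovable, so the slot is never repatched.
          return Feedback::kMegamorphic;
      }
      return Feedback::kMegamorphic;  // unreachable
    }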
(...skipping 45 matching lines...)
   __ mov(a3, a1);
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }


 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // a1 - function
   // a3 - slot id
   // a2 - vector
   // t0 - loaded from vector[slot]
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
   __ Branch(miss, ne, a1, Operand(at));

   __ li(a0, Operand(arg_count()));

   // Increment the call count for monomorphic function calls.
   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(at, a2, Operand(at));
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
(...skipping 107 matching lines...)

   // We are going monomorphic, provided we actually have a JSFunction.
   __ JumpIfSmi(a1, &miss);

   // Goto miss case if we do not have a function.
   __ GetObjectType(a1, t0, t0);
   __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));

   // Make sure the function is not the Array() function, which requires special
   // behavior on MISS.
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
   __ Branch(&miss, eq, a1, Operand(t0));

-  // Make sure the function belongs to the same native context (which implies
-  // the same global object).
+  // Make sure the function belongs to the same native context.
   __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
-  __ lw(t0, ContextOperand(t0, Context::GLOBAL_OBJECT_INDEX));
-  __ lw(t1, GlobalObjectOperand());
+  __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
+  __ lw(t1, NativeContextMemOperand());
   __ Branch(&miss, ne, t0, Operand(t1));

   // Update stats.
   __ lw(t0, FieldMemOperand(a2, with_types_offset));
   __ Addu(t0, t0, Operand(Smi::FromInt(1)));
   __ sw(t0, FieldMemOperand(a2, with_types_offset));

   // Initialize the call counter.
   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(at, a2, Operand(at));
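
Note the simplification in the same-context check: the old code compared global objects (via GlobalObjectOperand()), the new code compares native contexts directly, one fewer indirection. In terms of the stand-in structs from the first sketch:

    // Sketch of what the instructions after the rewritten comment compute.
    bool SameNativeContext(Context* function_context, Context* current_cp) {
      return function_context->native_context == current_cp->native_context;
    }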
(...skipping 2428 matching lines...)


 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = cp;
   Register slot_reg = a2;
   Register result_reg = v0;
   Label slow_case;

   // Go up context chain to the script context.
   for (int i = 0; i < depth(); ++i) {
-    __ lw(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+    __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
     context_reg = result_reg;
   }

   // Load the PropertyCell value at the specified slot.
   __ sll(at, slot_reg, kPointerSizeLog2);
   __ Addu(at, at, Operand(context_reg));
-  __ lw(result_reg, ContextOperand(at, 0));
+  __ lw(result_reg, ContextMemOperand(at, 0));
   __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));

   // Check that value is not the_hole.
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   __ Branch(&slow_case, eq, result_reg, Operand(at));
   __ Ret();

   // Fallback to the runtime.
   __ bind(&slow_case);
   __ SmiTag(slot_reg);
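
The last two stubs change only by the mechanical ContextOperand -> ContextMemOperand rename; the logic of walking depth() PREVIOUS links up to the script context is untouched. Again with the illustrative structs from the first sketch:

    // Sketch: what the unrolled loop in the stub computes (depth is a
    // compile-time parameter of the stub, so the loop unrolls at codegen).
    Context* GetScriptContext(Context* cp, int depth) {
      for (int i = 0; i < depth; ++i) cp = cp->previous;
      return cp;
    }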
(...skipping 11 matching lines...)
   Register cell_details_reg = t2;
   Label fast_heapobject_case, fast_smi_case, slow_case;

   if (FLAG_debug_code) {
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
   }

   // Go up context chain to the script context.
   for (int i = 0; i < depth(); ++i) {
-    __ lw(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+    __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
     context_reg = cell_reg;
   }

   // Load the PropertyCell at the specified slot.
   __ sll(at, slot_reg, kPointerSizeLog2);
   __ Addu(at, at, Operand(context_reg));
-  __ lw(cell_reg, ContextOperand(at, 0));
+  __ lw(cell_reg, ContextMemOperand(at, 0));

   // Load PropertyDetails for the cell (actually only the cell_type and kind).
   __ lw(cell_details_reg,
         FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
   __ SmiUntag(cell_details_reg);
   __ And(cell_details_reg, cell_details_reg,
          PropertyDetails::PropertyCellTypeField::kMask |
              PropertyDetails::KindField::kMask |
              PropertyDetails::kAttributesReadOnlyMask);

(...skipping 380 matching lines...)
                            MemOperand(fp, 6 * kPointerSize), NULL);
 }


 #undef __

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS
