Chromium Code Reviews

Unified Diff: src/mips/code-stubs-mips.cc

Issue 1478303002: Revert of [runtime] Replace global object link with native context link in all contexts. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if V8_TARGET_ARCH_MIPS

 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
(...skipping 1680 matching lines...)
   __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);

   // v0 = address of new object(s) (tagged)
   // a2 = argument count (smi-tagged)
   // Get the arguments boilerplate from the current native context into t0.
   const int kNormalOffset =
       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   const int kAliasedOffset =
       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

-  __ lw(t0, NativeContextMemOperand());
+  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
   Label skip2_ne, skip2_eq;
   __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
   __ lw(t0, MemOperand(t0, kNormalOffset));
   __ bind(&skip2_ne);

   __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
   __ lw(t0, MemOperand(t0, kAliasedOffset));
   __ bind(&skip2_eq);

   // v0 = address of new object (tagged)
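Note on this hunk: before the revert the stub reached the native context in a single load (NativeContextMemOperand()); the revert restores the two-step path through the global object link. A minimal C++ sketch of the post-revert lookup, using hypothetical struct and field names in place of the real V8 heap layout:

struct NativeContext;

struct JSGlobalObject {
  NativeContext* native_context;  // JSGlobalObject::kNativeContextOffset
};

struct Context {
  JSGlobalObject* global_object;  // slot at Context::GLOBAL_OBJECT_INDEX
};

// Post-revert path, matching the two `lw` instructions above:
// cp -> global object -> native context.
NativeContext* LoadNativeContext(Context* cp) {
  JSGlobalObject* global = cp->global_object;  // first lw
  return global->native_context;               // second lw (FieldMemOperand)
}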
(...skipping 182 matching lines...)

   __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize));
   __ bind(&add_arguments_object);
   __ Addu(t5, t5, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));

   // Do the allocation of both objects in one go.
   __ Allocate(t5, v0, t0, t1, &runtime,
               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Get the arguments boilerplate from the current native context.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);
+  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
+  __ lw(t0, MemOperand(
+                t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));

   __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
   __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
   __ sw(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   __ sw(t1, FieldMemOperand(v0, JSObject::kElementsOffset));

   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ AssertSmi(a2);
   __ sw(a2,
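The stores in this hunk initialize the strict arguments object header: its map, empty properties and elements arrays, and the smi-tagged length as in-object property 0 (the store of a2 is cut off at the chunk boundary). A hedged sketch of that layout, with a hypothetical struct standing in for the real object:

// Hypothetical 32-bit layout of the strict arguments object built above.
struct StrictArgumentsObject {
  void* map;         // JSObject::kMapOffset        <- strict arguments map
  void* properties;  // JSObject::kPropertiesOffset <- empty fixed array
  void* elements;    // JSObject::kElementsOffset   <- empty fixed array
  int32_t length;    // in-object slot 0 (Heap::kArgumentsLengthIndex), a Smi
};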
(...skipping 533 matching lines...)

   __ bind(&check_allocation_site);
   // If we came here, we need to see if we are the array function.
   // If we didn't have a matching function, and we didn't find the megamorph
   // sentinel, then we have in the slot either some other function or an
   // AllocationSite.
   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   __ Branch(&miss, ne, feedback_map, Operand(at));

   // Make sure the function is the Array() function
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
   __ Branch(&megamorphic, ne, a1, Operand(t2));
   __ jmp(&done);

   __ bind(&miss);

   // A monomorphic miss (i.e, here the cache is not uninitialized) goes
   // megamorphic.
   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
   __ Branch(&initialize, eq, t2, Operand(at));
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(t2, a2, Operand(t2));
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
   __ jmp(&done);

   // An uninitialized cache is patched with the function.
   __ bind(&initialize);
   // Make sure the function is the Array() function.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
   __ Branch(&not_array_function, ne, a1, Operand(t2));

   // The target function is the Array constructor,
   // Create an AllocationSite if we don't already have it, store it in the
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
   __ Branch(&done);

   __ bind(&not_array_function);
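This hunk is part of the CallIC feedback-slot state machine: an uninitialized slot is patched with the callee (or an AllocationSite when the callee is the Array() function), and a monomorphic slot that misses goes megamorphic. A minimal sketch of those transitions in plain C++, with hypothetical names; in the real code the states are sentinel symbols stored in the feedback vector:

enum class FeedbackState { kUninitialized, kMonomorphic, kMegamorphic };

// Hypothetical model of the transitions the assembly above performs when
// the cached target does not match the incoming callee.
FeedbackState OnCallMiss(FeedbackState current) {
  switch (current) {
    case FeedbackState::kUninitialized:
      // Patch the slot with the function (or an AllocationSite for Array()).
      return FeedbackState::kMonomorphic;
    case FeedbackState::kMonomorphic:
    case FeedbackState::kMegamorphic:
      // Any miss on a non-uninitialized slot ends megamorphic.
      return FeedbackState::kMegamorphic;
  }
  return FeedbackState::kMegamorphic;  // unreachable; silences warnings
}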
(...skipping 45 matching lines...)
   __ mov(a3, a1);
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }


 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // a1 - function
   // a3 - slot id
   // a2 - vector
   // t0 - loaded from vector[slot]
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
   __ Branch(miss, ne, a1, Operand(at));

   __ li(a0, Operand(arg_count()));

   // Increment the call count for monomorphic function calls.
   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(at, a2, Operand(at));
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
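The count update works entirely on smi-tagged values: the slot id in a3 is a Smi (already shifted left by kSmiTagSize), so shifting by kPointerSizeLog2 - kSmiTagSize turns it into a byte offset, and the count one pointer past vector[slot] is bumped with a plain Addu because adding two Smis is ordinary integer addition. A small sketch of that arithmetic on 32-bit values, with hypothetical helper names:

#include <cstdint>

// On 32-bit targets a Smi stores the integer shifted left by kSmiTagSize (1).
constexpr int kSmiTagSize = 1;
constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers

int32_t SmiFromInt(int32_t v) { return v << kSmiTagSize; }

// Smi-tagged slot index -> byte offset into the vector (the sll above).
int32_t SlotOffset(int32_t smi_slot) {
  return smi_slot << (kPointerSizeLog2 - kSmiTagSize);
}

// Incrementing a smi-tagged count is plain addition (the Addu above).
int32_t BumpCount(int32_t smi_count, int32_t increment) {
  return smi_count + SmiFromInt(increment);
}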
(...skipping 107 matching lines...)

   // We are going monomorphic, provided we actually have a JSFunction.
   __ JumpIfSmi(a1, &miss);

   // Goto miss case if we do not have a function.
   __ GetObjectType(a1, t0, t0);
   __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));

   // Make sure the function is not the Array() function, which requires special
   // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
   __ Branch(&miss, eq, a1, Operand(t0));

-  // Make sure the function belongs to the same native context.
+  // Make sure the function belongs to the same native context (which implies
+  // the same global object).
   __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
-  __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
-  __ lw(t1, NativeContextMemOperand());
+  __ lw(t0, ContextOperand(t0, Context::GLOBAL_OBJECT_INDEX));
+  __ lw(t1, GlobalObjectOperand());
   __ Branch(&miss, ne, t0, Operand(t1));

   // Update stats.
   __ lw(t0, FieldMemOperand(a2, with_types_offset));
   __ Addu(t0, t0, Operand(Smi::FromInt(1)));
   __ sw(t0, FieldMemOperand(a2, with_types_offset));

   // Initialize the call counter.
   __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(at, a2, Operand(at));
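The same-context check in this hunk now compares global objects rather than native contexts; as the updated comment notes, the two are equivalent because each native context links to exactly one global object and vice versa. Reusing the hypothetical types from the earlier sketch, the post-revert check amounts to:

// Hypothetical equivalent of the three loads above: fetch the callee's
// context, follow its global object link, and compare with the current one.
bool BelongsToCurrentContext(Context* callee_context, Context* cp) {
  return callee_context->global_object == cp->global_object;
}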
(...skipping 2428 matching lines...)


 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = cp;
   Register slot_reg = a2;
   Register result_reg = v0;
   Label slow_case;

   // Go up context chain to the script context.
   for (int i = 0; i < depth(); ++i) {
-    __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
+    __ lw(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
     context_reg = result_reg;
   }

   // Load the PropertyCell value at the specified slot.
   __ sll(at, slot_reg, kPointerSizeLog2);
   __ Addu(at, at, Operand(context_reg));
-  __ lw(result_reg, ContextMemOperand(at, 0));
+  __ lw(result_reg, ContextOperand(at, 0));
   __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));

   // Check that value is not the_hole.
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   __ Branch(&slow_case, eq, result_reg, Operand(at));
   __ Ret();

   // Fallback to the runtime.
   __ bind(&slow_case);
   __ SmiTag(slot_reg);
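Note that the C++ for loop here runs at code-generation time, so the stub emits depth() unrolled loads rather than a runtime loop, then reads the PropertyCell at the slot and bails to the runtime if the value is the hole. A sketch of the runtime behavior under hypothetical types:

struct Cell { void* value; };  // stands in for PropertyCell

struct Ctx {
  Ctx* previous;  // slot at Context::PREVIOUS_INDEX
  Cell** slots;   // script context slots holding PropertyCells
};

// What the generated (unrolled) code does for a stub specialized on `depth`.
void* LoadGlobalViaContext(Ctx* cp, int depth, int slot, void* the_hole) {
  for (int i = 0; i < depth; ++i) cp = cp->previous;  // unrolled lw sequence
  void* value = cp->slots[slot]->value;
  if (value == the_hole) return nullptr;  // slow_case: fall back to runtime
  return value;
}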
(...skipping 11 matching lines...)
   Register cell_details_reg = t2;
   Label fast_heapobject_case, fast_smi_case, slow_case;

   if (FLAG_debug_code) {
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
   }

   // Go up context chain to the script context.
   for (int i = 0; i < depth(); ++i) {
-    __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
+    __ lw(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
     context_reg = cell_reg;
   }

   // Load the PropertyCell at the specified slot.
   __ sll(at, slot_reg, kPointerSizeLog2);
   __ Addu(at, at, Operand(context_reg));
-  __ lw(cell_reg, ContextMemOperand(at, 0));
+  __ lw(cell_reg, ContextOperand(at, 0));

   // Load PropertyDetails for the cell (actually only the cell_type and kind).
   __ lw(cell_details_reg,
         FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
   __ SmiUntag(cell_details_reg);
   __ And(cell_details_reg, cell_details_reg,
          PropertyDetails::PropertyCellTypeField::kMask |
              PropertyDetails::KindField::kMask |
              PropertyDetails::kAttributesReadOnlyMask);
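The And above masks the untagged PropertyDetails word down to the three bitfields the store stub dispatches on: cell type, kind, and the read-only attribute bit. A hedged sketch of that decode, with placeholder bit positions (the real masks come from the BitField templates in src/property-details.h):

#include <cstdint>

// Placeholder bit positions; the real layout is defined by BitField<>.
constexpr uint32_t kCellTypeMask = 0x3u << 0;
constexpr uint32_t kKindMask     = 0x1u << 2;
constexpr uint32_t kReadOnlyMask = 0x1u << 3;

// Equivalent of the SmiUntag + And sequence above.
uint32_t RelevantDetails(uint32_t untagged_details) {
  return untagged_details & (kCellTypeMask | kKindMask | kReadOnlyMask);
}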
(...skipping 380 matching lines...)
                                MemOperand(fp, 6 * kPointerSize), NULL);
 }


 #undef __

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS