Chromium Code Reviews

Unified Diff: src/mips64/code-stubs-mips64.cc

Issue 1478303002: Revert of [runtime] Replace global object link with native context link in all contexts. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years ago
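
Context for the diff below: the CL being reverted had given every context a direct native-context link (NativeContextMemOperand, LoadNativeContextSlot, ContextMemOperand); this revert restores the older two-load path that reaches the native context through the global object (the GLOBAL_OBJECT_INDEX slot, then JSGlobalObject::kNativeContextOffset). A minimal standalone C++ sketch of the two linkage schemes, using illustrative analogues only (none of these types are V8's):

#include <cassert>

struct NativeContext;

struct GlobalObject {
  NativeContext* native_context;  // analogue of JSGlobalObject::kNativeContextOffset
};

struct Context {
  GlobalObject* global_object;    // analogue of the GLOBAL_OBJECT_INDEX slot
  NativeContext* native_context;  // analogue of the direct link this CL removes
};

struct NativeContext : Context {};

// Pre-revert scheme: a single load straight off the context.
NativeContext* Direct(Context* cp) { return cp->native_context; }

// Post-revert scheme (what this patch restores): two loads via the global object.
NativeContext* ViaGlobalObject(Context* cp) {
  return cp->global_object->native_context;
}

int main() {
  NativeContext nc;
  GlobalObject global{&nc};
  nc.global_object = &global;
  nc.native_context = &nc;
  assert(Direct(&nc) == ViaGlobalObject(&nc));  // both paths agree
  return 0;
}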
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if V8_TARGET_ARCH_MIPS64

 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/ic/handler-compiler.h"
(...skipping 1682 matching lines...)
   __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);

   // v0 = address of new object(s) (tagged)
   // a2 = argument count (smi-tagged)
   // Get the arguments boilerplate from the current native context into a4.
   const int kNormalOffset =
       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   const int kAliasedOffset =
       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

-  __ ld(a4, NativeContextMemOperand());
+  __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
   Label skip2_ne, skip2_eq;
   __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
   __ ld(a4, MemOperand(a4, kNormalOffset));
   __ bind(&skip2_ne);

   __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
   __ ld(a4, MemOperand(a4, kAliasedOffset));
   __ bind(&skip2_eq);

   // v0 = address of new object (tagged)
(...skipping 182 matching lines...)

   __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
   __ bind(&add_arguments_object);
   __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));

   // Do the allocation of both objects in one go.
   __ Allocate(t1, v0, a4, a5, &runtime,
               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Get the arguments boilerplate from the current native context.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);
+  __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
+  __ ld(a4, MemOperand(a4, Context::SlotOffset(
+                               Context::STRICT_ARGUMENTS_MAP_INDEX)));

   __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
   __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
   __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));

   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ AssertSmi(a2);
   __ sd(a2,
(...skipping 566 matching lines...)

   __ bind(&check_allocation_site);
   // If we came here, we need to see if we are the array function.
   // If we didn't have a matching function, and we didn't find the megamorph
   // sentinel, then we have in the slot either some other function or an
   // AllocationSite.
   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   __ Branch(&miss, ne, feedback_map, Operand(at));

   // Make sure the function is the Array() function
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
   __ Branch(&megamorphic, ne, a1, Operand(a5));
   __ jmp(&done);

   __ bind(&miss);

   // A monomorphic miss (i.e, here the cache is not uninitialized) goes
   // megamorphic.
   __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
   __ Branch(&initialize, eq, a5, Operand(at));
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
   __ dsrl(a5, a3, 32 - kPointerSizeLog2);
   __ Daddu(a5, a2, Operand(a5));
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
   __ jmp(&done);

   // An uninitialized cache is patched with the function.
   __ bind(&initialize);
   // Make sure the function is the Array() function.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
   __ Branch(&not_array_function, ne, a1, Operand(a5));

   // The target function is the Array constructor,
   // Create an AllocationSite if we don't already have it, store it in the
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
   __ Branch(&done);

   __ bind(&not_array_function);
(...skipping 86 matching lines...)
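Aside on the hunk above: apart from swapping LoadNativeContextSlot back to LoadGlobalFunction, the RecordCallTarget logic is untouched. It implements a small state machine over the feedback-vector slot, sketched below in standalone C++ with invented names (the real code stores the function, an AllocationSite, or sentinel symbols in the slot):

#include <cassert>

enum class Slot { kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic };

// One observed call target updates the slot, mirroring the branches around
// &initialize, &megamorphic and &not_array_function in the assembly above.
Slot Record(Slot slot, bool target_is_array_function, bool target_matches_slot) {
  switch (slot) {
    case Slot::kUninitialized:
      // An uninitialized cache is patched with the function; the Array()
      // function gets an AllocationSite instead.
      return target_is_array_function ? Slot::kAllocationSite : Slot::kMonomorphic;
    case Slot::kMonomorphic:
    case Slot::kAllocationSite:
      // A monomorphic miss goes megamorphic; a hit leaves the slot alone.
      return target_matches_slot ? slot : Slot::kMegamorphic;
    case Slot::kMegamorphic:
      return Slot::kMegamorphic;  // the sentinel is never patched back
  }
  return slot;
}

int main() {
  Slot s = Record(Slot::kUninitialized, false, false);
  assert(s == Slot::kMonomorphic);
  s = Record(s, false, false);  // miss on a different target
  assert(s == Slot::kMegamorphic);
  return 0;
}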
   __ SmiTag(result_);
   __ bind(&exit_);
 }


 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // a1 - function
   // a3 - slot id
   // a2 - vector
   // a4 - allocation site (loaded from vector[slot])
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
   __ Branch(miss, ne, a1, Operand(at));

   __ li(a0, Operand(arg_count()));

   // Increment the call count for monomorphic function calls.
   __ dsrl(t0, a3, 32 - kPointerSizeLog2);
   __ Daddu(a3, a2, Operand(t0));
   __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
   __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
(...skipping 107 matching lines...)

   // We are going monomorphic, provided we actually have a JSFunction.
   __ JumpIfSmi(a1, &miss);

   // Goto miss case if we do not have a function.
   __ GetObjectType(a1, a4, a4);
   __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));

   // Make sure the function is not the Array() function, which requires special
   // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
   __ Branch(&miss, eq, a1, Operand(a4));

-  // Make sure the function belongs to the same native context.
+  // Make sure the function belongs to the same native context (which implies
+  // the same global object).
   __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
-  __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
-  __ ld(t1, NativeContextMemOperand());
+  __ ld(t0, ContextOperand(t0, Context::GLOBAL_OBJECT_INDEX));
+  __ ld(t1, GlobalObjectOperand());
   __ Branch(&miss, ne, t0, Operand(t1));

   // Update stats.
   __ ld(a4, FieldMemOperand(a2, with_types_offset));
   __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
   __ sd(a4, FieldMemOperand(a2, with_types_offset));

   // Initialize the call counter.
   __ dsrl(at, a3, 32 - kPointerSizeLog2);
   __ Daddu(at, a2, Operand(at));
(...skipping 2377 matching lines...)


 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = cp;
   Register slot_reg = a2;
   Register result_reg = v0;
   Label slow_case;

   // Go up context chain to the script context.
   for (int i = 0; i < depth(); ++i) {
-    __ ld(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
+    __ ld(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
     context_reg = result_reg;
   }

   // Load the PropertyCell value at the specified slot.
   __ dsll(at, slot_reg, kPointerSizeLog2);
   __ Daddu(at, at, Operand(context_reg));
-  __ ld(result_reg, ContextMemOperand(at, 0));
+  __ ld(result_reg, ContextOperand(at, 0));
   __ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));

   // Check that value is not the_hole.
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   __ Branch(&slow_case, eq, result_reg, Operand(at));
   __ Ret();

   // Fallback to the runtime.
   __ bind(&slow_case);
   __ SmiTag(slot_reg);
(...skipping 11 matching lines...)
   Register cell_details_reg = a6;
   Label fast_heapobject_case, fast_smi_case, slow_case;

   if (FLAG_debug_code) {
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
   }

   // Go up context chain to the script context.
   for (int i = 0; i < depth(); ++i) {
-    __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
+    __ ld(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
     context_reg = cell_reg;
   }

   // Load the PropertyCell at the specified slot.
   __ dsll(at, slot_reg, kPointerSizeLog2);
   __ Daddu(at, at, Operand(context_reg));
-  __ ld(cell_reg, ContextMemOperand(at, 0));
+  __ ld(cell_reg, ContextOperand(at, 0));

   // Load PropertyDetails for the cell (actually only the cell_type and kind).
   __ ld(cell_details_reg,
         FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
   __ SmiUntag(cell_details_reg);
   __ And(cell_details_reg, cell_details_reg,
          PropertyDetails::PropertyCellTypeField::kMask |
              PropertyDetails::KindField::kMask |
              PropertyDetails::kAttributesReadOnlyMask);

(...skipping 385 matching lines...)
                              MemOperand(fp, 6 * kPointerSize), NULL);
 }


 #undef __

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS64
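
Note on the LoadGlobalViaContextStub / StoreGlobalViaContextStub hunks: the patch only renames ContextMemOperand back to ContextOperand there; the emitted logic is unchanged. The stub walks depth() PREVIOUS links up to the script context, then indexes the requested slot to reach a PropertyCell. A rough standalone C++ model of that walk, with invented names (not the V8 API):

#include <cstdio>
#include <vector>

struct Ctx {
  Ctx* previous = nullptr;   // analogue of the Context::PREVIOUS_INDEX slot
  std::vector<int> slots;    // analogue of script-context slots (cell values here)
};

// Mirrors the stub: follow `depth` previous-links, then load the slot, like the
// ld/dsll/Daddu sequence that computes context_reg + slot * kPointerSize.
int LoadGlobalViaContext(Ctx* cp, int depth, int slot) {
  for (int i = 0; i < depth; ++i) cp = cp->previous;  // go up to the script context
  return cp->slots[slot];
}

int main() {
  Ctx script;
  script.slots = {7, 42};
  Ctx function_ctx;
  function_ctx.previous = &script;
  std::printf("%d\n", LoadGlobalViaContext(&function_ctx, 1, 1));  // prints 42
  return 0;
}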
