Chromium Code Reviews

Unified Diff: src/mips64/code-stubs-mips64.cc

Issue 1480003002: [runtime] Replace global object link with native context link in all contexts. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Add patch from Orion for interpreter cementation test. Disable obsolete/invalid tests. Created 5 years ago
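
The gist of the change, as it shows up throughout this file: every context used to reach the native context indirectly through its global object (two dependent loads), and after this CL every context links to the native context directly (one load). A toy C++ model of the two layouts follows; type and field names are illustrative, not V8's actual classes.

  // Toy model only; not V8's real object layout.
  struct NativeContext;
  struct GlobalObjectModel { NativeContext* native_context; };

  struct ContextModel {
    GlobalObjectModel* global_object;  // old per-context link (GLOBAL_OBJECT_INDEX)
    NativeContext* native_context;     // new per-context link (NATIVE_CONTEXT_INDEX)
  };

  // Old pattern (removed below): two dependent loads.
  NativeContext* OldNativeContext(ContextModel* cp) {
    return cp->global_object->native_context;
  }

  // New pattern (added below): a single load.
  NativeContext* NewNativeContext(ContextModel* cp) {
    return cp->native_context;
  }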
Adjacent files in this change: src/mips64/builtins-mips64.cc | src/mips64/macro-assembler-mips64.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
(...skipping 1682 matching lines...)
  __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);

  // v0 = address of new object(s) (tagged)
  // a2 = argument count (smi-tagged)
  // Get the arguments boilerplate from the current native context into a4.
  const int kNormalOffset =
      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
  const int kAliasedOffset =
      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

- __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
+ __ ld(a4, NativeContextMemOperand());
  Label skip2_ne, skip2_eq;
  __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
  __ ld(a4, MemOperand(a4, kNormalOffset));
  __ bind(&skip2_ne);

  __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
  __ ld(a4, MemOperand(a4, kAliasedOffset));
  __ bind(&skip2_eq);

  // v0 = address of new object (tagged)
(...skipping 182 matching lines...)
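
The hunk above replaces the two dependent loads (global object, then its native-context field) with a single NativeContextMemOperand() read. That helper, and LoadNativeContextSlot() used further down, come from the accompanying change to src/mips64/macro-assembler-mips64.h; the bodies below are a sketch inferred from the call sites in this file — an assumption, not the actual patch.

  // Assumed shapes, inferred from usage in this diff.
  MemOperand ContextMemOperand(Register context, int index) {
    return MemOperand(context, Context::SlotOffset(index));
  }

  MemOperand NativeContextMemOperand() {
    // cp holds the current context, which now links directly to the
    // native context, so one slot read suffices.
    return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
  }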

  __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
  __ bind(&add_arguments_object);
  __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));

  // Do the allocation of both objects in one go.
  __ Allocate(t1, v0, a4, a5, &runtime,
              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

  // Get the arguments boilerplate from the current native context.
- __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
- __ ld(a4, MemOperand(a4, Context::SlotOffset(
-               Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);

  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
  __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
  __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ AssertSmi(a2);
  __ sd(a2,
(...skipping 566 matching lines...)
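
In the hunk above, LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4) collapses the old three-load chain (global object, then native context, then map slot) into two loads. A plausible sketch of the helper, again an assumption based on its call sites:

  // Assumed implementation sketch.
  void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
    ld(dst, NativeContextMemOperand());      // the native context
    ld(dst, ContextMemOperand(dst, index));  // the requested slot in it
  }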

  __ bind(&check_allocation_site);
  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorph
  // sentinel, then we have in the slot either some other function or an
  // AllocationSite.
  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
  __ Branch(&miss, ne, feedback_map, Operand(at));

  // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
  __ Branch(&megamorphic, ne, a1, Operand(a5));
  __ jmp(&done);

  __ bind(&miss);

  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
  // megamorphic.
  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
  __ Branch(&initialize, eq, a5, Operand(at));
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ bind(&megamorphic);
  __ dsrl(a5, a3, 32 - kPointerSizeLog2);
  __ Daddu(a5, a2, Operand(a5));
  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
  __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
  __ jmp(&done);

  // An uninitialized cache is patched with the function.
  __ bind(&initialize);
  // Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
  __ Branch(&not_array_function, ne, a1, Operand(a5));

  // The target function is the Array constructor,
  // Create an AllocationSite if we don't already have it, store it in the
  // slot.
  CreateAllocationSiteStub create_stub(masm->isolate());
  CallStubInRecordCallTarget(masm, &create_stub);
  __ Branch(&done);

  __ bind(&not_array_function);
(...skipping 86 matching lines...)
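
The check_allocation_site / megamorphic / initialize labels above implement a small state machine over the feedback slot. A toy C++ summary of those transitions — names are illustrative, not V8's representation:

  enum class FeedbackState {
    kUninitialized,   // uninitialized_symbol sentinel
    kMonomorphic,     // a specific JSFunction
    kAllocationSite,  // monomorphic call to the Array() function
    kMegamorphic      // megamorphic_symbol sentinel (terminal)
  };

  FeedbackState RecordCallTarget(FeedbackState slot, bool same_target,
                                 bool target_is_array_function) {
    switch (slot) {
      case FeedbackState::kUninitialized:
        // First call: Array() gets an AllocationSite, anything else is
        // recorded directly.
        return target_is_array_function ? FeedbackState::kAllocationSite
                                        : FeedbackState::kMonomorphic;
      case FeedbackState::kMonomorphic:
      case FeedbackState::kAllocationSite:
        // A monomorphic miss goes megamorphic; the sentinel is immortal
        // and immovable, so no write barrier is needed.
        return same_target ? slot : FeedbackState::kMegamorphic;
      case FeedbackState::kMegamorphic:
        return FeedbackState::kMegamorphic;
    }
    return slot;  // unreachable
  }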
  __ SmiTag(result_);
  __ bind(&exit_);
}


void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
  // a1 - function
  // a3 - slot id
  // a2 - vector
  // a4 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
  __ Branch(miss, ne, a1, Operand(at));

  __ li(a0, Operand(arg_count()));

  // Increment the call count for monomorphic function calls.
  __ dsrl(t0, a3, 32 - kPointerSizeLog2);
  __ Daddu(a3, a2, Operand(t0));
  __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
  __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
(...skipping 107 matching lines...)

  // We are going monomorphic, provided we actually have a JSFunction.
  __ JumpIfSmi(a1, &miss);

  // Goto miss case if we do not have a function.
  __ GetObjectType(a1, a4, a4);
  __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));

  // Make sure the function is not the Array() function, which requires special
  // behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
  __ Branch(&miss, eq, a1, Operand(a4));

- // Make sure the function belongs to the same native context (which implies
- // the same global object).
+ // Make sure the function belongs to the same native context.
  __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ ld(t0, ContextOperand(t0, Context::GLOBAL_OBJECT_INDEX));
- __ ld(t1, GlobalObjectOperand());
+ __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
+ __ ld(t1, NativeContextMemOperand());
  __ Branch(&miss, ne, t0, Operand(t1));

  // Update stats.
  __ ld(a4, FieldMemOperand(a2, with_types_offset));
  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
  __ sd(a4, FieldMemOperand(a2, with_types_offset));

  // Initialize the call counter.
  __ dsrl(at, a3, 32 - kPointerSizeLog2);
  __ Daddu(at, a2, Operand(at));
(...skipping 2377 matching lines...)
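
The CallIC miss path above now compares native contexts instead of global objects. Because each native context owns exactly one global object, the two checks accept the same functions; in terms of the toy model from the top of this review:

  // Equivalent check: callee and caller resolve to one native context.
  bool SameNativeContext(ContextModel* callee_context, ContextModel* cp) {
    return callee_context->native_context == cp->native_context;
  }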


void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = cp;
  Register slot_reg = a2;
  Register result_reg = v0;
  Label slow_case;

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
-   __ ld(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+   __ ld(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = result_reg;
  }

  // Load the PropertyCell value at the specified slot.
  __ dsll(at, slot_reg, kPointerSizeLog2);
  __ Daddu(at, at, Operand(context_reg));
- __ ld(result_reg, ContextOperand(at, 0));
+ __ ld(result_reg, ContextMemOperand(at, 0));
  __ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));

  // Check that value is not the_hole.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow_case, eq, result_reg, Operand(at));
  __ Ret();

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);
(...skipping 11 matching lines...)
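
LoadGlobalViaContextStub above and StoreGlobalViaContextStub below both start the same way: walk depth() PREVIOUS_INDEX links up to the script context, then index a PropertyCell out of its slot array. A compact illustrative equivalent (toy types, not V8's tagged heap objects):

  struct PropertyCellModel { void* value; };
  struct ScriptContextModel {
    ScriptContextModel* previous;  // Context::PREVIOUS_INDEX
    PropertyCellModel** slots;     // the context's slot array
  };

  void* LoadGlobalViaContext(ScriptContextModel* cp, int depth, int slot) {
    ScriptContextModel* ctx = cp;
    for (int i = 0; i < depth; ++i) ctx = ctx->previous;  // up the chain
    // The stub then checks for the_hole and falls back to the runtime.
    return ctx->slots[slot]->value;
  }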
  Register cell_details_reg = a6;
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
  }

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
-   __ ld(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+   __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = cell_reg;
  }

  // Load the PropertyCell at the specified slot.
  __ dsll(at, slot_reg, kPointerSizeLog2);
  __ Daddu(at, at, Operand(context_reg));
- __ ld(cell_reg, ContextOperand(at, 0));
+ __ ld(cell_reg, ContextMemOperand(at, 0));

  // Load PropertyDetails for the cell (actually only the cell_type and kind).
  __ ld(cell_details_reg,
        FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
  __ SmiUntag(cell_details_reg);
  __ And(cell_details_reg, cell_details_reg,
         PropertyDetails::PropertyCellTypeField::kMask |
             PropertyDetails::KindField::kMask |
             PropertyDetails::kAttributesReadOnlyMask);
(...skipping 385 matching lines...)
                           MemOperand(fp, 6 * kPointerSize), NULL);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64