Chromium Code Reviews

Unified Diff: src/arm64/full-codegen-arm64.cc

Issue 366083004: ARM64: optimize fullcodegen pushes (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #if V8_TARGET_ARCH_ARM64

 #include "src/code-stubs.h"
 #include "src/codegen.h"
(...skipping 1147 matching lines...)
   __ EnumLengthUntagged(x1, x0);
   __ Cbz(x1, &no_descriptors);

   __ LoadInstanceDescriptors(x0, x2);
   __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
   __ Ldr(x2,
          FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));

   // Set up the four remaining stack slots.
-  __ Push(x0, x2);            // Map, enumeration cache.
-  __ SmiTagAndPush(x1, xzr);  // Enum cache length, zero (both as smis).
+  __ SmiTag(x1);
+  // Map, enumeration cache, enum cache length, zero (both last as smis).
+  __ Push(x0, x2, x1, xzr);
   __ B(&loop);

   __ Bind(&no_descriptors);
   __ Drop(1);
   __ B(&exit);

   // We got a fixed array in register x0. Iterate through that.
   __ Bind(&fixed_array);

   __ LoadObject(x1, FeedbackVector());
   __ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
   __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));

   __ Mov(x1, Smi::FromInt(1));  // Smi indicates slow check.
   __ Peek(x10, 0);              // Get enumerated object.
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   // TODO(all): similar check was done already. Can we avoid it here?
   __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
   ASSERT(Smi::FromInt(0) == 0);
   __ CzeroX(x1, le);  // Zero indicates proxy.
-  __ Push(x1, x0);  // Smi and array
-  __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
-  __ Push(x1, xzr);  // Fixed array length (as smi) and initial index.
+  __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
+  // Smi and array, fixed array length (as smi) and initial index.
+  __ Push(x1, x0, x2, xzr);

   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ Bind(&loop);
   // Load the current count to x0, load the length to x1.
   __ PeekPair(x0, x1, 0);
   __ Cmp(x0, x1);  // Compare to the array length.
   __ B(hs, loop_statement.break_label());

   // Get the current entry of the array into register r3.
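
Note: the payoff here comes from V8's ARM64 MacroAssembler batching up to four registers per Push call into stp (store-pair) instructions. A minimal sketch of the plausible expansion, assuming full codegen runs with jssp as its stack pointer; the exact operand order and addressing modes are assumptions, not taken from the CL:

    // Before: two Push calls, each pre-decrementing the stack pointer.
    __ Push(x0, x2);            // stp  x2, x0, [jssp, #-16]!
    __ SmiTagAndPush(x1, xzr);  // lsl  x1, x1, #kSmiShift
                                // stp  xzr, x1, [jssp, #-16]!

    // After: still two store-pairs, but a single stack-pointer update,
    // and the second store no longer depends on the first store's
    // pointer write.
    __ SmiTag(x1);              // lsl  x1, x1, #kSmiShift
    __ Push(x0, x2, x1, xzr);   // stp  xzr, x1, [jssp, #-32]!
                                // stp  x2, x0, [jssp, #16]

The same reasoning applies to the second chunk in this hunk, where the length load is retargeted from x1 to x2 so that x1 stays live for the combined four-register push.
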
(...skipping 592 matching lines...)
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
   for (int i = 0; i < length; i++) {
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;

     if (!result_saved) {
-      __ Push(x0);
-      __ Push(Smi::FromInt(expr->literal_index()));
+      __ Mov(x1, Smi::FromInt(expr->literal_index()));
+      __ Push(x0, x1);
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);

     if (IsFastObjectElementsKind(constant_elements_kind)) {
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ Peek(x6, kPointerSize);  // Copy of array literal.
       __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
       __ Str(result_register(), FieldMemOperand(x1, offset));
       // Update the write barrier for the array store.
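
Note: Push with an immediate operand has to materialize the value into a scratch register before it can store it, so the Mov was already being paid for; writing it out explicitly just lets both values share one Push. A sketch under that assumption (the internal scratch register is hypothetical):

    // Before: two stack-pointer writes; the immediate form hides a Mov.
    __ Push(x0);
    __ Push(Smi::FromInt(expr->literal_index()));  // Mov <scratch>, #imm
                                                   // then a single-slot push
    // After: x1 is free at this point, so stage the smi there and let the
    // two values pair into one store.
    __ Mov(x1, Smi::FromInt(expr->literal_index()));
    __ Push(x0, x1);
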
(...skipping 340 matching lines...)
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ Mov(x2, Operand(var->name()));
     __ Ldr(x1, GlobalObjectMemOperand());
     CallStoreIC();

   } else if (op == Token::INIT_CONST_LEGACY) {
     // Const initializers need a write barrier.
     ASSERT(!var->IsParameter());  // No const parameters.
     if (var->IsLookupSlot()) {
-      __ Push(x0);
-      __ Mov(x0, Operand(var->name()));
-      __ Push(cp, x0);  // Context and name.
+      __ Mov(x1, Operand(var->name()));
+      __ Push(x0, cp, x1);
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     } else {
       ASSERT(var->IsStackLocal() || var->IsContextSlot());
       Label skip;
       MemOperand location = VarOperand(var, x1);
       __ Ldr(x10, location);
       __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
       EmitStoreToStackLocalOrContextSlot(var, location);
       __ Bind(&skip);
     }
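
Note: the old sequence had to save x0 first because it then clobbered x0 with the name. Staging the name in x1 keeps x0 (the value) live, so all three runtime arguments fit in one Push; with an odd register count the assembler can plausibly still pair two of the three stores (stp plus str, one stack-pointer adjustment; this emission is an assumption):

    // Before: x0 is saved, then immediately reused for the name.
    __ Push(x0);
    __ Mov(x0, Operand(var->name()));
    __ Push(cp, x0);  // Context and name.
    // After: the name goes to x1 instead, so x0 survives and a single
    // three-register Push covers value, context and name.
    __ Mov(x1, Operand(var->name()));
    __ Push(x0, cp, x1);
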
(...skipping 193 matching lines...)
   if (arg_count > 0) {
     __ Peek(x10, arg_count * kXRegSize);
   } else {
     __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
   }

   // Prepare to push the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));

-  // Push.
-  __ Push(x10, x11);
-
   // Prepare to push the language mode.
-  __ Mov(x10, Smi::FromInt(strict_mode()));
+  __ Mov(x12, Smi::FromInt(strict_mode()));
   // Prepare to push the start position of the scope the call resides in.
-  __ Mov(x11, Smi::FromInt(scope()->start_position()));
+  __ Mov(x13, Smi::FromInt(scope()->start_position()));

   // Push.
-  __ Push(x10, x11);
+  __ Push(x10, x11, x12, x13);

   // Do the runtime call.
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }

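Note: the pattern in this chunk is to stage each runtime argument in its own scratch register (x10 through x13) so that nothing is clobbered before one combined push; the only precondition is that the extra scratch registers are free here, which the CL relies on. The resulting tail, with the register roles spelled out (annotations added, not from the CL):

    //   x10: function to resolve (or undefined if there are no arguments)
    //   x11: receiver of the enclosing function
    //   x12: language mode, as a smi
    //   x13: start position of the enclosing scope, as a smi
    __ Push(x10, x11, x12, x13);  // plausibly two stps, one sp update
    __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
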
 void FullCodeGenerator::VisitCall(Call* expr) {
 #ifdef DEBUG
   // We want to verify that RecordJSReturnSite gets called on all paths
   // through this function. Avoid early returns.
(...skipping 56 matching lines...)
   { PreservePositionScope scope(masm()->positions_recorder());
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
     EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
   }

   __ Bind(&slow);
   // Call the runtime to find the function to call (returned in x0)
   // and the object holding it (returned in x1).
-  __ Push(context_register());
   __ Mov(x10, Operand(proxy->name()));
-  __ Push(x10);
+  __ Push(context_register(), x10);
   __ CallRuntime(Runtime::kLoadContextSlot, 2);
   __ Push(x0, x1);  // Function, receiver.

   // If fast case code has been generated, emit code to push the
   // function and receiver and have the slow path jump around this
   // code.
   if (done.is_linked()) {
     Label call;
     __ B(&call);
     __ Bind(&done);
     // Push function.
-    __ Push(x0);
     // The receiver is implicitly the global receiver. Indicate this
     // by passing undefined to the call function stub.
     __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
-    __ Push(x1);
+    __ Push(x0, x1);
     __ Bind(&call);
   }

   // The receiver is either the global receiver or an object found
   // by LoadContextSlot.
   EmitCall(expr);
 } else if (call_type == Call::PROPERTY_CALL) {
   Property* property = callee->AsProperty();
   { PreservePositionScope scope(masm()->positions_recorder());
     VisitForStackValue(property->obj());
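
Note: both changes in this chunk follow the same recipe: hoist an independent Mov or LoadRoot above the push so that two single-register pushes collapse into one paired push. The reorderings are safe only because the staged register does not alias anything still needed (x10 is not the context register, and x1 is distinct from x0). A condensed view:

    // Slow path: stage the name first, then push context and name together.
    __ Mov(x10, Operand(proxy->name()));
    __ Push(context_register(), x10);
    // ...
    // Fast-case join: stage undefined in x1, then pair it with the function.
    __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
    __ Push(x0, x1);
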
(...skipping 2396 matching lines...)
   return previous_;
 }


 #undef __


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM64