Chromium Code Reviews

Diff: src/compiler/arm64/code-generator-arm64.cc

Issue 1494123002: [turbofan, arm64] Fix native stack parameters on arm64. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years ago
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
(...skipping 859 matching lines...)
    case kArm64CompareAndBranch32:
      // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
      break;
    case kArm64ClaimForCallArguments: {
      __ Claim(i.InputInt32(0));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
      break;
    }
    case kArm64Poke: {
      Operand operand(i.InputInt32(1) * kPointerSize);
-     __ Poke(i.InputRegister(0), operand);
+     if (instr->InputAt(0)->IsDoubleRegister()) {
+       __ Poke(i.InputFloat64Register(0), operand);
+     } else {
+       __ Poke(i.InputRegister(0), operand);
+     }
      break;
    }
    case kArm64PokePair: {
      int slot = i.InputInt32(2) - 1;
-     __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+     if (instr->InputAt(0)->IsDoubleRegister()) {
+       __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
jbramley 2015/12/03 11:22:34 Consider: DCHECK(instr->InputAt(1)->IsDoubleRegister());
ahaas 2015/12/03 15:25:06 I didn't add this DCHECK because i.InputFloat64Register…
+                   slot * kPointerSize);
+     } else {
+       __ PokePair(i.InputRegister(1), i.InputRegister(0),
+                   slot * kPointerSize);
+     }
      break;
    }
    case kArm64Clz:
      __ Clz(i.OutputRegister64(), i.InputRegister64(0));
      break;
    case kArm64Clz32:
      __ Clz(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Cmp:
      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
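A note on the kArm64PokePair thread above: with jbramley's suggested assertion folded in, the case would read roughly as below. This is only a sketch of the suggestion, not what landed; per ahaas's reply, InputFloat64Register is expected to check the operand kind on its own, so the explicit DCHECK was left out.

    case kArm64PokePair: {
      int slot = i.InputInt32(2) - 1;
      if (instr->InputAt(0)->IsDoubleRegister()) {
        // Suggested (but not landed) assertion: both pushed inputs should be
        // FP registers whenever the first one is.
        DCHECK(instr->InputAt(1)->IsDoubleRegister());
        __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
                    slot * kPointerSize);
      } else {
        __ PokePair(i.InputRegister(1), i.InputRegister(0),
                    slot * kPointerSize);
      }
      break;
    }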
(...skipping 389 matching lines...)
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall()) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
  } else if (frame()->needs_frame()) {
-   __ SetStackPointer(jssp);
+   if (descriptor->UseNativeStack()) {
+     __ SetStackPointer(csp);
+   } else {
+     __ SetStackPointer(jssp);
+   }
    __ StubPrologue();
  } else {
+   if (descriptor->UseNativeStack()) {
+     __ SetStackPointer(csp);
jbramley 2015/12/03 11:22:34 Is it jssp by default? (I can't remember.) If it is…
titzer 2015/12/03 11:31:59 Actually, I think it's best to be explicit in both cases.
ahaas 2015/12/03 15:25:06 Done, I made it explicit in both cases.
+   }
    frame()->SetElidedFrameSizeInSlots(0);
  }
  frame_access_state()->SetFrameAccessToDefault();

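Following up on the SetStackPointer thread: ahaas reports making the stack-pointer choice explicit in both branches in a later patch set, which is not shown on this page. Assuming that change mirrors the needs_frame() branch, the tail of the prologue dispatch would look roughly like this:

  } else if (frame()->needs_frame()) {
    if (descriptor->UseNativeStack()) {
      __ SetStackPointer(csp);
    } else {
      __ SetStackPointer(jssp);
    }
    __ StubPrologue();
  } else {
    // Explicit in both cases, as titzer suggested, instead of relying on a
    // default stack pointer when UseNativeStack() is false.
    if (descriptor->UseNativeStack()) {
      __ SetStackPointer(csp);
    } else {
      __ SetStackPointer(jssp);
    }
    frame()->SetElidedFrameSizeInSlots(0);
  }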
  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

- if (csp.Is(masm()->StackPointer())) {
+ if (csp.Is(masm()->StackPointer()) && !descriptor->UseNativeStack()) {
jbramley 2015/12/03 11:22:34 Why not for the UseNativeStack() case? In particular…
ahaas 2015/12/03 15:25:06 The additional condition is required because AlignSavedCalleeRegisterSlots…
    // The system stack pointer requires 16-byte alignment at function call
    // boundaries.
+
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
  __ Claim(stack_shrink_slots);

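To make the alignment comment concrete: csp must stay 16-byte aligned at call boundaries while stack slots are kPointerSize (8 bytes) on arm64, so any claim has to cover an even number of slots. The constants and helper below are an illustration only, not code from this patch:

constexpr int kPointerSize = 8;      // arm64 stack slot size
constexpr int kStackAlignment = 16;  // required csp alignment (AAPCS64)

// Round a slot count up to an even number so that count * kPointerSize
// stays a multiple of kStackAlignment.
constexpr int AlignSlots(int slots) { return slots + (slots & 1); }

static_assert(AlignSlots(3) == 4, "an odd count gets one padding slot");
static_assert(AlignSlots(3) * kPointerSize % kStackAlignment == 0,
              "even slot counts keep csp 16-byte aligned");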
  // Save FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
(...skipping 36 matching lines...)
  if (descriptor->IsCFunctionCall()) {
    __ Mov(csp, fp);
    __ Pop(fp, lr);
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ B(&return_label_);
      return;
    } else {
      __ Bind(&return_label_);
-     __ Mov(jssp, fp);
+     if (descriptor->UseNativeStack()) {
+       __ Mov(csp, fp);
+     } else {
+       __ Mov(jssp, fp);
+     }
      __ Pop(fp, lr);
    }
+ } else if (descriptor->UseNativeStack()) {
+   bool pop_count_uneven = pop_count & 1;
+   if (pop_count_uneven) {
+     pop_count++;
+   }
jbramley 2015/12/03 11:22:34 Why not just "pop_count += (pop_count & 1)"?
ahaas 2015/12/03 15:25:06 Done.
  }
  __ Drop(pop_count);
  __ Ret();
}

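On the pop_count thread above: jbramley's one-liner was adopted ("Done"), so the UseNativeStack() branch of the return sequence presumably ends up as sketched below; the follow-up patch set is not part of this page.

  } else if (descriptor->UseNativeStack()) {
    // Round an odd pop count up by one slot so the Drop below keeps csp
    // 16-byte aligned.
    pop_count += (pop_count & 1);
  }
  __ Drop(pop_count);
  __ Ret();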
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
(...skipping 175 matching lines...)
      padding_size -= kInstructionSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8
