| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/arm64/macro-assembler-arm64.h" | 7 #include "src/arm64/macro-assembler-arm64.h" |
| 8 #include "src/compiler/code-generator-impl.h" | 8 #include "src/compiler/code-generator-impl.h" |
| 9 #include "src/compiler/gap-resolver.h" | 9 #include "src/compiler/gap-resolver.h" |
| 10 #include "src/compiler/node-matchers.h" | 10 #include "src/compiler/node-matchers.h" |
| 11 #include "src/compiler/osr.h" | |
| 12 #include "src/scopes.h" | 11 #include "src/scopes.h" |
| 13 | 12 |
| 14 namespace v8 { | 13 namespace v8 { |
| 15 namespace internal { | 14 namespace internal { |
| 16 namespace compiler { | 15 namespace compiler { |
| 17 | 16 |
| 18 #define __ masm()-> | 17 #define __ masm()-> |
| 19 | 18 |
| 20 | 19 |
| 21 // Adds Arm64-specific methods to convert InstructionOperands. | 20 // Adds Arm64-specific methods to convert InstructionOperands. |
| (...skipping 871 matching lines...) | |
| 893 if (info()->is_osr()) { | 892 if (info()->is_osr()) { |
| 894 // TurboFan OSR-compiled functions cannot be entered directly. | 893 // TurboFan OSR-compiled functions cannot be entered directly. |
| 895 __ Abort(kShouldNotDirectlyEnterOsrFunction); | 894 __ Abort(kShouldNotDirectlyEnterOsrFunction); |
| 896 | 895 |
| 897 // Unoptimized code jumps directly to this entrypoint while the unoptimized | 896 // Unoptimized code jumps directly to this entrypoint while the unoptimized |
| 898 // frame is still on the stack. Optimized code uses OSR values directly from | 897 // frame is still on the stack. Optimized code uses OSR values directly from |
| 899 // the unoptimized frame. Thus, all that needs to be done is to allocate the | 898 // the unoptimized frame. Thus, all that needs to be done is to allocate the |
| 900 // remaining stack slots. | 899 // remaining stack slots. |
| 901 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --"); | 900 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --"); |
| 902 osr_pc_offset_ = __ pc_offset(); | 901 osr_pc_offset_ = __ pc_offset(); |
| 903 int unoptimized_slots = | 902 DCHECK(stack_slots >= frame()->GetOsrStackSlotCount()); |
| 904 static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots()); | 903 stack_slots -= frame()->GetOsrStackSlotCount(); |
| 905 DCHECK(stack_slots >= unoptimized_slots); | |
| 906 stack_slots -= unoptimized_slots; | |
| 907 } | 904 } |
| 908 | 905 |
| 909 if (stack_slots > 0) { | 906 if (stack_slots > 0) { |
| 910 Register sp = __ StackPointer(); | 907 Register sp = __ StackPointer(); |
| 911 if (!sp.Is(csp)) { | 908 if (!sp.Is(csp)) { |
| 912 __ Sub(sp, sp, stack_slots * kPointerSize); | 909 __ Sub(sp, sp, stack_slots * kPointerSize); |
| 913 } | 910 } |
| 914 __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize); | 911 __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize); |
| 915 } | 912 } |
| 916 } | 913 } |
| (...skipping 193 matching lines...) | |
| 1110 } | 1107 } |
| 1111 } | 1108 } |
| 1112 MarkLazyDeoptSite(); | 1109 MarkLazyDeoptSite(); |
| 1113 } | 1110 } |
| 1114 | 1111 |
| 1115 #undef __ | 1112 #undef __ |
| 1116 | 1113 |
| 1117 } // namespace compiler | 1114 } // namespace compiler |
| 1118 } // namespace internal | 1115 } // namespace internal |
| 1119 } // namespace v8 | 1116 } // namespace v8 |
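
Note on the hunk at new lines 902-903: the prologue no longer constructs an OsrHelper on the spot (hence the dropped src/compiler/osr.h include) and instead reads a count cached on the frame. A minimal standalone sketch of the arithmetic, using a hypothetical Frame stand-in rather than the real v8::internal::compiler::Frame:

    #include <cassert>

    // Hypothetical stand-in for the frame's cached OSR bookkeeping;
    // names are illustrative only, not the real V8 class.
    struct Frame {
      int osr_slots;  // slots the unoptimized frame has already allocated
      int GetOsrStackSlotCount() const { return osr_slots; }
    };

    // OSR entry allocates only what the unoptimized frame did not:
    // the optimized frame's total minus the slots already on the stack.
    int RemainingSlotsToAllocate(const Frame& frame, int stack_slots) {
      assert(stack_slots >= frame.GetOsrStackSlotCount());
      return stack_slots - frame.GetOsrStackSlotCount();
    }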
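
On the allocation itself (new lines 906-911): ARM64 requires the C stack pointer (csp) to stay 16-byte aligned, while the masm's stack pointer, when it is not csp, may move by a single 8-byte slot; that is why the two registers are adjusted by different amounts. A sketch of the rounding, assuming AlignedStackSlots rounds an odd slot count up to even (its body lies outside the shown hunk):

    // Assumed behavior of AlignedStackSlots: round odd counts up so csp
    // moves by a multiple of 16 bytes (kPointerSize is 8 on ARM64).
    int AlignedStackSlots(int stack_slots) {
      return (stack_slots & 1) ? stack_slots + 1 : stack_slots;
    }

For example, with stack_slots == 5 the non-csp stack pointer drops by 40 bytes, while csp drops by AlignedStackSlots(5) * 8 == 48.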