Chromium Code Reviews

Unified Diff: src/ppc/deoptimizer-ppc.cc

Issue 422063005: Contribution of PowerPC port. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Copyright 2011 the V8 project authors. All rights reserved.
+//
+// Copyright IBM Corp. 2012, 2013. All rights reserved.
+//
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/codegen.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
 #include "src/safepoint-table.h"

 namespace v8 {
 namespace internal {

+const int Deoptimizer::table_entry_size_ = 8;
+

 int Deoptimizer::patch_size() {
+#if V8_TARGET_ARCH_PPC64
+  const int kCallInstructionSizeInWords = 7;
+#else
   const int kCallInstructionSizeInWords = 4;
+#endif
   return kCallInstructionSizeInWords * Assembler::kInstrSize;
 }


 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   Address code_start_address = code->instruction_start();
+
   // Invalidate the relocation information, as it will become invalid by the
   // code patching below, and is not needed any more.
   code->InvalidateRelocation();

   if (FLAG_zap_code_space) {
     // Fail hard and early if we enter this code object again.
     byte* pointer = code->FindCodeAgeSequence();
     if (pointer != NULL) {
       pointer += kNoCodeAgeSequenceLength;
     } else {
       pointer = code->instruction_start();
     }
     CodePatcher patcher(pointer, 1);
-    patcher.masm()->break_(0xCC);
+    patcher.masm()->bkpt(0);

     DeoptimizationInputData* data =
         DeoptimizationInputData::cast(code->deoptimization_data());
     int osr_offset = data->OsrPcOffset()->value();
     if (osr_offset > 0) {
       CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
-      osr_patcher.masm()->break_(0xCC);
+      osr_patcher.masm()->bkpt(0);
     }
   }

   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
   SharedFunctionInfo* shared =
       SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
   shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
   // For each LLazyBailout instruction insert a call to the corresponding
   // deoptimization entry.
   for (int i = 0; i < deopt_data->DeoptCount(); i++) {
     if (deopt_data->Pc(i)->value() == -1) continue;
     Address call_address = code_start_address + deopt_data->Pc(i)->value();
     Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
-    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
-                                                      RelocInfo::NONE32);
+    // We need calls to have a predictable size in the unoptimized code, but
+    // this is optimized code, so we don't have to have a predictable size.
+    int call_size_in_bytes =
+        MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
+                                                       kRelocInfo_NONEPTR);
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
     ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
     ASSERT(call_size_in_bytes <= patch_size());
     CodePatcher patcher(call_address, call_size_in_words);
-    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
+    patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
     ASSERT(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     ASSERT(call_address + patch_size() <= code->instruction_end());
-
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
   }
 }


 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   // Set the register values. The values are not important as there are no
   // callee saved registers in JavaScript frames, so all registers are
   // spilled. Registers fp and sp are set to the correct values though.

   for (int i = 0; i < Register::kNumRegisters; i++) {
     input_->SetRegister(i, i * 4);
   }
   input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
   for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }

   // Fill the frame content from the actual data on the frame.
   for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+    input_->SetFrameSlot(i, reinterpret_cast<intptr_t>(
+        Memory::Address_at(tos + i)));
   }
 }


 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
   ApiFunction function(descriptor->deoptimization_handler());
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
   int params = descriptor->GetHandlerParameterCount();
-  output_frame->SetRegister(s0.code(), params);
-  output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
-  output_frame->SetRegister(s2.code(), handler);
+  output_frame->SetRegister(r3.code(), params);
+  output_frame->SetRegister(r4.code(), handler);
 }


 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
     double double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }


 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
-  // There is no dynamic alignment padding on MIPS in the input frame.
+  // There is no dynamic alignment padding on PPC in the input frame.
   return false;
 }


 #define __ masm()->

-
 // This code tries to be close to ia32 code so that any changes can be
 // easily ported.
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();

   // Unlike on ARM we don't save all the registers, just the useful ones.
   // For the rest, there are gaps on the stack, so the offsets remain the same.
   const int kNumberOfRegisters = Register::kNumRegisters;

   RegList restored_regs = kJSCallerSaved | kCalleeSaved;
-  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+  RegList saved_regs = restored_regs | sp.bit();

   const int kDoubleRegsSize =
-      kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+      kDoubleSize * DoubleRegister::kMaxNumAllocatableRegisters;

   // Save all FPU registers before messing with them.
-  __ Subu(sp, sp, Operand(kDoubleRegsSize));
-  for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
-    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+  __ subi(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+    DoubleRegister fpu_reg = DoubleRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
-    __ sdc1(fpu_reg, MemOperand(sp, offset));
+    __ stfd(fpu_reg, MemOperand(sp, offset));
   }

   // Push saved_regs (needed to populate FrameDescription::registers_).
   // Leave gaps for other registers.
-  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+  __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize));
   for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
     if ((saved_regs & (1 << i)) != 0) {
-      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+      __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
     }
   }

   const int kSavedRegistersAreaSize =
       (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

   // Get the bailout id from the stack.
-  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
+  __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));

-  // Get the address of the location in the code object (a3) (return
+  // Get the address of the location in the code object (r6) (return
   // address for lazy deoptimization) and compute the fp-to-sp delta in
-  // register t0.
-  __ mov(a3, ra);
+  // register r7.
+  __ mflr(r6);
   // Correct one word for bailout id.
-  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-
-  __ Subu(t0, fp, t0);
+  __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  __ sub(r7, fp, r7);

   // Allocate a new deoptimizer object.
-  // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
-  __ PrepareCallCFunction(6, t1);
-  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a1, Operand(type()));  // bailout type,
-  // a2: bailout id already loaded.
-  // a3: code address or 0 already loaded.
-  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
-  __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
-  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
+  // Pass six arguments in r3 to r8.
+  __ PrepareCallCFunction(6, r8);
+  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ li(r4, Operand(type()));  // bailout type,
+  // r5: bailout id already loaded.
+  // r6: code address or 0 already loaded.
+  // r7: Fp-to-sp delta.
+  __ mov(r8, Operand(ExternalReference::isolate_address(isolate())));
   // Call Deoptimizer::New().
   {
     AllowExternalCallThatCantCauseGC scope(masm());
     __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
   }

-  // Preserve "deoptimizer" object in register v0 and get the input
-  // frame descriptor pointer to a1 (deoptimizer->input_);
-  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
-  __ mov(a0, v0);
-  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+  // Preserve "deoptimizer" object in register r3 and get the input
+  // frame descriptor pointer to r4 (deoptimizer->input_);
+  __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));

   // Copy core registers into FrameDescription::registers_[kNumRegisters].
   ASSERT(Register::kNumRegisters == kNumberOfRegisters);
   for (int i = 0; i < kNumberOfRegisters; i++) {
     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
-    if ((saved_regs & (1 << i)) != 0) {
-      __ lw(a2, MemOperand(sp, i * kPointerSize));
-      __ sw(a2, MemOperand(a1, offset));
-    } else if (FLAG_debug_code) {
-      __ li(a2, kDebugZapValue);
-      __ sw(a2, MemOperand(a1, offset));
-    }
+    __ LoadP(r5, MemOperand(sp, i * kPointerSize));
+    __ StoreP(r5, MemOperand(r4, offset));
   }

   int double_regs_offset = FrameDescription::double_registers_offset();
-  // Copy FPU registers to
+  // Copy VFP registers to
   // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
     int dst_offset = i * kDoubleSize + double_regs_offset;
     int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
-    __ ldc1(f0, MemOperand(sp, src_offset));
-    __ sdc1(f0, MemOperand(a1, dst_offset));
+    __ lfd(d0, MemOperand(sp, src_offset));
+    __ stfd(d0, MemOperand(r4, dst_offset));
   }

   // Remove the bailout id and the saved registers from the stack.
-  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

-  // Compute a pointer to the unwinding limit in register a2; that is
+  // Compute a pointer to the unwinding limit in register r5; that is
   // the first stack slot not part of the input frame.
-  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
-  __ Addu(a2, a2, sp);
+  __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+  __ add(r5, r5, sp);

   // Unwind the stack down to - but not including - the unwinding
   // limit and copy the contents of the activation frame to the input
   // frame description.
-  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+  __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
   Label pop_loop;
   Label pop_loop_header;
-  __ BranchShort(&pop_loop_header);
+  __ b(&pop_loop_header);
   __ bind(&pop_loop);
-  __ pop(t0);
-  __ sw(t0, MemOperand(a3, 0));
-  __ addiu(a3, a3, sizeof(uint32_t));
+  __ pop(r7);
+  __ StoreP(r7, MemOperand(r6, 0));
+  __ addi(r6, r6, Operand(kPointerSize));
   __ bind(&pop_loop_header);
-  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+  __ cmp(r5, sp);
+  __ bne(&pop_loop);

   // Compute the output frame in the deoptimizer.
-  __ push(a0);  // Preserve deoptimizer object across call.
-  // a0: deoptimizer object; a1: scratch.
-  __ PrepareCallCFunction(1, a1);
+  __ push(r3);  // Preserve deoptimizer object across call.
+  // r3: deoptimizer object; r4: scratch.
+  __ PrepareCallCFunction(1, r4);
   // Call Deoptimizer::ComputeOutputFrames().
   {
     AllowExternalCallThatCantCauseGC scope(masm());
     __ CallCFunction(
         ExternalReference::compute_output_frames_function(isolate()), 1);
   }
-  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
+  __ pop(r3);  // Restore deoptimizer object (class Deoptimizer).

   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
-  // Outer loop state: t0 = current "FrameDescription** output_",
-  // a1 = one past the last FrameDescription**.
-  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
-  __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
-  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
-  __ addu(a1, t0, a1);  // a1 = one past the last FrameDescription**.
-  __ jmp(&outer_loop_header);
+  // Outer loop state: r7 = current "FrameDescription** output_",
+  // r4 = one past the last FrameDescription**.
+  __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
+  __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset()));  // r7 is output_.
+  __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2));
+  __ add(r4, r7, r4);
+  __ b(&outer_loop_header);
+
   __ bind(&outer_push_loop);
-  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
-  __ lw(a2, MemOperand(t0, 0));  // output_[ix]
-  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
-  __ jmp(&inner_loop_header);
+  // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
+  __ LoadP(r5, MemOperand(r7, 0));  // output_[ix]
+  __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
+  __ b(&inner_loop_header);
+
   __ bind(&inner_push_loop);
-  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
-  __ Addu(t2, a2, Operand(a3));
-  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
-  __ push(t3);
+  __ addi(r6, r6, Operand(-sizeof(intptr_t)));
+  __ add(r9, r5, r6);
+  __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
+  __ push(r9);
+
   __ bind(&inner_loop_header);
-  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+  __ cmpi(r6, Operand::Zero());
+  __ bne(&inner_push_loop);  // test for gt?

-  __ Addu(t0, t0, Operand(kPointerSize));
+  __ addi(r7, r7, Operand(kPointerSize));
   __ bind(&outer_loop_header);
-  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
+  __ cmp(r7, r4);
+  __ blt(&outer_push_loop);

-  __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
-  for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
-    const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+  __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+    const DoubleRegister dreg = DoubleRegister::FromAllocationIndex(i);
     int src_offset = i * kDoubleSize + double_regs_offset;
-    __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+    __ lfd(dreg, MemOperand(r4, src_offset));
   }

   // Push state, pc, and continuation from the last output frame.
-  __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
-  __ push(t2);
+  __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset()));
+  __ push(r9);
+  __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
+  __ push(r9);
+  __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
+  __ push(r9);

-  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
-  __ push(t2);
-  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
-  __ push(t2);
-
-
-  // Technically restoring 'at' should work unless zero_reg is also restored
-  // but it's safer to check for this.
-  ASSERT(!(at.bit() & restored_regs));
   // Restore the registers from the last output frame.
-  __ mov(at, a2);
+  ASSERT(!(ip.bit() & restored_regs));
+  __ mr(ip, r5);
   for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
     if ((restored_regs & (1 << i)) != 0) {
-      __ lw(ToRegister(i), MemOperand(at, offset));
+      __ LoadP(ToRegister(i), MemOperand(ip, offset));
     }
   }

   __ InitializeRootRegister();

-  __ pop(at);  // Get continuation, leave pc on stack.
-  __ pop(ra);
-  __ Jump(at);
+  __ pop(ip);  // get continuation, leave pc on stack
+  __ pop(r0);
+  __ mtlr(r0);
+  __ Jump(ip);
   __ stop("Unreachable.");
 }


-// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
-
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());

   // Create a sequence of deoptimization entries.
   // Note that registers are still live when jumping to an entry.
-  Label table_start, done;
-  __ bind(&table_start);
+  Label done;
   for (int i = 0; i < count(); i++) {
-    Label start;
-    __ bind(&start);
-    ASSERT(is_int16(i));
-    __ Branch(USE_DELAY_SLOT, &done);  // Expose delay slot.
-    __ li(at, i);  // In the delay slot.
-
-    ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+    int start = masm()->pc_offset();
+    USE(start);
+    __ li(ip, Operand(i));
+    __ b(&done);
+    ASSERT(masm()->pc_offset() - start == table_entry_size_);
   }
-
-  ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
-            count() * table_entry_size_);
   __ bind(&done);
-  __ Push(at);
+  __ push(ip);
 }


 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
 }


 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
   SetFrameSlot(offset, value);
 }


 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+#if V8_OOL_CONSTANT_POOL
+  ASSERT(FLAG_enable_ool_constant_pool);
+  SetFrameSlot(offset, value);
+#else
   // No out-of-line constant pool support.
   UNREACHABLE();
+#endif
 }


 #undef __

-
 } }  // namespace v8::internal
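
Note on the entry table emitted by TableEntryGenerator::GeneratePrologue() above: each entry is two 4-byte PPC instructions (li ip, <id> followed by b done), which is why the patch defines table_entry_size_ as 8 and asserts masm()->pc_offset() - start == table_entry_size_ inside the loop. A minimal illustrative sketch, not part of the patch (kTableEntrySize and DeoptEntryAddress are hypothetical names), of how a fixed entry size lets the deoptimizer locate an entry directly from its id:

#include <cstdint>

// Every PPC instruction is 4 bytes wide (Assembler::kInstrSize in the patch).
constexpr int kInstrSize = 4;
// Each generated entry is "li ip, id" + "b done": two instructions, 8 bytes.
constexpr int kTableEntrySize = 2 * kInstrSize;

// With a fixed entry size, entry |id| starts at a constant offset from the
// start of the table, so no per-entry bookkeeping is needed.
inline uintptr_t DeoptEntryAddress(uintptr_t table_start, int id) {
  return table_start + static_cast<uintptr_t>(id) * kTableEntrySize;
}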
