// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

| 28 #include "v8.h" |
| 29 |
| 30 #include "codegen.h" |
| 31 #include "deoptimizer.h" |
| 32 #include "full-codegen.h" |
| 33 #include "safepoint-table.h" |
| 34 |
| 35 |
| 36 namespace v8 { |
| 37 namespace internal { |
| 38 |
| 39 |
int Deoptimizer::patch_size() {
  // Size of the code used to patch lazy bailout points.
  // Patching is done by Deoptimizer::DeoptimizeFunction.
  return 4 * kInstructionSize;
}


void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
    JSFunction* function) {
  Isolate* isolate = function->GetIsolate();
  HandleScope scope(isolate);
  DisallowHeapAllocation no_allocation;

  ASSERT(function->IsOptimized());
  ASSERT(function->FunctionsInFunctionListShareSameCode());

  // Get the optimized code.
  Code* code = function->code();

  // The optimized code is going to be patched, so we cannot use it any more.
  function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");

  // Invalidate the relocation information, as it will become stale after
  // the code patching below and is not needed any more.
  code->InvalidateRelocation();

  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  Address code_start_address = code->instruction_start();
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif

  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;

    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);

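    // A sketch of the patched sequence, inferred from the calls below (the
    // exact encodings are up to the assembler):
    //   ldr  ip0, [pc, #8]   ; load the entry address from the literal below
    //   blr  ip0             ; call the lazy deoptimization entry
    //   .quad deopt_entry    ; 64-bit literal, filling two instruction slots
    // Four instruction slots in total, matching patch_size().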
    PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
    patcher.LoadLiteral(ip0, 2 * kInstructionSize);
    patcher.blr(ip0);
    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));

    ASSERT((prev_call_address == NULL) ||
           (call_address >= prev_call_address + patch_size()));
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  // Add the deoptimizing code to the list.
  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
  DeoptimizerData* data = isolate->deoptimizer_data();
  node->set_next(data->deoptimizing_code_list_);
  data->deoptimizing_code_list_ = node;

  // We might be in the middle of incremental marking with compaction.
  // Tell collector to treat this code object in a special way and
  // ignore all slots that might have been recorded on it.
  isolate->heap()->mark_compact_collector()->InvalidateCode(code);

  ReplaceCodeForRelatedFunctions(function, code);

  if (FLAG_trace_deopt) {
    PrintF("[forced deoptimization: ");
    function->PrintName();
    PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
  }
}


void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
                                       Address pc_after,
                                       Code* interrupt_code,
                                       Code* replacement_code) {
  UNIMPLEMENTED();
  ASSERT(!InterruptCodeIsPatched(unoptimized_code,
                                 pc_after,
                                 interrupt_code,
                                 replacement_code));
}


void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
                                        Address pc_after,
                                        Code* interrupt_code,
                                        Code* replacement_code) {
  UNIMPLEMENTED();
  ASSERT(InterruptCodeIsPatched(unoptimized_code,
                                pc_after,
                                interrupt_code,
                                replacement_code));
}


#ifdef DEBUG
bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
                                         Address pc_after,
                                         Code* interrupt_code,
                                         Code* replacement_code) {
  UNIMPLEMENTED();
  return false;
}
#endif


void Deoptimizer::DoComputeOsrOutputFrame() {
  UNIMPLEMENTED();
}


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee-saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.
  for (int i = 0; i < Register::NumRegisters(); i++) {
    input_->SetRegister(i, 0);
  }

  // TODO(all): Do we also need to set a value to csp?
  input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));

  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
  }
}


bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  // There is no dynamic alignment padding on A64 in the input frame.
  return false;
}


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler_);
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->register_param_count_;
  if (descriptor->stack_parameter_count_ != NULL) {
    params++;
  }
  output_frame->SetRegister(x0.code(), params);
  output_frame->SetRegister(x1.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}


#define __ masm()->

void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // TODO(all): This code needs to be revisited. We probably only need to save
  // caller-saved registers here. Callee-saved registers can be stored directly
  // in the input frame.

  // Save all allocatable floating point registers.
  CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
                                0, FPRegister::NumAllocatableRegisters());
  __ PushCPURegList(saved_fp_registers);

  // We save all the registers except jssp, sp and lr.
  CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
  saved_registers.Combine(fp);
  __ PushCPURegList(saved_registers);

  const int kSavedRegistersAreaSize =
      (saved_registers.Count() * kXRegSizeInBytes) +
      (saved_fp_registers.Count() * kDRegSizeInBytes);

  // Floating point registers are saved on the stack above core registers.
  const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;

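  // At this point the stack looks roughly like this, from the current stack
  // pointer upwards (a sketch inferred from the pushes above and from the
  // table entries emitted by GeneratePrologue):
  //   sp -> saved core registers (x0-x27 and fp)
  //         saved floating point registers
  //         bailout id (pushed by the deoptimization table entry)
  //         return address (lazy deoptimization only)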
  // Get the bailout id from the stack.
  Register bailout_id = x2;
  __ Peek(bailout_id, kSavedRegistersAreaSize);

  // Get the address of the location in the code object if possible
  // and compute the fp-to-sp delta.
  Register code_object = x3;
  Register fp_to_sp = x4;
  if ((type() == EAGER) || (type() == SOFT)) {
    __ Mov(code_object, 0);
    // Correct one word for bailout id.
    __ Add(fp_to_sp,
           masm()->StackPointer(),
           kSavedRegistersAreaSize + (1 * kPointerSize));
  } else if (type() == OSR) {
    UNIMPLEMENTED();
  } else {
    __ Mov(code_object, lr);
    // Correct two words for bailout id and return address.
    __ Add(fp_to_sp,
           masm()->StackPointer(),
           kSavedRegistersAreaSize + (2 * kPointerSize));
  }
  __ Sub(fp_to_sp, fp, fp_to_sp);
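  // The Add above reconstructed the stack pointer as it was before
  // deoptimization (skipping the words pushed since), so after the Sub
  // fp_to_sp holds the byte distance from fp down to that original stack
  // pointer.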

  // Allocate a new deoptimizer object.
  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ Mov(x1, type());
  // Following arguments are already loaded:
  //  - x2: bailout id
  //  - x3: code object address
  //  - x4: fp-to-sp delta
  __ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));

  {
    // Call Deoptimizer::New().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }
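  // The six arguments in x0-x5 correspond to Deoptimizer::New(function, type,
  // bailout id, code object address, fp-to-sp delta, isolate), matching the
  // loads above.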

  // Preserve "deoptimizer" object in register x0.
  Register deoptimizer = x0;

  // Get the input frame descriptor pointer.
  __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));

  // Copy core registers into the input frame.
  CPURegList copy_to_input = saved_registers;
  for (int i = 0; i < saved_registers.Count(); i++) {
    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
    __ Peek(x2, i * kPointerSize);
    CPURegister current_reg = copy_to_input.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Str(x2, MemOperand(x1, offset));
  }

  // Copy FP registers to the input frame.
  for (int i = 0; i < saved_fp_registers.Count(); i++) {
    // TODO(all): Look for opportunities to optimize this by using ldp/stp.
    int dst_offset = FrameDescription::double_registers_offset() +
        (i * kDoubleSize);
    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
    __ Peek(x2, src_offset);
    __ Str(x2, MemOperand(x1, dst_offset));
  }

  // Remove the bailout id, the return address (if any) and the saved
  // registers from the stack.
  if ((type() == EAGER) || (type() == SOFT) || (type() == OSR)) {
    __ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
  } else {
    // Also remove the return address for lazy deoptimization.
    __ Drop(2 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
  }

  // Compute a pointer to the unwinding limit in register x2; that is
  // the first stack slot not part of the input frame.
  Register unwind_limit = x2;
  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
  __ Add(unwind_limit, unwind_limit, __ StackPointer());
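  // unwind_limit now holds StackPointer() + frame size, i.e. the address
  // just past the last slot belonging to the input frame.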

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Add(x3, x1, FrameDescription::frame_content_offset());
  Label pop_loop;
  Label pop_loop_header;
  __ B(&pop_loop_header);
  __ Bind(&pop_loop);
  __ Pop(x4);
  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
  __ Bind(&pop_loop_header);
  __ Cmp(unwind_limit, __ StackPointer());
  __ B(ne, &pop_loop);
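  // Branching to the loop header first means the body is skipped entirely
  // when there is nothing to copy (unwind_limit already equals the stack
  // pointer).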

  // Compute the output frame in the deoptimizer.
  __ Push(x0);  // Preserve deoptimizer object across call.

  {
    // Call Deoptimizer::ComputeOutputFrames().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ Pop(x0);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
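  // In the loops below, x0 walks the deoptimizer's array of FrameDescription
  // pointers and x1 marks the end of that array; for each output frame the
  // inner loop pushes the frame contents from the highest offset down to
  // offset zero.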
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ Ldrsw(x1, MemOperand(x0, Deoptimizer::output_count_offset()));
  __ Ldr(x0, MemOperand(x0, Deoptimizer::output_offset()));
  __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
  __ B(&outer_loop_header);

  __ Bind(&outer_push_loop);
  Register current_frame = x2;
  __ Ldr(current_frame, MemOperand(x0, 0));
  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
  __ B(&inner_loop_header);

  __ Bind(&inner_push_loop);
  __ Sub(x3, x3, kPointerSize);
  __ Add(x6, current_frame, x3);
  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
  __ Push(x7);
  __ Bind(&inner_loop_header);
  __ Cbnz(x3, &inner_push_loop);

  __ Add(x0, x0, kPointerSize);
  __ Bind(&outer_loop_header);
  __ Cmp(x0, x1);
  __ B(lt, &outer_push_loop);

  // TODO(jbramley): The ARM code restores FP registers here.
  TODO_UNIMPLEMENTED("EntryGenerator::Generate: Restore FP registers.");

  // Push state, pc, and continuation from the last output frame.
  if (type() != OSR) {
    __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
    __ Push(x6);
  }

  // TODO(all): This code needs to be revisited. We probably don't need to
  // restore all the registers as fullcodegen does not keep live values in
  // registers (note that at least fp must be restored though).

  // Restore registers from the last output frame.
  // Note that lr is not in the list of saved_registers and will be restored
  // later. We can use it to hold the address of the last output frame while
  // reloading the other registers.
  ASSERT(!saved_registers.IncludesAliasOf(lr));
  Register last_output_frame = lr;
  __ Mov(last_output_frame, current_frame);

  // We don't need to restore x7 as it will be clobbered later to hold the
  // continuation address.
  Register continuation = x7;
  saved_registers.Remove(continuation);

  while (!saved_registers.IsEmpty()) {
    // TODO(all): Look for opportunities to optimize this by using ldp.
    CPURegister current_reg = saved_registers.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
  }

  __ Ldr(continuation, MemOperand(last_output_frame,
                                  FrameDescription::continuation_offset()));
  __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
  __ InitializeRootRegister();
  __ Br(continuation);
}


// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = 4 * kInstructionSize;
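// For each entry, GeneratePrologue below emits exactly four instructions:
// either two nops (eager/soft) or a csp adjustment plus an lr store (lazy),
// followed by a movz that loads the entry id and a branch to the common tail.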


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  // Note that any registers may still be live.
  Label done;
  {
    InstructionAccurateScope scope(masm());

    // The number of entries will never exceed kMaxNumberOfEntries.
    // As long as kMaxNumberOfEntries is a valid 16-bit immediate, a movz
    // instruction can be used to load the entry id.
    ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));

    for (int i = 0; i < count(); i++) {
      int start = masm()->pc_offset();
      USE(start);
      if ((type() == EAGER) || (type() == SOFT)) {
        // These nops are fillers; all entries must be the same size.
        __ nop();
        __ nop();
      } else {
        // Push lr on the stack.
        // We cannot use Push from the MacroAssembler here since we are in an
        // instruction-accurate scope.
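        // csp is lowered first so that it stays at or below the slot written
        // through jssp, keeping the architectural stack pointer covering the
        // memory in use (this reading is inferred from the code).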
        __ sub(csp, jssp, kPointerSize);
        __ str(lr, MemOperand(jssp, -kPointerSize, PreIndex));
      }

      __ movz(masm()->Tmp0(), i);
      __ b(&done);
      ASSERT(masm()->pc_offset() - start == table_entry_size_);
    }
  }
  __ Bind(&done);
  // TODO(all): We need to add some kind of assertion to verify that Tmp0()
  // is not clobbered by Push.
  __ Push(masm()->Tmp0());
}

#undef __

} }  // namespace v8::internal