// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "codegen.h"
#include "deoptimizer.h"
#include "disasm.h"
#include "full-codegen.h"
#include "global-handles.h"
#include "macro-assembler.h"
#include "prettyprinter.h"


namespace v8 {
namespace internal {
DeoptimizerData::DeoptimizerData() {
  eager_deoptimization_entry_code_ = NULL;
  lazy_deoptimization_entry_code_ = NULL;
  current_ = NULL;
  deoptimizing_code_list_ = NULL;
}


DeoptimizerData::~DeoptimizerData() {
  if (eager_deoptimization_entry_code_ != NULL) {
    eager_deoptimization_entry_code_->Free(EXECUTABLE);
    eager_deoptimization_entry_code_ = NULL;
  }
  if (lazy_deoptimization_entry_code_ != NULL) {
    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
    lazy_deoptimization_entry_code_ = NULL;
  }
}

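
// Creates a deoptimizer for the given function and installs it as the
// isolate's single active deoptimizer. Must be balanced by a later call
// to Grab().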
Deoptimizer* Deoptimizer::New(JSFunction* function,
                              BailoutType type,
                              unsigned bailout_id,
                              Address from,
                              int fp_to_sp_delta,
                              Isolate* isolate) {
  ASSERT(isolate == Isolate::Current());
  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
                                             function,
                                             type,
                                             bailout_id,
                                             from,
                                             fp_to_sp_delta);
  ASSERT(isolate->deoptimizer_data()->current_ == NULL);
  isolate->deoptimizer_data()->current_ = deoptimizer;
  return deoptimizer;
}


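// Removes the active deoptimizer from the isolate, deletes its frame
// descriptions and returns it so the caller can dispose of it.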
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
  ASSERT(isolate == Isolate::Current());
  Deoptimizer* result = isolate->deoptimizer_data()->current_;
  ASSERT(result != NULL);
  result->DeleteFrameDescriptions();
  isolate->deoptimizer_data()->current_ = NULL;
  return result;
}


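// Emits the table of deoptimization entry points (count fixed-size
// entries) for the given bailout type into the macro assembler.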
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                int count,
                                                BailoutType type) {
  TableEntryGenerator generator(masm, type, count);
  generator.Generate();
}


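// Visitor that deoptimizes every optimized function it is applied to
// and clears each context's list of optimized functions on the way out.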
class DeoptimizingVisitor : public OptimizedFunctionVisitor {
 public:
  virtual void EnterContext(Context* context) {
    if (FLAG_trace_deopt) {
      PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
             reinterpret_cast<intptr_t>(context));
    }
  }

  virtual void VisitFunction(JSFunction* function) {
    Deoptimizer::DeoptimizeFunction(function);
  }

  virtual void LeaveContext(Context* context) {
    context->ClearOptimizedFunctions();
  }
};


void Deoptimizer::DeoptimizeAll() {
  AssertNoAllocation no_allocation;

  if (FLAG_trace_deopt) {
    PrintF("[deoptimize all contexts]\n");
  }

  DeoptimizingVisitor visitor;
  VisitAllOptimizedFunctions(&visitor);
}


void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
  AssertNoAllocation no_allocation;

  DeoptimizingVisitor visitor;
  VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
}


void Deoptimizer::VisitAllOptimizedFunctionsForContext(
    Context* context, OptimizedFunctionVisitor* visitor) {
  AssertNoAllocation no_allocation;

  ASSERT(context->IsGlobalContext());

  visitor->EnterContext(context);
  // Run through the list of optimized functions and deoptimize them.
  Object* element = context->OptimizedFunctionsListHead();
  while (!element->IsUndefined()) {
    JSFunction* element_function = JSFunction::cast(element);
    // Get the next link before deoptimizing as deoptimizing will clear the
    // next link.
    element = element_function->next_function_link();
    visitor->VisitFunction(element_function);
  }
  visitor->LeaveContext(context);
}


void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
    JSObject* object, OptimizedFunctionVisitor* visitor) {
  AssertNoAllocation no_allocation;

  if (object->IsJSGlobalProxy()) {
    Object* proto = object->GetPrototype();
    ASSERT(proto->IsJSGlobalObject());
    VisitAllOptimizedFunctionsForContext(
        GlobalObject::cast(proto)->global_context(), visitor);
  } else if (object->IsGlobalObject()) {
    VisitAllOptimizedFunctionsForContext(
        GlobalObject::cast(object)->global_context(), visitor);
  }
}


void Deoptimizer::VisitAllOptimizedFunctions(
    OptimizedFunctionVisitor* visitor) {
  AssertNoAllocation no_allocation;

  // Run through the list of all global contexts and deoptimize.
  Object* global = Isolate::Current()->heap()->global_contexts_list();
  while (!global->IsUndefined()) {
    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
                                              visitor);
    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
  }
}


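// Weak handle callback for entries in the deoptimizing code list:
// removes the dead code object from the list and, in debug mode,
// verifies that the node is no longer reachable from it.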
void Deoptimizer::HandleWeakDeoptimizedCode(
    v8::Persistent<v8::Value> obj, void* data) {
  DeoptimizingCodeListNode* node =
      reinterpret_cast<DeoptimizingCodeListNode*>(data);
  RemoveDeoptimizingCode(*node->code());
#ifdef DEBUG
  node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
    node = node->next();
  }
#endif
}


void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer,
                                      Isolate* isolate) {
  deoptimizer->DoComputeOutputFrames();
}


Deoptimizer::Deoptimizer(Isolate* isolate,
                         JSFunction* function,
                         BailoutType type,
                         unsigned bailout_id,
                         Address from,
                         int fp_to_sp_delta)
    : isolate_(isolate),
      function_(function),
      bailout_id_(bailout_id),
      bailout_type_(type),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      output_count_(0),
      output_(NULL),
      integer32_values_(NULL),
      double_values_(NULL) {
  if (FLAG_trace_deopt && type != OSR) {
    PrintF("**** DEOPT: ");
    function->PrintName();
    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  } else if (FLAG_trace_osr && type == OSR) {
    PrintF("**** OSR: ");
    function->PrintName();
    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  }
  // Find the optimized code.
  if (type == EAGER) {
    ASSERT(from == NULL);
    optimized_code_ = function_->code();
  } else if (type == LAZY) {
    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
    ASSERT(optimized_code_ != NULL);
  } else if (type == OSR) {
    // The function has already been optimized and we're transitioning
    // from the unoptimized shared version to the optimized one in the
    // function. The return address (from) points to unoptimized code.
    optimized_code_ = function_->code();
    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
    ASSERT(!optimized_code_->contains(from));
  }
  ASSERT(HEAP->allow_allocation(false));
  unsigned size = ComputeInputFrameSize();
  input_ = new(size) FrameDescription(size, function);
}


Deoptimizer::~Deoptimizer() {
  ASSERT(input_ == NULL && output_ == NULL);
  delete[] integer32_values_;
  delete[] double_values_;
}


void Deoptimizer::DeleteFrameDescriptions() {
  delete input_;
  for (int i = 0; i < output_count_; ++i) {
    if (output_[i] != input_) delete output_[i];
  }
  delete[] output_;
  input_ = NULL;
  output_ = NULL;
  ASSERT(!HEAP->allow_allocation(true));
}


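// Returns the address of the deoptimization entry with the given id,
// lazily allocating the entry code for the requested bailout type.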
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
  ASSERT(id >= 0);
  if (id >= kNumberOfEntries) return NULL;
  LargeObjectChunk* base = NULL;
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  if (type == EAGER) {
    if (data->eager_deoptimization_entry_code_ == NULL) {
      data->eager_deoptimization_entry_code_ = CreateCode(type);
    }
    base = data->eager_deoptimization_entry_code_;
  } else {
    if (data->lazy_deoptimization_entry_code_ == NULL) {
      data->lazy_deoptimization_entry_code_ = CreateCode(type);
    }
    base = data->lazy_deoptimization_entry_code_;
  }
  return
      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
}


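// Maps an address back to its deoptimization entry id, or returns
// kNotDeoptimizationEntry if the address lies outside the entry table.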
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
  LargeObjectChunk* base = NULL;
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  if (type == EAGER) {
    base = data->eager_deoptimization_entry_code_;
  } else {
    base = data->lazy_deoptimization_entry_code_;
  }
  if (base == NULL ||
      addr < base->GetStartAddress() ||
      addr >= base->GetStartAddress() +
          (kNumberOfEntries * table_entry_size_)) {
    return kNotDeoptimizationEntry;
  }
  ASSERT_EQ(0,
      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
  return (addr - base->GetStartAddress()) / table_entry_size_;
}


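// Looks up the packed pc-and-state value recorded in the output data
// for the deoptimization point with the given ast node id.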
unsigned Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
                                    unsigned id,
                                    SharedFunctionInfo* shared) {
  // TODO(kasperl): For now, we do a simple linear search for the PC
  // offset associated with the given node id. This should probably be
  // changed to a binary search.
  int length = data->DeoptPoints();
  Smi* smi_id = Smi::FromInt(id);
  for (int i = 0; i < length; i++) {
    if (data->AstId(i) == smi_id) {
      return data->PcAndState(i)->value();
    }
  }
  PrintF("[couldn't find pc offset for node=%u]\n", id);
  PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
  // Print the source code if available.
  HeapStringAllocator string_allocator;
  StringStream stream(&string_allocator);
  shared->SourceCodePrint(&stream, -1);
  PrintF("[source:\n%s\n]", *stream.ToCString());

  UNREACHABLE();
  return -1;
}


int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
  int length = 0;
  DeoptimizingCodeListNode* node =
      isolate->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    length++;
    node = node->next();
  }
  return length;
}


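// Translates the optimized input frame into one or more unoptimized
// output frames as described by the deoptimization input data.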
void Deoptimizer::DoComputeOutputFrames() {
  if (bailout_type_ == OSR) {
    DoComputeOsrOutputFrame();
    return;
  }

  // Print some helpful diagnostic information.
  int64_t start = OS::Ticks();
  if (FLAG_trace_deopt) {
    PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
           (bailout_type_ == LAZY ? " (lazy)" : ""),
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" @%d]\n", bailout_id_);
  }

  // Determine basic deoptimization information. The optimized frame is
  // described by the input data.
  DeoptimizationInputData* input_data =
      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
  unsigned node_id = input_data->AstId(bailout_id_)->value();
  ByteArray* translations = input_data->TranslationByteArray();
  unsigned translation_index =
      input_data->TranslationIndex(bailout_id_)->value();

  // Do the input frame to output frame(s) translation.
  TranslationIterator iterator(translations, translation_index);
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
  ASSERT(Translation::BEGIN == opcode);
  USE(opcode);
  // Read the number of output frames and allocate an array for their
  // descriptions.
  int count = iterator.Next();
  ASSERT(output_ == NULL);
  output_ = new FrameDescription*[count];
  // Per-frame lists of untagged and unboxed int32 and double values.
  integer32_values_ = new List<ValueDescriptionInteger32>[count];
  double_values_ = new List<ValueDescriptionDouble>[count];
  for (int i = 0; i < count; ++i) {
    output_[i] = NULL;
    integer32_values_[i].Initialize(0);
    double_values_[i].Initialize(0);
  }
  output_count_ = count;

  // Translate each output frame.
  for (int i = 0; i < count; ++i) {
    DoComputeFrame(&iterator, i);
  }

  // Print some helpful diagnostic information.
  if (FLAG_trace_deopt) {
    double ms = static_cast<double>(OS::Ticks() - start) / 1000;
    int index = output_count_ - 1;  // Index of the topmost frame.
    JSFunction* function = output_[index]->GetFunction();
    PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
           reinterpret_cast<intptr_t>(function));
    function->PrintName();
    PrintF(" => node=%u, pc=0x%08x, state=%s, took %0.3f ms]\n",
           node_id,
           output_[index]->GetPc(),
           FullCodeGenerator::State2String(
               static_cast<FullCodeGenerator::State>(
                   output_[index]->GetState()->value())),
           ms);
  }
}


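// Allocates heap numbers for all untagged int32 and double values
// recorded for the given output frame and writes them into the
// corresponding expression stack slots of the materialized frame.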
void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
  // We need to adjust the stack index by one for the top-most frame.
  int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
  List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
  for (int i = 0; i < ints->length(); i++) {
    ValueDescriptionInteger32 value = ints->at(i);
    double val = static_cast<double>(value.int32_value());
    InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
  }

  // Iterate over the double values and convert them to heap numbers.
  List<ValueDescriptionDouble>* doubles = &double_values_[index];
  for (int i = 0; i < doubles->length(); ++i) {
    ValueDescriptionDouble value = doubles->at(i);
    InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
                          extra_slot_count);
  }
}


void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
                                        int stack_index,
                                        double val,
                                        int extra_slot_count) {
  // Add one to the TOS index to account for the 'state' pushed before
  // jumping to the stub that calls Runtime::NotifyDeoptimized.
  int tos_index = stack_index + extra_slot_count;
  int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
  if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
  Handle<Object> num = isolate_->factory()->NewNumber(val);
  frame->SetExpression(index, *num);
}


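// Translates a single command from the translation stream, materializing
// the value it describes into the given slot of the output frame.
// Untagged int32 and double values are recorded on the side and replaced
// by a GC-safe placeholder until heap numbers can be allocated for them.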
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                                     int frame_index,
                                     unsigned output_offset) {
  disasm::NameConverter converter;
  // A GC-safe temporary placeholder that we can put in the output frame.
  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));

  // Ignore commands marked as duplicate and act on the first non-duplicate.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  while (opcode == Translation::DUPLICATE) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
    opcode = static_cast<Translation::Opcode>(iterator->Next());
  }

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
    case Translation::DUPLICATE:
      UNREACHABLE();
      return;

    case Translation::REGISTER: {
      int input_reg = iterator->Next();
      uint32_t input_value = input_->GetRegister(input_reg);
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- 0x%08x ; %s\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               input_value,
               converter.NameOfCPURegister(input_reg));
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }

    case Translation::INT32_REGISTER: {
      int input_reg = iterator->Next();
      uint32_t value = input_->GetRegister(input_reg);
      bool is_smi = Smi::IsValid(value);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- %d ; %s (%s)\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               converter.NameOfCPURegister(input_reg),
               is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(value));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddInteger32Value(frame_index, output_index, value);
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }

    case Translation::DOUBLE_REGISTER: {
      int input_reg = iterator->Next();
      double value = input_->GetDoubleRegister(input_reg);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- %e ; %s\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               DoubleRegister::AllocationIndexToString(input_reg));
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(frame_index, output_index, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }

    case Translation::STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      uint32_t input_value = input_->GetFrameSlot(input_offset);
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- 0x%08x ; [esp + %d]\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               input_value,
               input_offset);
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }

    case Translation::INT32_STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      int32_t value = input_->GetFrameSlot(input_offset);
      bool is_smi = Smi::IsValid(value);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- %d ; [esp + %d] (%s)\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               input_offset,
               is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(value));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddInteger32Value(frame_index, output_index, value);
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }

    case Translation::DOUBLE_STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      double value = input_->GetDoubleFrameSlot(input_offset);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- %e ; [esp + %d]\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               input_offset);
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(frame_index, output_index, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }

    case Translation::LITERAL: {
      Object* literal = ComputeLiteral(iterator->Next());
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        literal->ShortPrint();
        PrintF(" ; literal\n");
      }
      intptr_t value = reinterpret_cast<intptr_t>(literal);
      output_[frame_index]->SetFrameSlot(output_offset, value);
      return;
    }

    case Translation::ARGUMENTS_OBJECT: {
      // Use the hole value as a sentinel and fill in the arguments object
      // after the deoptimized frame is built.
      ASSERT(frame_index == 0);  // Only supported for first frame.
      if (FLAG_trace_deopt) {
        PrintF("    0x%08x: [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        isolate_->heap()->the_hole_value()->ShortPrint();
        PrintF(" ; arguments object\n");
      }
      intptr_t value = reinterpret_cast<intptr_t>(
          isolate_->heap()->the_hole_value());
      output_[frame_index]->SetFrameSlot(output_offset, value);
      return;
    }
  }
}


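// OSR variant of DoTranslateCommand: moves a tagged value from the
// unoptimized input frame into a register or stack slot of the single
// output frame. Returns false if the translation must be aborted, e.g.
// when an int32 or double value cannot be recovered from the input.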
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
                                        int* input_offset) {
  disasm::NameConverter converter;
  FrameDescription* output = output_[0];

  // The input values are all part of the unoptimized frame so they
  // are all tagged pointers.
  uint32_t input_value = input_->GetFrameSlot(*input_offset);
  Object* input_object = reinterpret_cast<Object*>(input_value);

  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  bool duplicate = (opcode == Translation::DUPLICATE);
  if (duplicate) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
  }

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
    case Translation::DUPLICATE:
      UNREACHABLE();  // Malformed input.
      return false;

    case Translation::REGISTER: {
      int output_reg = iterator->Next();
      if (FLAG_trace_osr) {
        PrintF("    %s <- 0x%08x ; [esp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               input_value,
               *input_offset);
      }
      output->SetRegister(output_reg, input_value);
      break;
    }

    case Translation::INT32_REGISTER: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_reg = iterator->Next();
      int int32_value = input_object->IsSmi()
          ? Smi::cast(input_object)->value()
          : FastD2I(input_object->Number());
      // Abort the translation if the conversion lost information.
      if (!input_object->IsSmi() &&
          FastI2D(int32_value) != input_object->Number()) {
        if (FLAG_trace_osr) {
          PrintF("**** %g could not be converted to int32 ****\n",
                 input_object->Number());
        }
        return false;
      }
      if (FLAG_trace_osr) {
        PrintF("    %s <- %d (int32) ; [esp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               int32_value,
               *input_offset);
      }
      output->SetRegister(output_reg, int32_value);
      break;
    }

    case Translation::DOUBLE_REGISTER: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_reg = iterator->Next();
      double double_value = input_object->Number();
      if (FLAG_trace_osr) {
        PrintF("    %s <- %g (double) ; [esp + %d]\n",
               DoubleRegister::AllocationIndexToString(output_reg),
               double_value,
               *input_offset);
      }
      output->SetDoubleRegister(output_reg, double_value);
      break;
    }

    case Translation::STACK_SLOT: {
      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      if (FLAG_trace_osr) {
        PrintF("    [esp + %d] <- 0x%08x ; [esp + %d]\n",
               output_offset,
               input_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, input_value);
      break;
    }

    case Translation::INT32_STACK_SLOT: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      int int32_value = input_object->IsSmi()
          ? Smi::cast(input_object)->value()
          : DoubleToInt32(input_object->Number());
      // Abort the translation if the conversion lost information.
      if (!input_object->IsSmi() &&
          FastI2D(int32_value) != input_object->Number()) {
        if (FLAG_trace_osr) {
          PrintF("**** %g could not be converted to int32 ****\n",
                 input_object->Number());
        }
        return false;
      }
      if (FLAG_trace_osr) {
        PrintF("    [esp + %d] <- %d (int32) ; [esp + %d]\n",
               output_offset,
               int32_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, int32_value);
      break;
    }

    case Translation::DOUBLE_STACK_SLOT: {
      static const int kLowerOffset = 0 * kPointerSize;
      static const int kUpperOffset = 1 * kPointerSize;

      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      double double_value = input_object->Number();
      uint64_t int_value = BitCast<uint64_t, double>(double_value);
      int32_t lower = static_cast<int32_t>(int_value);
      int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
      if (FLAG_trace_osr) {
        PrintF("    [esp + %d] <- 0x%08x (upper bits of %g) ; [esp + %d]\n",
               output_offset + kUpperOffset,
               upper,
               double_value,
               *input_offset);
        PrintF("    [esp + %d] <- 0x%08x (lower bits of %g) ; [esp + %d]\n",
               output_offset + kLowerOffset,
               lower,
               double_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset + kLowerOffset, lower);
      output->SetFrameSlot(output_offset + kUpperOffset, upper);
      break;
    }

    case Translation::LITERAL: {
      // Just ignore non-materialized literals.
      iterator->Next();
      break;
    }

    case Translation::ARGUMENTS_OBJECT: {
      // Optimized code assumes that the arguments object has not been
      // materialized and so bypasses it when doing arguments access.
      // We should have bailed out before starting the frame translation.
      UNREACHABLE();
      return false;
    }
  }

  if (!duplicate) *input_offset -= kPointerSize;
  return true;
}


unsigned Deoptimizer::ComputeInputFrameSize() const {
  unsigned fixed_size = ComputeFixedSize(function_);
  // The fp-to-sp delta already takes the context and the function
  // into account so we have to avoid double counting them (-2).
  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
  if (bailout_type_ == OSR) {
    // TODO(kasperl): It would be nice if we could verify that the
    // size matches the stack height we can compute based on the
    // environment at the OSR entry. The code for that is built into
    // the DoComputeOsrOutputFrame function for now.
  } else {
    unsigned stack_slots = optimized_code_->stack_slots();
    unsigned outgoing_size = ComputeOutgoingArgumentSize();
    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
  }
#endif
  return result;
}


unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
  // The fixed part of the frame consists of the return address, frame
  // pointer, function, context, and all the incoming arguments.
  static const unsigned kFixedSlotSize = 4 * kPointerSize;
  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
}


unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
  // The incoming arguments are the values for the formal parameters and
  // the receiver. Every slot contains a pointer.
  unsigned arguments = function->shared()->formal_parameter_count() + 1;
  return arguments * kPointerSize;
}


unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
  return height * kPointerSize;
}


Object* Deoptimizer::ComputeLiteral(int index) const {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  FixedArray* literals = data->LiteralArray();
  return literals->get(index);
}


void Deoptimizer::AddInteger32Value(int frame_index,
                                    int slot_index,
                                    int32_t value) {
  ValueDescriptionInteger32 value_desc(slot_index, value);
  integer32_values_[frame_index].Add(value_desc);
}


void Deoptimizer::AddDoubleValue(int frame_index,
                                 int slot_index,
                                 double value) {
  ValueDescriptionDouble value_desc(slot_index, value);
  double_values_[frame_index].Add(value_desc);
}


static Mutex* flag_mutex = OS::CreateMutex();


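// Allocates an executable chunk of memory and fills it with the
// generated deoptimization entry code for the given bailout type.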
LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
  // We cannot run this if the serializer is enabled because this will
  // cause us to emit relocation information for the external
  // references. This is fine because the deoptimizer's code section
  // isn't meant to be serialized at all.
  ASSERT(!Serializer::enabled());
  // Grab a mutex because we're changing a global flag.
  ScopedLock lock(flag_mutex);
  bool old_debug_code = FLAG_debug_code;
  FLAG_debug_code = false;

  MacroAssembler masm(NULL, 16 * KB);
  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
  FLAG_debug_code = old_debug_code;
  return chunk;
}


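// Searches the list of known deoptimizing code objects for one that
// contains the given address, returning NULL if there is no match.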
Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
  DeoptimizingCodeListNode* node =
      Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    if (node->code()->contains(addr)) return *node->code();
    node = node->next();
  }
  return NULL;
}


void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  ASSERT(data->deoptimizing_code_list_ != NULL);
  // Run through the code objects to find this one and remove it.
  DeoptimizingCodeListNode* prev = NULL;
  DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
  while (current != NULL) {
    if (*current->code() == code) {
      // Unlink from list. If prev is NULL we are looking at the first element.
      if (prev == NULL) {
        data->deoptimizing_code_list_ = current->next();
      } else {
        prev->set_next(current->next());
      }
      delete current;
      return;
    }
    // Move to next in list.
    prev = current;
    current = current->next();
  }
  // Deoptimizing code is removed through a weak callback. Each object is
  // expected to be removed once and only once.
  UNREACHABLE();
}


FrameDescription::FrameDescription(uint32_t frame_size,
                                   JSFunction* function)
    : frame_size_(frame_size),
      function_(function),
      top_(kZapUint32),
      pc_(kZapUint32),
      fp_(kZapUint32) {
  // Zap all the registers.
  for (int r = 0; r < Register::kNumRegisters; r++) {
    SetRegister(r, kZapUint32);
  }

  // Zap all the slots.
  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
    SetFrameSlot(o, kZapUint32);
  }
}


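// Converts a translation slot index into a byte offset within this
// frame. Non-negative indices address locals and spill slots; negative
// indices address the incoming parameters.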
unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
                                                  int slot_index) {
  if (slot_index >= 0) {
    // Local or spill slots. Skip the fixed part of the frame
    // including all arguments.
    unsigned base =
        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
    return base - ((slot_index + 1) * kPointerSize);
  } else {
    // Incoming parameter.
    unsigned base = GetFrameSize() -
        deoptimizer->ComputeIncomingArgumentSize(GetFunction());
    return base - ((slot_index + 1) * kPointerSize);
  }
}


void TranslationBuffer::Add(int32_t value) {
  // Encode the sign bit in the least significant bit.
  bool is_negative = (value < 0);
  uint32_t bits = ((is_negative ? -value : value) << 1) |
      static_cast<int32_t>(is_negative);
  // Encode the individual bytes using the least significant bit of
  // each byte to indicate whether or not more bytes follow.
  do {
    uint32_t next = bits >> 7;
    contents_.Add(((bits << 1) & 0xFF) | (next != 0));
    bits = next;
  } while (bits != 0);
}


int32_t TranslationIterator::Next() {
  ASSERT(HasNext());
  // Run through the bytes until we reach one with a least significant
  // bit of zero (marks the end).
  uint32_t bits = 0;
  for (int i = 0; true; i += 7) {
    uint8_t next = buffer_->get(index_++);
    bits |= (next >> 1) << i;
    if ((next & 1) == 0) break;
  }
  // The bits encode the sign in the least significant bit.
  bool is_negative = (bits & 1) == 1;
  int32_t result = bits >> 1;
  return is_negative ? -result : result;
}


Handle<ByteArray> TranslationBuffer::CreateByteArray() {
  int length = contents_.length();
  Handle<ByteArray> result =
      Isolate::Current()->factory()->NewByteArray(length, TENURED);
  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
  return result;
}


void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
  buffer_->Add(FRAME);
  buffer_->Add(node_id);
  buffer_->Add(literal_id);
  buffer_->Add(height);
}


void Translation::StoreRegister(Register reg) {
  buffer_->Add(REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreInt32Register(Register reg) {
  buffer_->Add(INT32_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreDoubleRegister(DoubleRegister reg) {
  buffer_->Add(DOUBLE_REGISTER);
  buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
}


void Translation::StoreStackSlot(int index) {
  buffer_->Add(STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreInt32StackSlot(int index) {
  buffer_->Add(INT32_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreDoubleStackSlot(int index) {
  buffer_->Add(DOUBLE_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreLiteral(int literal_id) {
  buffer_->Add(LITERAL);
  buffer_->Add(literal_id);
}


void Translation::StoreArgumentsObject() {
  buffer_->Add(ARGUMENTS_OBJECT);
}


void Translation::MarkDuplicate() {
  buffer_->Add(DUPLICATE);
}


int Translation::NumberOfOperandsFor(Opcode opcode) {
  switch (opcode) {
    case ARGUMENTS_OBJECT:
    case DUPLICATE:
      return 0;
    case BEGIN:
    case REGISTER:
    case INT32_REGISTER:
    case DOUBLE_REGISTER:
    case STACK_SLOT:
    case INT32_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
    case LITERAL:
      return 1;
    case FRAME:
      return 3;
  }
  UNREACHABLE();
  return -1;
}


#ifdef DEBUG

const char* Translation::StringFor(Opcode opcode) {
  switch (opcode) {
    case BEGIN:
      return "BEGIN";
    case FRAME:
      return "FRAME";
    case REGISTER:
      return "REGISTER";
    case INT32_REGISTER:
      return "INT32_REGISTER";
    case DOUBLE_REGISTER:
      return "DOUBLE_REGISTER";
    case STACK_SLOT:
      return "STACK_SLOT";
    case INT32_STACK_SLOT:
      return "INT32_STACK_SLOT";
    case DOUBLE_STACK_SLOT:
      return "DOUBLE_STACK_SLOT";
    case LITERAL:
      return "LITERAL";
    case ARGUMENTS_OBJECT:
      return "ARGUMENTS_OBJECT";
    case DUPLICATE:
      return "DUPLICATE";
  }
  UNREACHABLE();
  return "";
}

#endif


DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  // Globalize the code object and make it weak.
  code_ = Handle<Code>::cast(global_handles->Create(code));
  global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
                           this,
                           Deoptimizer::HandleWeakDeoptimizedCode);
}


DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
}


} }  // namespace v8::internal