OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 313 matching lines...)
324 // Run through the list of all native contexts and deoptimize. | 324 // Run through the list of all native contexts and deoptimize. |
325 Object* context = isolate->heap()->native_contexts_list(); | 325 Object* context = isolate->heap()->native_contexts_list(); |
326 while (!context->IsUndefined()) { | 326 while (!context->IsUndefined()) { |
327 VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor); | 327 VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor); |
328 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); | 328 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); |
329 } | 329 } |
330 } | 330 } |
331 | 331 |
332 | 332 |
333 // Removes the functions selected by the given filter from the optimized | 333 // Removes the functions selected by the given filter from the optimized |
334 // function list of the given context and partitions the removed functions | 334 // function list of the given context and adds their code to the list of |
335 // into one or more lists such that all functions in a list share the same | 335 // code objects to be deoptimized. |
336 // code. The head of each list is written in the deoptimizing_functions field | 336 static void SelectCodeToDeoptimize(Context* context, |
337 // of the corresponding code object. | 337 OptimizedFunctionFilter* filter, |
338 // The found code objects are returned in the given zone list. | 338 ZoneList<Code*>* codes, |
339 static void PartitionOptimizedFunctions(Context* context, | 339 Zone* zone, |
340 OptimizedFunctionFilter* filter, | 340 Object* undefined) { |
341 ZoneList<Code*>* partitions, | |
342 Zone* zone, | |
343 Object* undefined) { | |
344 DisallowHeapAllocation no_allocation; | 341 DisallowHeapAllocation no_allocation; |
345 Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST); | 342 Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST); |
346 Object* remainder_head = undefined; | 343 Object* remainder_head = undefined; |
347 Object* remainder_tail = undefined; | 344 Object* remainder_tail = undefined; |
348 ASSERT_EQ(0, partitions->length()); | 345 |
| 346 // TODO(titzer): rewrite to not modify unselected functions. |
349 while (current != undefined) { | 347 while (current != undefined) { |
350 JSFunction* function = JSFunction::cast(current); | 348 JSFunction* function = JSFunction::cast(current); |
351 current = function->next_function_link(); | 349 current = function->next_function_link(); |
352 if (filter->TakeFunction(function)) { | 350 if (filter->TakeFunction(function)) { |
| 351 // Extract this function from the context's list and remember the code. |
353 Code* code = function->code(); | 352 Code* code = function->code(); |
354 if (code->deoptimizing_functions() == undefined) { | 353 ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); |
355 partitions->Add(code, zone); | 354 if (code->marked_for_deoptimization()) { |
| 355 ASSERT(codes->Contains(code)); |
356 } else { | 356 } else { |
357 ASSERT(partitions->Contains(code)); | 357 code->set_marked_for_deoptimization(true); |
| 358 codes->Add(code, zone); |
358 } | 359 } |
359 function->set_next_function_link(code->deoptimizing_functions()); | 360 SharedFunctionInfo* shared = function->shared(); |
360 code->set_deoptimizing_functions(function); | 361 // Replace the function's code with the shared code. |
| 362 function->set_code(shared->code()); |
| 363 // Evict the code from the optimized code map. |
| 364 shared->EvictFromOptimizedCodeMap(code, "deoptimized function"); |
| 365 // Remove the function from the optimized functions list. |
| 366 function->set_next_function_link(undefined); |
| 367 |
| 368 if (FLAG_trace_deopt) { |
| 369 PrintF("[forced deoptimization: "); |
| 370 function->PrintName(); |
| 371 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
| 372 } |
361 } else { | 373 } else { |
| 374 // Don't select this function; link it back into the list. |
362 if (remainder_head == undefined) { | 375 if (remainder_head == undefined) { |
363 remainder_head = function; | 376 remainder_head = function; |
364 } else { | 377 } else { |
365 JSFunction::cast(remainder_tail)->set_next_function_link(function); | 378 JSFunction::cast(remainder_tail)->set_next_function_link(function); |
366 } | 379 } |
367 remainder_tail = function; | 380 remainder_tail = function; |
368 } | 381 } |
369 } | 382 } |
370 if (remainder_tail != undefined) { | 383 if (remainder_tail != undefined) { |
371 JSFunction::cast(remainder_tail)->set_next_function_link(undefined); | 384 JSFunction::cast(remainder_tail)->set_next_function_link(undefined); |
(...skipping 14 matching lines...)
386 public: | 399 public: |
387 explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {} | 400 explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {} |
388 virtual bool TakeFunction(JSFunction* function) { | 401 virtual bool TakeFunction(JSFunction* function) { |
389 return function->code() == code_; | 402 return function->code() == code_; |
390 } | 403 } |
391 private: | 404 private: |
392 Code* code_; | 405 Code* code_; |
393 }; | 406 }; |
394 | 407 |
395 | 408 |
| 409 class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter { |
| 410 public: |
| 411 virtual bool TakeFunction(JSFunction* function) { |
| 412 return function->code()->marked_for_deoptimization(); |
| 413 } |
| 414 }; |
| 415 |
| 416 |
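
For orientation, a minimal sketch (not part of the patch) of how the OptimizedFunctionFilter hook is used: a filter overrides TakeFunction() and is handed to one of the Deoptimize* entry points. SharedMatchFilter and the shared argument are invented here for illustration.

    // Hypothetical filter: select every optimized function backed by a
    // particular SharedFunctionInfo.
    class SharedMatchFilter : public OptimizedFunctionFilter {
     public:
      explicit SharedMatchFilter(SharedFunctionInfo* shared) : shared_(shared) {}
      virtual bool TakeFunction(JSFunction* function) {
        return function->shared() == shared_;
      }
     private:
      SharedFunctionInfo* shared_;
    };

    // Usage mirrors DeoptimizeAll() below:
    //   SharedMatchFilter filter(shared);
    //   Deoptimizer::DeoptimizeAllFunctionsWith(isolate, &filter);
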
396 void Deoptimizer::DeoptimizeAll(Isolate* isolate) { | 417 void Deoptimizer::DeoptimizeAll(Isolate* isolate) { |
397 DisallowHeapAllocation no_allocation; | 418 DisallowHeapAllocation no_allocation; |
398 | 419 |
399 if (FLAG_trace_deopt) { | 420 if (FLAG_trace_deopt) { |
400 PrintF("[deoptimize all contexts]\n"); | 421 PrintF("[deoptimize all contexts]\n"); |
401 } | 422 } |
402 | 423 |
403 DeoptimizeAllFilter filter; | 424 DeoptimizeAllFilter filter; |
404 DeoptimizeAllFunctionsWith(isolate, &filter); | 425 DeoptimizeAllFunctionsWith(isolate, &filter); |
405 } | 426 } |
406 | 427 |
407 | 428 |
408 void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) { | 429 void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) { |
409 DisallowHeapAllocation no_allocation; | 430 DisallowHeapAllocation no_allocation; |
410 DeoptimizeAllFilter filter; | 431 DeoptimizeAllFilter filter; |
411 if (object->IsJSGlobalProxy()) { | 432 if (object->IsJSGlobalProxy()) { |
412 Object* proto = object->GetPrototype(); | 433 Object* proto = object->GetPrototype(); |
413 ASSERT(proto->IsJSGlobalObject()); | 434 ASSERT(proto->IsJSGlobalObject()); |
414 DeoptimizeAllFunctionsForContext( | 435 DeoptimizeAllFunctionsForContext( |
415 GlobalObject::cast(proto)->native_context(), &filter); | 436 GlobalObject::cast(proto)->native_context(), &filter); |
416 } else if (object->IsGlobalObject()) { | 437 } else if (object->IsGlobalObject()) { |
417 DeoptimizeAllFunctionsForContext( | 438 DeoptimizeAllFunctionsForContext( |
418 GlobalObject::cast(object)->native_context(), &filter); | 439 GlobalObject::cast(object)->native_context(), &filter); |
419 } | 440 } |
420 } | 441 } |
421 | 442 |
422 | 443 |
423 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 444 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
424 if (!function->IsOptimized()) return; | |
425 Code* code = function->code(); | 445 Code* code = function->code(); |
426 Context* context = function->context()->native_context(); | 446 if (code->kind() != Code::OPTIMIZED_FUNCTION) return; |
427 Isolate* isolate = context->GetIsolate(); | |
428 Object* undefined = isolate->heap()->undefined_value(); | |
429 Zone zone(isolate); | |
430 ZoneList<Code*> codes(1, &zone); | |
431 DeoptimizeWithMatchingCodeFilter filter(code); | 447 DeoptimizeWithMatchingCodeFilter filter(code); |
432 PartitionOptimizedFunctions(context, &filter, &codes, &zone, undefined); | 448 DeoptimizeAllFunctionsForContext( |
433 ASSERT_EQ(1, codes.length()); | 449 function->context()->native_context(), &filter); |
434 DeoptimizeFunctionWithPreparedFunctionList( | |
435 JSFunction::cast(codes.at(0)->deoptimizing_functions())); | |
436 codes.at(0)->set_deoptimizing_functions(undefined); | |
437 } | 450 } |
438 | 451 |
439 | 452 |
440 void Deoptimizer::DeoptimizeAllFunctionsForContext( | 453 void Deoptimizer::DeoptimizeAllFunctionsForContext( |
441 Context* context, OptimizedFunctionFilter* filter) { | 454 Context* context, OptimizedFunctionFilter* filter) { |
442 ASSERT(context->IsNativeContext()); | 455 ASSERT(context->IsNativeContext()); |
443 Isolate* isolate = context->GetIsolate(); | 456 Isolate* isolate = context->GetIsolate(); |
444 Object* undefined = isolate->heap()->undefined_value(); | 457 Object* undefined = isolate->heap()->undefined_value(); |
445 Zone zone(isolate); | 458 Zone zone(isolate); |
446 ZoneList<Code*> codes(1, &zone); | 459 ZoneList<Code*> codes(4, &zone); |
447 PartitionOptimizedFunctions(context, filter, &codes, &zone, undefined); | 460 SelectCodeToDeoptimize(context, filter, &codes, &zone, undefined); |
448 for (int i = 0; i < codes.length(); ++i) { | 461 for (int i = 0; i < codes.length(); i++) { |
449 DeoptimizeFunctionWithPreparedFunctionList( | 462 DeoptimizeCode(isolate, codes.at(i)); |
450 JSFunction::cast(codes.at(i)->deoptimizing_functions())); | |
451 codes.at(i)->set_deoptimizing_functions(undefined); | |
452 } | 463 } |
453 } | 464 } |
454 | 465 |
455 | 466 |
456 void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate, | 467 void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate, |
457 OptimizedFunctionFilter* filter) { | 468 OptimizedFunctionFilter* filter) { |
458 DisallowHeapAllocation no_allocation; | 469 DisallowHeapAllocation no_allocation; |
459 | 470 |
460 // Run through the list of all native contexts and deoptimize. | 471 // Run through the list of all native contexts and deoptimize. |
461 Object* context = isolate->heap()->native_contexts_list(); | 472 Object* context = isolate->heap()->native_contexts_list(); |
462 while (!context->IsUndefined()) { | 473 while (!context->IsUndefined()) { |
463 DeoptimizeAllFunctionsForContext(Context::cast(context), filter); | 474 DeoptimizeAllFunctionsForContext(Context::cast(context), filter); |
464 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); | 475 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); |
465 } | 476 } |
466 } | 477 } |
467 | 478 |
468 | 479 |
| 480 void Deoptimizer::DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes) { |
| 481 if (codes->length() == 0) return; // Nothing to do. |
| 482 |
| 483 // Mark the code; any functions referring to this code will be selected. |
| 484 for (int i = 0; i < codes->length(); i++) { |
| 485 ASSERT(!codes->at(i)->marked_for_deoptimization()); |
| 486 codes->at(i)->set_marked_for_deoptimization(true); |
| 487 } |
| 488 |
| 489 // For all contexts, remove optimized functions that refer to the selected |
| 490 // code from the optimized function lists. |
| 491 Object* undefined = isolate->heap()->undefined_value(); |
| 492 Zone zone(isolate); |
| 493 Object* list = isolate->heap()->native_contexts_list(); |
| 494 DeoptimizeMarkedCodeFilter filter; |
| 495 while (!list->IsUndefined()) { |
| 496 Context* context = Context::cast(list); |
| 497 // Note that selecting code unlinks the functions that refer to it. |
| 498 SelectCodeToDeoptimize(context, &filter, codes, &zone, undefined); |
| 499 list = context->get(Context::NEXT_CONTEXT_LINK); |
| 500 } |
| 501 |
| 502 // Now deoptimize all the code. |
| 503 for (int i = 0; i < codes->length(); i++) { |
| 504 DeoptimizeCode(isolate, codes->at(i)); |
| 505 } |
| 506 } |
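
As a usage note, a minimal sketch of a hypothetical caller of the new DeoptimizeCodeList() entry point; code1 and code2 are placeholders for optimized Code* objects that are not yet marked for deoptimization (see the ASSERT above).

    Zone zone(isolate);
    ZoneList<Code*> codes(2, &zone);
    codes.Add(code1, &zone);
    codes.Add(code2, &zone);
    // Marks the code, unlinks any functions that use it, then patches and
    // registers each code object.
    Deoptimizer::DeoptimizeCodeList(isolate, &codes);
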
| 507 |
| 508 |
| 509 void Deoptimizer::DeoptimizeCode(Isolate* isolate, Code* code) { |
| 510 HandleScope scope(isolate); |
| 511 DisallowHeapAllocation nha; |
| 512 |
| 513 // Do platform-specific patching of the optimized code. |
| 514 PatchCodeForDeoptimization(isolate, code); |
| 515 |
| 516 // Add the deoptimizing code to the list. |
| 517 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| 518 DeoptimizerData* data = isolate->deoptimizer_data(); |
| 519 node->set_next(data->deoptimizing_code_list_); |
| 520 data->deoptimizing_code_list_ = node; |
| 521 |
| 522 // We might be in the middle of incremental marking with compaction. |
| 523 // Tell collector to treat this code object in a special way and |
| 524 // ignore all slots that might have been recorded on it. |
| 525 isolate->heap()->mark_compact_collector()->InvalidateCode(code); |
| 526 } |
| 527 |
| 528 |
469 void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate, | 529 void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate, |
470 v8::Persistent<v8::Value>* obj, | 530 v8::Persistent<v8::Value>* obj, |
471 void* parameter) { | 531 void* parameter) { |
472 DeoptimizingCodeListNode* node = | 532 DeoptimizingCodeListNode* node = |
473 reinterpret_cast<DeoptimizingCodeListNode*>(parameter); | 533 reinterpret_cast<DeoptimizingCodeListNode*>(parameter); |
474 DeoptimizerData* data = | 534 DeoptimizerData* data = |
475 reinterpret_cast<Isolate*>(isolate)->deoptimizer_data(); | 535 reinterpret_cast<Isolate*>(isolate)->deoptimizer_data(); |
476 data->RemoveDeoptimizingCode(*node->code()); | 536 data->RemoveDeoptimizingCode(*node->code()); |
477 #ifdef DEBUG | 537 #ifdef DEBUG |
478 for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_; | 538 for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_; |
(...skipping 414 matching lines...)
893 input_offset -= (parameter_count * kPointerSize); | 953 input_offset -= (parameter_count * kPointerSize); |
894 | 954 |
895 // There are no translation commands for the caller's pc and fp, the | 955 // There are no translation commands for the caller's pc and fp, the |
896 // context, and the function. Synthesize their values and set them up | 956 // context, and the function. Synthesize their values and set them up |
897 // explicitly. | 957 // explicitly. |
898 // | 958 // |
899 // The caller's pc for the bottommost output frame is the same as in the | 959 // The caller's pc for the bottommost output frame is the same as in the |
900 // input frame. For all subsequent output frames, it can be read from the | 960 // input frame. For all subsequent output frames, it can be read from the |
901 // previous one. This frame's pc can be computed from the non-optimized | 961 // previous one. This frame's pc can be computed from the non-optimized |
902 // function code and AST id of the bailout. | 962 // function code and AST id of the bailout. |
903 output_offset -= kPointerSize; | 963 output_offset -= kPCOnStackSize; |
904 input_offset -= kPointerSize; | 964 input_offset -= kPCOnStackSize; |
905 intptr_t value; | 965 intptr_t value; |
906 if (is_bottommost) { | 966 if (is_bottommost) { |
907 value = input_->GetFrameSlot(input_offset); | 967 value = input_->GetFrameSlot(input_offset); |
908 } else { | 968 } else { |
909 value = output_[frame_index - 1]->GetPc(); | 969 value = output_[frame_index - 1]->GetPc(); |
910 } | 970 } |
911 output_frame->SetFrameSlot(output_offset, value); | 971 output_frame->SetCallerPc(output_offset, value); |
912 if (trace_) { | 972 if (trace_) { |
913 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 973 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
914 V8PRIxPTR " ; caller's pc\n", | 974 V8PRIxPTR " ; caller's pc\n", |
915 top_address + output_offset, output_offset, value); | 975 top_address + output_offset, output_offset, value); |
916 } | 976 } |
917 | 977 |
918 // The caller's frame pointer for the bottommost output frame is the same | 978 // The caller's frame pointer for the bottommost output frame is the same |
919 // as in the input frame. For all subsequent output frames, it can be | 979 // as in the input frame. For all subsequent output frames, it can be |
920 // read from the previous one. Also compute and set this frame's frame | 980 // read from the previous one. Also compute and set this frame's frame |
921 // pointer. | 981 // pointer. |
922 output_offset -= kPointerSize; | 982 output_offset -= kFPOnStackSize; |
923 input_offset -= kPointerSize; | 983 input_offset -= kFPOnStackSize; |
924 if (is_bottommost) { | 984 if (is_bottommost) { |
925 value = input_->GetFrameSlot(input_offset); | 985 value = input_->GetFrameSlot(input_offset); |
926 } else { | 986 } else { |
927 value = output_[frame_index - 1]->GetFp(); | 987 value = output_[frame_index - 1]->GetFp(); |
928 } | 988 } |
929 output_frame->SetFrameSlot(output_offset, value); | 989 output_frame->SetCallerFp(output_offset, value); |
930 intptr_t fp_value = top_address + output_offset; | 990 intptr_t fp_value = top_address + output_offset; |
931 ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) + | 991 ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) + |
932 has_alignment_padding_ * kPointerSize) == fp_value); | 992 has_alignment_padding_ * kPointerSize) == fp_value); |
933 output_frame->SetFp(fp_value); | 993 output_frame->SetFp(fp_value); |
934 if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value); | 994 if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value); |
935 if (trace_) { | 995 if (trace_) { |
936 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 996 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
937 V8PRIxPTR " ; caller's fp\n", | 997 V8PRIxPTR " ; caller's fp\n", |
938 fp_value, output_offset, value); | 998 fp_value, output_offset, value); |
939 } | 999 } |
(...skipping 102 matching lines...)
1042 | 1102 |
1043 // Compute the incoming parameter translation. | 1103 // Compute the incoming parameter translation. |
1044 int parameter_count = height; | 1104 int parameter_count = height; |
1045 unsigned output_offset = output_frame_size; | 1105 unsigned output_offset = output_frame_size; |
1046 for (int i = 0; i < parameter_count; ++i) { | 1106 for (int i = 0; i < parameter_count; ++i) { |
1047 output_offset -= kPointerSize; | 1107 output_offset -= kPointerSize; |
1048 DoTranslateCommand(iterator, frame_index, output_offset); | 1108 DoTranslateCommand(iterator, frame_index, output_offset); |
1049 } | 1109 } |
1050 | 1110 |
1051 // Read caller's PC from the previous frame. | 1111 // Read caller's PC from the previous frame. |
1052 output_offset -= kPointerSize; | 1112 output_offset -= kPCOnStackSize; |
1053 intptr_t callers_pc = output_[frame_index - 1]->GetPc(); | 1113 intptr_t callers_pc = output_[frame_index - 1]->GetPc(); |
1054 output_frame->SetFrameSlot(output_offset, callers_pc); | 1114 output_frame->SetCallerPc(output_offset, callers_pc); |
1055 if (trace_) { | 1115 if (trace_) { |
1056 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 1116 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
1057 V8PRIxPTR " ; caller's pc\n", | 1117 V8PRIxPTR " ; caller's pc\n", |
1058 top_address + output_offset, output_offset, callers_pc); | 1118 top_address + output_offset, output_offset, callers_pc); |
1059 } | 1119 } |
1060 | 1120 |
1061 // Read caller's FP from the previous frame, and set this frame's FP. | 1121 // Read caller's FP from the previous frame, and set this frame's FP. |
1062 output_offset -= kPointerSize; | 1122 output_offset -= kFPOnStackSize; |
1063 intptr_t value = output_[frame_index - 1]->GetFp(); | 1123 intptr_t value = output_[frame_index - 1]->GetFp(); |
1064 output_frame->SetFrameSlot(output_offset, value); | 1124 output_frame->SetCallerFp(output_offset, value); |
1065 intptr_t fp_value = top_address + output_offset; | 1125 intptr_t fp_value = top_address + output_offset; |
1066 output_frame->SetFp(fp_value); | 1126 output_frame->SetFp(fp_value); |
1067 if (trace_) { | 1127 if (trace_) { |
1068 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 1128 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
1069 V8PRIxPTR " ; caller's fp\n", | 1129 V8PRIxPTR " ; caller's fp\n", |
1070 fp_value, output_offset, value); | 1130 fp_value, output_offset, value); |
1071 } | 1131 } |
1072 | 1132 |
1073 // A marker value is used in place of the context. | 1133 // A marker value is used in place of the context. |
1074 output_offset -= kPointerSize; | 1134 output_offset -= kPointerSize; |
(...skipping 70 matching lines...)
1145 | 1205 |
1146 // Compute the incoming parameter translation. | 1206 // Compute the incoming parameter translation. |
1147 int parameter_count = height; | 1207 int parameter_count = height; |
1148 unsigned output_offset = output_frame_size; | 1208 unsigned output_offset = output_frame_size; |
1149 for (int i = 0; i < parameter_count; ++i) { | 1209 for (int i = 0; i < parameter_count; ++i) { |
1150 output_offset -= kPointerSize; | 1210 output_offset -= kPointerSize; |
1151 DoTranslateCommand(iterator, frame_index, output_offset); | 1211 DoTranslateCommand(iterator, frame_index, output_offset); |
1152 } | 1212 } |
1153 | 1213 |
1154 // Read caller's PC from the previous frame. | 1214 // Read caller's PC from the previous frame. |
1155 output_offset -= kPointerSize; | 1215 output_offset -= kPCOnStackSize; |
1156 intptr_t callers_pc = output_[frame_index - 1]->GetPc(); | 1216 intptr_t callers_pc = output_[frame_index - 1]->GetPc(); |
1157 output_frame->SetFrameSlot(output_offset, callers_pc); | 1217 output_frame->SetCallerPc(output_offset, callers_pc); |
1158 if (trace_) { | 1218 if (trace_) { |
1159 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 1219 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
1160 V8PRIxPTR " ; caller's pc\n", | 1220 V8PRIxPTR " ; caller's pc\n", |
1161 top_address + output_offset, output_offset, callers_pc); | 1221 top_address + output_offset, output_offset, callers_pc); |
1162 } | 1222 } |
1163 | 1223 |
1164 // Read caller's FP from the previous frame, and set this frame's FP. | 1224 // Read caller's FP from the previous frame, and set this frame's FP. |
1165 output_offset -= kPointerSize; | 1225 output_offset -= kFPOnStackSize; |
1166 intptr_t value = output_[frame_index - 1]->GetFp(); | 1226 intptr_t value = output_[frame_index - 1]->GetFp(); |
1167 output_frame->SetFrameSlot(output_offset, value); | 1227 output_frame->SetCallerFp(output_offset, value); |
1168 intptr_t fp_value = top_address + output_offset; | 1228 intptr_t fp_value = top_address + output_offset; |
1169 output_frame->SetFp(fp_value); | 1229 output_frame->SetFp(fp_value); |
1170 if (trace_) { | 1230 if (trace_) { |
1171 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 1231 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
1172 V8PRIxPTR " ; caller's fp\n", | 1232 V8PRIxPTR " ; caller's fp\n", |
1173 fp_value, output_offset, value); | 1233 fp_value, output_offset, value); |
1174 } | 1234 } |
1175 | 1235 |
1176 // The context can be gotten from the previous frame. | 1236 // The context can be gotten from the previous frame. |
1177 output_offset -= kPointerSize; | 1237 output_offset -= kPointerSize; |
(...skipping 80 matching lines...)
1258 const char* kind = is_setter_stub_frame ? "setter" : "getter"; | 1318 const char* kind = is_setter_stub_frame ? "setter" : "getter"; |
1259 if (trace_) { | 1319 if (trace_) { |
1260 PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes); | 1320 PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes); |
1261 } | 1321 } |
1262 | 1322 |
1263 // We need 1 stack entry for the return address + 4 stack entries from | 1323 // We need 1 stack entry for the return address + 4 stack entries from |
1264 // StackFrame::INTERNAL (FP, context, frame type, code object, see | 1324 // StackFrame::INTERNAL (FP, context, frame type, code object, see |
1265 // MacroAssembler::EnterFrame). For a setter stub frame we need one additional | 1325 // MacroAssembler::EnterFrame). For a setter stub frame we need one additional |
1266 // entry for the implicit return value, see | 1326 // entry for the implicit return value, see |
1267 // StoreStubCompiler::CompileStoreViaSetter. | 1327 // StoreStubCompiler::CompileStoreViaSetter. |
1268 unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0); | 1328 unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) + |
| 1329 (kFPOnStackSize / kPointerSize) + 3 + |
| 1330 (is_setter_stub_frame ? 1 : 0); |
1269 unsigned fixed_frame_size = fixed_frame_entries * kPointerSize; | 1331 unsigned fixed_frame_size = fixed_frame_entries * kPointerSize; |
1270 unsigned output_frame_size = height_in_bytes + fixed_frame_size; | 1332 unsigned output_frame_size = height_in_bytes + fixed_frame_size; |
1271 | 1333 |
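
Worked example (assuming a typical 32-bit target where kPCOnStackSize == kFPOnStackSize == kPointerSize): fixed_frame_entries = 1 + 1 + 3 = 5 for a getter stub frame and 6 for a setter stub frame, i.e. a fixed_frame_size of 5 or 6 words, which matches the old 1 + 4 (+ 1) constant.
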
1272 // Allocate and store the output frame description. | 1334 // Allocate and store the output frame description. |
1273 FrameDescription* output_frame = | 1335 FrameDescription* output_frame = |
1274 new(output_frame_size) FrameDescription(output_frame_size, accessor); | 1336 new(output_frame_size) FrameDescription(output_frame_size, accessor); |
1275 output_frame->SetFrameType(StackFrame::INTERNAL); | 1337 output_frame->SetFrameType(StackFrame::INTERNAL); |
1276 | 1338 |
1277 // A frame for an accessor stub can not be the topmost or bottommost one. | 1339 // A frame for an accessor stub can not be the topmost or bottommost one. |
1278 ASSERT(frame_index > 0 && frame_index < output_count_ - 1); | 1340 ASSERT(frame_index > 0 && frame_index < output_count_ - 1); |
1279 ASSERT(output_[frame_index] == NULL); | 1341 ASSERT(output_[frame_index] == NULL); |
1280 output_[frame_index] = output_frame; | 1342 output_[frame_index] = output_frame; |
1281 | 1343 |
1282 // The top address of the frame is computed from the previous frame's top and | 1344 // The top address of the frame is computed from the previous frame's top and |
1283 // this frame's size. | 1345 // this frame's size. |
1284 intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size; | 1346 intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
1285 output_frame->SetTop(top_address); | 1347 output_frame->SetTop(top_address); |
1286 | 1348 |
1287 unsigned output_offset = output_frame_size; | 1349 unsigned output_offset = output_frame_size; |
1288 | 1350 |
1289 // Read caller's PC from the previous frame. | 1351 // Read caller's PC from the previous frame. |
1290 output_offset -= kPointerSize; | 1352 output_offset -= kPCOnStackSize; |
1291 intptr_t callers_pc = output_[frame_index - 1]->GetPc(); | 1353 intptr_t callers_pc = output_[frame_index - 1]->GetPc(); |
1292 output_frame->SetFrameSlot(output_offset, callers_pc); | 1354 output_frame->SetCallerPc(output_offset, callers_pc); |
1293 if (trace_) { | 1355 if (trace_) { |
1294 PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR | 1356 PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR |
1295 " ; caller's pc\n", | 1357 " ; caller's pc\n", |
1296 top_address + output_offset, output_offset, callers_pc); | 1358 top_address + output_offset, output_offset, callers_pc); |
1297 } | 1359 } |
1298 | 1360 |
1299 // Read caller's FP from the previous frame, and set this frame's FP. | 1361 // Read caller's FP from the previous frame, and set this frame's FP. |
1300 output_offset -= kPointerSize; | 1362 output_offset -= kFPOnStackSize; |
1301 intptr_t value = output_[frame_index - 1]->GetFp(); | 1363 intptr_t value = output_[frame_index - 1]->GetFp(); |
1302 output_frame->SetFrameSlot(output_offset, value); | 1364 output_frame->SetCallerFp(output_offset, value); |
1303 intptr_t fp_value = top_address + output_offset; | 1365 intptr_t fp_value = top_address + output_offset; |
1304 output_frame->SetFp(fp_value); | 1366 output_frame->SetFp(fp_value); |
1305 if (trace_) { | 1367 if (trace_) { |
1306 PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR | 1368 PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR |
1307 " ; caller's fp\n", | 1369 " ; caller's fp\n", |
1308 fp_value, output_offset, value); | 1370 fp_value, output_offset, value); |
1309 } | 1371 } |
1310 | 1372 |
1311 // The context can be gotten from the previous frame. | 1373 // The context can be gotten from the previous frame. |
1312 output_offset -= kPointerSize; | 1374 output_offset -= kPointerSize; |
(...skipping 115 matching lines...)
1428 | 1490 |
1429 // The top address for the output frame can be computed from the input | 1491 // The top address for the output frame can be computed from the input |
1430 // frame pointer and the output frame's height. Subtract space for the | 1492 // frame pointer and the output frame's height. Subtract space for the |
1431 // context and function slots. | 1493 // context and function slots. |
1432 Register fp_reg = StubFailureTrampolineFrame::fp_register(); | 1494 Register fp_reg = StubFailureTrampolineFrame::fp_register(); |
1433 intptr_t top_address = input_->GetRegister(fp_reg.code()) - | 1495 intptr_t top_address = input_->GetRegister(fp_reg.code()) - |
1434 (2 * kPointerSize) - height_in_bytes; | 1496 (2 * kPointerSize) - height_in_bytes; |
1435 output_frame->SetTop(top_address); | 1497 output_frame->SetTop(top_address); |
1436 | 1498 |
1437 // Read caller's PC (JSFunction continuation) from the input frame. | 1499 // Read caller's PC (JSFunction continuation) from the input frame. |
1438 unsigned input_frame_offset = input_frame_size - kPointerSize; | 1500 unsigned input_frame_offset = input_frame_size - kPCOnStackSize; |
1439 unsigned output_frame_offset = output_frame_size - kPointerSize; | 1501 unsigned output_frame_offset = output_frame_size - kFPOnStackSize; |
1440 intptr_t value = input_->GetFrameSlot(input_frame_offset); | 1502 intptr_t value = input_->GetFrameSlot(input_frame_offset); |
1441 output_frame->SetFrameSlot(output_frame_offset, value); | 1503 output_frame->SetCallerPc(output_frame_offset, value); |
1442 if (trace_) { | 1504 if (trace_) { |
1443 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 1505 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
1444 V8PRIxPTR " ; caller's pc\n", | 1506 V8PRIxPTR " ; caller's pc\n", |
1445 top_address + output_frame_offset, output_frame_offset, value); | 1507 top_address + output_frame_offset, output_frame_offset, value); |
1446 } | 1508 } |
1447 | 1509 |
1448 // Read caller's FP from the input frame, and set this frame's FP. | 1510 // Read caller's FP from the input frame, and set this frame's FP. |
1449 input_frame_offset -= kPointerSize; | 1511 input_frame_offset -= kFPOnStackSize; |
1450 value = input_->GetFrameSlot(input_frame_offset); | 1512 value = input_->GetFrameSlot(input_frame_offset); |
1451 output_frame_offset -= kPointerSize; | 1513 output_frame_offset -= kFPOnStackSize; |
1452 output_frame->SetFrameSlot(output_frame_offset, value); | 1514 output_frame->SetCallerFp(output_frame_offset, value); |
1453 intptr_t frame_ptr = input_->GetRegister(fp_reg.code()); | 1515 intptr_t frame_ptr = input_->GetRegister(fp_reg.code()); |
1454 output_frame->SetRegister(fp_reg.code(), frame_ptr); | 1516 output_frame->SetRegister(fp_reg.code(), frame_ptr); |
1455 output_frame->SetFp(frame_ptr); | 1517 output_frame->SetFp(frame_ptr); |
1456 if (trace_) { | 1518 if (trace_) { |
1457 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | 1519 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
1458 V8PRIxPTR " ; caller's fp\n", | 1520 V8PRIxPTR " ; caller's fp\n", |
1459 top_address + output_frame_offset, output_frame_offset, value); | 1521 top_address + output_frame_offset, output_frame_offset, value); |
1460 } | 1522 } |
1461 | 1523 |
1462 // The context can be gotten from the input frame. | 1524 // The context can be gotten from the input frame. |
(...skipping 1099 matching lines...)
2562 desc.instr_size); | 2624 desc.instr_size); |
2563 chunk->CommitArea(desc.instr_size); | 2625 chunk->CommitArea(desc.instr_size); |
2564 CopyBytes(chunk->area_start(), desc.buffer, | 2626 CopyBytes(chunk->area_start(), desc.buffer, |
2565 static_cast<size_t>(desc.instr_size)); | 2627 static_cast<size_t>(desc.instr_size)); |
2566 CPU::FlushICache(chunk->area_start(), desc.instr_size); | 2628 CPU::FlushICache(chunk->area_start(), desc.instr_size); |
2567 | 2629 |
2568 data->deopt_entry_code_entries_[type] = entry_count; | 2630 data->deopt_entry_code_entries_[type] = entry_count; |
2569 } | 2631 } |
2570 | 2632 |
2571 | 2633 |
2572 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, | |
2573 Code* code) { | |
2574 SharedFunctionInfo* shared = function->shared(); | |
2575 Object* undefined = function->GetHeap()->undefined_value(); | |
2576 Object* current = function; | |
2577 | |
2578 while (current != undefined) { | |
2579 JSFunction* func = JSFunction::cast(current); | |
2580 current = func->next_function_link(); | |
2581 func->set_code(shared->code()); | |
2582 func->set_next_function_link(undefined); | |
2583 } | |
2584 } | |
2585 | |
2586 | |
2587 FrameDescription::FrameDescription(uint32_t frame_size, | 2634 FrameDescription::FrameDescription(uint32_t frame_size, |
2588 JSFunction* function) | 2635 JSFunction* function) |
2589 : frame_size_(frame_size), | 2636 : frame_size_(frame_size), |
2590 function_(function), | 2637 function_(function), |
2591 top_(kZapUint32), | 2638 top_(kZapUint32), |
2592 pc_(kZapUint32), | 2639 pc_(kZapUint32), |
2593 fp_(kZapUint32), | 2640 fp_(kZapUint32), |
2594 context_(kZapUint32) { | 2641 context_(kZapUint32) { |
2595 // Zap all the registers. | 2642 // Zap all the registers. |
2596 for (int r = 0; r < Register::kNumRegisters; r++) { | 2643 for (int r = 0; r < Register::kNumRegisters; r++) { |
(...skipping 502 matching lines...)
3099 | 3146 |
3100 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 3147 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
3101 v->VisitPointer(BitCast<Object**>(&function_)); | 3148 v->VisitPointer(BitCast<Object**>(&function_)); |
3102 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 3149 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
3103 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 3150 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
3104 } | 3151 } |
3105 | 3152 |
3106 #endif // ENABLE_DEBUGGER_SUPPORT | 3153 #endif // ENABLE_DEBUGGER_SUPPORT |
3107 | 3154 |
3108 } } // namespace v8::internal | 3155 } } // namespace v8::internal |