OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 26 matching lines...) |
37 #include "prettyprinter.h" | 37 #include "prettyprinter.h" |
38 | 38 |
39 | 39 |
40 namespace v8 { | 40 namespace v8 { |
41 namespace internal { | 41 namespace internal { |
42 | 42 |
43 DeoptimizerData::DeoptimizerData() { | 43 DeoptimizerData::DeoptimizerData() { |
44 eager_deoptimization_entry_code_entries_ = -1; | 44 eager_deoptimization_entry_code_entries_ = -1; |
45 lazy_deoptimization_entry_code_entries_ = -1; | 45 lazy_deoptimization_entry_code_entries_ = -1; |
46 size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize(); | 46 size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize(); |
| 47 #if defined(V8_TARGET_ARCH_X64) |
| 48 CodeRange* code_range = Isolate::Current()->code_range(); |
| 49 eager_deoptimization_entry_start_ = |
| 50 code_range->ReserveChunk(deopt_table_size, |
| 51 &eager_deoptimization_reserved_size_); |
| 52 eager_deoptimization_entry_code_ = NULL; |
| 53 lazy_deoptimization_entry_start_ = |
| 54 code_range->ReserveChunk(deopt_table_size, |
| 55 &lazy_deoptimization_reserved_size_); |
| 56 lazy_deoptimization_entry_code_ = NULL; |
| 57 #else |
47 eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size); | 58 eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size); |
48 lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size); | 59 lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size); |
| 60 #endif |
49 current_ = NULL; | 61 current_ = NULL; |
50 deoptimizing_code_list_ = NULL; | 62 deoptimizing_code_list_ = NULL; |
51 #ifdef ENABLE_DEBUGGER_SUPPORT | 63 #ifdef ENABLE_DEBUGGER_SUPPORT |
52 deoptimized_frame_info_ = NULL; | 64 deoptimized_frame_info_ = NULL; |
53 #endif | 65 #endif |
54 } | 66 } |
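For context: on x64 a direct CALL encodes a signed 32-bit displacement, so a deoptimization entry can only be reached with a near call if it lies within +/-2GB of the call site; reserving both entry tables inside the CodeRange up front preserves that property. A minimal standalone sketch of the constraint, assuming the usual 5-byte E8 CALL encoding (the helper below is illustrative, not V8 API):

#include <cstdint>

// Checks whether `target` is reachable from a CALL at `call_site` with a
// rel32 displacement. The displacement is measured from the end of the
// 5-byte CALL instruction, hence call_site + 5.
inline bool FitsInRel32(uintptr_t call_site, uintptr_t target) {
  int64_t delta = static_cast<int64_t>(target) -
                  static_cast<int64_t>(call_site + 5);
  return delta >= INT32_MIN && delta <= INT32_MAX;
}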
55 | 67 |
56 | 68 |
57 DeoptimizerData::~DeoptimizerData() { | 69 DeoptimizerData::~DeoptimizerData() { |
| 70 #if defined(V8_TARGET_ARCH_X64) |
| 71 if (eager_deoptimization_entry_code_ != NULL) { |
| 72 Isolate::Current()->memory_allocator()->Free( |
| 73 eager_deoptimization_entry_code_); |
| 74 eager_deoptimization_entry_code_ = NULL; |
| 75 } |
| 76 if (lazy_deoptimization_entry_code_ != NULL) { |
| 77 Isolate::Current()->memory_allocator()->Free( |
| 78 lazy_deoptimization_entry_code_); |
| 79 lazy_deoptimization_entry_code_ = NULL; |
| 80 } |
| 81 #else |
58 delete eager_deoptimization_entry_code_; | 82 delete eager_deoptimization_entry_code_; |
59 eager_deoptimization_entry_code_ = NULL; | 83 eager_deoptimization_entry_code_ = NULL; |
60 delete lazy_deoptimization_entry_code_; | 84 delete lazy_deoptimization_entry_code_; |
61 lazy_deoptimization_entry_code_ = NULL; | 85 lazy_deoptimization_entry_code_ = NULL; |
| 86 #endif |
62 | 87 |
63 DeoptimizingCodeListNode* current = deoptimizing_code_list_; | 88 DeoptimizingCodeListNode* current = deoptimizing_code_list_; |
64 while (current != NULL) { | 89 while (current != NULL) { |
65 DeoptimizingCodeListNode* prev = current; | 90 DeoptimizingCodeListNode* prev = current; |
66 current = current->next(); | 91 current = current->next(); |
67 delete prev; | 92 delete prev; |
68 } | 93 } |
69 deoptimizing_code_list_ = NULL; | 94 deoptimizing_code_list_ = NULL; |
70 } | 95 } |
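The list teardown at the end of the destructor is the usual advance-before-delete idiom; in generic form it looks like the sketch below (a hypothetical node type with a `next()` accessor is assumed):

#include <cstddef>

// Capture the node, step to its successor, then delete the captured node,
// so the loop never reads a node after it has been freed.
template <typename Node>
void DeleteList(Node* head) {
  while (head != NULL) {
    Node* doomed = head;
    head = head->next();
    delete doomed;
  }
}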
71 | 96 |
(...skipping 399 matching lines...) |
471 output_ = NULL; | 496 output_ = NULL; |
472 ASSERT(!HEAP->allow_allocation(true)); | 497 ASSERT(!HEAP->allow_allocation(true)); |
473 } | 498 } |
474 | 499 |
475 | 500 |
476 Address Deoptimizer::GetDeoptimizationEntry(int id, | 501 Address Deoptimizer::GetDeoptimizationEntry(int id, |
477 BailoutType type, | 502 BailoutType type, |
478 GetEntryMode mode) { | 503 GetEntryMode mode) { |
479 ASSERT(id >= 0); | 504 ASSERT(id >= 0); |
480 if (id >= kMaxNumberOfEntries) return NULL; | 505 if (id >= kMaxNumberOfEntries) return NULL; |
| 506 #if defined(V8_TARGET_ARCH_X64) |
| 507 Address base = 0; |
| 508 #else |
481 VirtualMemory* base = NULL; | 509 VirtualMemory* base = NULL; |
| 510 #endif |
482 if (mode == ENSURE_ENTRY_CODE) { | 511 if (mode == ENSURE_ENTRY_CODE) { |
483 EnsureCodeForDeoptimizationEntry(type, id); | 512 EnsureCodeForDeoptimizationEntry(type, id); |
484 } else { | 513 } else { |
485 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); | 514 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); |
486 } | 515 } |
487 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); | 516 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); |
| 517 #if defined(V8_TARGET_ARCH_X64) |
| 518 if (type == EAGER) { |
| 519 base = data->eager_deoptimization_entry_start_ + |
| 520 MemoryAllocator::CodePageAreaStartOffset(); |
| 521 } else { |
| 522 base = data->lazy_deoptimization_entry_start_ + |
| 523 MemoryAllocator::CodePageAreaStartOffset(); |
| 524 } |
| 525 return |
| 526 base + (id * table_entry_size_); |
| 527 #else |
488 if (type == EAGER) { | 528 if (type == EAGER) { |
489 base = data->eager_deoptimization_entry_code_; | 529 base = data->eager_deoptimization_entry_code_; |
490 } else { | 530 } else { |
491 base = data->lazy_deoptimization_entry_code_; | 531 base = data->lazy_deoptimization_entry_code_; |
492 } | 532 } |
493 return | 533 return |
494 static_cast<Address>(base->address()) + (id * table_entry_size_); | 534 static_cast<Address>(base->address()) + (id * table_entry_size_); |
| 535 #endif |
495 } | 536 } |
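Both lookup directions, GetDeoptimizationEntry above and GetDeoptimizationId below, reduce to fixed-stride array addressing over a dense table of equally sized entry stubs. A self-contained sketch with an illustrative stride (V8's actual table_entry_size_ is architecture-specific):

#include <cassert>
#include <cstdint>

const int kTableEntrySize = 10;  // illustrative; per-architecture in V8

// id -> address: entries are contiguous, fixed-size stubs.
uintptr_t EntryAddress(uintptr_t base, int id) {
  return base + static_cast<uintptr_t>(id) * kTableEntrySize;
}

// address -> id: the inverse mapping, as in GetDeoptimizationId, applied
// only after the caller has bounds-checked addr against the table.
int EntryId(uintptr_t base, uintptr_t addr) {
  assert((addr - base) % kTableEntrySize == 0);
  return static_cast<int>((addr - base) / kTableEntrySize);
}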
496 | 537 |
497 | 538 |
498 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { | 539 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { |
| 540 #if defined(V8_TARGET_ARCH_X64) |
| 541 Address base = 0; |
| 542 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); |
| 543 if (type == EAGER) { |
| 544 base = data->eager_deoptimization_entry_start_ + |
| 545 MemoryAllocator::CodePageAreaStartOffset(); |
| 546 } else { |
| 547 base = data->lazy_deoptimization_entry_start_ + |
| 548 MemoryAllocator::CodePageAreaStartOffset(); |
| 549 } |
| 550 if (base == 0 || |
| 551 addr < base || |
| 552 addr >= base + (kMaxNumberOfEntries * table_entry_size_)) { |
| 553 return kNotDeoptimizationEntry; |
| 554 } |
| 555 ASSERT_EQ(0, |
| 556 static_cast<int>(addr - base) % table_entry_size_); |
| 557 return static_cast<int>(addr - base) / table_entry_size_; |
| 558 #else |
499 VirtualMemory* base = NULL; | 559 VirtualMemory* base = NULL; |
500 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); | 560 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); |
501 if (type == EAGER) { | 561 if (type == EAGER) { |
502 base = data->eager_deoptimization_entry_code_; | 562 base = data->eager_deoptimization_entry_code_; |
503 } else { | 563 } else { |
504 base = data->lazy_deoptimization_entry_code_; | 564 base = data->lazy_deoptimization_entry_code_; |
505 } | 565 } |
506 Address base_casted = reinterpret_cast<Address>(base->address()); | 566 Address base_casted = reinterpret_cast<Address>(base->address()); |
507 if (base == NULL || | 567 if (base == NULL || |
508 addr < base->address() || | 568 addr < base->address() || |
509 addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) { | 569 addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) { |
510 return kNotDeoptimizationEntry; | 570 return kNotDeoptimizationEntry; |
511 } | 571 } |
512 ASSERT_EQ(0, | 572 ASSERT_EQ(0, |
513 static_cast<int>(addr - base_casted) % table_entry_size_); | 573 static_cast<int>(addr - base_casted) % table_entry_size_); |
514 return static_cast<int>(addr - base_casted) / table_entry_size_; | 574 return static_cast<int>(addr - base_casted) / table_entry_size_; |
| 575 #endif |
515 } | 576 } |
516 | 577 |
517 | 578 |
518 int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data, | 579 int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data, |
519 BailoutId id, | 580 BailoutId id, |
520 SharedFunctionInfo* shared) { | 581 SharedFunctionInfo* shared) { |
521 // TODO(kasperl): For now, we do a simple linear search for the PC | 582 // TODO(kasperl): For now, we do a simple linear search for the PC |
522 // offset associated with the given node id. This should probably be | 583 // offset associated with the given node id. This should probably be |
523 // changed to a binary search. | 584 // changed to a binary search. |
524 int length = data->DeoptPoints(); | 585 int length = data->DeoptPoints(); |
(...skipping 889 matching lines...) |
1414 entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries), | 1475 entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries), |
1415 Deoptimizer::kMaxNumberOfEntries); | 1476 Deoptimizer::kMaxNumberOfEntries); |
1416 | 1477 |
1417 MacroAssembler masm(Isolate::Current(), NULL, 16 * KB); | 1478 MacroAssembler masm(Isolate::Current(), NULL, 16 * KB); |
1418 masm.set_emit_debug_code(false); | 1479 masm.set_emit_debug_code(false); |
1419 GenerateDeoptimizationEntries(&masm, entry_count, type); | 1480 GenerateDeoptimizationEntries(&masm, entry_count, type); |
1420 CodeDesc desc; | 1481 CodeDesc desc; |
1421 masm.GetCode(&desc); | 1482 masm.GetCode(&desc); |
1422 ASSERT(desc.reloc_size == 0); | 1483 ASSERT(desc.reloc_size == 0); |
1423 | 1484 |
| 1485 size_t table_size = Deoptimizer::GetMaxDeoptTableSize(); |
| 1486 ASSERT(static_cast<int>(table_size) >= desc.instr_size); |
| 1487 #if defined(V8_TARGET_ARCH_X64) |
| 1488 MemoryAllocator* allocator = Isolate::Current()->memory_allocator(); |
| 1489 Address base = type == EAGER |
| 1490 ? data->eager_deoptimization_entry_start_ |
| 1491 : data->lazy_deoptimization_entry_start_; |
| 1492 size_t reserved_size = type == EAGER |
| 1493 ? data->eager_deoptimization_reserved_size_ |
| 1494 : data->lazy_deoptimization_reserved_size_; |
| 1495 MemoryChunk** chunk = type == EAGER |
| 1496 ? &data->eager_deoptimization_entry_code_ |
| 1497 : &data->lazy_deoptimization_entry_code_; |
| 1498 *chunk = allocator->CommitChunkInCodeRange(base, table_size, reserved_size); |
| 1499 if (*chunk == NULL) { |
| 1500 V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table"); |
| 1501 } |
| 1502 memcpy((*chunk)->area_start(), desc.buffer, desc.instr_size); |
| 1503 CPU::FlushICache((*chunk)->area_start(), desc.instr_size); |
| 1504 #else |
1424 VirtualMemory* memory = type == EAGER | 1505 VirtualMemory* memory = type == EAGER |
1425 ? data->eager_deoptimization_entry_code_ | 1506 ? data->eager_deoptimization_entry_code_ |
1426 : data->lazy_deoptimization_entry_code_; | 1507 : data->lazy_deoptimization_entry_code_; |
1427 size_t table_size = Deoptimizer::GetMaxDeoptTableSize(); | |
1428 ASSERT(static_cast<int>(table_size) >= desc.instr_size); | |
1429 memory->Commit(memory->address(), table_size, true); | 1508 memory->Commit(memory->address(), table_size, true); |
1430 memcpy(memory->address(), desc.buffer, desc.instr_size); | 1509 memcpy(memory->address(), desc.buffer, desc.instr_size); |
1431 CPU::FlushICache(memory->address(), desc.instr_size); | 1510 CPU::FlushICache(memory->address(), desc.instr_size); |
| 1511 #endif |
1432 | 1512 |
1433 if (type == EAGER) { | 1513 if (type == EAGER) { |
1434 data->eager_deoptimization_entry_code_entries_ = entry_count; | 1514 data->eager_deoptimization_entry_code_entries_ = entry_count; |
1435 } else { | 1515 } else { |
1436 data->lazy_deoptimization_entry_code_entries_ = entry_count; | 1516 data->lazy_deoptimization_entry_code_entries_ = entry_count; |
1437 } | 1517 } |
1438 } | 1518 } |
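EnsureCodeForDeoptimizationEntry grows the table with a doubling policy clamped to fixed bounds, then publishes the generated stubs by committing memory, copying, and flushing the instruction cache. The growth rule in isolation (the constants here are illustrative, not V8's actual limits):

#include <algorithm>

const int kMinNumberOfEntries = 64;     // illustrative
const int kMaxNumberOfEntries = 16384;  // illustrative

// Double on each growth, but never go below the minimum (so a cold start
// still gets a useful table) and never above the maximum (so the committed
// table always fits the reservation made in the constructor).
int NextEntryCount(int current) {
  return std::min(std::max(current * 2, kMinNumberOfEntries),
                  kMaxNumberOfEntries);
}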
1439 | 1519 |
1440 | 1520 |
1441 Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) { | 1521 Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) { |
(...skipping 580 matching lines...) |
2022 | 2102 |
2023 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 2103 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
2024 v->VisitPointer(BitCast<Object**>(&function_)); | 2104 v->VisitPointer(BitCast<Object**>(&function_)); |
2025 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 2105 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
2026 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 2106 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
2027 } | 2107 } |
2028 | 2108 |
2029 #endif // ENABLE_DEBUGGER_SUPPORT | 2109 #endif // ENABLE_DEBUGGER_SUPPORT |
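DeoptimizedFrameInfo::Iterate above hands each GC-visible slot to an ObjectVisitor; reduced to the two hooks used here, such a visitor has roughly the shape below (simplified signatures, not V8's full interface):

// Counts the slots it is shown; VisitPointer is the single-slot case of
// VisitPointers, mirroring how Iterate calls both.
class CountingVisitor {
 public:
  CountingVisitor() : count_(0) {}
  void VisitPointer(void** p) { VisitPointers(p, p + 1); }
  void VisitPointers(void** start, void** end) {
    count_ += static_cast<int>(end - start);
  }
  int count() const { return count_; }
 private:
  int count_;
};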
2030 | 2110 |
2031 } } // namespace v8::internal | 2111 } } // namespace v8::internal |