| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 54 matching lines...) |
| 65 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); | 65 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); |
| 66 limit_ = start_ + (kStoreBufferSize / kPointerSize); | 66 limit_ = start_ + (kStoreBufferSize / kPointerSize); |
| 67 | 67 |
| 68 old_virtual_memory_ = | 68 old_virtual_memory_ = |
| 69 new VirtualMemory(kOldStoreBufferLength * kPointerSize); | 69 new VirtualMemory(kOldStoreBufferLength * kPointerSize); |
| 70 old_top_ = old_start_ = | 70 old_top_ = old_start_ = |
| 71 reinterpret_cast<Address*>(old_virtual_memory_->address()); | 71 reinterpret_cast<Address*>(old_virtual_memory_->address()); |
| 72 // Don't know the alignment requirements of the OS, but it is certainly not | 72 // Don't know the alignment requirements of the OS, but it is certainly not |
| 73 // less than 0xfff. | 73 // less than 0xfff. |
| 74 ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0); | 74 ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0); |
| 75 int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize); | 75 int initial_length = |
| | 76 static_cast<int>(VirtualMemory::GetPageSize() / kPointerSize); |
| 76 ASSERT(initial_length > 0); | 77 ASSERT(initial_length > 0); |
| 77 ASSERT(initial_length <= kOldStoreBufferLength); | 78 ASSERT(initial_length <= kOldStoreBufferLength); |
| 78 old_limit_ = old_start_ + initial_length; | 79 old_limit_ = old_start_ + initial_length; |
| 79 old_reserved_limit_ = old_start_ + kOldStoreBufferLength; | 80 old_reserved_limit_ = old_start_ + kOldStoreBufferLength; |
| 80 | 81 |
| 81 CHECK(old_virtual_memory_->Commit( | 82 CHECK(old_virtual_memory_->Commit( |
| 82 reinterpret_cast<void*>(old_start_), | 83 reinterpret_cast<void*>(old_start_), |
| 83 (old_limit_ - old_start_) * kPointerSize, | 84 (old_limit_ - old_start_) * kPointerSize, |
| 84 false)); | 85 VirtualMemory::NOT_EXECUTABLE)); |
| 85 | 86 |
| 86 ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); | 87 ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); |
| 87 ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); | 88 ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); |
| 88 Address* vm_limit = reinterpret_cast<Address*>( | 89 Address* vm_limit = reinterpret_cast<Address*>( |
| 89 reinterpret_cast<char*>(virtual_memory_->address()) + | 90 reinterpret_cast<char*>(virtual_memory_->address()) + |
| 90 virtual_memory_->size()); | 91 virtual_memory_->size()); |
| 91 ASSERT(start_ <= vm_limit); | 92 ASSERT(start_ <= vm_limit); |
| 92 ASSERT(limit_ <= vm_limit); | 93 ASSERT(limit_ <= vm_limit); |
| 93 USE(vm_limit); | 94 USE(vm_limit); |
| 94 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); | 95 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); |
| 95 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == | 96 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == |
| 96 0); | 97 0); |
| 97 | 98 |
| 98 CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_), | 99 CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_), |
| 99 kStoreBufferSize, | 100 kStoreBufferSize, |
| 100 false)); // Not executable. | 101 VirtualMemory::NOT_EXECUTABLE)); |
| 101 heap_->public_set_store_buffer_top(start_); | 102 heap_->public_set_store_buffer_top(start_); |
| 102 | 103 |
| 103 hash_set_1_ = new uintptr_t[kHashSetLength]; | 104 hash_set_1_ = new uintptr_t[kHashSetLength]; |
| 104 hash_set_2_ = new uintptr_t[kHashSetLength]; | 105 hash_set_2_ = new uintptr_t[kHashSetLength]; |
| 105 hash_sets_are_empty_ = false; | 106 hash_sets_are_empty_ = false; |
| 106 | 107 |
| 107 ClearFilteringHashSets(); | 108 ClearFilteringHashSets(); |
| 108 } | 109 } |
| 109 | 110 |
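Note on this hunk: the Commit() call sites switch from a bare bool argument to VirtualMemory::NOT_EXECUTABLE, and the page-size query moves from OS::CommitPageSize() to VirtualMemory::GetPageSize(). A minimal sketch of the interface the new call sites appear to assume; NOT_EXECUTABLE and GetPageSize() are taken directly from the diff, while the enum type name Executability and the rest are illustrative only:

    class VirtualMemory {
     public:
      // Assumed enum; only the NOT_EXECUTABLE enumerator is visible in
      // this CL. It replaces the old bool parameter, so call sites read
      // Commit(addr, size, VirtualMemory::NOT_EXECUTABLE) instead of
      // the ambiguous Commit(addr, size, false).
      enum Executability { NOT_EXECUTABLE, EXECUTABLE };

      // Commits |size| bytes starting at |address| within this
      // reservation; returns false on failure (hence the CHECKs above).
      bool Commit(void* address, size_t size, Executability executability);

      // Replaces OS::CommitPageSize() at the store-buffer call sites.
      static size_t GetPageSize();
    };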
| 110 | 111 |
| (...skipping 36 matching lines...) |
| 147 return old_limit_ - old_top_ >= space_needed; | 148 return old_limit_ - old_top_ >= space_needed; |
| 148 } | 149 } |
| 149 | 150 |
| 150 | 151 |
| 151 void StoreBuffer::EnsureSpace(intptr_t space_needed) { | 152 void StoreBuffer::EnsureSpace(intptr_t space_needed) { |
| 152 while (old_limit_ - old_top_ < space_needed && | 153 while (old_limit_ - old_top_ < space_needed && |
| 153 old_limit_ < old_reserved_limit_) { | 154 old_limit_ < old_reserved_limit_) { |
| 154 size_t grow = old_limit_ - old_start_; // Double size. | 155 size_t grow = old_limit_ - old_start_; // Double size. |
| 155 CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_), | 156 CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_), |
| 156 grow * kPointerSize, | 157 grow * kPointerSize, |
| 157 false)); | 158 VirtualMemory::NOT_EXECUTABLE)); |
| 158 old_limit_ += grow; | 159 old_limit_ += grow; |
| 159 } | 160 } |
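A note on the growth policy in this loop: each pass doubles the committed prefix (grow = old_limit_ - old_start_), so the committed size follows 1, 2, 4, ... pages, and filling a reservation of N pages takes O(log N) Commit calls while the initial footprint stays at a single page. A standalone sketch of the same doubling-commit pattern; all names here are illustrative, not from this CL:

    #include <cstddef>
    #include <cstdio>

    // Doubling-commit sketch: count how many commit calls it takes to
    // grow a one-page committed prefix to cover |reserved_pages|.
    int CommitCallsNeeded(size_t reserved_pages) {
      size_t committed = 1;  // mirrors initial_length: one page up front
      int calls = 0;
      while (committed < reserved_pages) {
        committed += committed;  // grow by the current committed size
        ++calls;
      }
      return calls;
    }

    int main() {
      // E.g. a 4096-page reservation is fully committed after 12 calls.
      printf("%d\n", CommitCallsNeeded(4096));
      return 0;
    }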
| 160 | 161 |
| 161 if (SpaceAvailable(space_needed)) return; | 162 if (SpaceAvailable(space_needed)) return; |
| 162 | 163 |
| 163 if (old_buffer_is_filtered_) return; | 164 if (old_buffer_is_filtered_) return; |
| 164 ASSERT(may_move_store_buffer_entries_); | 165 ASSERT(may_move_store_buffer_entries_); |
| 165 Compact(); | 166 Compact(); |
| 166 | 167 |
| 167 old_buffer_is_filtered_ = true; | 168 old_buffer_is_filtered_ = true; |
| (...skipping 556 matching lines...) |
| 724 } | 725 } |
| 725 old_buffer_is_sorted_ = false; | 726 old_buffer_is_sorted_ = false; |
| 726 old_buffer_is_filtered_ = false; | 727 old_buffer_is_filtered_ = false; |
| 727 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); | 728 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); |
| 728 ASSERT(old_top_ <= old_limit_); | 729 ASSERT(old_top_ <= old_limit_); |
| 729 } | 730 } |
| 730 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); | 731 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); |
| 731 } | 732 } |
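The shift on new line 728 rebuilds a full slot address from its compressed form: store-buffer slots are pointer-aligned, so their low kPointerSizeLog2 bits are always zero, and (judging by the elided body of Compact()) int_addr holds the address right-shifted by that amount for the hash-set probes, making the left shift here lossless. A minimal round-trip sketch of that encoding, with illustrative names:

    #include <cstdint>

    const int kPointerSizeLog2 = 3;  // assumes 8-byte pointers

    // Pointer-aligned addresses have kPointerSizeLog2 low zero bits,
    // so the right shift drops no information.
    uintptr_t Compress(void* slot) {
      return reinterpret_cast<uintptr_t>(slot) >> kPointerSizeLog2;
    }

    // Inverse of Compress(); matches the shift in Compact().
    void* Decompress(uintptr_t int_addr) {
      return reinterpret_cast<void*>(int_addr << kPointerSizeLog2);
    }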
| 732 | 733 |
| 733 } } // namespace v8::internal | 734 } } // namespace v8::internal |