| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8-counters.h" | 28 #include "v8-counters.h" |
| 29 #include "write-buffer.h" | 29 #include "write-buffer.h" |
| 30 #include "write-buffer-inl.h" |
| 30 | 31 |
| 31 namespace v8 { | 32 namespace v8 { |
| 32 namespace internal { | 33 namespace internal { |
| 33 | 34 |
// Static state of the write buffer.  The current top pointer is no longer
// kept here: it lives in Heap (see Heap::public_set_write_buffer_top /
// Heap::write_buffer_top) so generated code can access it directly.
Address* WriteBuffer::start_ = NULL;  // First slot of the buffer.
Address* WriteBuffer::limit_ = NULL;  // One past the last usable slot.
// Two hash tables (with different hash functions) used by Compact() to
// drop duplicate addresses; allocated in Setup(), zeroed on each Compact().
uintptr_t* WriteBuffer::hash_map_1_ = NULL;
uintptr_t* WriteBuffer::hash_map_2_ = NULL;
// Reservation backing the buffer; owned by this class (Setup/TearDown).
VirtualMemory* WriteBuffer::virtual_memory_ = NULL;
| 40 | 40 |
// Reserves and commits the write buffer and allocates the two hash maps.
// The buffer start is aligned to kWriteBufferSize * 2 inside a reservation
// of kWriteBufferSize * 3, so an aligned kWriteBufferSize span always fits.
void WriteBuffer::Setup() {
  virtual_memory_ = new VirtualMemory(kWriteBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kWriteBufferSize * 2));
  // limit_ points one past the last Address slot in the committed region.
  limit_ = start_ + (kWriteBufferSize / sizeof(*start_));

  // Sanity: the aligned buffer must lie entirely inside the reservation.
  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  USE(vm_limit);
  // The chosen alignment makes kWriteBufferOverflowBit set in the address of
  // limit_ but clear in every in-buffer slot address, so overflow can be
  // detected by testing that single bit of the top pointer.
  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kWriteBufferOverflowBit) != 0);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kWriteBufferOverflowBit) ==
         0);

  virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                          kWriteBufferSize,
                          false);  // Not executable.
  // The top pointer is owned by Heap so generated code can bump it directly.
  Heap::public_set_write_buffer_top(start_);

  // NOTE(review): the hash maps are not zeroed here — Compact() memsets them
  // before use; presumably nothing reads them before the first Compact().
  hash_map_1_ = new uintptr_t[kHashMapLength];
  hash_map_2_ = new uintptr_t[kHashMapLength];
}
| 69 | 69 |
| 70 | 70 |
| 71 void WriteBuffer::TearDown() { | 71 void WriteBuffer::TearDown() { |
| 72 delete virtual_memory_; | 72 delete virtual_memory_; |
| 73 delete[] hash_map_1_; | 73 delete[] hash_map_1_; |
| 74 delete[] hash_map_2_; | 74 delete[] hash_map_2_; |
| 75 top_ = start_ = limit_ = NULL; | 75 start_ = limit_ = NULL; |
| 76 Heap::public_set_write_buffer_top(start_); |
| 76 } | 77 } |
| 77 | 78 |
| 78 | 79 |
// Compacts the write buffer in place by removing (most) duplicate addresses.
// The current top pointer is read from Heap before compaction and written
// back at the end.  Lossy by design: speed is preferred over removing every
// duplicate.
void WriteBuffer::Compact() {
  // Reset both dedup hash tables; 0 doubles as the "empty slot" marker.
  memset(reinterpret_cast<void*>(hash_map_1_),
         0,
         sizeof(uintptr_t) * kHashMapLength);
  memset(reinterpret_cast<void*>(hash_map_2_),
         0,
         sizeof(uintptr_t) * kHashMapLength);
  // Snapshot the current fill point, then rewind top to start_ so surviving
  // entries can be rewritten over the old ones in a single forward pass.
  Address* top = reinterpret_cast<Address*>(Heap::write_buffer_top());
  Address* stop = top;
  ASSERT(top <= limit_);
  top = start_;
  // Goes through the addresses in the write buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash tables with different hash
  // functions to reduce the number of unnecessary clashes.
  for (Address* current = start_; current < stop; current++) {
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    int hash1 =
        ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
    if (hash_map_1_[hash1] == int_addr) continue;  // Seen recently: drop it.
    int hash2 =
        ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
    hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
    if (hash_map_2_[hash2] == int_addr) continue;  // Seen recently: drop it.
    // Not a known duplicate: remember it in whichever table has room.
    if (hash_map_1_[hash1] == 0) {
      hash_map_1_[hash1] = int_addr;
    } else if (hash_map_2_[hash2] == 0) {
      hash_map_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
      hash_map_1_[hash1] = int_addr;
      hash_map_2_[hash2] = 0;
    }
    // top never overtakes the read cursor, so the rewrite is safe in place.
    ASSERT(top <= current);
    ASSERT(top <= limit_);
    // Shift the (tag-stripped) address back into pointer form and keep it.
    *top++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
  }
  Counters::write_buffer_compactions.Increment();
  if (limit_ - top < top - start_) {
    // Compression did not free up at least half.
    // TODO(gc): Set an interrupt to do a GC on the next back edge.
    // TODO(gc): Allocate the rest of new space to force a GC on the next
    // allocation.
    if (limit_ - top < (top - start_) >> 1) {
      // Compression did not free up at least one quarter.
      // TODO(gc): Set a flag to scan all of memory.
      // Give up on the buffered entries entirely and reset the buffer.
      top = start_;
      Counters::write_buffer_overflows.Increment();
    }
  }
  // Publish the new top back to Heap for generated code to continue from.
  Heap::public_set_write_buffer_top(top);
}
| 132 | 135 |
| 133 } } // namespace v8::internal | 136 } } // namespace v8::internal |
| OLD | NEW |