// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/common/discardable_handle.h"

#include "base/atomicops.h"
#include "gpu/command_buffer/common/buffer.h"

namespace gpu {
namespace {
// The handle value is a lock count stored in shared memory:
//   kHandleDeleted (0)  - deleted by the service; the slot may be reused.
//   kHandleUnlocked (1) - live, with no outstanding locks.
//   kHandleLockedStart+ - live, with (value - 1) outstanding locks.
const int32_t kHandleDeleted = 0;
const int32_t kHandleUnlocked = 1;
const int32_t kHandleLockedStart = 2;

}  // namespace

DiscardableHandleBase::DiscardableHandleBase(scoped_refptr<Buffer> buffer,
                                             uint32_t byte_offset,
                                             int32_t shm_id)
    : buffer_(std::move(buffer)), byte_offset_(byte_offset), shm_id_(shm_id) {}

DiscardableHandleBase::DiscardableHandleBase(
    const DiscardableHandleBase& other) = default;
DiscardableHandleBase::DiscardableHandleBase(DiscardableHandleBase&& other) =
    default;
DiscardableHandleBase::~DiscardableHandleBase() = default;
DiscardableHandleBase& DiscardableHandleBase::operator=(
    const DiscardableHandleBase& other) = default;
DiscardableHandleBase& DiscardableHandleBase::operator=(
    DiscardableHandleBase&& other) = default;

bool DiscardableHandleBase::IsLockedForTesting() {
  return kHandleLockedStart <= base::subtle::NoBarrier_Load(AsAtomic());
}

bool DiscardableHandleBase::IsDeletedForTesting() {
  return kHandleDeleted == base::subtle::NoBarrier_Load(AsAtomic());
}

volatile base::subtle::Atomic32* DiscardableHandleBase::AsAtomic() const {
  return reinterpret_cast<volatile base::subtle::Atomic32*>(
      buffer_->GetDataAddress(byte_offset_, sizeof(base::subtle::Atomic32)));
}

ClientDiscardableHandle::ClientDiscardableHandle(scoped_refptr<Buffer> buffer,
                                                 uint32_t byte_offset,
                                                 int32_t shm_id)
    : DiscardableHandleBase(std::move(buffer), byte_offset, shm_id) {
  // Handle always starts locked.
  base::subtle::NoBarrier_Store(AsAtomic(), kHandleLockedStart);
}

ClientDiscardableHandle::ClientDiscardableHandle(
    const ClientDiscardableHandle& other) = default;
ClientDiscardableHandle::ClientDiscardableHandle(
    ClientDiscardableHandle&& other) = default;
ClientDiscardableHandle& ClientDiscardableHandle::operator=(
    const ClientDiscardableHandle& other) = default;
ClientDiscardableHandle& ClientDiscardableHandle::operator=(
    ClientDiscardableHandle&& other) = default;

bool ClientDiscardableHandle::Lock() {
  while (true) {
    base::subtle::Atomic32 current_value =
        base::subtle::NoBarrier_Load(AsAtomic());
    if (current_value == kHandleDeleted) {
      // Once a handle is deleted, it cannot be modified further.
      return false;
    }
    base::subtle::Atomic32 new_value = current_value + 1;
    // No barrier is needed, as any commands which depend on this operation
    // will flow over the command buffer, which ensures a memory barrier
    // between here and where these commands are executed in the GPU process.
    base::subtle::Atomic32 previous_value =
        base::subtle::NoBarrier_CompareAndSwap(AsAtomic(), current_value,
                                               new_value);
    if (current_value == previous_value) {
      return true;
    }
  }
}

bool ClientDiscardableHandle::CanBeReUsed() const {
  return kHandleDeleted == base::subtle::Acquire_Load(AsAtomic());
}

ServiceDiscardableHandle::ServiceDiscardableHandle(scoped_refptr<Buffer> buffer,
                                                   uint32_t byte_offset,
                                                   int32_t shm_id)
    : DiscardableHandleBase(std::move(buffer), byte_offset, shm_id) {}

ServiceDiscardableHandle::ServiceDiscardableHandle(
    const ServiceDiscardableHandle& other) = default;
ServiceDiscardableHandle::ServiceDiscardableHandle(
    ServiceDiscardableHandle&& other) = default;
ServiceDiscardableHandle& ServiceDiscardableHandle::operator=(
    const ServiceDiscardableHandle& other) = default;
ServiceDiscardableHandle& ServiceDiscardableHandle::operator=(
    ServiceDiscardableHandle&& other) = default;

void ServiceDiscardableHandle::Unlock() {
  // No barrier is needed as all GPU process access happens on a single thread,
  // and communication of dependent data between the GPU process and the
  // renderer process happens across the command buffer and includes barriers.
  base::subtle::NoBarrier_AtomicIncrement(AsAtomic(), -1);
}

bool ServiceDiscardableHandle::Delete() {
  // No barrier is needed as all GPU process access happens on a single thread,
  // and communication of dependent data between the GPU process and the
  // renderer process happens across the command buffer and includes barriers.
  return kHandleUnlocked == base::subtle::NoBarrier_CompareAndSwap(
                                AsAtomic(), kHandleUnlocked, kHandleDeleted);
}

}  // namespace gpu
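
To make the intended cross-process flow concrete, here is a minimal usage sketch, not part of the file above. It assumes a gpu::Buffer (`shared_buffer`) that is already mapped by both the client and the service; `shared_buffer`, `offset`, `shm_id`, and the function name are placeholders. The only handle calls used are the ones defined in this file.

// Illustrative sketch only (not part of discardable_handle.cc). Assumes a
// gpu::Buffer mapped by both the client and the service; "offset" and
// "shm_id" are placeholder values.
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/discardable_handle.h"

void DiscardableHandleLifecycleSketch(scoped_refptr<gpu::Buffer> shared_buffer,
                                      uint32_t offset,
                                      int32_t shm_id) {
  // Client side: construction writes kHandleLockedStart, so the handle starts
  // locked (one outstanding lock).
  gpu::ClientDiscardableHandle client(shared_buffer, offset, shm_id);

  // Service side: wraps the same shared-memory word.
  gpu::ServiceDiscardableHandle service(shared_buffer, offset, shm_id);
  service.Unlock();      // 2 (locked) -> 1 (unlocked).

  client.Lock();         // Returns true; CAS bumps 1 -> 2.
  service.Unlock();      // 2 -> 1.
  service.Delete();      // Returns true; CAS 1 (unlocked) -> 0 (deleted).

  client.Lock();         // Returns false; deleted handles cannot be re-locked.
  client.CanBeReUsed();  // Returns true; the client may recycle this slot.
}

In the real pipeline these calls are split across the renderer and GPU processes and ordered by the command buffer, which is why the atomics above can avoid explicit barriers, as the in-code comments note.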