Chromium Code Reviews

Index: gpu/command_buffer/common/discardable_handle.cc
diff --git a/gpu/command_buffer/common/discardable_handle.cc b/gpu/command_buffer/common/discardable_handle.cc
new file mode 100644
index 0000000000000000000000000000000000000000..1715d4fc33afba0e25b4dcf6352764e588310e1b
--- /dev/null
+++ b/gpu/command_buffer/common/discardable_handle.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/discardable_handle.h"
+#include "base/atomicops.h"
+
+namespace gpu {
+namespace {
+const uint32_t kHandleDeleted = 0;
+const uint32_t kHandleUnlocked = 1;
+const uint32_t kHandleLockedStart = 2;
+
+}  // namespace
+
+void DiscardableHandle::InitializeWithNewData(scoped_refptr<Buffer> buffer,
+                                              uint32_t byte_offset,
+                                              int32_t shm_id) {
+  buffer_ = std::move(buffer);
+  byte_offset_ = byte_offset;
+  shm_id_ = shm_id;
+
+  // Handle always starts locked.
+  base::subtle::NoBarrier_Store(AsAtomic(), kHandleLockedStart);
+}
+
+void DiscardableHandle::InitializeWithExistingData(scoped_refptr<Buffer> buffer,
+                                                   uint32_t byte_offset,
+                                                   int32_t shm_id) {
+  buffer_ = std::move(buffer);
+  byte_offset_ = byte_offset;
+  shm_id_ = shm_id;
+}
+
+void DiscardableHandle::Unlock() {
+  DCHECK(base::subtle::NoBarrier_Load(AsAtomic()) >= kHandleLockedStart);
 
piman
2017/03/20 23:51:44
Will this be called by the service? If so, a malic
 
ericrk
2017/03/27 22:58:13
True, I wasn't so concerned with the DCHECK, as it
 
piman
2017/03/28 00:43:00
Generally, I prefer avoiding service-side DCHECKs,
 
ericrk
2017/03/28 01:12:10
Fair enough, I'll remove the DCHECK. It was more t
 
ericrk
2017/03/28 01:19:44
hmm, plus, a malicious client could already just d
 
piman
2017/03/28 21:47:25
Right, if the worse that could happen is that the
 
+  // Barrier prevents gl operations from being re-ordered across Unlock.
+  base::subtle::Barrier_AtomicIncrement(AsAtomic(), -1);
 
piman
2017/03/20 23:51:44
Here and other places where we use barriers (and/o
 
ericrk
2017/03/27 22:58:13
I may have been over-thinking things. I was sort o
 
+}
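
For reference only, not part of the patch under review: the thread above asks how the barrier and acquire choices should be documented. Below is a minimal std::atomic sketch of the same protocol (0 = deleted, 1 = unlocked, >= 2 = locked) showing the intended pairing. The class name is invented, and Unlock() here uses release ordering where the patch uses a full barrier, so this illustrates the intent rather than being a drop-in equivalent.

#include <atomic>
#include <cstdint>

// Illustrative only (not Chromium code): models the 0 = deleted,
// 1 = unlocked, >= 2 = locked protocol with std::atomic.
class ToyDiscardableState {
 public:
  bool Lock() {
    uint32_t current = state_.load(std::memory_order_relaxed);
    while (current != 0) {  // 0 == deleted; a deleted handle cannot be locked.
      // Acquire: work issued after a successful Lock() cannot be reordered
      // before it.
      if (state_.compare_exchange_weak(current, current + 1,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
        return true;
      }
    }
    return false;
  }

  void Unlock() {
    // Release here (the patch uses a full barrier): work done while locked
    // cannot be reordered past the unlock.
    state_.fetch_sub(1, std::memory_order_release);
  }

 private:
  std::atomic<uint32_t> state_{2};  // Starts locked, like InitializeWithNewData.
};

The point of the pairing is that work done between a successful Lock() and the matching Unlock() cannot appear, to another thread that later observes the unlocked state, to have happened outside that window.
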
+
+bool DiscardableHandle::Delete() {
+  // Acquire semantics prevent GL operations after Delete from being re-ordered
+  // before.
+  return kHandleUnlocked == base::subtle::Acquire_CompareAndSwap(
+                                AsAtomic(), kHandleUnlocked, kHandleDeleted);
+}
+
+bool DiscardableHandle::Lock() {
+  while (true) {
 
piman
2017/03/20 23:51:44
Will this be called on the service side? If so, ca
 
ericrk
2017/03/27 22:58:13
This should be called client-side only. I'll updat
 
+    base::subtle::Atomic32 current_value =
+        base::subtle::NoBarrier_Load(AsAtomic());
+    if (current_value == kHandleDeleted) {
+      // Once a handle is deleted, it cannot be modified further.
+      return false;
+    }
+    base::subtle::Atomic32 new_value = current_value + 1;
+    // Acquire semantics prevent gl operations after Lock from being re-ordered
+    // before.
+    base::subtle::Atomic32 previous_value =
+        base::subtle::Acquire_CompareAndSwap(AsAtomic(), current_value,
+                                             new_value);
+    if (current_value == previous_value) {
+      return true;
+    }
+  }
+}
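
As a follow-up to the thread above (Lock() is intended to be client-side only), a hypothetical usage sketch, not part of the patch: it assumes Lock() and Unlock() are public on DiscardableHandle as declared in the header, and the helper names RecreateAndUpload and Draw are placeholders rather than real APIs.

#include "gpu/command_buffer/common/discardable_handle.h"

// Placeholder helpers for illustration only; not real APIs.
void RecreateAndUpload(gpu::DiscardableHandle* handle);  // new data; handle starts locked
void Draw();                                             // GL work that reads the locked data

void DrawDiscardableTexture(gpu::DiscardableHandle* handle) {
  if (!handle->Lock()) {
    // Lock() fails once the service has deleted the backing data, so the
    // client re-initializes the handle (which starts locked, per
    // InitializeWithNewData) and re-uploads the contents before drawing.
    RecreateAndUpload(handle);
  }
  Draw();
  handle->Unlock();
}
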
+
+bool DiscardableHandle::IsDeleted() const {
+  return kHandleDeleted == base::subtle::Acquire_Load(AsAtomic());
 
piman
2017/03/20 23:51:44
Which side is planning to call this?
From the clie
 
ericrk
2017/03/27 22:58:13
We only call this from the client side, to check i
 
+}
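
Similarly, for the IsDeleted() thread above: a hypothetical client-side sketch of the kind of check being discussed, where the client notices that the service dropped the backing memory and marks the cache entry for re-upload. CacheEntry and the surrounding bookkeeping are invented for illustration, and this assumes DiscardableHandle can be stored in such a struct.

#include <vector>

#include "gpu/command_buffer/common/discardable_handle.h"

// Hypothetical client-side cache entry; only the handle matters here.
struct CacheEntry {
  gpu::DiscardableHandle handle;
  bool needs_reupload = false;
  // ... image data, keys, etc.
};

// IsDeleted() reports that the service already deleted the backing memory,
// so the entry must be re-created and re-uploaded before its next use.
void MarkDeletedEntries(std::vector<CacheEntry>* entries) {
  for (CacheEntry& entry : *entries) {
    if (entry.handle.IsDeleted())
      entry.needs_reupload = true;
  }
}
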
+
+bool DiscardableHandle::IsLockedForTesting() {
+  return kHandleLockedStart <= base::subtle::Acquire_Load(AsAtomic());
+}
+
+volatile base::subtle::Atomic32* DiscardableHandle::AsAtomic() const {
+  return reinterpret_cast<volatile base::subtle::Atomic32*>(
+      buffer_->GetDataAddress(byte_offset_, sizeof(base::subtle::Atomic32)));
+}
+
+}  // namespace gpu