Index: skia/ext/SkThread_chrome.cc
diff --git a/skia/ext/SkThread_chrome.cc b/skia/ext/SkThread_chrome.cc
index 98375fa66ce54e6d52753b6d452bb2d92a473477..948cf1fc21b16a0e9a8330bd1aba563cb288651e 100644
--- a/skia/ext/SkThread_chrome.cc
+++ b/skia/ext/SkThread_chrome.cc
@@ -8,8 +8,8 @@
 
 #include "base/atomicops.h"
 #include "base/basictypes.h"
-#include "base/lock.h"
 #include "base/logging.h"
+#include "base/synchronization/lock.h"
 
 int32_t sk_atomic_inc(int32_t* addr) {
   // sk_atomic_inc is expected to return the old value, Barrier_AtomicIncrement
@@ -24,22 +24,22 @@ int32_t sk_atomic_dec(int32_t* addr) {
 }
 
 SkMutex::SkMutex(bool isGlobal) : fIsGlobal(isGlobal) {
-  COMPILE_ASSERT(sizeof(Lock) <= sizeof(fStorage), Lock_is_too_big_for_SkMutex);
-  Lock* lock = reinterpret_cast<Lock*>(fStorage);
-  new(lock) Lock();
+  COMPILE_ASSERT(sizeof(base::Lock) <= sizeof(fStorage), Lock_is_too_big_for_SkMutex);
+  base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
+  new(lock) base::Lock();
 }
 
 SkMutex::~SkMutex() {
-  Lock* lock = reinterpret_cast<Lock*>(fStorage);
+  base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
   lock->~Lock();
 }
 
 void SkMutex::acquire() {
-  Lock* lock = reinterpret_cast<Lock*>(fStorage);
+  base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
   lock->Acquire();
 }
 
 void SkMutex::release() {
-  Lock* lock = reinterpret_cast<Lock*>(fStorage);
+  base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage);
   lock->Release();
 }
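
For context, the code being patched embeds the Chromium lock inside SkMutex's opaque fStorage byte buffer: the constructor places a base::Lock there with placement new, and the destructor tears it down explicitly. The sketch below is a minimal standalone illustration of that same pattern, not the real Skia/Chromium code; StorageMutex is a hypothetical name and std::mutex stands in for base::Lock so the example compiles on its own.

// Minimal sketch of the placement-new-into-opaque-storage pattern used by
// SkMutex above; StorageMutex is a hypothetical name and std::mutex stands in
// for base::Lock so the example is self-contained.
#include <mutex>
#include <new>

using Mutex = std::mutex;

class StorageMutex {
 public:
  StorageMutex() {
    // Construct the lock in place inside the raw storage buffer, mirroring
    // the COMPILE_ASSERT plus placement new in SkMutex's constructor.
    static_assert(sizeof(Mutex) <= sizeof(fStorage),
                  "lock is too big for the storage buffer");
    new (fStorage) Mutex();
  }

  ~StorageMutex() {
    // Placement new bypasses delete, so run the destructor explicitly,
    // just as SkMutex::~SkMutex() calls lock->~Lock().
    lock()->~Mutex();
  }

  void acquire() { lock()->lock(); }
  void release() { lock()->unlock(); }

 private:
  Mutex* lock() { return reinterpret_cast<Mutex*>(fStorage); }

  // Opaque buffer sized and aligned for the embedded lock, analogous to
  // SkMutex's fStorage member.
  alignas(Mutex) unsigned char fStorage[sizeof(Mutex)];
};

int main() {
  StorageMutex m;
  m.acquire();
  m.release();
  return 0;
}

The buffer-plus-placement-new arrangement keeps base/synchronization/lock.h out of Skia's public SkThread_platform.h header; only this .cc file needs the Chromium type, which is why the patch touches just the include and the qualified Lock references.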