Chromium Code Reviews
Side by Side Diff: src/core/SkSharedMutex.cpp

Issue 1285973003: Add asserts for shared mutex. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: fix line size. Created 5 years, 4 months ago
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #include "SkSharedMutex.h"

 #include "SkAtomics.h"

 (...skipping 123 matching lines...)

     if (waitingShared > 0) {
         // Run all the shared.
         fSharedQueue.signal(waitingShared);
     } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
         // Run a single exclusive waiter.
         fExclusiveQueue.signal();
     }
 }

+void SkSharedMutex::assertHeld() const {
+    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+    // These are very loose asserts about the mutex being held exclusively.
+    SkASSERTF(0 == (queueCounts & kSharedMask),
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+    SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0,
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}
+
 void SkSharedMutex::acquireShared() {
     int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
     int32_t newQueueCounts;
     do {
         newQueueCounts = oldQueueCounts;
         // If there are waiting exclusives then this shared lock waits else it runs.
         if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
             newQueueCounts += 1 << kWaitingSharedOffset;
         } else {
             newQueueCounts += 1 << kSharedOffset;

 (...skipping 16 matching lines...)

     int32_t oldQueueCounts = fQueueCounts.fetch_add(~0U << kSharedOffset,
                                                     sk_memory_order_release);

     // If shared count is going to zero (because the old count == 1) and there are exclusive
     // waiters, then run a single exclusive waiter.
     if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
         && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
         fExclusiveQueue.signal();
     }
 }
+
+void SkSharedMutex::assertHeldShared() const {
+    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+    // A very loose assert about the mutex being shared.
+    SkASSERTF((queueCounts & kSharedMask) > 0,
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}
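Not part of the patch, but for context: below is a minimal sketch of how the new assertHeld()/assertHeldShared() checks might be used by code that wants to document its locking requirements. The class and method names in the sketch are hypothetical; only SkSharedMutex's acquire()/release(), acquireShared()/releaseShared(), and the asserts added in this patch come from Skia.

#include "SkSharedMutex.h"

// Hypothetical reader/writer-protected cache; the names are illustrative only.
class HypotheticalCache {
public:
    void insert() {
        fMutex.acquire();               // exclusive (writer) lock
        this->insertLocked();
        fMutex.release();
    }

    bool find() const {
        fMutex.acquireShared();         // shared (reader) lock
        bool found = this->findLocked();
        fMutex.releaseShared();
        return found;
    }

private:
    void insertLocked() {
        // Debug-only sanity check that the mutex looks exclusively held.
        fMutex.assertHeld();
        // ... mutate shared state ...
    }

    bool findLocked() const {
        // Debug-only sanity check that the mutex is held in shared mode.
        fMutex.assertHeldShared();
        // ... read shared state ...
        return false;
    }

    mutable SkSharedMutex fMutex;
};

As the comments in the patch say, these are deliberately loose checks: they only read the packed fQueueCounts word with relaxed ordering, so they can verify that the mutex currently looks exclusively held (or held in shared mode), not that the calling thread is the holder. SkASSERTF typically compiles away outside of debug (SK_DEBUG) builds, so the asserts should add no cost to release code.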
