Chromium Code Reviews

Side by Side Diff: cc/raster/one_copy_raster_buffer_provider.cc

Issue 2046033002: Revert of cc: Add mailbox support to ResourceProvider write locks. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@worker_context_stream
Patch Set: Created 4 years, 6 months ago
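
For context on what this revert unwinds: on the old (left-hand) side of the diff below, OrderingBarrier() generates an unverified sync token on the compositor context and CopyOnWorkerThread() waits on that token on the worker context before copying. The snippet below is a minimal sketch of that producer/consumer ordering, not code from this patch; the helper names and the two-interface setup are illustrative, and only the CHROMIUM entry points already visible in the diff are assumed.

#include <stdint.h>

#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/sync_token.h"

// Producer side (compositor context): reserve a fence, order it against other
// contexts, and wrap it in an unverified sync token.
gpu::SyncToken ProduceOrderingToken(gpu::gles2::GLES2Interface* producer_gl) {
  const uint64_t fence_sync = producer_gl->InsertFenceSyncCHROMIUM();
  producer_gl->OrderingBarrierCHROMIUM();
  gpu::SyncToken sync_token;
  producer_gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
  return sync_token;
}

// Consumer side (worker context): block the consumer's command stream, not the
// CPU, until the producer's commands up to the fence are ordered.
void WaitOnOrderingToken(gpu::gles2::GLES2Interface* consumer_gl,
                         const gpu::SyncToken& sync_token) {
  if (sync_token.HasData())
    consumer_gl->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
}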
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_raster_buffer_provider.h" 5 #include "cc/raster/one_copy_raster_buffer_provider.h"
6 6
7 #include <stdint.h> 7 #include <stdint.h>
8 8
9 #include <algorithm> 9 #include <algorithm>
10 #include <limits> 10 #include <limits>
11 #include <utility> 11 #include <utility>
12 12
13 #include "base/macros.h" 13 #include "base/macros.h"
14 #include "cc/base/math_util.h" 14 #include "cc/base/math_util.h"
15 #include "cc/resources/platform_color.h" 15 #include "cc/resources/platform_color.h"
16 #include "cc/resources/resource_format.h" 16 #include "cc/resources/resource_format.h"
17 #include "cc/resources/resource_util.h" 17 #include "cc/resources/resource_util.h"
18 #include "cc/resources/scoped_resource.h" 18 #include "cc/resources/scoped_resource.h"
19 #include "gpu/GLES2/gl2extchromium.h" 19 #include "gpu/GLES2/gl2extchromium.h"
20 #include "gpu/command_buffer/client/gles2_interface.h" 20 #include "gpu/command_buffer/client/gles2_interface.h"
21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
22 #include "ui/gfx/buffer_format_util.h" 22 #include "ui/gfx/buffer_format_util.h"
23 23
24 namespace cc { 24 namespace cc {
25 namespace { 25 namespace {
26 26
27 class RasterBufferImpl : public RasterBuffer {
28 public:
29 RasterBufferImpl(OneCopyRasterBufferProvider* worker_pool,
30 ResourceProvider* resource_provider,
31 ResourceFormat resource_format,
32 const Resource* resource,
33 uint64_t previous_content_id)
34 : worker_pool_(worker_pool),
35 resource_(resource),
36 lock_(resource_provider, resource->id()),
37 previous_content_id_(previous_content_id) {}
38
39 ~RasterBufferImpl() override {}
40
41 // Overridden from RasterBuffer:
42 void Playback(
43 const RasterSource* raster_source,
44 const gfx::Rect& raster_full_rect,
45 const gfx::Rect& raster_dirty_rect,
46 uint64_t new_content_id,
47 float scale,
48 const RasterSource::PlaybackSettings& playback_settings) override {
49 worker_pool_->PlaybackAndCopyOnWorkerThread(
50 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
51 scale, playback_settings, previous_content_id_, new_content_id);
52 }
53
54 private:
55 OneCopyRasterBufferProvider* worker_pool_;
56 const Resource* resource_;
57 ResourceProvider::ScopedWriteLockGL lock_;
58 uint64_t previous_content_id_;
59
60 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
61 };
62
27 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good 63 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
28 // default batch size for copy operations. 64 // default batch size for copy operations.
29 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; 65 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
30 66
31 } // namespace 67 } // namespace
32 68
33 OneCopyRasterBufferProvider::RasterBufferImpl::RasterBufferImpl(
34 OneCopyRasterBufferProvider* client,
35 ResourceProvider* resource_provider,
36 const Resource* resource,
37 uint64_t previous_content_id,
38 bool async_worker_context_enabled)
39 : client_(client),
40 resource_(resource),
41 lock_(resource_provider, resource->id(), async_worker_context_enabled),
42 previous_content_id_(previous_content_id) {
43 client_->pending_raster_buffers_.insert(this);
44 }
45
46 OneCopyRasterBufferProvider::RasterBufferImpl::~RasterBufferImpl() {
47 client_->pending_raster_buffers_.erase(this);
48 }
49
50 void OneCopyRasterBufferProvider::RasterBufferImpl::Playback(
51 const RasterSource* raster_source,
52 const gfx::Rect& raster_full_rect,
53 const gfx::Rect& raster_dirty_rect,
54 uint64_t new_content_id,
55 float scale,
56 const RasterSource::PlaybackSettings& playback_settings) {
57 client_->PlaybackAndCopyOnWorkerThread(
58 resource_, &lock_, sync_token_, raster_source, raster_full_rect,
59 raster_dirty_rect, scale, playback_settings, previous_content_id_,
60 new_content_id);
61 }
62
63 OneCopyRasterBufferProvider::OneCopyRasterBufferProvider( 69 OneCopyRasterBufferProvider::OneCopyRasterBufferProvider(
64 base::SequencedTaskRunner* task_runner, 70 base::SequencedTaskRunner* task_runner,
65 ContextProvider* compositor_context_provider, 71 ContextProvider* compositor_context_provider,
66 ContextProvider* worker_context_provider, 72 ContextProvider* worker_context_provider,
67 ResourceProvider* resource_provider, 73 ResourceProvider* resource_provider,
68 int max_copy_texture_chromium_size, 74 int max_copy_texture_chromium_size,
69 bool use_partial_raster, 75 bool use_partial_raster,
70 int max_staging_buffer_usage_in_bytes, 76 int max_staging_buffer_usage_in_bytes,
71 ResourceFormat preferred_tile_format, 77 ResourceFormat preferred_tile_format)
72 bool async_worker_context_enabled)
73 : compositor_context_provider_(compositor_context_provider), 78 : compositor_context_provider_(compositor_context_provider),
74 worker_context_provider_(worker_context_provider), 79 worker_context_provider_(worker_context_provider),
75 resource_provider_(resource_provider), 80 resource_provider_(resource_provider),
76 max_bytes_per_copy_operation_( 81 max_bytes_per_copy_operation_(
77 max_copy_texture_chromium_size 82 max_copy_texture_chromium_size
78 ? std::min(kMaxBytesPerCopyOperation, 83 ? std::min(kMaxBytesPerCopyOperation,
79 max_copy_texture_chromium_size) 84 max_copy_texture_chromium_size)
80 : kMaxBytesPerCopyOperation), 85 : kMaxBytesPerCopyOperation),
81 use_partial_raster_(use_partial_raster), 86 use_partial_raster_(use_partial_raster),
82 bytes_scheduled_since_last_flush_(0), 87 bytes_scheduled_since_last_flush_(0),
83 preferred_tile_format_(preferred_tile_format), 88 preferred_tile_format_(preferred_tile_format),
84 staging_pool_(task_runner, 89 staging_pool_(task_runner,
85 worker_context_provider, 90 worker_context_provider,
86 resource_provider, 91 resource_provider,
87 use_partial_raster, 92 use_partial_raster,
88 max_staging_buffer_usage_in_bytes), 93 max_staging_buffer_usage_in_bytes) {
89 async_worker_context_enabled_(async_worker_context_enabled) { 94 DCHECK(compositor_context_provider_);
90 DCHECK(compositor_context_provider);
91 DCHECK(worker_context_provider); 95 DCHECK(worker_context_provider);
92 } 96 }
93 97
94 OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() { 98 OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() {}
95 DCHECK(pending_raster_buffers_.empty());
96 }
97 99
98 std::unique_ptr<RasterBuffer> 100 std::unique_ptr<RasterBuffer>
99 OneCopyRasterBufferProvider::AcquireBufferForRaster( 101 OneCopyRasterBufferProvider::AcquireBufferForRaster(
100 const Resource* resource, 102 const Resource* resource,
101 uint64_t resource_content_id, 103 uint64_t resource_content_id,
102 uint64_t previous_content_id) { 104 uint64_t previous_content_id) {
103 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload 105 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
104 // the dirty rect. 106 // the dirty rect.
105 return base::WrapUnique(new RasterBufferImpl(this, resource_provider_, 107 return base::WrapUnique<RasterBuffer>(
106 resource, previous_content_id, 108 new RasterBufferImpl(this, resource_provider_, resource->format(),
107 async_worker_context_enabled_)); 109 resource, previous_content_id));
108 } 110 }
109 111
110 void OneCopyRasterBufferProvider::ReleaseBufferForRaster( 112 void OneCopyRasterBufferProvider::ReleaseBufferForRaster(
111 std::unique_ptr<RasterBuffer> buffer) { 113 std::unique_ptr<RasterBuffer> buffer) {
112 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. 114 // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
113 } 115 }
114 116
115 void OneCopyRasterBufferProvider::OrderingBarrier() { 117 void OneCopyRasterBufferProvider::OrderingBarrier() {
116 TRACE_EVENT0("cc", "OneCopyRasterBufferProvider::OrderingBarrier"); 118 TRACE_EVENT0("cc", "OneCopyRasterBufferProvider::OrderingBarrier");
117 119 compositor_context_provider_->ContextGL()->OrderingBarrierCHROMIUM();
118 gpu::gles2::GLES2Interface* gl = compositor_context_provider_->ContextGL();
119 GLuint64 fence = gl->InsertFenceSyncCHROMIUM();
120 gl->OrderingBarrierCHROMIUM();
121
122 gpu::SyncToken sync_token;
123 gl->GenUnverifiedSyncTokenCHROMIUM(fence, sync_token.GetData());
124
125 for (RasterBufferImpl* buffer : pending_raster_buffers_)
126 buffer->set_sync_token(sync_token);
127 pending_raster_buffers_.clear();
128 } 120 }
129 121
130 ResourceFormat OneCopyRasterBufferProvider::GetResourceFormat( 122 ResourceFormat OneCopyRasterBufferProvider::GetResourceFormat(
131 bool must_support_alpha) const { 123 bool must_support_alpha) const {
132 if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) && 124 if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) &&
133 (DoesResourceFormatSupportAlpha(preferred_tile_format_) || 125 (DoesResourceFormatSupportAlpha(preferred_tile_format_) ||
134 !must_support_alpha)) { 126 !must_support_alpha)) {
135 return preferred_tile_format_; 127 return preferred_tile_format_;
136 } 128 }
137 129
138 return resource_provider_->best_texture_format(); 130 return resource_provider_->best_texture_format();
139 } 131 }
140 132
141 bool OneCopyRasterBufferProvider::GetResourceRequiresSwizzle( 133 bool OneCopyRasterBufferProvider::GetResourceRequiresSwizzle(
142 bool must_support_alpha) const { 134 bool must_support_alpha) const {
143 return ResourceFormatRequiresSwizzle(GetResourceFormat(must_support_alpha)); 135 return ResourceFormatRequiresSwizzle(GetResourceFormat(must_support_alpha));
144 } 136 }
145 137
146 void OneCopyRasterBufferProvider::Shutdown() { 138 void OneCopyRasterBufferProvider::Shutdown() {
147 staging_pool_.Shutdown(); 139 staging_pool_.Shutdown();
148 pending_raster_buffers_.clear();
149 } 140 }
150 141
151 void OneCopyRasterBufferProvider::PlaybackAndCopyOnWorkerThread( 142 void OneCopyRasterBufferProvider::PlaybackAndCopyOnWorkerThread(
152 const Resource* resource, 143 const Resource* resource,
153 ResourceProvider::ScopedWriteLockGL* resource_lock, 144 ResourceProvider::ScopedWriteLockGL* resource_lock,
154 const gpu::SyncToken& sync_token,
155 const RasterSource* raster_source, 145 const RasterSource* raster_source,
156 const gfx::Rect& raster_full_rect, 146 const gfx::Rect& raster_full_rect,
157 const gfx::Rect& raster_dirty_rect, 147 const gfx::Rect& raster_dirty_rect,
158 float scale, 148 float scale,
159 const RasterSource::PlaybackSettings& playback_settings, 149 const RasterSource::PlaybackSettings& playback_settings,
160 uint64_t previous_content_id, 150 uint64_t previous_content_id,
161 uint64_t new_content_id) { 151 uint64_t new_content_id) {
162 std::unique_ptr<StagingBuffer> staging_buffer = 152 std::unique_ptr<StagingBuffer> staging_buffer =
163 staging_pool_.AcquireStagingBuffer(resource, previous_content_id); 153 staging_pool_.AcquireStagingBuffer(resource, previous_content_id);
164 154
165 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source, 155 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source,
166 raster_full_rect, raster_dirty_rect, scale, 156 raster_full_rect, raster_dirty_rect, scale,
167 playback_settings, previous_content_id, 157 playback_settings, previous_content_id,
168 new_content_id); 158 new_content_id);
169 159
170 CopyOnWorkerThread(staging_buffer.get(), resource_lock, sync_token, 160 CopyOnWorkerThread(staging_buffer.get(), resource, resource_lock,
171 raster_source, previous_content_id, new_content_id); 161 raster_source, previous_content_id, new_content_id);
172 162
173 staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer)); 163 staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer));
174 } 164 }
175 165
176 void OneCopyRasterBufferProvider::PlaybackToStagingBuffer( 166 void OneCopyRasterBufferProvider::PlaybackToStagingBuffer(
177 StagingBuffer* staging_buffer, 167 StagingBuffer* staging_buffer,
178 const Resource* resource, 168 const Resource* resource,
179 const RasterSource* raster_source, 169 const RasterSource* raster_source,
180 const gfx::Rect& raster_full_rect, 170 const gfx::Rect& raster_full_rect,
(...skipping 38 matching lines...)
219 buffer->memory(0), resource->format(), staging_buffer->size, 209 buffer->memory(0), resource->format(), staging_buffer->size,
220 buffer->stride(0), raster_source, raster_full_rect, playback_rect, 210 buffer->stride(0), raster_source, raster_full_rect, playback_rect,
221 scale, playback_settings); 211 scale, playback_settings);
222 buffer->Unmap(); 212 buffer->Unmap();
223 staging_buffer->content_id = new_content_id; 213 staging_buffer->content_id = new_content_id;
224 } 214 }
225 } 215 }
226 216
227 void OneCopyRasterBufferProvider::CopyOnWorkerThread( 217 void OneCopyRasterBufferProvider::CopyOnWorkerThread(
228 StagingBuffer* staging_buffer, 218 StagingBuffer* staging_buffer,
219 const Resource* resource,
229 ResourceProvider::ScopedWriteLockGL* resource_lock, 220 ResourceProvider::ScopedWriteLockGL* resource_lock,
230 const gpu::SyncToken& sync_token,
231 const RasterSource* raster_source, 221 const RasterSource* raster_source,
232 uint64_t previous_content_id, 222 uint64_t previous_content_id,
233 uint64_t new_content_id) { 223 uint64_t new_content_id) {
234 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); 224 {
225 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_);
235 226
236 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); 227 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
237 DCHECK(gl); 228 DCHECK(gl);
238 229
239 ResourceProvider::ScopedTextureProvider scoped_texture( 230 unsigned image_target =
240 gl, resource_lock, async_worker_context_enabled_); 231 resource_provider_->GetImageTextureTarget(resource->format());
241 232
242 // Synchronize with compositor. 233 // Create and bind staging texture.
243 DCHECK(sync_token.HasData()); 234 if (!staging_buffer->texture_id) {
244 gl->WaitSyncTokenCHROMIUM(sync_token.GetConstData()); 235 gl->GenTextures(1, &staging_buffer->texture_id);
236 gl->BindTexture(image_target, staging_buffer->texture_id);
237 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
238 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
239 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
240 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
241 } else {
242 gl->BindTexture(image_target, staging_buffer->texture_id);
243 }
245 244
246 unsigned resource_texture_id = scoped_texture.texture_id(); 245 // Create and bind image.
247 unsigned image_target = 246 if (!staging_buffer->image_id) {
248 resource_provider_->GetImageTextureTarget(resource_lock->format()); 247 if (staging_buffer->gpu_memory_buffer) {
249 248 staging_buffer->image_id = gl->CreateImageCHROMIUM(
250 // Create and bind staging texture. 249 staging_buffer->gpu_memory_buffer->AsClientBuffer(),
251 if (!staging_buffer->texture_id) { 250 staging_buffer->size.width(), staging_buffer->size.height(),
252 gl->GenTextures(1, &staging_buffer->texture_id); 251 GLInternalFormat(resource->format()));
253 gl->BindTexture(image_target, staging_buffer->texture_id); 252 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
254 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); 253 }
255 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); 254 } else {
256 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); 255 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
257 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
258 } else {
259 gl->BindTexture(image_target, staging_buffer->texture_id);
260 }
261
262 // Create and bind image.
263 if (!staging_buffer->image_id) {
264 if (staging_buffer->gpu_memory_buffer) {
265 staging_buffer->image_id = gl->CreateImageCHROMIUM(
266 staging_buffer->gpu_memory_buffer->AsClientBuffer(),
267 staging_buffer->size.width(), staging_buffer->size.height(),
268 GLInternalFormat(resource_lock->format()));
269 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); 256 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
270 } 257 }
271 } else {
272 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
273 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
274 }
275 258
276 // Unbind staging texture. 259 // Unbind staging texture.
277 gl->BindTexture(image_target, 0); 260 gl->BindTexture(image_target, 0);
278 261
279 if (resource_provider_->use_sync_query()) { 262 if (resource_provider_->use_sync_query()) {
280 if (!staging_buffer->query_id) 263 if (!staging_buffer->query_id)
281 gl->GenQueriesEXT(1, &staging_buffer->query_id); 264 gl->GenQueriesEXT(1, &staging_buffer->query_id);
282 265
283 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) 266 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
284 // TODO(reveman): This avoids a performance problem on ARM ChromeOS 267 // TODO(reveman): This avoids a performance problem on ARM ChromeOS
285 // devices. crbug.com/580166 268 // devices. crbug.com/580166
286 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); 269 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
287 #else 270 #else
288 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, staging_buffer->query_id); 271 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
272 staging_buffer->query_id);
289 #endif 273 #endif
290 } 274 }
291 275
292 // Since compressed textures cannot be pre-allocated we might have an 276 // Since compressed textures cannot be pre-allocated we might have an
293 // unallocated resource in which case we need to perform a full size copy. 277 // unallocated resource in which case we need to perform a full size copy.
294 if (IsResourceFormatCompressed(resource_lock->format())) { 278 if (IsResourceFormatCompressed(resource->format())) {
295 gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id, 279 gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id,
296 resource_texture_id); 280 resource_lock->texture_id());
297 } else { 281 } else {
298 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>( 282 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>(
299 resource_lock->size().width(), resource_lock->format()); 283 resource->size().width(), resource->format());
300 int chunk_size_in_rows = 284 int chunk_size_in_rows =
301 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); 285 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
302 // Align chunk size to 4. Required to support compressed texture formats. 286 // Align chunk size to 4. Required to support compressed texture formats.
303 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); 287 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
304 int y = 0; 288 int y = 0;
305 int height = resource_lock->size().height(); 289 int height = resource->size().height();
306 while (y < height) { 290 while (y < height) {
307 // Copy at most |chunk_size_in_rows|. 291 // Copy at most |chunk_size_in_rows|.
308 int rows_to_copy = std::min(chunk_size_in_rows, height - y); 292 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
309 DCHECK_GT(rows_to_copy, 0); 293 DCHECK_GT(rows_to_copy, 0);
310 294
311 gl->CopySubTextureCHROMIUM( 295 gl->CopySubTextureCHROMIUM(
312 staging_buffer->texture_id, resource_texture_id, 0, y, 0, y, 296 staging_buffer->texture_id, resource_lock->texture_id(), 0, y, 0, y,
313 resource_lock->size().width(), rows_to_copy, false, false, false); 297 resource->size().width(), rows_to_copy, false, false, false);
314 y += rows_to_copy; 298 y += rows_to_copy;
315 299
316 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory 300 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
317 // used for this copy operation. 301 // used for this copy operation.
318 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; 302 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
319 303
320 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { 304 if (bytes_scheduled_since_last_flush_ >=
321 gl->ShallowFlushCHROMIUM(); 305 max_bytes_per_copy_operation_) {
322 bytes_scheduled_since_last_flush_ = 0; 306 gl->ShallowFlushCHROMIUM();
307 bytes_scheduled_since_last_flush_ = 0;
308 }
323 } 309 }
324 } 310 }
311
312 if (resource_provider_->use_sync_query()) {
313 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
314 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
315 #else
316 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
317 #endif
318 }
319
320 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();
321
322 // Barrier to sync worker context output to cc context.
323 gl->OrderingBarrierCHROMIUM();
324
325 // Generate sync token after the barrier for cross context synchronization.
326 gpu::SyncToken sync_token;
327 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
328 resource_lock->UpdateResourceSyncToken(sync_token);
325 } 329 }
326
327 if (resource_provider_->use_sync_query()) {
328 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
329 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
330 #else
331 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
332 #endif
333 }
334
335 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();
336
337 // Barrier to sync worker context output to cc context.
338 gl->OrderingBarrierCHROMIUM();
339
340 // Generate sync token after the barrier for cross context synchronization.
341 gpu::SyncToken resource_sync_token;
342 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, resource_sync_token.GetData());
343 resource_lock->set_sync_token(resource_sync_token);
344 } 330 }
345 331
346 } // namespace cc 332 } // namespace cc
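
Aside on code kept by both sides of this change: CopyOnWorkerThread() splits the staging-to-resource copy into row chunks so no single copy operation exceeds max_bytes_per_copy_operation_, flushing between chunks. Below is a minimal standalone sketch of that loop; the function and parameter names are illustrative, a plain round-up stands in for MathUtil::UncheckedRoundUp, and the compressed-format and sync-query paths the real code handles are omitted.

#include <algorithm>

#include "gpu/command_buffer/client/gles2_interface.h"

// Copy |height_in_rows| rows from |src_texture_id| to |dst_texture_id| in
// chunks of at most |max_bytes_per_copy| bytes, flushing between chunks so a
// single large copy cannot monopolize the GPU command stream.
void CopyInRowChunks(gpu::gles2::GLES2Interface* gl,
                     unsigned src_texture_id,
                     unsigned dst_texture_id,
                     int width_in_pixels,
                     int height_in_rows,
                     int bytes_per_row,
                     int max_bytes_per_copy) {
  int chunk_size_in_rows = std::max(1, max_bytes_per_copy / bytes_per_row);
  // Align the chunk size to 4 rows so the same chunking also works for
  // compressed texture formats.
  chunk_size_in_rows = ((chunk_size_in_rows + 3) / 4) * 4;
  int bytes_scheduled_since_last_flush = 0;
  int y = 0;
  while (y < height_in_rows) {
    const int rows_to_copy = std::min(chunk_size_in_rows, height_in_rows - y);
    gl->CopySubTextureCHROMIUM(src_texture_id, dst_texture_id, 0, y, 0, y,
                               width_in_pixels, rows_to_copy, false, false,
                               false);
    y += rows_to_copy;
    bytes_scheduled_since_last_flush += rows_to_copy * bytes_per_row;
    if (bytes_scheduled_since_last_flush >= max_bytes_per_copy) {
      gl->ShallowFlushCHROMIUM();
      bytes_scheduled_since_last_flush = 0;
    }
  }
}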