Chromium Code Reviews

Unified Diff: cc/raster/one_copy_raster_buffer_provider.cc

Issue 1951193002: cc: Add mailbox support to ResourceProvider write locks. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@worker_context_stream
Patch Set: rebase (created 4 years, 6 months ago)
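In short, the patch makes OrderingBarrier() publish a sync token from the compositor context to every RasterBuffer handed out since the previous barrier, and makes the worker-context copy wait on that token before writing into the resource texture. A condensed sketch of that flow, pieced together from the new code below (cc_gl and worker_gl are names used only in this sketch, not in the patch):

    // Compositor thread, OneCopyRasterBufferProvider::OrderingBarrier():
    gpu::gles2::GLES2Interface* cc_gl = compositor_context_provider_->ContextGL();
    GLuint64 fence = cc_gl->InsertFenceSyncCHROMIUM();
    cc_gl->OrderingBarrierCHROMIUM();
    gpu::SyncToken sync_token;
    cc_gl->GenUnverifiedSyncTokenCHROMIUM(fence, sync_token.GetData());
    for (RasterBuffer* buffer : pending_raster_buffers_)
      buffer->SetSyncToken(sync_token);  // RasterBufferImpl stores it for Playback().

    // Worker thread, CopyOnWorkerThread(): block the worker context on that token
    // before copying the staging buffer into the resource's texture.
    gpu::gles2::GLES2Interface* worker_gl = scoped_context.ContextGL();
    worker_gl->WaitSyncTokenCHROMIUM(sync_token.GetConstData());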
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "cc/raster/one_copy_raster_buffer_provider.h"
 
 #include <stdint.h>
 
 #include <algorithm>
 #include <limits>
 #include <utility>
 
 #include "base/macros.h"
 #include "cc/base/math_util.h"
 #include "cc/resources/platform_color.h"
 #include "cc/resources/resource_format.h"
 #include "cc/resources/resource_util.h"
 #include "cc/resources/scoped_resource.h"
 #include "gpu/GLES2/gl2extchromium.h"
 #include "gpu/command_buffer/client/gles2_interface.h"
 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
 #include "ui/gfx/buffer_format_util.h"
 
 namespace cc {
 namespace {
 
 class RasterBufferImpl : public RasterBuffer {
  public:
-  RasterBufferImpl(OneCopyRasterBufferProvider* worker_pool,
+  RasterBufferImpl(OneCopyRasterBufferProvider* client,
                    ResourceProvider* resource_provider,
-                   ResourceFormat resource_format,
                    const Resource* resource,
-                   uint64_t previous_content_id)
-      : worker_pool_(worker_pool),
+                   uint64_t previous_content_id,
+                   bool async_worker_context_enabled)
+      : client_(client),
         resource_(resource),
-        lock_(resource_provider, resource->id()),
+        lock_(resource_provider, resource->id(), async_worker_context_enabled),
         previous_content_id_(previous_content_id) {}
 
   ~RasterBufferImpl() override {}
 
   // Overridden from RasterBuffer:
   void Playback(
       const RasterSource* raster_source,
       const gfx::Rect& raster_full_rect,
       const gfx::Rect& raster_dirty_rect,
       uint64_t new_content_id,
       float scale,
       const RasterSource::PlaybackSettings& playback_settings) override {
-    worker_pool_->PlaybackAndCopyOnWorkerThread(
-        resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
-        scale, playback_settings, previous_content_id_, new_content_id);
+    client_->PlaybackAndCopyOnWorkerThread(
+        resource_, &lock_, sync_token_, raster_source, raster_full_rect,
+        raster_dirty_rect, scale, playback_settings, previous_content_id_,
+        new_content_id);
+  }
+
+  void SetSyncToken(const gpu::SyncToken& sync_token) override {
+    sync_token_ = sync_token;
   }
 
  private:
-  OneCopyRasterBufferProvider* worker_pool_;
+  OneCopyRasterBufferProvider* client_;
   const Resource* resource_;
   ResourceProvider::ScopedWriteLockGL lock_;
   uint64_t previous_content_id_;
 
+  gpu::SyncToken sync_token_;
+
   DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
 };
 
 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
 // default batch size for copy operations.
 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
 
 }  // namespace
 
 OneCopyRasterBufferProvider::OneCopyRasterBufferProvider(
     base::SequencedTaskRunner* task_runner,
     ContextProvider* compositor_context_provider,
     ContextProvider* worker_context_provider,
     ResourceProvider* resource_provider,
     int max_copy_texture_chromium_size,
     bool use_partial_raster,
     int max_staging_buffer_usage_in_bytes,
-    ResourceFormat preferred_tile_format)
+    ResourceFormat preferred_tile_format,
+    bool async_worker_context_enabled)
     : compositor_context_provider_(compositor_context_provider),
       worker_context_provider_(worker_context_provider),
       resource_provider_(resource_provider),
       max_bytes_per_copy_operation_(
           max_copy_texture_chromium_size
               ? std::min(kMaxBytesPerCopyOperation,
                          max_copy_texture_chromium_size)
               : kMaxBytesPerCopyOperation),
       use_partial_raster_(use_partial_raster),
       bytes_scheduled_since_last_flush_(0),
       preferred_tile_format_(preferred_tile_format),
       staging_pool_(task_runner,
                     worker_context_provider,
                     resource_provider,
                     use_partial_raster,
-                    max_staging_buffer_usage_in_bytes) {
-  DCHECK(compositor_context_provider_);
+                    max_staging_buffer_usage_in_bytes),
+      async_worker_context_enabled_(async_worker_context_enabled) {
+  DCHECK(compositor_context_provider);
   DCHECK(worker_context_provider);
 }
 
-OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() {}
+OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() {
+  DCHECK(pending_raster_buffers_.empty());
+}
 
 std::unique_ptr<RasterBuffer>
 OneCopyRasterBufferProvider::AcquireBufferForRaster(
     const Resource* resource,
     uint64_t resource_content_id,
     uint64_t previous_content_id) {
   // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
   // the dirty rect.
-  return base::WrapUnique<RasterBuffer>(
-      new RasterBufferImpl(this, resource_provider_, resource->format(),
-                           resource, previous_content_id));
+  std::unique_ptr<RasterBuffer> buffer(
+      new RasterBufferImpl(this, resource_provider_, resource,
+                           previous_content_id, async_worker_context_enabled_));
+  pending_raster_buffers_.insert(buffer.get());
+  return buffer;
 }
 
 void OneCopyRasterBufferProvider::ReleaseBufferForRaster(
     std::unique_ptr<RasterBuffer> buffer) {
-  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
+  pending_raster_buffers_.erase(buffer.get());
 }
 
 void OneCopyRasterBufferProvider::OrderingBarrier() {
   TRACE_EVENT0("cc", "OneCopyRasterBufferProvider::OrderingBarrier");
-  compositor_context_provider_->ContextGL()->OrderingBarrierCHROMIUM();
+
+  gpu::gles2::GLES2Interface* gl = compositor_context_provider_->ContextGL();
+  GLuint64 fence = gl->InsertFenceSyncCHROMIUM();
+  gl->OrderingBarrierCHROMIUM();
+
+  gpu::SyncToken sync_token;
+  gl->GenUnverifiedSyncTokenCHROMIUM(fence, sync_token.GetData());
+
+  for (RasterBuffer* buffer : pending_raster_buffers_)
+    buffer->SetSyncToken(sync_token);
+  pending_raster_buffers_.clear();
 }
 
 ResourceFormat OneCopyRasterBufferProvider::GetResourceFormat(
     bool must_support_alpha) const {
   if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) &&
       (DoesResourceFormatSupportAlpha(preferred_tile_format_) ||
        !must_support_alpha)) {
     return preferred_tile_format_;
   }
 
   return resource_provider_->best_texture_format();
 }
 
 bool OneCopyRasterBufferProvider::GetResourceRequiresSwizzle(
     bool must_support_alpha) const {
   return ResourceFormatRequiresSwizzle(GetResourceFormat(must_support_alpha));
 }
 
 void OneCopyRasterBufferProvider::Shutdown() {
   staging_pool_.Shutdown();
+  pending_raster_buffers_.clear();
 }
 
 void OneCopyRasterBufferProvider::PlaybackAndCopyOnWorkerThread(
     const Resource* resource,
     ResourceProvider::ScopedWriteLockGL* resource_lock,
+    const gpu::SyncToken& sync_token,
     const RasterSource* raster_source,
     const gfx::Rect& raster_full_rect,
     const gfx::Rect& raster_dirty_rect,
     float scale,
     const RasterSource::PlaybackSettings& playback_settings,
     uint64_t previous_content_id,
     uint64_t new_content_id) {
   std::unique_ptr<StagingBuffer> staging_buffer =
       staging_pool_.AcquireStagingBuffer(resource, previous_content_id);
 
   PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source,
                           raster_full_rect, raster_dirty_rect, scale,
                           playback_settings, previous_content_id,
                           new_content_id);
 
-  CopyOnWorkerThread(staging_buffer.get(), resource, resource_lock,
+  CopyOnWorkerThread(staging_buffer.get(), resource_lock, sync_token,
                      raster_source, previous_content_id, new_content_id);
 
   staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer));
 }
 
 void OneCopyRasterBufferProvider::PlaybackToStagingBuffer(
     StagingBuffer* staging_buffer,
     const Resource* resource,
     const RasterSource* raster_source,
     const gfx::Rect& raster_full_rect,
(...skipping 38 matching lines...)
         buffer->memory(0), resource->format(), staging_buffer->size,
         buffer->stride(0), raster_source, raster_full_rect, playback_rect,
         scale, playback_settings);
     buffer->Unmap();
     staging_buffer->content_id = new_content_id;
   }
 }
 
 void OneCopyRasterBufferProvider::CopyOnWorkerThread(
     StagingBuffer* staging_buffer,
-    const Resource* resource,
     ResourceProvider::ScopedWriteLockGL* resource_lock,
+    const gpu::SyncToken& sync_token,
     const RasterSource* raster_source,
     uint64_t previous_content_id,
     uint64_t new_content_id) {
-  {
   ContextProvider::ScopedContextLock scoped_context(worker_context_provider_);
 
   gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
   DCHECK(gl);
 
-  unsigned image_target =
-      resource_provider_->GetImageTextureTarget(resource->format());
+  ResourceProvider::ScopedTextureProvider scoped_texture(
+      gl, resource_lock, async_worker_context_enabled_);
+
+  // Synchronize with compositor.
+  DCHECK(sync_token.HasData());
+  gl->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
+
+  unsigned resource_texture_id = scoped_texture.texture_id();
+  unsigned image_target =
+      resource_provider_->GetImageTextureTarget(resource_lock->format());
 
   // Create and bind staging texture.
   if (!staging_buffer->texture_id) {
     gl->GenTextures(1, &staging_buffer->texture_id);
     gl->BindTexture(image_target, staging_buffer->texture_id);
     gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
     gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
     gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
     gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
   } else {
     gl->BindTexture(image_target, staging_buffer->texture_id);
   }
 
   // Create and bind image.
   if (!staging_buffer->image_id) {
     if (staging_buffer->gpu_memory_buffer) {
       staging_buffer->image_id = gl->CreateImageCHROMIUM(
           staging_buffer->gpu_memory_buffer->AsClientBuffer(),
           staging_buffer->size.width(), staging_buffer->size.height(),
-          GLInternalFormat(resource->format()));
+          GLInternalFormat(resource_lock->format()));
       gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
     }
   } else {
     gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
     gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
   }
 
   // Unbind staging texture.
   gl->BindTexture(image_target, 0);
 
   if (resource_provider_->use_sync_query()) {
     if (!staging_buffer->query_id)
       gl->GenQueriesEXT(1, &staging_buffer->query_id);
 
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
     // TODO(reveman): This avoids a performance problem on ARM ChromeOS
     // devices. crbug.com/580166
     gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
 #else
-    gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
-                      staging_buffer->query_id);
+    gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, staging_buffer->query_id);
 #endif
   }
 
   // Since compressed texture's cannot be pre-allocated we might have an
   // unallocated resource in which case we need to perform a full size copy.
-  if (IsResourceFormatCompressed(resource->format())) {
+  if (IsResourceFormatCompressed(resource_lock->format())) {
     gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id,
-                                      resource_lock->texture_id());
+                                      resource_texture_id);
   } else {
     int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>(
-        resource->size().width(), resource->format());
+        resource_lock->size().width(), resource_lock->format());
     int chunk_size_in_rows =
         std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
     // Align chunk size to 4. Required to support compressed texture formats.
     chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
     int y = 0;
-    int height = resource->size().height();
+    int height = resource_lock->size().height();
     while (y < height) {
       // Copy at most |chunk_size_in_rows|.
       int rows_to_copy = std::min(chunk_size_in_rows, height - y);
       DCHECK_GT(rows_to_copy, 0);
 
       gl->CopySubTextureCHROMIUM(
-          staging_buffer->texture_id, resource_lock->texture_id(), 0, y, 0, y,
-          resource->size().width(), rows_to_copy, false, false, false);
+          staging_buffer->texture_id, resource_texture_id, 0, y, 0, y,
+          resource_lock->size().width(), rows_to_copy, false, false, false);
       y += rows_to_copy;
 
       // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
       // used for this copy operation.
       bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
 
-      if (bytes_scheduled_since_last_flush_ >=
-          max_bytes_per_copy_operation_) {
+      if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
         gl->ShallowFlushCHROMIUM();
         bytes_scheduled_since_last_flush_ = 0;
       }
     }
   }
 
   if (resource_provider_->use_sync_query()) {
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
     gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
 #else
     gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
 #endif
   }
 
   const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();
 
   // Barrier to sync worker context output to cc context.
   gl->OrderingBarrierCHROMIUM();
 
   // Generate sync token after the barrier for cross context synchronization.
-  gpu::SyncToken sync_token;
-  gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
-  resource_lock->UpdateResourceSyncToken(sync_token);
-  }
+  gpu::SyncToken resource_sync_token;
+  gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, resource_sync_token.GetData());
+  resource_lock->set_sync_token(resource_sync_token);
 }
 
 }  // namespace cc
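As a rough worked example (not part of the patch), assume a 512x512 RGBA_8888 tile (4 bytes per pixel) and the default 4 MiB kMaxBytesPerCopyOperation; the chunked copy in CopyOnWorkerThread above then works out as:

    // bytes_per_row      = 512 * 4             = 2048 bytes
    // chunk_size_in_rows = max(1, 4 MiB / 2048) = 2048 rows (already a multiple of 4)
    // rows_to_copy       = min(2048, 512)       = 512, so the whole tile is copied in a
    //                      single CopySubTextureCHROMIUM call
    // bytes_scheduled_since_last_flush_ grows by 512 * 2048 = 1 MiB per tile, so
    // ShallowFlushCHROMIUM() fires after about four such tiles, which matches the
    // "4MiB is the size of 4 512x512 tiles" comment above kMaxBytesPerCopyOperation.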