OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/raster/one_copy_raster_buffer_provider.h" | 5 #include "cc/raster/one_copy_raster_buffer_provider.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 #include <limits> | 10 #include <limits> |
11 #include <utility> | 11 #include <utility> |
12 | 12 |
13 #include "base/macros.h" | 13 #include "base/macros.h" |
14 #include "cc/base/math_util.h" | 14 #include "cc/base/math_util.h" |
15 #include "cc/resources/platform_color.h" | 15 #include "cc/resources/platform_color.h" |
16 #include "cc/resources/resource_format.h" | 16 #include "cc/resources/resource_format.h" |
17 #include "cc/resources/resource_util.h" | 17 #include "cc/resources/resource_util.h" |
18 #include "cc/resources/scoped_resource.h" | 18 #include "cc/resources/scoped_resource.h" |
19 #include "gpu/GLES2/gl2extchromium.h" | 19 #include "gpu/GLES2/gl2extchromium.h" |
20 #include "gpu/command_buffer/client/gles2_interface.h" | 20 #include "gpu/command_buffer/client/gles2_interface.h" |
21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | 21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
22 #include "ui/gfx/buffer_format_util.h" | 22 #include "ui/gfx/buffer_format_util.h" |
23 | 23 |
24 namespace cc { | 24 namespace cc { |
25 namespace { | 25 namespace { |
26 | 26 |
27 class RasterBufferImpl : public RasterBuffer { | 27 class RasterBufferImpl : public RasterBuffer { |
28 public: | 28 public: |
29 RasterBufferImpl(OneCopyRasterBufferProvider* worker_pool, | 29 RasterBufferImpl(OneCopyRasterBufferProvider* client, |
30 ResourceProvider* resource_provider, | 30 ResourceProvider* resource_provider, |
31 ResourceFormat resource_format, | |
32 const Resource* resource, | 31 const Resource* resource, |
33 uint64_t previous_content_id) | 32 uint64_t previous_content_id, |
34 : worker_pool_(worker_pool), | 33 bool async_worker_context_enabled) |
| 34 : client_(client), |
35 resource_(resource), | 35 resource_(resource), |
36 lock_(resource_provider, resource->id()), | 36 lock_(resource_provider, resource->id(), async_worker_context_enabled), |
37 previous_content_id_(previous_content_id) {} | 37 previous_content_id_(previous_content_id) {} |
38 | 38 |
39 ~RasterBufferImpl() override {} | 39 ~RasterBufferImpl() override {} |
40 | 40 |
41 // Overridden from RasterBuffer: | 41 // Overridden from RasterBuffer: |
42 void Playback( | 42 void Playback( |
43 const RasterSource* raster_source, | 43 const RasterSource* raster_source, |
44 const gfx::Rect& raster_full_rect, | 44 const gfx::Rect& raster_full_rect, |
45 const gfx::Rect& raster_dirty_rect, | 45 const gfx::Rect& raster_dirty_rect, |
46 uint64_t new_content_id, | 46 uint64_t new_content_id, |
47 float scale, | 47 float scale, |
48 const RasterSource::PlaybackSettings& playback_settings) override { | 48 const RasterSource::PlaybackSettings& playback_settings) override { |
49 worker_pool_->PlaybackAndCopyOnWorkerThread( | 49 client_->PlaybackAndCopyOnWorkerThread( |
50 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, | 50 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, |
51 scale, playback_settings, previous_content_id_, new_content_id); | 51 scale, playback_settings, previous_content_id_, new_content_id); |
52 } | 52 } |
53 | 53 |
54 private: | 54 private: |
55 OneCopyRasterBufferProvider* worker_pool_; | 55 OneCopyRasterBufferProvider* client_; |
56 const Resource* resource_; | 56 const Resource* resource_; |
57 ResourceProvider::ScopedWriteLockGL lock_; | 57 ResourceProvider::ScopedWriteLockGL lock_; |
58 uint64_t previous_content_id_; | 58 uint64_t previous_content_id_; |
59 | 59 |
60 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 60 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
61 }; | 61 }; |
62 | 62 |
63 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 63 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good |
64 // default batch size for copy operations. | 64 // default batch size for copy operations. |
65 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 65 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; |
66 | 66 |
67 } // namespace | 67 } // namespace |
68 | 68 |
69 OneCopyRasterBufferProvider::OneCopyRasterBufferProvider( | 69 OneCopyRasterBufferProvider::OneCopyRasterBufferProvider( |
70 base::SequencedTaskRunner* task_runner, | 70 base::SequencedTaskRunner* task_runner, |
71 ContextProvider* compositor_context_provider, | 71 ContextProvider* compositor_context_provider, |
72 ContextProvider* worker_context_provider, | 72 ContextProvider* worker_context_provider, |
73 ResourceProvider* resource_provider, | 73 ResourceProvider* resource_provider, |
74 int max_copy_texture_chromium_size, | 74 int max_copy_texture_chromium_size, |
75 bool use_partial_raster, | 75 bool use_partial_raster, |
76 int max_staging_buffer_usage_in_bytes, | 76 int max_staging_buffer_usage_in_bytes, |
77 ResourceFormat preferred_tile_format) | 77 ResourceFormat preferred_tile_format, |
| 78 bool async_worker_context_enabled) |
78 : compositor_context_provider_(compositor_context_provider), | 79 : compositor_context_provider_(compositor_context_provider), |
79 worker_context_provider_(worker_context_provider), | 80 worker_context_provider_(worker_context_provider), |
80 resource_provider_(resource_provider), | 81 resource_provider_(resource_provider), |
81 max_bytes_per_copy_operation_( | 82 max_bytes_per_copy_operation_( |
82 max_copy_texture_chromium_size | 83 max_copy_texture_chromium_size |
83 ? std::min(kMaxBytesPerCopyOperation, | 84 ? std::min(kMaxBytesPerCopyOperation, |
84 max_copy_texture_chromium_size) | 85 max_copy_texture_chromium_size) |
85 : kMaxBytesPerCopyOperation), | 86 : kMaxBytesPerCopyOperation), |
86 use_partial_raster_(use_partial_raster), | 87 use_partial_raster_(use_partial_raster), |
87 bytes_scheduled_since_last_flush_(0), | 88 bytes_scheduled_since_last_flush_(0), |
88 preferred_tile_format_(preferred_tile_format), | 89 preferred_tile_format_(preferred_tile_format), |
89 staging_pool_(task_runner, | 90 staging_pool_(task_runner, |
90 worker_context_provider, | 91 worker_context_provider, |
91 resource_provider, | 92 resource_provider, |
92 use_partial_raster, | 93 use_partial_raster, |
93 max_staging_buffer_usage_in_bytes) { | 94 max_staging_buffer_usage_in_bytes), |
94 DCHECK(compositor_context_provider_); | 95 async_worker_context_enabled_(async_worker_context_enabled) { |
| 96 DCHECK(compositor_context_provider); |
95 DCHECK(worker_context_provider); | 97 DCHECK(worker_context_provider); |
96 } | 98 } |
97 | 99 |
98 OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() {} | 100 OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() {} |
99 | 101 |
100 std::unique_ptr<RasterBuffer> | 102 std::unique_ptr<RasterBuffer> |
101 OneCopyRasterBufferProvider::AcquireBufferForRaster( | 103 OneCopyRasterBufferProvider::AcquireBufferForRaster( |
102 const Resource* resource, | 104 const Resource* resource, |
103 uint64_t resource_content_id, | 105 uint64_t resource_content_id, |
104 uint64_t previous_content_id) { | 106 uint64_t previous_content_id) { |
105 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 107 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
106 // the dirty rect. | 108 // the dirty rect. |
107 return base::WrapUnique<RasterBuffer>( | 109 return base::WrapUnique<RasterBuffer>( |
108 new RasterBufferImpl(this, resource_provider_, resource->format(), | 110 new RasterBufferImpl(this, resource_provider_, resource, |
109 resource, previous_content_id)); | 111 previous_content_id, async_worker_context_enabled_)); |
110 } | 112 } |
111 | 113 |
112 void OneCopyRasterBufferProvider::ReleaseBufferForRaster( | 114 void OneCopyRasterBufferProvider::ReleaseBufferForRaster( |
113 std::unique_ptr<RasterBuffer> buffer) { | 115 std::unique_ptr<RasterBuffer> buffer) { |
114 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 116 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
115 } | 117 } |
116 | 118 |
117 void OneCopyRasterBufferProvider::OrderingBarrier() { | 119 void OneCopyRasterBufferProvider::OrderingBarrier() { |
118 TRACE_EVENT0("cc", "OneCopyRasterBufferProvider::OrderingBarrier"); | 120 TRACE_EVENT0("cc", "OneCopyRasterBufferProvider::OrderingBarrier"); |
119 compositor_context_provider_->ContextGL()->OrderingBarrierCHROMIUM(); | 121 gpu::gles2::GLES2Interface* gl = compositor_context_provider_->ContextGL(); |
| 122 GLuint64 fence = gl->InsertFenceSyncCHROMIUM(); |
| 123 gl->OrderingBarrierCHROMIUM(); |
| 124 gl->GenUnverifiedSyncTokenCHROMIUM(fence, sync_token_.GetData()); |
120 } | 125 } |
121 | 126 |
122 ResourceFormat OneCopyRasterBufferProvider::GetResourceFormat( | 127 ResourceFormat OneCopyRasterBufferProvider::GetResourceFormat( |
123 bool must_support_alpha) const { | 128 bool must_support_alpha) const { |
124 if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) && | 129 if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) && |
125 (DoesResourceFormatSupportAlpha(preferred_tile_format_) || | 130 (DoesResourceFormatSupportAlpha(preferred_tile_format_) || |
126 !must_support_alpha)) { | 131 !must_support_alpha)) { |
127 return preferred_tile_format_; | 132 return preferred_tile_format_; |
128 } | 133 } |
129 | 134 |
(...skipping 20 matching lines...) |
150 uint64_t previous_content_id, | 155 uint64_t previous_content_id, |
151 uint64_t new_content_id) { | 156 uint64_t new_content_id) { |
152 std::unique_ptr<StagingBuffer> staging_buffer = | 157 std::unique_ptr<StagingBuffer> staging_buffer = |
153 staging_pool_.AcquireStagingBuffer(resource, previous_content_id); | 158 staging_pool_.AcquireStagingBuffer(resource, previous_content_id); |
154 | 159 |
155 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source, | 160 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source, |
156 raster_full_rect, raster_dirty_rect, scale, | 161 raster_full_rect, raster_dirty_rect, scale, |
157 playback_settings, previous_content_id, | 162 playback_settings, previous_content_id, |
158 new_content_id); | 163 new_content_id); |
159 | 164 |
160 CopyOnWorkerThread(staging_buffer.get(), resource, resource_lock, | 165 CopyOnWorkerThread(staging_buffer.get(), resource_lock, raster_source, |
161 raster_source, previous_content_id, new_content_id); | 166 previous_content_id, new_content_id); |
162 | 167 |
163 staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer)); | 168 staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer)); |
164 } | 169 } |
165 | 170 |
166 void OneCopyRasterBufferProvider::PlaybackToStagingBuffer( | 171 void OneCopyRasterBufferProvider::PlaybackToStagingBuffer( |
167 StagingBuffer* staging_buffer, | 172 StagingBuffer* staging_buffer, |
168 const Resource* resource, | 173 const Resource* resource, |
169 const RasterSource* raster_source, | 174 const RasterSource* raster_source, |
170 const gfx::Rect& raster_full_rect, | 175 const gfx::Rect& raster_full_rect, |
171 const gfx::Rect& raster_dirty_rect, | 176 const gfx::Rect& raster_dirty_rect, |
(...skipping 37 matching lines...) |
209 buffer->memory(0), resource->format(), staging_buffer->size, | 214 buffer->memory(0), resource->format(), staging_buffer->size, |
210 buffer->stride(0), raster_source, raster_full_rect, playback_rect, | 215 buffer->stride(0), raster_source, raster_full_rect, playback_rect, |
211 scale, playback_settings); | 216 scale, playback_settings); |
212 buffer->Unmap(); | 217 buffer->Unmap(); |
213 staging_buffer->content_id = new_content_id; | 218 staging_buffer->content_id = new_content_id; |
214 } | 219 } |
215 } | 220 } |
216 | 221 |
217 void OneCopyRasterBufferProvider::CopyOnWorkerThread( | 222 void OneCopyRasterBufferProvider::CopyOnWorkerThread( |
218 StagingBuffer* staging_buffer, | 223 StagingBuffer* staging_buffer, |
219 const Resource* resource, | |
220 ResourceProvider::ScopedWriteLockGL* resource_lock, | 224 ResourceProvider::ScopedWriteLockGL* resource_lock, |
221 const RasterSource* raster_source, | 225 const RasterSource* raster_source, |
222 uint64_t previous_content_id, | 226 uint64_t previous_content_id, |
223 uint64_t new_content_id) { | 227 uint64_t new_content_id) { |
224 { | 228 { |
225 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); | 229 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); |
226 | 230 |
227 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | 231 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
228 DCHECK(gl); | 232 DCHECK(gl); |
229 | 233 |
| 234 ResourceProvider::ScopedTextureProvider scoped_texture( |
| 235 gl, resource_lock, async_worker_context_enabled_); |
| 236 |
| 237 // Synchronize with compositor. |
| 238 DCHECK(sync_token_.HasData()); |
| 239 gl->WaitSyncTokenCHROMIUM(sync_token_.GetConstData()); |
| 240 |
| 241 unsigned resource_texture_id = scoped_texture.texture_id(); |
230 unsigned image_target = | 242 unsigned image_target = |
231 resource_provider_->GetImageTextureTarget(resource->format()); | 243 resource_provider_->GetImageTextureTarget(resource_lock->format()); |
232 | 244 |
233 // Create and bind staging texture. | 245 // Create and bind staging texture. |
234 if (!staging_buffer->texture_id) { | 246 if (!staging_buffer->texture_id) { |
235 gl->GenTextures(1, &staging_buffer->texture_id); | 247 gl->GenTextures(1, &staging_buffer->texture_id); |
236 gl->BindTexture(image_target, staging_buffer->texture_id); | 248 gl->BindTexture(image_target, staging_buffer->texture_id); |
237 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | 249 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); |
238 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); | 250 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); |
239 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | 251 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
240 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | 252 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
241 } else { | 253 } else { |
242 gl->BindTexture(image_target, staging_buffer->texture_id); | 254 gl->BindTexture(image_target, staging_buffer->texture_id); |
243 } | 255 } |
244 | 256 |
245 // Create and bind image. | 257 // Create and bind image. |
246 if (!staging_buffer->image_id) { | 258 if (!staging_buffer->image_id) { |
247 if (staging_buffer->gpu_memory_buffer) { | 259 if (staging_buffer->gpu_memory_buffer) { |
248 staging_buffer->image_id = gl->CreateImageCHROMIUM( | 260 staging_buffer->image_id = gl->CreateImageCHROMIUM( |
249 staging_buffer->gpu_memory_buffer->AsClientBuffer(), | 261 staging_buffer->gpu_memory_buffer->AsClientBuffer(), |
250 staging_buffer->size.width(), staging_buffer->size.height(), | 262 staging_buffer->size.width(), staging_buffer->size.height(), |
251 GLInternalFormat(resource->format())); | 263 GLInternalFormat(resource_lock->format())); |
252 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | 264 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
253 } | 265 } |
254 } else { | 266 } else { |
255 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | 267 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
256 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | 268 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
257 } | 269 } |
258 | 270 |
259 // Unbind staging texture. | 271 // Unbind staging texture. |
260 gl->BindTexture(image_target, 0); | 272 gl->BindTexture(image_target, 0); |
261 | 273 |
262 if (resource_provider_->use_sync_query()) { | 274 if (resource_provider_->use_sync_query()) { |
263 if (!staging_buffer->query_id) | 275 if (!staging_buffer->query_id) |
264 gl->GenQueriesEXT(1, &staging_buffer->query_id); | 276 gl->GenQueriesEXT(1, &staging_buffer->query_id); |
265 | 277 |
266 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) | 278 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) |
267 // TODO(reveman): This avoids a performance problem on ARM ChromeOS | 279 // TODO(reveman): This avoids a performance problem on ARM ChromeOS |
268 // devices. crbug.com/580166 | 280 // devices. crbug.com/580166 |
269 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); | 281 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); |
270 #else | 282 #else |
271 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, | 283 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, |
272 staging_buffer->query_id); | 284 staging_buffer->query_id); |
273 #endif | 285 #endif |
274 } | 286 } |
275 | 287 |
276 // Since compressed textures cannot be pre-allocated we might have an | 288 // Since compressed textures cannot be pre-allocated we might have an |
277 // unallocated resource in which case we need to perform a full size copy. | 289 // unallocated resource in which case we need to perform a full size copy. |
278 if (IsResourceFormatCompressed(resource->format())) { | 290 if (IsResourceFormatCompressed(resource_lock->format())) { |
279 gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id, | 291 gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id, |
280 resource_lock->texture_id()); | 292 resource_texture_id); |
281 } else { | 293 } else { |
282 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>( | 294 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>( |
283 resource->size().width(), resource->format()); | 295 resource_lock->size().width(), resource_lock->format()); |
284 int chunk_size_in_rows = | 296 int chunk_size_in_rows = |
285 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 297 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
286 // Align chunk size to 4. Required to support compressed texture formats. | 298 // Align chunk size to 4. Required to support compressed texture formats. |
287 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); | 299 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); |
288 int y = 0; | 300 int y = 0; |
289 int height = resource->size().height(); | 301 int height = resource_lock->size().height(); |
290 while (y < height) { | 302 while (y < height) { |
291 // Copy at most |chunk_size_in_rows|. | 303 // Copy at most |chunk_size_in_rows|. |
292 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 304 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
293 DCHECK_GT(rows_to_copy, 0); | 305 DCHECK_GT(rows_to_copy, 0); |
294 | 306 |
295 gl->CopySubTextureCHROMIUM( | 307 gl->CopySubTextureCHROMIUM( |
296 staging_buffer->texture_id, resource_lock->texture_id(), 0, y, 0, y, | 308 staging_buffer->texture_id, resource_texture_id, 0, y, 0, y, |
297 resource->size().width(), rows_to_copy, false, false, false); | 309 resource_lock->size().width(), rows_to_copy, false, false, false); |
298 y += rows_to_copy; | 310 y += rows_to_copy; |
299 | 311 |
300 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | 312 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory |
301 // used for this copy operation. | 313 // used for this copy operation. |
302 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | 314 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; |
303 | 315 |
304 if (bytes_scheduled_since_last_flush_ >= | 316 if (bytes_scheduled_since_last_flush_ >= |
305 max_bytes_per_copy_operation_) { | 317 max_bytes_per_copy_operation_) { |
306 gl->ShallowFlushCHROMIUM(); | 318 gl->ShallowFlushCHROMIUM(); |
307 bytes_scheduled_since_last_flush_ = 0; | 319 bytes_scheduled_since_last_flush_ = 0; |
(...skipping 10 matching lines...) |
318 } | 330 } |
319 | 331 |
320 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM(); | 332 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM(); |
321 | 333 |
322 // Barrier to sync worker context output to cc context. | 334 // Barrier to sync worker context output to cc context. |
323 gl->OrderingBarrierCHROMIUM(); | 335 gl->OrderingBarrierCHROMIUM(); |
324 | 336 |
325 // Generate sync token after the barrier for cross context synchronization. | 337 // Generate sync token after the barrier for cross context synchronization. |
326 gpu::SyncToken sync_token; | 338 gpu::SyncToken sync_token; |
327 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData()); | 339 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData()); |
328 resource_lock->UpdateResourceSyncToken(sync_token); | 340 resource_lock->set_sync_token(sync_token); |
329 } | 341 } |
330 } | 342 } |
331 | 343 |
332 } // namespace cc | 344 } // namespace cc |