OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/raster/one_copy_raster_buffer_provider.h" | 5 #include "cc/raster/one_copy_raster_buffer_provider.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 #include <limits> | 10 #include <limits> |
11 #include <utility> | 11 #include <utility> |
12 | 12 |
13 #include "base/macros.h" | 13 #include "base/macros.h" |
14 #include "cc/base/math_util.h" | 14 #include "cc/base/math_util.h" |
15 #include "cc/resources/platform_color.h" | 15 #include "cc/resources/platform_color.h" |
16 #include "cc/resources/resource_format.h" | 16 #include "cc/resources/resource_format.h" |
17 #include "cc/resources/resource_util.h" | 17 #include "cc/resources/resource_util.h" |
18 #include "cc/resources/scoped_resource.h" | 18 #include "cc/resources/scoped_resource.h" |
19 #include "gpu/GLES2/gl2extchromium.h" | 19 #include "gpu/GLES2/gl2extchromium.h" |
20 #include "gpu/command_buffer/client/gles2_interface.h" | 20 #include "gpu/command_buffer/client/gles2_interface.h" |
21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | 21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
22 #include "ui/gfx/buffer_format_util.h" | 22 #include "ui/gfx/buffer_format_util.h" |
23 | 23 |
24 namespace cc { | 24 namespace cc { |
25 namespace { | 25 namespace { |
26 | 26 |
| 27 class RasterBufferImpl : public RasterBuffer { |
| 28 public: |
| 29 RasterBufferImpl(OneCopyRasterBufferProvider* worker_pool, |
| 30 ResourceProvider* resource_provider, |
| 31 ResourceFormat resource_format, |
| 32 const Resource* resource, |
| 33 uint64_t previous_content_id) |
| 34 : worker_pool_(worker_pool), |
| 35 resource_(resource), |
| 36 lock_(resource_provider, resource->id()), |
| 37 previous_content_id_(previous_content_id) {} |
| 38 |
| 39 ~RasterBufferImpl() override {} |
| 40 |
| 41 // Overridden from RasterBuffer: |
| 42 void Playback( |
| 43 const RasterSource* raster_source, |
| 44 const gfx::Rect& raster_full_rect, |
| 45 const gfx::Rect& raster_dirty_rect, |
| 46 uint64_t new_content_id, |
| 47 float scale, |
| 48 const RasterSource::PlaybackSettings& playback_settings) override { |
| 49 TRACE_EVENT0("cc", "OneCopyRasterBuffer::Playback"); |
| 50 worker_pool_->PlaybackAndCopyOnWorkerThread( |
| 51 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, |
| 52 scale, playback_settings, previous_content_id_, new_content_id); |
| 53 } |
| 54 |
| 55 private: |
| 56 OneCopyRasterBufferProvider* worker_pool_; |
| 57 const Resource* resource_; |
| 58 ResourceProvider::ScopedWriteLockGL lock_; |
| 59 uint64_t previous_content_id_; |
| 60 |
| 61 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
| 62 }; |
| 63 |
27 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 64 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good |
28 // default batch size for copy operations. | 65 // default batch size for copy operations. |
29 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 66 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; |
30 | 67 |
31 } // namespace | 68 } // namespace |
32 | 69 |
33 OneCopyRasterBufferProvider::RasterBufferImpl::RasterBufferImpl( | |
34 OneCopyRasterBufferProvider* client, | |
35 ResourceProvider* resource_provider, | |
36 const Resource* resource, | |
37 uint64_t previous_content_id, | |
38 bool async_worker_context_enabled) | |
39 : client_(client), | |
40 resource_(resource), | |
41 lock_(resource_provider, resource->id(), async_worker_context_enabled), | |
42 previous_content_id_(previous_content_id) { | |
43 client_->pending_raster_buffers_.insert(this); | |
44 } | |
45 | |
46 OneCopyRasterBufferProvider::RasterBufferImpl::~RasterBufferImpl() { | |
47 client_->pending_raster_buffers_.erase(this); | |
48 } | |
49 | |
50 void OneCopyRasterBufferProvider::RasterBufferImpl::Playback( | |
51 const RasterSource* raster_source, | |
52 const gfx::Rect& raster_full_rect, | |
53 const gfx::Rect& raster_dirty_rect, | |
54 uint64_t new_content_id, | |
55 float scale, | |
56 const RasterSource::PlaybackSettings& playback_settings) { | |
57 TRACE_EVENT0("cc", "OneCopyRasterBuffer::Playback"); | |
58 client_->PlaybackAndCopyOnWorkerThread( | |
59 resource_, &lock_, sync_token_, raster_source, raster_full_rect, | |
60 raster_dirty_rect, scale, playback_settings, previous_content_id_, | |
61 new_content_id); | |
62 } | |
63 | |
64 OneCopyRasterBufferProvider::OneCopyRasterBufferProvider( | 70 OneCopyRasterBufferProvider::OneCopyRasterBufferProvider( |
65 base::SequencedTaskRunner* task_runner, | 71 base::SequencedTaskRunner* task_runner, |
66 ContextProvider* compositor_context_provider, | 72 ContextProvider* compositor_context_provider, |
67 ContextProvider* worker_context_provider, | 73 ContextProvider* worker_context_provider, |
68 ResourceProvider* resource_provider, | 74 ResourceProvider* resource_provider, |
69 int max_copy_texture_chromium_size, | 75 int max_copy_texture_chromium_size, |
70 bool use_partial_raster, | 76 bool use_partial_raster, |
71 int max_staging_buffer_usage_in_bytes, | 77 int max_staging_buffer_usage_in_bytes, |
72 ResourceFormat preferred_tile_format, | 78 ResourceFormat preferred_tile_format) |
73 bool async_worker_context_enabled) | |
74 : compositor_context_provider_(compositor_context_provider), | 79 : compositor_context_provider_(compositor_context_provider), |
75 worker_context_provider_(worker_context_provider), | 80 worker_context_provider_(worker_context_provider), |
76 resource_provider_(resource_provider), | 81 resource_provider_(resource_provider), |
77 max_bytes_per_copy_operation_( | 82 max_bytes_per_copy_operation_( |
78 max_copy_texture_chromium_size | 83 max_copy_texture_chromium_size |
79 ? std::min(kMaxBytesPerCopyOperation, | 84 ? std::min(kMaxBytesPerCopyOperation, |
80 max_copy_texture_chromium_size) | 85 max_copy_texture_chromium_size) |
81 : kMaxBytesPerCopyOperation), | 86 : kMaxBytesPerCopyOperation), |
82 use_partial_raster_(use_partial_raster), | 87 use_partial_raster_(use_partial_raster), |
83 bytes_scheduled_since_last_flush_(0), | 88 bytes_scheduled_since_last_flush_(0), |
84 preferred_tile_format_(preferred_tile_format), | 89 preferred_tile_format_(preferred_tile_format), |
85 staging_pool_(task_runner, | 90 staging_pool_(task_runner, |
86 worker_context_provider, | 91 worker_context_provider, |
87 resource_provider, | 92 resource_provider, |
88 use_partial_raster, | 93 use_partial_raster, |
89 max_staging_buffer_usage_in_bytes), | 94 max_staging_buffer_usage_in_bytes) { |
90 async_worker_context_enabled_(async_worker_context_enabled) { | 95 DCHECK(compositor_context_provider_); |
91 DCHECK(compositor_context_provider); | |
92 DCHECK(worker_context_provider); | 96 DCHECK(worker_context_provider); |
93 } | 97 } |
94 | 98 |
95 OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() { | 99 OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() {} |
96 DCHECK(pending_raster_buffers_.empty()); | |
97 } | |
98 | 100 |
99 std::unique_ptr<RasterBuffer> | 101 std::unique_ptr<RasterBuffer> |
100 OneCopyRasterBufferProvider::AcquireBufferForRaster( | 102 OneCopyRasterBufferProvider::AcquireBufferForRaster( |
101 const Resource* resource, | 103 const Resource* resource, |
102 uint64_t resource_content_id, | 104 uint64_t resource_content_id, |
103 uint64_t previous_content_id) { | 105 uint64_t previous_content_id) { |
104 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 106 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
105 // the dirty rect. | 107 // the dirty rect. |
106 return base::WrapUnique(new RasterBufferImpl(this, resource_provider_, | 108 return base::WrapUnique<RasterBuffer>( |
107 resource, previous_content_id, | 109 new RasterBufferImpl(this, resource_provider_, resource->format(), |
108 async_worker_context_enabled_)); | 110 resource, previous_content_id)); |
109 } | 111 } |
110 | 112 |
111 void OneCopyRasterBufferProvider::ReleaseBufferForRaster( | 113 void OneCopyRasterBufferProvider::ReleaseBufferForRaster( |
112 std::unique_ptr<RasterBuffer> buffer) { | 114 std::unique_ptr<RasterBuffer> buffer) { |
113 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 115 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
114 } | 116 } |
115 | 117 |
116 void OneCopyRasterBufferProvider::OrderingBarrier() { | 118 void OneCopyRasterBufferProvider::OrderingBarrier() { |
117 TRACE_EVENT0("cc", "OneCopyRasterBufferProvider::OrderingBarrier"); | 119 TRACE_EVENT0("cc", "OneCopyRasterBufferProvider::OrderingBarrier"); |
118 | 120 compositor_context_provider_->ContextGL()->OrderingBarrierCHROMIUM(); |
119 gpu::gles2::GLES2Interface* gl = compositor_context_provider_->ContextGL(); | |
120 if (async_worker_context_enabled_) { | |
121 GLuint64 fence = gl->InsertFenceSyncCHROMIUM(); | |
122 gl->OrderingBarrierCHROMIUM(); | |
123 | |
124 gpu::SyncToken sync_token; | |
125 gl->GenUnverifiedSyncTokenCHROMIUM(fence, sync_token.GetData()); | |
126 | |
127 DCHECK(sync_token.HasData() || | |
128 gl->GetGraphicsResetStatusKHR() != GL_NO_ERROR); | |
129 | |
130 for (RasterBufferImpl* buffer : pending_raster_buffers_) | |
131 buffer->set_sync_token(sync_token); | |
132 } else { | |
133 gl->OrderingBarrierCHROMIUM(); | |
134 } | |
135 pending_raster_buffers_.clear(); | |
136 } | 121 } |
137 | 122 |
138 ResourceFormat OneCopyRasterBufferProvider::GetResourceFormat( | 123 ResourceFormat OneCopyRasterBufferProvider::GetResourceFormat( |
139 bool must_support_alpha) const { | 124 bool must_support_alpha) const { |
140 if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) && | 125 if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) && |
141 (DoesResourceFormatSupportAlpha(preferred_tile_format_) || | 126 (DoesResourceFormatSupportAlpha(preferred_tile_format_) || |
142 !must_support_alpha)) { | 127 !must_support_alpha)) { |
143 return preferred_tile_format_; | 128 return preferred_tile_format_; |
144 } | 129 } |
145 | 130 |
146 return resource_provider_->best_texture_format(); | 131 return resource_provider_->best_texture_format(); |
147 } | 132 } |
148 | 133 |
149 bool OneCopyRasterBufferProvider::GetResourceRequiresSwizzle( | 134 bool OneCopyRasterBufferProvider::GetResourceRequiresSwizzle( |
150 bool must_support_alpha) const { | 135 bool must_support_alpha) const { |
151 return ResourceFormatRequiresSwizzle(GetResourceFormat(must_support_alpha)); | 136 return ResourceFormatRequiresSwizzle(GetResourceFormat(must_support_alpha)); |
152 } | 137 } |
153 | 138 |
154 void OneCopyRasterBufferProvider::Shutdown() { | 139 void OneCopyRasterBufferProvider::Shutdown() { |
155 staging_pool_.Shutdown(); | 140 staging_pool_.Shutdown(); |
156 pending_raster_buffers_.clear(); | |
157 } | 141 } |
158 | 142 |
159 void OneCopyRasterBufferProvider::PlaybackAndCopyOnWorkerThread( | 143 void OneCopyRasterBufferProvider::PlaybackAndCopyOnWorkerThread( |
160 const Resource* resource, | 144 const Resource* resource, |
161 ResourceProvider::ScopedWriteLockGL* resource_lock, | 145 ResourceProvider::ScopedWriteLockGL* resource_lock, |
162 const gpu::SyncToken& sync_token, | |
163 const RasterSource* raster_source, | 146 const RasterSource* raster_source, |
164 const gfx::Rect& raster_full_rect, | 147 const gfx::Rect& raster_full_rect, |
165 const gfx::Rect& raster_dirty_rect, | 148 const gfx::Rect& raster_dirty_rect, |
166 float scale, | 149 float scale, |
167 const RasterSource::PlaybackSettings& playback_settings, | 150 const RasterSource::PlaybackSettings& playback_settings, |
168 uint64_t previous_content_id, | 151 uint64_t previous_content_id, |
169 uint64_t new_content_id) { | 152 uint64_t new_content_id) { |
170 if (async_worker_context_enabled_) { | |
171 // Early out if sync token is invalid. This happens if the compositor | |
172 // context was lost before ScheduleTasks was called. | |
173 if (!sync_token.HasData()) | |
174 return; | |
175 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); | |
176 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
177 DCHECK(gl); | |
178 // Synchronize with compositor. | |
179 gl->WaitSyncTokenCHROMIUM(sync_token.GetConstData()); | |
180 } | |
181 | |
182 std::unique_ptr<StagingBuffer> staging_buffer = | 153 std::unique_ptr<StagingBuffer> staging_buffer = |
183 staging_pool_.AcquireStagingBuffer(resource, previous_content_id); | 154 staging_pool_.AcquireStagingBuffer(resource, previous_content_id); |
184 | 155 |
185 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source, | 156 PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source, |
186 raster_full_rect, raster_dirty_rect, scale, | 157 raster_full_rect, raster_dirty_rect, scale, |
187 playback_settings, previous_content_id, | 158 playback_settings, previous_content_id, |
188 new_content_id); | 159 new_content_id); |
189 | 160 |
190 CopyOnWorkerThread(staging_buffer.get(), resource_lock, sync_token, | 161 CopyOnWorkerThread(staging_buffer.get(), resource, resource_lock, |
191 raster_source, previous_content_id, new_content_id); | 162 raster_source, previous_content_id, new_content_id); |
192 | 163 |
193 staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer)); | 164 staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer)); |
194 } | 165 } |
195 | 166 |
196 void OneCopyRasterBufferProvider::PlaybackToStagingBuffer( | 167 void OneCopyRasterBufferProvider::PlaybackToStagingBuffer( |
197 StagingBuffer* staging_buffer, | 168 StagingBuffer* staging_buffer, |
198 const Resource* resource, | 169 const Resource* resource, |
199 const RasterSource* raster_source, | 170 const RasterSource* raster_source, |
200 const gfx::Rect& raster_full_rect, | 171 const gfx::Rect& raster_full_rect, |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
239 buffer->memory(0), resource->format(), staging_buffer->size, | 210 buffer->memory(0), resource->format(), staging_buffer->size, |
240 buffer->stride(0), raster_source, raster_full_rect, playback_rect, | 211 buffer->stride(0), raster_source, raster_full_rect, playback_rect, |
241 scale, playback_settings); | 212 scale, playback_settings); |
242 buffer->Unmap(); | 213 buffer->Unmap(); |
243 staging_buffer->content_id = new_content_id; | 214 staging_buffer->content_id = new_content_id; |
244 } | 215 } |
245 } | 216 } |
246 | 217 |
247 void OneCopyRasterBufferProvider::CopyOnWorkerThread( | 218 void OneCopyRasterBufferProvider::CopyOnWorkerThread( |
248 StagingBuffer* staging_buffer, | 219 StagingBuffer* staging_buffer, |
| 220 const Resource* resource, |
249 ResourceProvider::ScopedWriteLockGL* resource_lock, | 221 ResourceProvider::ScopedWriteLockGL* resource_lock, |
250 const gpu::SyncToken& sync_token, | |
251 const RasterSource* raster_source, | 222 const RasterSource* raster_source, |
252 uint64_t previous_content_id, | 223 uint64_t previous_content_id, |
253 uint64_t new_content_id) { | 224 uint64_t new_content_id) { |
254 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); | 225 { |
255 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | 226 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); |
256 DCHECK(gl); | |
257 | 227 |
258 // Create texture after synchronizing with compositor. | 228 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
259 ResourceProvider::ScopedTextureProvider scoped_texture( | 229 DCHECK(gl); |
260 gl, resource_lock, async_worker_context_enabled_); | |
261 | 230 |
262 unsigned resource_texture_id = scoped_texture.texture_id(); | 231 unsigned image_target = |
263 unsigned image_target = | 232 resource_provider_->GetImageTextureTarget(resource->format()); |
264 resource_provider_->GetImageTextureTarget(resource_lock->format()); | |
265 | 233 |
266 // Create and bind staging texture. | 234 // Create and bind staging texture. |
267 if (!staging_buffer->texture_id) { | 235 if (!staging_buffer->texture_id) { |
268 gl->GenTextures(1, &staging_buffer->texture_id); | 236 gl->GenTextures(1, &staging_buffer->texture_id); |
269 gl->BindTexture(image_target, staging_buffer->texture_id); | 237 gl->BindTexture(image_target, staging_buffer->texture_id); |
270 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | 238 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); |
271 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); | 239 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); |
272 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | 240 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
273 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | 241 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
274 } else { | 242 } else { |
275 gl->BindTexture(image_target, staging_buffer->texture_id); | 243 gl->BindTexture(image_target, staging_buffer->texture_id); |
276 } | 244 } |
277 | 245 |
278 // Create and bind image. | 246 // Create and bind image. |
279 if (!staging_buffer->image_id) { | 247 if (!staging_buffer->image_id) { |
280 if (staging_buffer->gpu_memory_buffer) { | 248 if (staging_buffer->gpu_memory_buffer) { |
281 staging_buffer->image_id = gl->CreateImageCHROMIUM( | 249 staging_buffer->image_id = gl->CreateImageCHROMIUM( |
282 staging_buffer->gpu_memory_buffer->AsClientBuffer(), | 250 staging_buffer->gpu_memory_buffer->AsClientBuffer(), |
283 staging_buffer->size.width(), staging_buffer->size.height(), | 251 staging_buffer->size.width(), staging_buffer->size.height(), |
284 GLInternalFormat(resource_lock->format())); | 252 GLInternalFormat(resource->format())); |
| 253 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
| 254 } |
| 255 } else { |
| 256 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
285 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | 257 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
286 } | 258 } |
287 } else { | |
288 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | |
289 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | |
290 } | |
291 | 259 |
292 // Unbind staging texture. | 260 // Unbind staging texture. |
293 gl->BindTexture(image_target, 0); | 261 gl->BindTexture(image_target, 0); |
294 | 262 |
295 if (resource_provider_->use_sync_query()) { | 263 if (resource_provider_->use_sync_query()) { |
296 if (!staging_buffer->query_id) | 264 if (!staging_buffer->query_id) |
297 gl->GenQueriesEXT(1, &staging_buffer->query_id); | 265 gl->GenQueriesEXT(1, &staging_buffer->query_id); |
298 | 266 |
299 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) | 267 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) |
300 // TODO(reveman): This avoids a performance problem on ARM ChromeOS | 268 // TODO(reveman): This avoids a performance problem on ARM ChromeOS |
301 // devices. crbug.com/580166 | 269 // devices. crbug.com/580166 |
302 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); | 270 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); |
303 #else | 271 #else |
304 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, staging_buffer->query_id); | 272 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, |
| 273 staging_buffer->query_id); |
305 #endif | 274 #endif |
306 } | 275 } |
307 | 276 |
308 // Since compressed textures cannot be pre-allocated we might have an | 277 // Since compressed textures cannot be pre-allocated we might have an |
309 // unallocated resource in which case we need to perform a full size copy. | 278 // unallocated resource in which case we need to perform a full size copy. |
310 if (IsResourceFormatCompressed(resource_lock->format())) { | 279 if (IsResourceFormatCompressed(resource->format())) { |
311 gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id, | 280 gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id, |
312 resource_texture_id); | 281 resource_lock->texture_id()); |
313 } else { | 282 } else { |
314 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>( | 283 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>( |
315 resource_lock->size().width(), resource_lock->format()); | 284 resource->size().width(), resource->format()); |
316 int chunk_size_in_rows = | 285 int chunk_size_in_rows = |
317 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 286 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
318 // Align chunk size to 4. Required to support compressed texture formats. | 287 // Align chunk size to 4. Required to support compressed texture formats. |
319 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); | 288 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); |
320 int y = 0; | 289 int y = 0; |
321 int height = resource_lock->size().height(); | 290 int height = resource->size().height(); |
322 while (y < height) { | 291 while (y < height) { |
323 // Copy at most |chunk_size_in_rows|. | 292 // Copy at most |chunk_size_in_rows|. |
324 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 293 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
325 DCHECK_GT(rows_to_copy, 0); | 294 DCHECK_GT(rows_to_copy, 0); |
326 | 295 |
327 gl->CopySubTextureCHROMIUM( | 296 gl->CopySubTextureCHROMIUM( |
328 staging_buffer->texture_id, resource_texture_id, 0, y, 0, y, | 297 staging_buffer->texture_id, resource_lock->texture_id(), 0, y, 0, y, |
329 resource_lock->size().width(), rows_to_copy, false, false, false); | 298 resource->size().width(), rows_to_copy, false, false, false); |
330 y += rows_to_copy; | 299 y += rows_to_copy; |
331 | 300 |
332 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | 301 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory |
333 // used for this copy operation. | 302 // used for this copy operation. |
334 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | 303 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; |
335 | 304 |
336 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | 305 if (bytes_scheduled_since_last_flush_ >= |
337 gl->ShallowFlushCHROMIUM(); | 306 max_bytes_per_copy_operation_) { |
338 bytes_scheduled_since_last_flush_ = 0; | 307 gl->ShallowFlushCHROMIUM(); |
| 308 bytes_scheduled_since_last_flush_ = 0; |
| 309 } |
339 } | 310 } |
340 } | 311 } |
| 312 |
| 313 if (resource_provider_->use_sync_query()) { |
| 314 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) |
| 315 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); |
| 316 #else |
| 317 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); |
| 318 #endif |
| 319 } |
| 320 |
| 321 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM(); |
| 322 |
| 323 // Barrier to sync worker context output to cc context. |
| 324 gl->OrderingBarrierCHROMIUM(); |
| 325 |
| 326 // Generate sync token after the barrier for cross context synchronization. |
| 327 gpu::SyncToken sync_token; |
| 328 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData()); |
| 329 resource_lock->UpdateResourceSyncToken(sync_token); |
341 } | 330 } |
342 | |
343 if (resource_provider_->use_sync_query()) { | |
344 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) | |
345 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); | |
346 #else | |
347 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); | |
348 #endif | |
349 } | |
350 | |
351 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM(); | |
352 | |
353 // Barrier to sync worker context output to cc context. | |
354 gl->OrderingBarrierCHROMIUM(); | |
355 | |
356 // Generate sync token after the barrier for cross context synchronization. | |
357 gpu::SyncToken resource_sync_token; | |
358 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, resource_sync_token.GetData()); | |
359 resource_lock->set_sync_token(resource_sync_token); | |
360 resource_lock->set_synchronized(!async_worker_context_enabled_); | |
361 } | 331 } |
362 | 332 |
363 } // namespace cc | 333 } // namespace cc |
OLD | NEW |