OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/web_cache/browser/web_cache_manager.h" | 5 #include "components/web_cache/browser/web_cache_manager.h" |
6 | 6 |
7 #include <string.h> | 7 #include <string.h> |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 | 10 |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
128 inactive_renderers_.erase(elmt); | 128 inactive_renderers_.erase(elmt); |
129 | 129 |
130 // A renderer that was inactive, just became active. We should make sure | 130 // A renderer that was inactive, just became active. We should make sure |
131 // it is given a fair cache allocation, but we defer this for a bit in | 131 // it is given a fair cache allocation, but we defer this for a bit in |
132 // order to make this function call cheap. | 132 // order to make this function call cheap. |
133 ReviseAllocationStrategyLater(); | 133 ReviseAllocationStrategyLater(); |
134 } | 134 } |
135 } | 135 } |
136 | 136 |
137 void WebCacheManager::ObserveStats(int renderer_id, | 137 void WebCacheManager::ObserveStats(int renderer_id, |
138 uint64_t min_dead_capacity, | |
139 uint64_t max_dead_capacity, | |
140 uint64_t capacity, | 138 uint64_t capacity, |
141 uint64_t live_size, | 139 uint64_t size) { |
142 uint64_t dead_size) { | |
143 StatsMap::iterator entry = stats_.find(renderer_id); | 140 StatsMap::iterator entry = stats_.find(renderer_id); |
144 if (entry == stats_.end()) | 141 if (entry == stats_.end()) |
145 return; // We might see stats for a renderer that has been destroyed. | 142 return; // We might see stats for a renderer that has been destroyed. |
146 | 143 |
147 // Record the updated stats. | 144 // Record the updated stats. |
148 entry->second.capacity = capacity; | 145 entry->second.capacity = capacity; |
149 entry->second.dead_size = dead_size; | 146 entry->second.size = size; |
150 entry->second.live_size = live_size; | |
151 entry->second.max_dead_capacity = max_dead_capacity; | |
152 entry->second.min_dead_capacity = min_dead_capacity; | |
153 } | 147 } |
154 | 148 |
155 void WebCacheManager::SetGlobalSizeLimit(uint64_t bytes) { | 149 void WebCacheManager::SetGlobalSizeLimit(uint64_t bytes) { |
156 global_size_limit_ = bytes; | 150 global_size_limit_ = bytes; |
157 ReviseAllocationStrategyLater(); | 151 ReviseAllocationStrategyLater(); |
158 } | 152 } |
159 | 153 |
160 void WebCacheManager::ClearCache() { | 154 void WebCacheManager::ClearCache() { |
161 // Tell each renderer process to clear the cache. | 155 // Tell each renderer process to clear the cache. |
162 ClearRendererCache(active_renderers_, INSTANTLY); | 156 ClearRendererCache(active_renderers_, INSTANTLY); |
(...skipping 29 matching lines...) Expand all Loading... | |
192 } | 186 } |
193 } | 187 } |
194 | 188 |
195 // static | 189 // static |
196 uint64_t WebCacheManager::GetDefaultGlobalSizeLimit() { | 190 uint64_t WebCacheManager::GetDefaultGlobalSizeLimit() { |
197 return GetDefaultCacheSize(); | 191 return GetDefaultCacheSize(); |
198 } | 192 } |
199 | 193 |
200 void WebCacheManager::GatherStats(const std::set<int>& renderers, | 194 void WebCacheManager::GatherStats(const std::set<int>& renderers, |
201 uint64_t* capacity, | 195 uint64_t* capacity, |
202 uint64_t* live_size, | 196 uint64_t* size) { |
203 uint64_t* dead_size) { | 197 *capacity = *size = 0; |
204 *capacity = *live_size = *dead_size = 0; | |
205 | 198 |
206 std::set<int>::const_iterator iter = renderers.begin(); | 199 std::set<int>::const_iterator iter = renderers.begin(); |
207 while (iter != renderers.end()) { | 200 while (iter != renderers.end()) { |
208 StatsMap::iterator elmt = stats_.find(*iter); | 201 StatsMap::iterator elmt = stats_.find(*iter); |
209 if (elmt != stats_.end()) { | 202 if (elmt != stats_.end()) { |
210 *capacity += elmt->second.capacity; | 203 *capacity += elmt->second.capacity; |
211 *live_size += elmt->second.live_size; | 204 *size += elmt->second.size; |
212 *dead_size += elmt->second.dead_size; | |
213 } | 205 } |
214 ++iter; | 206 ++iter; |
215 } | 207 } |
216 } | 208 } |
217 | 209 |
218 // static | 210 // static |
219 uint64_t WebCacheManager::GetSize(AllocationTactic tactic, | 211 uint64_t WebCacheManager::GetSize(AllocationTactic tactic, uint64_t size) { |
220 uint64_t live_size, | |
221 uint64_t dead_size) { | |
222 switch (tactic) { | 212 switch (tactic) { |
223 case DIVIDE_EVENLY: | 213 case DIVIDE_EVENLY: |
224 // We aren't going to reserve any space for existing objects. | 214 // We aren't going to reserve any space for existing objects. |
225 return 0; | 215 return 0; |
226 case KEEP_CURRENT_WITH_HEADROOM: | 216 case KEEP_CURRENT_WITH_HEADROOM: |
227 // We need enough space for our current objects, plus some headroom. | 217 // We need enough space for our current objects, plus some headroom. |
228 return 3 * GetSize(KEEP_CURRENT, live_size, dead_size) / 2; | 218 return 3 * GetSize(KEEP_CURRENT, size) / 2; |
229 case KEEP_CURRENT: | 219 case KEEP_CURRENT: |
230 // We need enough space to keep our current objects. | 220 // We need enough space to keep our current objects. |
231 return live_size + dead_size; | 221 return size; |
232 case KEEP_LIVE_WITH_HEADROOM: | |
233 // We need enough space to keep our live resources, plus some headroom. | |
234 return 3 * GetSize(KEEP_LIVE, live_size, dead_size) / 2; | |
235 case KEEP_LIVE: | |
236 // We need enough space to keep our live resources. | |
237 return live_size; | |
238 default: | 222 default: |
239 NOTREACHED() << "Unknown cache allocation tactic"; | 223 NOTREACHED() << "Unknown cache allocation tactic"; |
240 return 0; | 224 return 0; |
241 } | 225 } |
242 } | 226 } |
243 | 227 |
244 bool WebCacheManager::AttemptTactic( | 228 bool WebCacheManager::AttemptTactic(AllocationTactic active_tactic, |
245 AllocationTactic active_tactic, | 229 uint64_t active_used_size, |
246 uint64_t active_live_size, | 230 AllocationTactic inactive_tactic, |
247 uint64_t active_dead_size, | 231 uint64_t inactive_used_size, |
248 AllocationTactic inactive_tactic, | 232 AllocationStrategy* strategy) { |
249 uint64_t inactive_live_size, | |
250 uint64_t inactive_dead_size, | |
251 AllocationStrategy* strategy) { | |
252 DCHECK(strategy); | 233 DCHECK(strategy); |
253 | 234 |
254 uint64_t active_size = GetSize(active_tactic, active_live_size, | 235 uint64_t active_size = GetSize(active_tactic, active_used_size); |
255 active_dead_size); | 236 uint64_t inactive_size = GetSize(inactive_tactic, inactive_used_size); |
256 uint64_t inactive_size = GetSize(inactive_tactic, inactive_live_size, | |
257 inactive_dead_size); | |
258 | 237 |
259 // Give up if we don't have enough space to use this tactic. | 238 // Give up if we don't have enough space to use this tactic. |
260 if (global_size_limit_ < active_size + inactive_size) | 239 if (global_size_limit_ < active_size + inactive_size) |
261 return false; | 240 return false; |
262 | 241 |
263 // Compute the unreserved space available. | 242 // Compute the unreserved space available. |
264 uint64_t total_extra = global_size_limit_ - (active_size + inactive_size); | 243 uint64_t total_extra = global_size_limit_ - (active_size + inactive_size); |
265 | 244 |
266 // The plan for the extra space is to divide it evenly among the active | 245 // The plan for the extra space is to divide it evenly among the active |
267 // renderers. | 246 // renderers. |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
300 // Divide the extra memory evenly among the renderers. | 279 // Divide the extra memory evenly among the renderers. |
301 uint64_t extra_each = extra_bytes_to_allocate / renderers.size(); | 280 uint64_t extra_each = extra_bytes_to_allocate / renderers.size(); |
302 | 281 |
303 std::set<int>::const_iterator iter = renderers.begin(); | 282 std::set<int>::const_iterator iter = renderers.begin(); |
304 while (iter != renderers.end()) { | 283 while (iter != renderers.end()) { |
305 uint64_t cache_size = extra_each; | 284 uint64_t cache_size = extra_each; |
306 | 285 |
307 // Add in the space required to implement |tactic|. | 286 // Add in the space required to implement |tactic|. |
308 StatsMap::iterator elmt = stats_.find(*iter); | 287 StatsMap::iterator elmt = stats_.find(*iter); |
309 if (elmt != stats_.end()) { | 288 if (elmt != stats_.end()) { |
310 cache_size += GetSize(tactic, elmt->second.live_size, | 289 cache_size += GetSize(tactic, elmt->second.size); |
311 elmt->second.dead_size); | |
312 } | 290 } |
313 | 291 |
314 // Record the allocation in our strategy. | 292 // Record the allocation in our strategy. |
315 strategy->push_back(Allocation(*iter, cache_size)); | 293 strategy->push_back(Allocation(*iter, cache_size)); |
316 ++iter; | 294 ++iter; |
317 } | 295 } |
318 } | 296 } |
319 | 297 |
320 void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) { | 298 void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) { |
321 // Inform each render process of its cache allocation. | 299 // Inform each render process of its cache allocation. |
322 AllocationStrategy::const_iterator allocation = strategy.begin(); | 300 AllocationStrategy::const_iterator allocation = strategy.begin(); |
323 while (allocation != strategy.end()) { | 301 while (allocation != strategy.end()) { |
324 content::RenderProcessHost* host = | 302 content::RenderProcessHost* host = |
325 content::RenderProcessHost::FromID(allocation->first); | 303 content::RenderProcessHost::FromID(allocation->first); |
326 if (host) { | 304 if (host) { |
327 // This is the capacity this renderer has been allocated. | 305 // This is the capacity this renderer has been allocated. |
328 uint64_t capacity = allocation->second; | 306 uint64_t capacity = allocation->second; |
329 | 307 |
330 // We don't reserve any space for dead objects in the cache. Instead, we | |
331 // prefer to keep live objects around. There is probably some performance | |
332 // tuning to be done here. | |
333 uint64_t min_dead_capacity = 0; | |
334 | |
335 // We allow the dead objects to consume up to half of the cache capacity. | |
336 uint64_t max_dead_capacity = capacity / 2; | |
337 if (base::SysInfo::IsLowEndDevice()) { | |
338 max_dead_capacity = std::min(static_cast<uint64_t>(512 * 1024u), | |
339 max_dead_capacity); | |
340 } | |
341 | |
342 // Find the WebCachePtr by renderer process id. | 308 // Find the WebCachePtr by renderer process id. |
343 auto it = web_cache_services_.find(allocation->first); | 309 auto it = web_cache_services_.find(allocation->first); |
344 DCHECK(it != web_cache_services_.end()); | 310 DCHECK(it != web_cache_services_.end()); |
345 const mojom::WebCachePtr& service = it->second; | 311 const mojom::WebCachePtr& service = it->second; |
346 DCHECK(service); | 312 DCHECK(service); |
347 service->SetCacheCapacities(min_dead_capacity, max_dead_capacity, | 313 service->SetCacheCapacity(capacity); |
348 capacity); | |
349 } | 314 } |
350 ++allocation; | 315 ++allocation; |
351 } | 316 } |
352 } | 317 } |
353 | 318 |
354 void WebCacheManager::ClearCacheForProcess(int render_process_id) { | 319 void WebCacheManager::ClearCacheForProcess(int render_process_id) { |
355 std::set<int> renderers; | 320 std::set<int> renderers; |
356 renderers.insert(render_process_id); | 321 renderers.insert(render_process_id); |
357 ClearRendererCache(renderers, INSTANTLY); | 322 ClearRendererCache(renderers, INSTANTLY); |
358 } | 323 } |
(...skipping 17 matching lines...) Expand all Loading... | |
376 } | 341 } |
377 | 342 |
378 void WebCacheManager::ReviseAllocationStrategy() { | 343 void WebCacheManager::ReviseAllocationStrategy() { |
379 DCHECK(stats_.size() <= | 344 DCHECK(stats_.size() <= |
380 active_renderers_.size() + inactive_renderers_.size()); | 345 active_renderers_.size() + inactive_renderers_.size()); |
381 | 346 |
382 // Check if renderers have gone inactive. | 347 // Check if renderers have gone inactive. |
383 FindInactiveRenderers(); | 348 FindInactiveRenderers(); |
384 | 349 |
385 // Gather statistics | 350 // Gather statistics |
386 uint64_t active_capacity, active_live_size, active_dead_size, | 351 uint64_t active_capacity, active_size, inactive_capacity, inactive_size; |
387 inactive_capacity, inactive_live_size, inactive_dead_size; | 352 GatherStats(active_renderers_, &active_capacity, &active_size); |
388 GatherStats(active_renderers_, &active_capacity, &active_live_size, | 353 GatherStats(inactive_renderers_, &inactive_capacity, &inactive_size); |
389 &active_dead_size); | |
390 GatherStats(inactive_renderers_, &inactive_capacity, &inactive_live_size, | |
391 &inactive_dead_size); | |
392 | 354 |
393 UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size()); | 355 UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size()); |
394 UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size()); | 356 UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size()); |
395 UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB", | 357 UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB", |
396 active_capacity / 1024 / 1024); | 358 active_capacity / 1024 / 1024); |
397 UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB", | 359 UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB", active_size / 1024 / 1024); |
bashi
2016/11/25 07:04:35
nit: Shouldn't we deprecate this metric in histograms.xml?
hiroshige
2016/11/25 09:03:19
There are no corresponding entries for these UMAs
| |
398 active_dead_size / 1024 / 1024); | |
399 UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB", | |
400 active_live_size / 1024 / 1024); | |
401 UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB", | 360 UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB", |
402 inactive_capacity / 1024 / 1024); | 361 inactive_capacity / 1024 / 1024); |
403 UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB", | |
bashi
2016/11/25 07:04:35
Same as Cache.ActiveDeadSizeMB
hiroshige
2016/11/25 09:03:19
ditto.
| |
404 inactive_dead_size / 1024 / 1024); | |
405 UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB", | 362 UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB", |
406 inactive_live_size / 1024 / 1024); | 363 inactive_size / 1024 / 1024); |
407 | 364 |
408 // Compute an allocation strategy. | 365 // Compute an allocation strategy. |
409 // | 366 // |
410 // We attempt various tactics in order of preference. Our first preference | 367 // We attempt various tactics in order of preference. Our first preference |
411 // is not to evict any objects. If we don't have enough resources, we'll | 368 // is not to evict any objects. If we don't have enough resources, we'll |
412 // first try to evict dead data only. If that fails, we'll just divide the | 369 // first try to evict dead data only. If that fails, we'll just divide the |
413 // resources we have evenly. | 370 // resources we have evenly. |
414 // | 371 // |
415 // We always try to give the active renderers some head room in their | 372 // We always try to give the active renderers some head room in their |
416 // allocations so they can take memory away from an inactive renderer with | 373 // allocations so they can take memory away from an inactive renderer with |
417 // a large cache allocation. | 374 // a large cache allocation. |
418 // | 375 // |
419 // Notice the early exit will prevent attempting less desirable tactics once | 376 // Notice the early exit will prevent attempting less desirable tactics once |
420 // we've found a workable strategy. | 377 // we've found a workable strategy. |
421 AllocationStrategy strategy; | 378 AllocationStrategy strategy; |
422 if ( // Ideally, we'd like to give the active renderers some headroom and | 379 if ( // Ideally, we'd like to give the active renderers some headroom and |
423 // keep all our current objects. | 380 // keep all our current objects. |
424 AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active_live_size, | 381 AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active_size, KEEP_CURRENT, |
425 active_dead_size, KEEP_CURRENT, inactive_live_size, | 382 inactive_size, &strategy) || |
426 inactive_dead_size, &strategy) || | 383 // Next, we try to keep the current objects in the active renders (with |
427 // If we can't have that, then we first try to evict the dead objects in | 384 // some room for new objects) and give whatever is left to the inactive |
428 // the caches of inactive renderers. | |
429 AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active_live_size, | |
430 active_dead_size, KEEP_LIVE, inactive_live_size, | |
431 inactive_dead_size, &strategy) || | |
432 // Next, we try to keep the live objects in the active renders (with some | |
433 // room for new objects) and give whatever is left to the inactive | |
434 // renderers. | 385 // renderers. |
435 AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active_live_size, | 386 AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active_size, DIVIDE_EVENLY, |
436 active_dead_size, DIVIDE_EVENLY, inactive_live_size, | 387 inactive_size, &strategy) || |
437 inactive_dead_size, &strategy) || | |
438 // If we've gotten this far, then we are very tight on memory. Let's try | 388 // If we've gotten this far, then we are very tight on memory. Let's try |
439 // to at least keep around the live objects for the active renderers. | 389 // to at least keep around the live objects for the active renderers. |
440 AttemptTactic(KEEP_LIVE, active_live_size, active_dead_size, | 390 AttemptTactic(KEEP_CURRENT, active_size, DIVIDE_EVENLY, inactive_size, |
441 DIVIDE_EVENLY, inactive_live_size, inactive_dead_size, | |
442 &strategy) || | 391 &strategy) || |
443 // We're basically out of memory. The best we can do is just divide up | 392 // We're basically out of memory. The best we can do is just divide up |
444 // what we have and soldier on. | 393 // what we have and soldier on. |
445 AttemptTactic(DIVIDE_EVENLY, active_live_size, active_dead_size, | 394 AttemptTactic(DIVIDE_EVENLY, active_size, DIVIDE_EVENLY, inactive_size, |
446 DIVIDE_EVENLY, inactive_live_size, inactive_dead_size, | |
447 &strategy)) { | 395 &strategy)) { |
448 // Having found a workable strategy, we enact it. | 396 // Having found a workable strategy, we enact it. |
449 EnactStrategy(strategy); | 397 EnactStrategy(strategy); |
450 } else { | 398 } else { |
451 // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed. | 399 // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed. |
452 NOTREACHED() << "Unable to find a cache allocation"; | 400 NOTREACHED() << "Unable to find a cache allocation"; |
453 } | 401 } |
454 } | 402 } |
455 | 403 |
456 void WebCacheManager::ReviseAllocationStrategyLater() { | 404 void WebCacheManager::ReviseAllocationStrategyLater() { |
(...skipping 16 matching lines...) Expand all Loading... | |
473 inactive_renderers_.insert(*iter); | 421 inactive_renderers_.insert(*iter); |
474 active_renderers_.erase(*iter); | 422 active_renderers_.erase(*iter); |
475 iter = active_renderers_.begin(); | 423 iter = active_renderers_.begin(); |
476 continue; | 424 continue; |
477 } | 425 } |
478 ++iter; | 426 ++iter; |
479 } | 427 } |
480 } | 428 } |
481 | 429 |
482 } // namespace web_cache | 430 } // namespace web_cache |
OLD | NEW |