Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "v8.h" | 5 #include "v8.h" |
| 6 | 6 |
| 7 #include "accessors.h" | 7 #include "accessors.h" |
| 8 #include "api.h" | 8 #include "api.h" |
| 9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
| 10 #include "codegen.h" | 10 #include "codegen.h" |
| (...skipping 6220 matching lines...) | |
| 6231 switch (collector_) { | 6231 switch (collector_) { |
| 6232 case SCAVENGER: | 6232 case SCAVENGER: |
| 6233 return "Scavenge"; | 6233 return "Scavenge"; |
| 6234 case MARK_COMPACTOR: | 6234 case MARK_COMPACTOR: |
| 6235 return "Mark-sweep"; | 6235 return "Mark-sweep"; |
| 6236 } | 6236 } |
| 6237 return "Unknown GC"; | 6237 return "Unknown GC"; |
| 6238 } | 6238 } |
| 6239 | 6239 |
| 6240 | 6240 |
| 6241 int KeyedLookupCache::Hash(Map* map, Name* name) { | 6241 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) { |
| 6242 // Uses only lower 32 bits if pointers are larger. | 6242 // Uses only lower 32 bits if pointers are larger. |
| 6243 uintptr_t addr_hash = | 6243 uintptr_t addr_hash = |
| 6244 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift; | 6244 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift; |
| 6245 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); | 6245 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); |
| 6246 } | 6246 } |
| 6247 | 6247 |
| 6248 | 6248 |
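
The only change to Hash is that the map now arrives as a `Handle<Map>` and is dereferenced with `*map` before its address feeds the hash. Below is a minimal sketch of that dereference pattern; the `Handle` wrapper and the helper `MapAddressBits` are simplified stand-ins for illustration, not V8's real definitions.

```cpp
#include <cstdint>

// Simplified stand-in for V8's Handle<T>; the real class indirects through a
// handle scope, but the dereference-to-raw-pointer behaviour is the same idea.
struct Map {};  // opaque placeholder for v8::internal::Map

template <typename T>
class Handle {
 public:
  explicit Handle(T* location) : location_(location) {}
  T* operator*() const { return location_; }   // *handle yields the raw T*
  T* operator->() const { return location_; }  // handle->Member() also works
 private:
  T* location_;
};

// The cache hashes on the object's address, so the handle is dereferenced
// before the reinterpret_cast, just as the new Hash() does with *map.
uint32_t MapAddressBits(Handle<Map> map) {
  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map));
}
```
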
| 6249 int KeyedLookupCache::Lookup(Map* map, Name* name) { | 6249 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) { |
| 6250 int index = (Hash(map, name) & kHashMask); | 6250 int index = (Hash(map, name) & kHashMask); |
| 6251 for (int i = 0; i < kEntriesPerBucket; i++) { | 6251 for (int i = 0; i < kEntriesPerBucket; i++) { |
| 6252 Key& key = keys_[index + i]; | 6252 Key& key = keys_[index + i]; |
| 6253 if ((key.map == map) && key.name->Equals(name)) { | 6253 if ((key.map == *map) && key.name->Equals(*name)) { |
| 6254 return field_offsets_[index + i]; | 6254 return field_offsets_[index + i]; |
| 6255 } | 6255 } |
| 6256 } | 6256 } |
| 6257 return kNotFound; | 6257 return kNotFound; |
| 6258 } | 6258 } |
| 6259 | 6259 |
| 6260 | 6260 |
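
Lookup probes the kEntriesPerBucket consecutive slots that make up one bucket and returns the cached field offset on the first hit. The following standalone model shows that probe; the key layout, bucket width, and the use of pointer identity in place of `Name::Equals` are illustrative assumptions.

```cpp
// Standalone model of the bucket probe in Lookup(); constants and the
// ProbeKey type are hypothetical, not the real cache definition.
static const int kBucketWidth = 4;     // stands in for kEntriesPerBucket
static const int kNotFoundSketch = -1;

struct ProbeKey {
  const void* map;
  const void* name;
};

int LookupSketch(const ProbeKey* keys, const int* field_offsets,
                 int bucket_start, const void* map, const void* name) {
  // Scan the bucket's consecutive slots; the first matching entry wins.
  for (int i = 0; i < kBucketWidth; i++) {
    const ProbeKey& key = keys[bucket_start + i];
    if (key.map == map && key.name == name) {
      return field_offsets[bucket_start + i];
    }
  }
  return kNotFoundSketch;
}
```
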
| 6261 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) { | 6261 void KeyedLookupCache::Update(Handle<Map> map, |
| 6262 Handle<Name> name, | |
| 6263 int field_offset) { | |
| 6262 if (!name->IsUniqueName()) { | 6264 if (!name->IsUniqueName()) { |
| 6263 String* internalized_string; | 6265 String* internalized_string; |

Yang (2014/04/30 14:08:25): Could we add a DisallowHeapAllocation scope in eac
Igor Sheludko (2014/04/30 14:54:37): Done.

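For reference, DisallowHeapAllocation is V8's debug-time scope (assert-scope.h) that asserts the heap does not allocate, and therefore cannot trigger a GC, while the scope is live; that matters here because Update stores raw Map* and Name* pointers stripped out of handles. The block below is a self-contained stand-in for the pattern the reviewer is asking for, not the exact change that landed; all names in it are illustrative.

```cpp
#include <cassert>

namespace sketch {

// Counter-based model of a no-allocation assert scope. V8's real
// DisallowHeapAllocation is wired into the allocator instead.
int no_allocation_depth = 0;

struct DisallowHeapAllocation {
  DisallowHeapAllocation() { ++no_allocation_depth; }
  ~DisallowHeapAllocation() { --no_allocation_depth; }
};

// A heap allocation path would call this and trip the assert inside a scope.
inline void OnHeapAllocation() { assert(no_allocation_depth == 0); }

struct Map {};
struct Name {};
struct Key { Map* map; Name* name; };

// Mirrors the shape of KeyedLookupCache::Update: raw pointers extracted from
// handles stay valid only if no GC can run, which the scope documents and,
// in debug builds, checks.
void UpdateEntry(Key& key, Map* raw_map, Name* raw_name) {
  DisallowHeapAllocation no_gc;
  key.map = raw_map;
  key.name = raw_name;
}

}  // namespace sketch
```
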
| 6264 if (!map->GetIsolate()->heap()->InternalizeStringIfExists( | 6266 if (!map->GetIsolate()->heap()->InternalizeStringIfExists( |
| 6265 String::cast(name), &internalized_string)) { | 6267 String::cast(*name), &internalized_string)) { |
| 6266 return; | 6268 return; |
| 6267 } | 6269 } |
| 6268 name = internalized_string; | 6270 name = handle(internalized_string); |
| 6269 } | 6271 } |
| 6270 // This cache is cleared only between mark compact passes, so we expect the | 6272 // This cache is cleared only between mark compact passes, so we expect the |
| 6271 // cache to only contain old space names. | 6273 // cache to only contain old space names. |
| 6272 ASSERT(!map->GetIsolate()->heap()->InNewSpace(name)); | 6274 ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name)); |
| 6273 | 6275 |
| 6274 int index = (Hash(map, name) & kHashMask); | 6276 int index = (Hash(map, name) & kHashMask); |
| 6275 // After a GC there will be free slots, so we use them in order (this may | 6277 // After a GC there will be free slots, so we use them in order (this may |
| 6276 // help to get the most frequently used one in position 0). | 6278 // help to get the most frequently used one in position 0). |
| 6277 for (int i = 0; i< kEntriesPerBucket; i++) { | 6279 for (int i = 0; i< kEntriesPerBucket; i++) { |
| 6278 Key& key = keys_[index]; | 6280 Key& key = keys_[index]; |
| 6279 Object* free_entry_indicator = NULL; | 6281 Object* free_entry_indicator = NULL; |
| 6280 if (key.map == free_entry_indicator) { | 6282 if (key.map == free_entry_indicator) { |
| 6281 key.map = map; | 6283 key.map = *map; |
| 6282 key.name = name; | 6284 key.name = *name; |
| 6283 field_offsets_[index + i] = field_offset; | 6285 field_offsets_[index + i] = field_offset; |
| 6284 return; | 6286 return; |
| 6285 } | 6287 } |
| 6286 } | 6288 } |
| 6287 // No free entry found in this bucket, so we move them all down one and | 6289 // No free entry found in this bucket, so we move them all down one and |
| 6288 // put the new entry at position zero. | 6290 // put the new entry at position zero. |
| 6289 for (int i = kEntriesPerBucket - 1; i > 0; i--) { | 6291 for (int i = kEntriesPerBucket - 1; i > 0; i--) { |
| 6290 Key& key = keys_[index + i]; | 6292 Key& key = keys_[index + i]; |
| 6291 Key& key2 = keys_[index + i - 1]; | 6293 Key& key2 = keys_[index + i - 1]; |
| 6292 key = key2; | 6294 key = key2; |
| 6293 field_offsets_[index + i] = field_offsets_[index + i - 1]; | 6295 field_offsets_[index + i] = field_offsets_[index + i - 1]; |
| 6294 } | 6296 } |
| 6295 | 6297 |
| 6296 // Write the new first entry. | 6298 // Write the new first entry. |
| 6297 Key& key = keys_[index]; | 6299 Key& key = keys_[index]; |
| 6298 key.map = map; | 6300 key.map = *map; |
| 6299 key.name = name; | 6301 key.name = *name; |
| 6300 field_offsets_[index] = field_offset; | 6302 field_offsets_[index] = field_offset; |
| 6301 } | 6303 } |
| 6302 | 6304 |
| 6303 | 6305 |
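
The replacement policy that Update implements, per the comments in the diff, is: take the first free slot in the bucket (a NULL map marks a slot as free, which is what Clear() below resets), and when the bucket is full, shift every entry down one position and write the new entry at position 0. A standalone model of that policy follows; the bucket width and Entry layout are illustrative assumptions, not the real cache fields.

```cpp
#include <cstddef>

static const int kEntriesPerBucketSketch = 4;  // hypothetical bucket width

struct Entry {
  const void* map;   // NULL marks a free slot, as Clear() below arranges
  const void* name;
  int field_offset;
};

void UpdateBucketSketch(Entry* bucket, const void* map, const void* name,
                        int field_offset) {
  // Use free slots in order; this may help keep the most frequently used
  // entry in position 0, as the original comment notes.
  for (int i = 0; i < kEntriesPerBucketSketch; i++) {
    if (bucket[i].map == NULL) {
      bucket[i].map = map;
      bucket[i].name = name;
      bucket[i].field_offset = field_offset;
      return;
    }
  }
  // No free slot: shift all entries down one and put the new entry first.
  for (int i = kEntriesPerBucketSketch - 1; i > 0; i--) {
    bucket[i] = bucket[i - 1];
  }
  bucket[0].map = map;
  bucket[0].name = name;
  bucket[0].field_offset = field_offset;
}
```
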
| 6304 void KeyedLookupCache::Clear() { | 6306 void KeyedLookupCache::Clear() { |
| 6305 for (int index = 0; index < kLength; index++) keys_[index].map = NULL; | 6307 for (int index = 0; index < kLength; index++) keys_[index].map = NULL; |
| 6306 } | 6308 } |
| 6307 | 6309 |
| 6308 | 6310 |
| 6309 void DescriptorLookupCache::Clear() { | 6311 void DescriptorLookupCache::Clear() { |
| (...skipping 182 matching lines...) | |
| 6492 static_cast<int>(object_sizes_last_time_[index])); | 6494 static_cast<int>(object_sizes_last_time_[index])); |
| 6493 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6495 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
| 6494 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6496 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
| 6495 | 6497 |
| 6496 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6498 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
| 6497 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6499 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
| 6498 ClearObjectStats(); | 6500 ClearObjectStats(); |
| 6499 } | 6501 } |
| 6500 | 6502 |
| 6501 } } // namespace v8::internal | 6503 } } // namespace v8::internal |