Chromium Code Reviews

Diff: src/objects.cc

Issue 3329019: Dynamically determine optimal instance size.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 3 months ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 1458 matching lines...)
1469 // Make a new map for the object.
1470 Object* new_map_unchecked = map()->CopyDropDescriptors();
1471 if (new_map_unchecked->IsFailure()) return new_map_unchecked;
1472 Map* new_map = Map::cast(new_map_unchecked);
1473 new_map->set_instance_descriptors(new_descriptors);
1474
1475 // Make new properties array if necessary.
1476 FixedArray* new_properties = 0; // Will always be NULL or a valid pointer.
1477 int new_unused_property_fields = map()->unused_property_fields() - 1;
1478 if (map()->unused_property_fields() == 0) {
1479 new_unused_property_fields = kFieldsAdded - 1;
1480 Object* new_properties_unchecked =
1481 properties()->CopySize(properties()->length() + kFieldsAdded);
1482 if (new_properties_unchecked->IsFailure()) return new_properties_unchecked;
1483 new_properties = FixedArray::cast(new_properties_unchecked);
1484 }
1485
1486 // Update pointers to commit changes.
1487 // Object points to the new map.
1488 new_map->set_unused_property_fields(new_unused_property_fields);
1489 set_map(new_map);
1490 if (new_properties) {
(...skipping 1773 matching lines...)
3264
3265
3266 void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
3267 // No GC is supposed to happen between a call to IndexInCodeCache and
3268 // RemoveFromCodeCache, so the code cache must be there.
3269 ASSERT(!code_cache()->IsFixedArray());
3270 CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
3271 }
3272
3273
3274 void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
3275 Map* current = this;
3276 while (current != Heap::meta_map()) {
3277 DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
3278 *RawField(current, Map::kInstanceDescriptorsOffset));
3279 if (d == Heap::empty_descriptor_array()) {
3280 Map* prev = current->map();
3281 current->set_map(Heap::meta_map());
3282 callback(current, data);
3283 current = prev;
3284 continue;
3285 }
3286
3287 FixedArray* contents = reinterpret_cast<FixedArray*>(
3288 d->get(DescriptorArray::kContentArrayIndex));
3289 Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset);
3290 Object* map_or_index = *map_or_index_field;
3291 bool map_done = true;
3292 for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
3293 i < contents->length();
3294 i += 2) {
3295 PropertyDetails details(Smi::cast(contents->get(i + 1)));
3296 if (details.IsTransition()) {
3297 Map* next = reinterpret_cast<Map*>(contents->get(i));
3298 next->set_map(current);
3299 *map_or_index_field = Smi::FromInt(i + 2);
3300 current = next;
3301 map_done = false;
3302 break;
3303 }
3304 }
3305 if (!map_done) continue;
3306 *map_or_index_field = Heap::fixed_array_map();
3307 Map* prev = current->map();
3308 current->set_map(Heap::meta_map());
3309 callback(current, data);
3310 current = prev;
3311 }
3312 }
3313
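TraverseTransitionTree visits every map in the transition tree using O(1) extra space: instead of recursing or keeping an explicit stack, it temporarily reverses the parent link into each child's map field (restoring meta_map on the way back up) and parks the resumption index as a Smi in the map slot of the descriptor contents array, restoring fixed_array_map when that node is exhausted. This is a Schorr-Waite-style pointer reversal, sound only while no GC or other reader can observe the clobbered map fields. Below is a minimal standalone sketch of the same link-reversal idea on a plain tree; every name in it is hypothetical, not V8 API.

#include <cstdio>
#include <vector>

// Hypothetical node type. 'back' is borrowed during the walk to remember the
// parent, just as Map's map field is borrowed above; 'next_child' plays the
// role of the Smi index stored in the contents array's map slot.
struct Node {
  int value;
  std::vector<Node*> children;
  Node* back = nullptr;
  size_t next_child = 0;
};

// Post-order traversal with no stack and no recursion. The callback takes a
// void* so callers can thread state through, mirroring TraverseCallback.
void TraverseNoStack(Node* root, void (*visit)(Node*, void*), void* data) {
  Node* current = root;
  current->back = nullptr;  // nullptr plays the role of Heap::meta_map()
  while (current != nullptr) {
    if (current->next_child < current->children.size()) {
      // Descend, reversing the link so we can climb back up later.
      Node* next = current->children[current->next_child++];
      next->back = current;
      current = next;
    } else {
      // All children done: reset the cursor, visit, and climb back up.
      current->next_child = 0;
      Node* prev = current->back;
      current->back = nullptr;
      visit(current, data);
      current = prev;
    }
  }
}

// Usage sketch:
//   TraverseNoStack(root,
//                   [](Node* n, void*) { std::printf("%d\n", n->value); },
//                   nullptr);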
3314
3315 Object* CodeCache::Update(String* name, Code* code) {
3316 ASSERT(code->ic_state() == MONOMORPHIC);
3317
3318 // The number of monomorphic stubs for normal load/store/call ICs can grow
3319 // large, so they need to go into a hash table. They are used to load
3320 // global properties from cells.
3321 if (code->type() == NORMAL) {
3322 // Make sure that a hash table is allocated for the normal load code cache.
3323 if (normal_type_cache()->IsUndefined()) {
3324 Object* result =
(...skipping 2086 matching lines...)
5411 accumulator->Put(script_source,
5412 start_position(),
5413 start_position() + max_length);
5414 accumulator->Add("...\n");
5415 } else {
5416 accumulator->Put(script_source, start_position(), end_position());
5417 }
5418 }
5419
5420
5421 void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
5422 ASSERT(!IsInobjectSlackTrackingInProgress());
5423
5424 // Only initiate tracking the first time.
5425 if (live_objects_may_exist()) return;
5426 set_live_objects_may_exist(true);
5427
5428 // No tracking during the snapshot construction phase.
5429 if (Serializer::enabled()) return;
5430
5431 if (map->unused_property_fields() == 0) return;
5432
5433 // A nonzero counter is a leftover from a previous attempt that was
5434 // interrupted by GC; keep it.
5435 if (construction_count() == 0) {
5436 set_construction_count(kGenerousAllocationCount);
5437 }
5438 set_initial_map(map);
5439 ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
5440 construct_stub());
5441 set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
5442 }
5443
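The stub swap above is the heart of the countdown: while tracking is in progress, each of the first kGenerousAllocationCount constructions goes through JSConstructStubCountdown instead of the generic stub, allocating instances with the untrimmed, generous in-object space and decrementing construction_count until slack tracking completes. A hedged sketch of that lifecycle in plain C++ follows; the real countdown lives in the generated stub, and AllocateWithGenerousSlack is a made-up helper.

// Illustrative only: assumes the generated countdown stub behaves like this.
Object* ConstructWithCountdown(SharedFunctionInfo* shared) {
  Map* map = Map::cast(shared->initial_map());
  Object* instance = AllocateWithGenerousSlack(map);  // hypothetical helper
  int count = shared->construction_count() - 1;
  shared->set_construction_count(count);
  if (count == 0) {
    // Enough instances sampled: shrink the maps and switch every later
    // construction back to the generic stub.
    shared->CompleteInobjectSlackTracking();
  }
  return instance;
}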
5444
5445 // Called from GC, hence reinterpret_cast and unchecked accessors.
5446 void SharedFunctionInfo::DetachInitialMap() {
5447 Map* map = reinterpret_cast<Map*>(initial_map());
5448
5449 // Make the map remember to restore the link if it survives the GC.
5450 map->set_bit_field2(
5451 map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
5452
5453 // Undo the state changes made by StartInobjectSlackTracking (except the
5454 // construction_count). This way, if the initial map does not survive the GC,
5455 // then StartInobjectSlackTracking will be called again the next time the
5456 // constructor is called. The countdown will continue and (possibly after
5457 // several more GCs) CompleteInobjectSlackTracking will eventually be called.
5458 set_initial_map(Heap::raw_unchecked_undefined_value());
5459 ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
5460 *RawField(this, kConstructStubOffset));
5461 set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
5462 // It is safe to clear the flag: it will be set again if the map is live.
5463 set_live_objects_may_exist(false);
5464 }
5465
5466
5467 // Called from GC, hence reinterpret_cast and unchecked accessors.
5468 void SharedFunctionInfo::AttachInitialMap(Map* map) {
5469 map->set_bit_field2(
5470 map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
5471
5472 // Resume inobject slack tracking.
5473 set_initial_map(map);
5474 ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
5475 *RawField(this, kConstructStubOffset));
5476 set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
5477 // The map survived the GC, so there may be objects referencing it.
5478 set_live_objects_may_exist(true);
5479 }
5480
5481
5482 static void GetMinInobjectSlack(Map* map, void* data) {
5483 int slack = map->unused_property_fields();
5484 if (*reinterpret_cast<int*>(data) > slack) {
5485 *reinterpret_cast<int*>(data) = slack;
5486 }
5487 }
5488
5489
5490 static void ShrinkInstanceSize(Map* map, void* data) {
5491 int slack = *reinterpret_cast<int*>(data);
5492 map->set_inobject_properties(map->inobject_properties() - slack);
5493 map->set_unused_property_fields(map->unused_property_fields() - slack);
5494 map->set_instance_size(map->instance_size() - slack * kPointerSize);
5495
5496 // Visitor id might depend on the instance size, so recalculate it.
5497 map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
5498 }
5499
5500
5501 void SharedFunctionInfo::CompleteInobjectSlackTracking() {
5502 ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
5503 Map* map = Map::cast(initial_map());
5504
5505 set_initial_map(Heap::undefined_value());
5506 ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
5507 construct_stub());
5508 set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
5509
5510 int slack = map->unused_property_fields();
5511 map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
5512 if (slack != 0) {
5513 // Resize the initial map and all maps in its transition tree.
5514 map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
5515 // Give the correct expected_nof_properties to initial maps created later.
5516 set_expected_nof_properties(expected_nof_properties() - slack);
Vitaly Repeshko 2010/09/22 14:40:25 Assert expected_nof_properties() - slack >= 0?
Vladislav Kaznacheev 2010/09/23 08:38:16 Done.
5517 }
5518 }
5519
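Note the two-pass shape of the shrink: the first TraverseTransitionTree pass folds the minimum unused_property_fields over the whole transition tree (seeded with the initial map's own value), so no map is shrunk below what any map in the tree needs, and only then does the second pass remove that common slack from every map, keeping instance sizes consistent across transitions. The same pattern, expressed against the hypothetical TraverseNoStack sketch from earlier (again, illustrative names only):

// Pass 1 callback: fold the minimum slack over the tree, treating each
// node's value as its unused field count.
static void MinSlack(Node* n, void* data) {
  int* min_slack = static_cast<int*>(data);
  if (n->value < *min_slack) *min_slack = n->value;
}

// Pass 2 callback: every node gives up the same, now-safe amount.
static void Shrink(Node* n, void* data) {
  n->value -= *static_cast<int*>(data);
}

void ShrinkTree(Node* root, int initial_slack) {
  int slack = initial_slack;
  TraverseNoStack(root, &MinSlack, &slack);                // pass 1: find slack
  if (slack != 0) TraverseNoStack(root, &Shrink, &slack);  // pass 2: apply it
}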
5520
5521 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
5522 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
5523 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
5524 Object* old_target = target;
5525 VisitPointer(&target);
5526 CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
5527 }
5528
5529
5530 void ObjectVisitor::VisitCodeEntry(Address entry_address) {
(...skipping 3510 matching lines...)
9041 if (break_point_objects()->IsUndefined()) return 0;
9042 // Single break point.
9043 if (!break_point_objects()->IsFixedArray()) return 1;
9044 // Multiple break points.
9045 return FixedArray::cast(break_point_objects())->length();
9046 }
9047 #endif
9048
9049
9050 } } // namespace v8::internal