OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 3606 matching lines...)
3617 int instance_size_delta = map_of_this->instance_size() - new_instance_size; | 3617 int instance_size_delta = map_of_this->instance_size() - new_instance_size; |
3618 ASSERT(instance_size_delta >= 0); | 3618 ASSERT(instance_size_delta >= 0); |
3619 current_heap->CreateFillerObjectAt(this->address() + new_instance_size, | 3619 current_heap->CreateFillerObjectAt(this->address() + new_instance_size, |
3620 instance_size_delta); | 3620 instance_size_delta); |
3621 if (Marking::IsBlack(Marking::MarkBitFrom(this))) { | 3621 if (Marking::IsBlack(Marking::MarkBitFrom(this))) { |
3622 MemoryChunk::IncrementLiveBytesFromMutator(this->address(), | 3622 MemoryChunk::IncrementLiveBytesFromMutator(this->address(), |
3623 -instance_size_delta); | 3623 -instance_size_delta); |
3624 } | 3624 } |
3625 | 3625 |
3626 set_map(new_map); | 3626 set_map(new_map); |
| 3627 map_of_this->NotifyObjectLayoutChange(); |
3627 | 3628 |
3628 set_properties(dictionary); | 3629 set_properties(dictionary); |
3629 | 3630 |
3630 current_heap->isolate()->counters()->props_to_dictionary()->Increment(); | 3631 current_heap->isolate()->counters()->props_to_dictionary()->Increment(); |
3631 | 3632 |
3632 #ifdef DEBUG | 3633 #ifdef DEBUG |
3633 if (FLAG_trace_normalization) { | 3634 if (FLAG_trace_normalization) { |
3634 PrintF("Object properties have been normalized:\n"); | 3635 PrintF("Object properties have been normalized:\n"); |
3635 Print(); | 3636 Print(); |
3636 } | 3637 } |
(...skipping 1648 matching lines...)
5285 MaybeObject* maybe_result = RawCopy(instance_size()); | 5286 MaybeObject* maybe_result = RawCopy(instance_size()); |
5286 if (!maybe_result->To(&result)) return maybe_result; | 5287 if (!maybe_result->To(&result)) return maybe_result; |
5287 | 5288 |
5288 // Please note instance_type and instance_size are set when allocated. | 5289 // Please note instance_type and instance_size are set when allocated. |
5289 result->set_inobject_properties(inobject_properties()); | 5290 result->set_inobject_properties(inobject_properties()); |
5290 result->set_unused_property_fields(unused_property_fields()); | 5291 result->set_unused_property_fields(unused_property_fields()); |
5291 | 5292 |
5292 result->set_pre_allocated_property_fields(pre_allocated_property_fields()); | 5293 result->set_pre_allocated_property_fields(pre_allocated_property_fields()); |
5293 result->set_is_shared(false); | 5294 result->set_is_shared(false); |
5294 result->ClearCodeCache(GetHeap()); | 5295 result->ClearCodeCache(GetHeap()); |
| 5296 NotifyObjectLayoutChange(); |
5295 return result; | 5297 return result; |
5296 } | 5298 } |
5297 | 5299 |
5298 | 5300 |
5299 MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors, | 5301 MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors, |
5300 Descriptor* descriptor) { | 5302 Descriptor* descriptor) { |
5301 // Sanity check. This path is only to be taken if the map owns its descriptor | 5303 // Sanity check. This path is only to be taken if the map owns its descriptor |
5302 // array, implying that its NumberOfOwnDescriptors equals the number of | 5304 // array, implying that its NumberOfOwnDescriptors equals the number of |
5303 // descriptors in the descriptor array. | 5305 // descriptors in the descriptor array. |
5304 ASSERT(NumberOfOwnDescriptors() == | 5306 ASSERT(NumberOfOwnDescriptors() == |
(...skipping 4189 matching lines...)
9494 | 9496 |
9495 | 9497 |
9496 void Map::ZapPrototypeTransitions() { | 9498 void Map::ZapPrototypeTransitions() { |
9497 FixedArray* proto_transitions = GetPrototypeTransitions(); | 9499 FixedArray* proto_transitions = GetPrototypeTransitions(); |
9498 MemsetPointer(proto_transitions->data_start(), | 9500 MemsetPointer(proto_transitions->data_start(), |
9499 GetHeap()->the_hole_value(), | 9501 GetHeap()->the_hole_value(), |
9500 proto_transitions->length()); | 9502 proto_transitions->length()); |
9501 } | 9503 } |
9502 | 9504 |
9503 | 9505 |
9504 Handle<DependentCodes> DependentCodes::Append(Handle<DependentCodes> codes, | 9506 DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* codes) { |
| 9507 Recompute(codes); |
| 9508 } |
| 9509 |
| 9510 |
| 9511 void DependentCode::GroupStartIndexes::Recompute(DependentCode* codes) { |
| 9512 start_indexes_[0] = 0; |
| 9513 for (int g = 1; g <= kGroupCount; g++) { |
| 9514 int count = codes->number_of_codes(static_cast<DependencyGroup>(g - 1)); |
| 9515 start_indexes_[g] = start_indexes_[g - 1] + count; |
| 9516 } |
| 9517 } |
| 9518 |
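Recompute above derives the group boundaries as a running prefix sum over the per-group counts, so group g occupies the half-open slice [start_indexes_[g], start_indexes_[g + 1]) of a single flat array, and start_indexes_[kGroupCount] is the total number of codes. A minimal standalone sketch of the same bookkeeping (plain C++ with hypothetical names, independent of V8's FixedArray):

    // Sketch only: hypothetical standalone form of the prefix-sum layout.
    #include <cassert>

    const int kGroupCount = 3;

    struct GroupStarts {
      int starts[kGroupCount + 1];
      explicit GroupStarts(const int (&counts)[kGroupCount]) {
        starts[0] = 0;
        for (int g = 1; g <= kGroupCount; g++) {
          // Each boundary is the previous one plus that group's count.
          starts[g] = starts[g - 1] + counts[g - 1];
        }
      }
    };

    int main() {
      int counts[kGroupCount] = {2, 0, 3};
      GroupStarts s(counts);
      assert(s.starts[1] == 2);            // group 0 fills slots [0, 2)
      assert(s.starts[2] == 2);            // group 1 is empty: [2, 2)
      assert(s.starts[kGroupCount] == 5);  // total number of codes
      return 0;
    }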
| 9519 |
| 9520 Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> codes, |
| 9521 DependencyGroup group, |
9505 Handle<Code> value) { | 9522 Handle<Code> value) { |
9506 int append_index = codes->number_of_codes(); | 9523 GroupStartIndexes starts(*codes); |
9507 if (append_index > 0 && codes->code_at(append_index - 1) == *value) { | 9524 int start = starts.at(group); |
| 9525 int end = starts.at(group + 1); |
| 9526 int number_of_codes = starts.at(kGroupCount); |
| 9527 if (start < end && codes->code_at(end - 1) == *value) { |
9508 // Do not append the code if it is already in the array. | 9528 // Do not append the code if it is already in the array. |
9509 // It is sufficient to check only the last element because | 9529 // It is sufficient to check only the last element because |
9510 // we process embedded maps of an optimized code in one batch. | 9530 // we process embedded maps of an optimized code in one batch. |
9511 return codes; | 9531 return codes; |
9512 } | 9532 } |
9513 if (codes->length() < kCodesIndex + append_index + 1) { | 9533 if (codes->length() < kCodesStartIndex + number_of_codes + 1) { |
9514 Factory* factory = codes->GetIsolate()->factory(); | 9534 Factory* factory = codes->GetIsolate()->factory(); |
9515 int capacity = kCodesIndex + append_index + 1; | 9535 int capacity = kCodesStartIndex + number_of_codes + 1; |
9516 if (capacity > 5) capacity = capacity * 5 / 4; | 9536 if (capacity > 5) capacity = capacity * 5 / 4; |
9517 Handle<DependentCodes> new_codes = Handle<DependentCodes>::cast( | 9537 Handle<DependentCode> new_codes = Handle<DependentCode>::cast( |
9518 factory->CopySizeFixedArray(codes, capacity)); | 9538 factory->CopySizeFixedArray(codes, capacity)); |
9519 // The number of codes can change after GC. | 9539 // The number of codes can change after GC. |
9520 append_index = codes->number_of_codes(); | 9540 starts.Recompute(*codes); |
9521 for (int i = 0; i < append_index; i++) { | 9541 start = starts.at(group); |
| 9542 end = starts.at(group + 1); |
| 9543 number_of_codes = starts.at(kGroupCount); |
| 9544 for (int i = 0; i < number_of_codes; i++) { |
9522 codes->clear_code_at(i); | 9545 codes->clear_code_at(i); |
9523 } | 9546 } |
| 9547 // If the old fixed array was empty, we need to reset counters of the |
| 9548 // new array. |
| 9549 if (number_of_codes == 0) { |
| 9550 for (int g = 0; g < kGroupCount; g++) { |
| 9551 new_codes->set_number_of_codes(static_cast<DependencyGroup>(g), 0); |
| 9552 } |
| 9553 } |
9524 codes = new_codes; | 9554 codes = new_codes; |
9525 } | 9555 } |
9526 codes->set_code_at(append_index, *value); | 9556 codes->ExtendGroup(group); |
9527 codes->set_number_of_codes(append_index + 1); | 9557 codes->set_code_at(end, *value); |
| 9558 codes->set_number_of_codes(group, end + 1 - start); |
9528 return codes; | 9559 return codes; |
9529 } | 9560 } |
9530 | 9561 |
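The ExtendGroup call above is defined elsewhere in the patch; presumably it shifts the elements of all later groups one slot to the right so that index end becomes free for the new code, which is why the set_code_at(end, *value) that follows is safe. A freestanding sketch of that insertion under that assumption (hypothetical names, a plain int array standing in for the FixedArray):

    // Sketch only: insert 'value' at the end of its group by sliding every
    // element of the later groups right by one. 'total' is the code count
    // before insertion; the array must have room for total + 1 elements.
    void InsertIntoGroup(int* slots, int end, int total, int value) {
      for (int i = total; i > end; i--) {
        slots[i] = slots[i - 1];  // make room at index 'end'
      }
      slots[end] = value;
    }

Note also the growth policy above: once the required capacity exceeds 5 slots it is padded by a further 25% (capacity * 5 / 4), so a long run of inserts reallocates the backing array only a logarithmic number of times.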
9531 | 9562 |
9532 bool DependentCodes::Contains(Code* code) { | 9563 bool DependentCode::Contains(DependencyGroup group, Code* code) { |
9533 int limit = number_of_codes(); | 9564 GroupStartIndexes starts(this); |
9534 for (int i = 0; i < limit; i++) { | 9565 int number_of_codes = starts.at(kGroupCount); |
| 9566 for (int i = 0; i < number_of_codes; i++) { |
9535 if (code_at(i) == code) return true; | 9567 if (code_at(i) == code) return true; |
9536 } | 9568 } |
9537 return false; | 9569 return false; |
9538 } | 9570 } |
9539 | 9571 |
9540 | 9572 |
| 9573 class DeoptimizeDependentCodeFilter : public OptimizedFunctionFilter { |
| 9574 public: |
| 9575 virtual bool TakeFunction(JSFunction* function) { |
| 9576 return function->code()->marked_for_deoptimization(); |
| 9577 } |
| 9578 }; |
| 9579 |
| 9580 |
| 9581 void DependentCode::DeoptimizeDependentCodeGroup( |
| 9582 DependentCode::DependencyGroup group) { |
| 9583 AssertNoAllocation no_allocation_scope; |
| 9584 DependentCode::GroupStartIndexes starts(this); |
| 9585 int start = starts.at(group); |
| 9586 int end = starts.at(group + 1); |
| 9587 int number_of_codes = starts.at(DependentCode::kGroupCount); |
| 9588 if (start == end) return; |
| 9589 for (int i = start; i < end; i++) { |
| 9590 Code* code = code_at(i); |
| 9591 code->set_marked_for_deoptimization(true); |
| 9592 } |
| 9593 for (int src = end, dst = start; src < number_of_codes; src++, dst++) { |
| 9594 set_code_at(dst, code_at(src)); |
| 9595 } |
| 9596 set_number_of_codes(group, 0); |
| 9597 DeoptimizeDependentCodeFilter filter; |
| 9598 Deoptimizer::DeoptimizeAllFunctionsWith(&filter); |
| 9599 } |
| 9600 |
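DeoptimizeDependentCodeGroup above empties exactly one group in place: every code in [start, end) is marked for deoptimization, then the tail [end, number_of_codes) slides left by the size of the removed group and that group's count is zeroed; the start indexes of later groups shrink automatically because they are recomputed from the counts. A freestanding sketch of the shift (hypothetical names):

    // Sketch only: close the gap left by removing the slice [start, end).
    // Returns the new total; slots past it are stale.
    int RemoveGroup(int* slots, int start, int end, int total) {
      for (int src = end, dst = start; src < total; src++, dst++) {
        slots[dst] = slots[src];
      }
      return total - (end - start);
    }

The loop leaves the vacated tail slots untouched; since the group counts now exclude them, later scans derived from GroupStartIndexes stop before reaching them.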
| 9601 |
9541 MaybeObject* JSReceiver::SetPrototype(Object* value, | 9602 MaybeObject* JSReceiver::SetPrototype(Object* value, |
9542 bool skip_hidden_prototypes) { | 9603 bool skip_hidden_prototypes) { |
9543 #ifdef DEBUG | 9604 #ifdef DEBUG |
9544 int size = Size(); | 9605 int size = Size(); |
9545 #endif | 9606 #endif |
9546 | 9607 |
9547 Heap* heap = GetHeap(); | 9608 Heap* heap = GetHeap(); |
9548 // Silently ignore the change if value is not a JSObject or null. | 9609 // Silently ignore the change if value is not a JSObject or null. |
9549 // SpiderMonkey behaves this way. | 9610 // SpiderMonkey behaves this way. |
9550 if (!value->IsJSReceiver() && !value->IsNull()) return value; | 9611 if (!value->IsJSReceiver() && !value->IsNull()) return value; |
(...skipping 4341 matching lines...)
13892 set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER); | 13953 set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER); |
13893 set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER); | 13954 set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER); |
13894 set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER); | 13955 set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER); |
13895 set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER); | 13956 set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER); |
13896 set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER); | 13957 set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER); |
13897 set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER); | 13958 set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER); |
13898 set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER); | 13959 set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER); |
13899 } | 13960 } |
13900 | 13961 |
13901 } } // namespace v8::internal | 13962 } } // namespace v8::internal |