OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/ast/scopeinfo.h" | 9 #include "src/ast/scopeinfo.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 1925 matching lines...) | |
1936 HeapObject* target; | 1936 HeapObject* target; |
1937 int size; | 1937 int size; |
1938 promotion_queue()->remove(&target, &size); | 1938 promotion_queue()->remove(&target, &size); |
1939 | 1939 |
1940 // A promoted object might already be partially visited | 1940 // A promoted object might already be partially visited |
1941 // during old space pointer iteration. Thus we search specifically | 1941 // during old space pointer iteration. Thus we search specifically |
1942 // for pointers to the from-semispace instead of looking for pointers | 1942 // for pointers to the from-semispace instead of looking for pointers |
1943 // to new space. | 1943 // to new space. |
1944 DCHECK(!target->IsMap()); | 1944 DCHECK(!target->IsMap()); |
1945 | 1945 |
1946 IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject); | 1946 IteratePromotedObject(target, size, &Scavenger::ScavengeObject); |
1947 } | 1947 } |
1948 } | 1948 } |
1949 | 1949 |
1950 // Take another spin if there are now unswept objects in new space | 1950 // Take another spin if there are now unswept objects in new space |
1951 // (there are currently no more unswept promoted objects). | 1951 // (there are currently no more unswept promoted objects). |
1952 } while (new_space_front != new_space_.top()); | 1952 } while (new_space_front != new_space_.top()); |
1953 | 1953 |
1954 return new_space_front; | 1954 return new_space_front; |
1955 } | 1955 } |
1956 | 1956 |
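For context on the drain loop above: the promotion queue holds objects that were just moved out of new space and still need their bodies re-scanned for pointers back into the from-semispace, which is what the renamed IteratePromotedObject call does. A minimal standalone sketch of that drain pattern, using hypothetical stand-in types rather than V8's PromotionQueue:

```cpp
#include <vector>

// Stand-ins for V8 internals; purely illustrative.
struct PromotedEntry {
  void* object;  // the promoted object
  int size;      // its size in bytes
};

// Pop entries until the queue is empty and re-scan each promoted object,
// mirroring the promotion_queue()->remove(...) / IteratePromotedObject() pair.
void DrainPromotionQueue(std::vector<PromotedEntry>* queue,
                         void (*scan)(void* object, int size)) {
  while (!queue->empty()) {
    PromotedEntry entry = queue->back();
    queue->pop_back();
    scan(entry.object, entry.size);  // re-scan for from-space pointers
  }
}
```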
(...skipping 562 matching lines...) | |
2519 DCHECK(capacity > 0); | 2519 DCHECK(capacity > 0); |
2520 HeapObject* raw_array = nullptr; | 2520 HeapObject* raw_array = nullptr; |
2521 { | 2521 { |
2522 AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED); | 2522 AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED); |
2523 if (!allocation.To(&raw_array)) return allocation; | 2523 if (!allocation.To(&raw_array)) return allocation; |
2524 } | 2524 } |
2525 raw_array->set_map_no_write_barrier(transition_array_map()); | 2525 raw_array->set_map_no_write_barrier(transition_array_map()); |
2526 TransitionArray* array = TransitionArray::cast(raw_array); | 2526 TransitionArray* array = TransitionArray::cast(raw_array); |
2527 array->set_length(capacity); | 2527 array->set_length(capacity); |
2528 MemsetPointer(array->data_start(), undefined_value(), capacity); | 2528 MemsetPointer(array->data_start(), undefined_value(), capacity); |
2529 // Transition arrays are tenured. When black allocation is on we have to | |
2530 // add the transition array to the list of encountered_transition_arrays. | |
2531 if (incremental_marking()->black_allocation()) { | |
2532 array->set_next_link(encountered_transition_arrays(), | |
2533 UPDATE_WEAK_WRITE_BARRIER); | |
2534 set_encountered_transition_arrays(array); | |
2535 } else { | |
2536 array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER); | |
2537 } | |
2529 return array; | 2538 return array; |
2530 } | 2539 } |
2531 | 2540 |
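The lines added to AllocateTransitionArray above follow a recurring pattern: an object allocated directly black will never be re-visited by the incremental marker, so it is threaded onto a list (encountered_transition_arrays) that the collector processes later. A simplified, self-contained sketch of that linking step, with stand-in types rather than V8's:

```cpp
// Illustrative only: a singly linked "encountered" list for black-allocated
// arrays. 'head' plays the role of encountered_transition_arrays().
struct FakeTransitionArray {
  FakeTransitionArray* next_link = nullptr;
};

void LinkIfBlackAllocated(FakeTransitionArray** head,
                          FakeTransitionArray* array,
                          bool black_allocation) {
  if (black_allocation) {
    array->next_link = *head;  // prepend, as the patch does via set_next_link
    *head = array;
  } else {
    array->next_link = nullptr;  // stands in for set_next_link(undefined_value(), ...)
  }
}
```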
2532 | 2541 |
2533 void Heap::CreateApiObjects() { | 2542 void Heap::CreateApiObjects() { |
2534 HandleScope scope(isolate()); | 2543 HandleScope scope(isolate()); |
2535 Factory* factory = isolate()->factory(); | 2544 Factory* factory = isolate()->factory(); |
2536 Handle<Map> new_neander_map = | 2545 Handle<Map> new_neander_map = |
2537 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); | 2546 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); |
2538 | 2547 |
(...skipping 743 matching lines...) | |
3282 return elements; | 3291 return elements; |
3283 } | 3292 } |
3284 | 3293 |
3285 | 3294 |
3286 AllocationResult Heap::AllocateCode(int object_size, bool immovable) { | 3295 AllocationResult Heap::AllocateCode(int object_size, bool immovable) { |
3287 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); | 3296 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); |
3288 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE); | 3297 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE); |
3289 | 3298 |
3290 HeapObject* result = nullptr; | 3299 HeapObject* result = nullptr; |
3291 if (!allocation.To(&result)) return allocation; | 3300 if (!allocation.To(&result)) return allocation; |
3292 | |
3293 if (immovable) { | 3301 if (immovable) { |
3294 Address address = result->address(); | 3302 Address address = result->address(); |
3295 // Code objects which should stay at a fixed address are allocated either | 3303 // Code objects which should stay at a fixed address are allocated either |
3296 // in the first page of code space (objects on the first page of each space | 3304 // in the first page of code space (objects on the first page of each space |
3297 // are never moved) or in large object space. | 3305 // are never moved) or in large object space. |
3298 if (!code_space_->FirstPage()->Contains(address) && | 3306 if (!code_space_->FirstPage()->Contains(address) && |
3299 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { | 3307 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { |
3300 // Discard the first code allocation, which was on a page where it could | 3308 // Discard the first code allocation, which was on a page where it could |
3301 // be moved. | 3309 // be moved. |
3302 CreateFillerObjectAt(result->address(), object_size); | 3310 CreateFillerObjectAt(result->address(), object_size); |
(...skipping 29 matching lines...) | |
3332 Address new_addr = result->address(); | 3340 Address new_addr = result->address(); |
3333 CopyBlock(new_addr, old_addr, obj_size); | 3341 CopyBlock(new_addr, old_addr, obj_size); |
3334 Code* new_code = Code::cast(result); | 3342 Code* new_code = Code::cast(result); |
3335 | 3343 |
3336 // Relocate the copy. | 3344 // Relocate the copy. |
3337 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); | 3345 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); |
3338 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || | 3346 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || |
3339 isolate_->code_range()->contains(code->address()) || | 3347 isolate_->code_range()->contains(code->address()) || |
3340 obj_size <= code_space()->AreaSize()); | 3348 obj_size <= code_space()->AreaSize()); |
3341 new_code->Relocate(new_addr - old_addr); | 3349 new_code->Relocate(new_addr - old_addr); |
3350 // We have to iterate over the object and process its pointers when black | |
3351 // allocation is on. | |
3352 incremental_marking()->IterateBlackObject(new_code); | |
3342 return new_code; | 3353 return new_code; |
3343 } | 3354 } |
3344 | 3355 |
3345 | 3356 |
3346 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) { | 3357 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) { |
3347 // Allocate ByteArray before the Code object, so that we do not risk | 3358 // Allocate ByteArray before the Code object, so that we do not risk |
3348 // leaving uninitialized Code object (and breaking the heap). | 3359 // leaving uninitialized Code object (and breaking the heap). |
3349 ByteArray* reloc_info_array = nullptr; | 3360 ByteArray* reloc_info_array = nullptr; |
3350 { | 3361 { |
3351 AllocationResult allocation = | 3362 AllocationResult allocation = |
(...skipping 27 matching lines...) | |
3379 CopyBytes(new_code->relocation_start(), reloc_info.start(), | 3390 CopyBytes(new_code->relocation_start(), reloc_info.start(), |
3380 static_cast<size_t>(reloc_info.length())); | 3391 static_cast<size_t>(reloc_info.length())); |
3381 | 3392 |
3382 // Relocate the copy. | 3393 // Relocate the copy. |
3383 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); | 3394 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); |
3384 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || | 3395 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || |
3385 isolate_->code_range()->contains(code->address()) || | 3396 isolate_->code_range()->contains(code->address()) || |
3386 new_obj_size <= code_space()->AreaSize()); | 3397 new_obj_size <= code_space()->AreaSize()); |
3387 | 3398 |
3388 new_code->Relocate(new_addr - old_addr); | 3399 new_code->Relocate(new_addr - old_addr); |
3389 | 3400 // We have to iterate over the object and process its pointers when |
3401 // black allocation is on. | |
3402 incremental_marking()->IterateBlackObject(new_code); | |
3390 #ifdef VERIFY_HEAP | 3403 #ifdef VERIFY_HEAP |
3391 if (FLAG_verify_heap) code->ObjectVerify(); | 3404 if (FLAG_verify_heap) code->ObjectVerify(); |
3392 #endif | 3405 #endif |
3393 return new_code; | 3406 return new_code; |
3394 } | 3407 } |
3395 | 3408 |
3396 | 3409 |
3397 void Heap::InitializeAllocationMemento(AllocationMemento* memento, | 3410 void Heap::InitializeAllocationMemento(AllocationMemento* memento, |
3398 AllocationSite* allocation_site) { | 3411 AllocationSite* allocation_site) { |
3399 memento->set_map_no_write_barrier(allocation_memento_map()); | 3412 memento->set_map_no_write_barrier(allocation_memento_map()); |
(...skipping 747 matching lines...) | |
4147 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact( | 4160 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact( |
4148 static_cast<size_t>(idle_time_in_ms), size_of_objects, | 4161 static_cast<size_t>(idle_time_in_ms), size_of_objects, |
4149 final_incremental_mark_compact_speed_in_bytes_per_ms))) { | 4162 final_incremental_mark_compact_speed_in_bytes_per_ms))) { |
4150 CollectAllGarbage(current_gc_flags_, | 4163 CollectAllGarbage(current_gc_flags_, |
4151 "idle notification: finalize incremental marking"); | 4164 "idle notification: finalize incremental marking"); |
4152 return true; | 4165 return true; |
4153 } | 4166 } |
4154 return false; | 4167 return false; |
4155 } | 4168 } |
4156 | 4169 |
4170 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) { | |
4171 // TODO(hpayer): We do not have to iterate reservations on black objects | |
4172 // for marking. We just have to execute the special visiting side effect | |
4173 // code that adds objects to global data structures, e.g. for array buffers. | |
4174 if (incremental_marking()->black_allocation()) { | |
4175 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) { | |
4176 const Heap::Reservation& res = reservations[i]; | |
4177 for (auto& chunk : res) { | |
4178 Address addr = chunk.start; | |
4179 while (addr < chunk.end) { | |
4180 HeapObject* obj = HeapObject::FromAddress(addr); | |
4181 incremental_marking()->IterateBlackObject(obj); | |
4182 addr += obj->Size(); | |
4183 } | |
4184 } | |
4185 } | |
4186 } | |
4187 } | |
4157 | 4188 |
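The new RegisterReservationsForBlackAllocation above walks every object in each deserializer reservation by stepping the cursor forward by the current object's size. A rough standalone sketch of that linear chunk walk, using a toy object layout rather than V8's HeapObject:

```cpp
#include <cstdint>

// A toy object whose first field is its byte size, so a chunk can be walked
// object by object, the way the patch steps addr += obj->Size().
struct ToyObject {
  int32_t size_in_bytes;  // assumed >= sizeof(ToyObject) and to span the object
};

void WalkReservationChunk(uint8_t* start, uint8_t* end,
                          void (*visit)(ToyObject*)) {
  uint8_t* addr = start;
  while (addr < end) {
    ToyObject* obj = reinterpret_cast<ToyObject*>(addr);
    visit(obj);                  // e.g. IterateBlackObject(obj) in the patch
    addr += obj->size_in_bytes;  // advance to the next contiguous object
  }
}
```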
4158 GCIdleTimeHeapState Heap::ComputeHeapState() { | 4189 GCIdleTimeHeapState Heap::ComputeHeapState() { |
4159 GCIdleTimeHeapState heap_state; | 4190 GCIdleTimeHeapState heap_state; |
4160 heap_state.contexts_disposed = contexts_disposed_; | 4191 heap_state.contexts_disposed = contexts_disposed_; |
4161 heap_state.contexts_disposal_rate = | 4192 heap_state.contexts_disposal_rate = |
4162 tracer()->ContextDisposalRateInMilliseconds(); | 4193 tracer()->ContextDisposalRateInMilliseconds(); |
4163 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects()); | 4194 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects()); |
4164 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped(); | 4195 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped(); |
4165 return heap_state; | 4196 return heap_state; |
4166 } | 4197 } |
(...skipping 326 matching lines...) | |
4493 new_space_.FromSpaceEnd()); | 4524 new_space_.FromSpaceEnd()); |
4494 while (it.has_next()) { | 4525 while (it.has_next()) { |
4495 NewSpacePage* page = it.next(); | 4526 NewSpacePage* page = it.next(); |
4496 for (Address cursor = page->area_start(), limit = page->area_end(); | 4527 for (Address cursor = page->area_start(), limit = page->area_end(); |
4497 cursor < limit; cursor += kPointerSize) { | 4528 cursor < limit; cursor += kPointerSize) { |
4498 Memory::Address_at(cursor) = kFromSpaceZapValue; | 4529 Memory::Address_at(cursor) = kFromSpaceZapValue; |
4499 } | 4530 } |
4500 } | 4531 } |
4501 } | 4532 } |
4502 | 4533 |
4503 | 4534 void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start, |
4504 void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start, | 4535 Address end, bool record_slots, |
4505 Address end, bool record_slots, | 4536 ObjectSlotCallback callback) { |
4506 ObjectSlotCallback callback) { | |
4507 Address slot_address = start; | 4537 Address slot_address = start; |
4508 | 4538 |
4509 while (slot_address < end) { | 4539 while (slot_address < end) { |
4510 Object** slot = reinterpret_cast<Object**>(slot_address); | 4540 Object** slot = reinterpret_cast<Object**>(slot_address); |
4511 Object* target = *slot; | 4541 Object* target = *slot; |
4512 // If the store buffer becomes overfull we mark pages as being exempt from | 4542 // If the store buffer becomes overfull we mark pages as being exempt from |
4513 // the store buffer. These pages are scanned to find pointers that point | 4543 // the store buffer. These pages are scanned to find pointers that point |
4514 // to the new space. In that case we may hit newly promoted objects and | 4544 // to the new space. In that case we may hit newly promoted objects and |
4515 // fix the pointers before the promotion queue gets to them. Thus the 'if'. | 4545 // fix the pointers before the promotion queue gets to them. Thus the 'if'. |
4516 if (target->IsHeapObject()) { | 4546 if (target->IsHeapObject()) { |
4517 if (Heap::InFromSpace(target)) { | 4547 if (Heap::InFromSpace(target)) { |
4518 callback(reinterpret_cast<HeapObject**>(slot), | 4548 callback(reinterpret_cast<HeapObject**>(slot), |
4519 HeapObject::cast(target)); | 4549 HeapObject::cast(target)); |
4520 Object* new_target = *slot; | 4550 Object* new_target = *slot; |
4521 if (InNewSpace(new_target)) { | 4551 if (InNewSpace(new_target)) { |
4522 SLOW_DCHECK(Heap::InToSpace(new_target)); | 4552 SLOW_DCHECK(Heap::InToSpace(new_target)); |
4523 SLOW_DCHECK(new_target->IsHeapObject()); | 4553 SLOW_DCHECK(new_target->IsHeapObject()); |
4524 store_buffer_.Mark(reinterpret_cast<Address>(slot)); | 4554 store_buffer_.Mark(reinterpret_cast<Address>(slot)); |
4525 } | 4555 } |
4526 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target)); | 4556 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target)); |
4527 } else if (record_slots && | 4557 } else if (record_slots && |
4528 MarkCompactCollector::IsOnEvacuationCandidate(target)) { | 4558 MarkCompactCollector::IsOnEvacuationCandidate(target)) { |
4529 mark_compact_collector()->RecordSlot(object, slot, target); | 4559 mark_compact_collector()->RecordSlot(object, slot, target); |
4530 } | 4560 } |
4531 } | 4561 } |
4532 slot_address += kPointerSize; | 4562 slot_address += kPointerSize; |
4533 } | 4563 } |
4534 } | 4564 } |
4535 | 4565 |
4536 | 4566 class IteratePromotedObjectsVisitor final : public ObjectVisitor { |
4537 class IteratePointersToFromSpaceVisitor final : public ObjectVisitor { | |
4538 public: | 4567 public: |
4539 IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target, | 4568 IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target, |
4540 bool record_slots, | 4569 bool record_slots, ObjectSlotCallback callback) |
4541 ObjectSlotCallback callback) | |
4542 : heap_(heap), | 4570 : heap_(heap), |
4543 target_(target), | 4571 target_(target), |
4544 record_slots_(record_slots), | 4572 record_slots_(record_slots), |
4545 callback_(callback) {} | 4573 callback_(callback) {} |
4546 | 4574 |
4547 V8_INLINE void VisitPointers(Object** start, Object** end) override { | 4575 V8_INLINE void VisitPointers(Object** start, Object** end) override { |
4548 heap_->IterateAndMarkPointersToFromSpace( | 4576 heap_->IteratePromotedObjectPointers( |
4549 target_, reinterpret_cast<Address>(start), | 4577 target_, reinterpret_cast<Address>(start), |
4550 reinterpret_cast<Address>(end), record_slots_, callback_); | 4578 reinterpret_cast<Address>(end), record_slots_, callback_); |
4579 // When black allocation is on, we have to visit objects promoted to | |
4580 // black pages to keep their references alive. | |
4581 // TODO(hpayer): Implement a special promotion visitor that incorporates | |
4582 // regular visiting and IteratePromotedObjectPointers. | |
4583 // TODO(hpayer): Right now we are also going to process black objects | |
ulan
2016/02/16 10:38:09
This could be a problem if the non-pure marking vi
Hannes Payer (out of office)
2016/03/11 14:20:48
As discussed offline, we are not going to process
| |
4584 // that got promoted to black pages. Filter them out to avoid unecessary | |
4585 // work. | |
4586 heap_->incremental_marking()->IterateBlackObject(target_); | |
ulan
2016/02/16 10:38:09
I think this should be called in line 4546, not he
Hannes Payer (out of office)
2016/03/11 14:20:48
Done.
| |
4551 } | 4587 } |
4552 | 4588 |
4553 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {} | 4589 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { |
4590 // Black allocation requires us to process objects referenced by | |
4591 // promoted objects. | |
4592 if (heap_->incremental_marking()->black_allocation()) { | |
4593 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot)); | |
4594 IncrementalMarking::MarkObject(heap_, code); | |
4595 } | |
4596 } | |
4554 | 4597 |
4555 private: | 4598 private: |
4556 Heap* heap_; | 4599 Heap* heap_; |
4557 HeapObject* target_; | 4600 HeapObject* target_; |
4558 bool record_slots_; | 4601 bool record_slots_; |
4559 ObjectSlotCallback callback_; | 4602 ObjectSlotCallback callback_; |
4560 }; | 4603 }; |
4561 | 4604 |
4562 | 4605 void Heap::IteratePromotedObject(HeapObject* target, int size, |
4563 void Heap::IteratePointersToFromSpace(HeapObject* target, int size, | 4606 ObjectSlotCallback callback) { |
4564 ObjectSlotCallback callback) { | |
4565 // We are not collecting slots on new space objects during mutation | 4607 // We are not collecting slots on new space objects during mutation |
4566 // thus we have to scan for pointers to evacuation candidates when we | 4608 // thus we have to scan for pointers to evacuation candidates when we |
4567 // promote objects. But we should not record any slots in non-black | 4609 // promote objects. But we should not record any slots in non-black |
4568 // objects. Grey objects' slots would be rescanned. | 4610 // objects. Grey objects' slots would be rescanned. |
4569 // A white object might not survive until the end of the collection, so | 4611 // A white object might not survive until the end of the collection, so |
4570 // it would be a violation of the invariant to record its slots. | 4612 // it would be a violation of the invariant to record its slots. |
4571 bool record_slots = false; | 4613 bool record_slots = false; |
4572 if (incremental_marking()->IsCompacting()) { | 4614 if (incremental_marking()->IsCompacting()) { |
4573 MarkBit mark_bit = Marking::MarkBitFrom(target); | 4615 MarkBit mark_bit = Marking::MarkBitFrom(target); |
4574 record_slots = Marking::IsBlack(mark_bit); | 4616 record_slots = Marking::IsBlack(mark_bit); |
4575 } | 4617 } |
4576 | 4618 |
4577 IteratePointersToFromSpaceVisitor visitor(this, target, record_slots, | 4619 IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback); |
4578 callback); | |
4579 target->IterateBody(target->map()->instance_type(), size, &visitor); | 4620 target->IterateBody(target->map()->instance_type(), size, &visitor); |
4621 | |
4622 // Black allocation requires us to process objects referenced by | |
4623 // promoted objects. | |
4624 if (incremental_marking()->black_allocation()) { | |
4625 Map* map = target->map(); | |
4626 IncrementalMarking::MarkObject(this, map); | |
4627 } | |
4580 } | 4628 } |
4581 | 4629 |
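The comment at the top of IteratePromotedObject encodes a simple rule: slots pointing at evacuation candidates are recorded only when the promoted object is already black, because grey objects will be rescanned anyway and white objects may not survive the collection. A minimal sketch of that decision, with an enum standing in for V8's mark bits:

```cpp
// Stand-in for Marking::IsBlack(Marking::MarkBitFrom(target)).
enum class MarkColor { kWhite, kGrey, kBlack };

// Record slots only while compacting and only for black promoted objects;
// grey objects get rescanned anyway, and white objects may die before the
// end of the collection.
bool ShouldRecordSlots(bool is_compacting, MarkColor promoted_object_color) {
  return is_compacting && promoted_object_color == MarkColor::kBlack;
}
```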
4582 | 4630 |
4583 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { | 4631 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { |
4584 IterateStrongRoots(v, mode); | 4632 IterateStrongRoots(v, mode); |
4585 IterateWeakRoots(v, mode); | 4633 IterateWeakRoots(v, mode); |
4586 } | 4634 } |
4587 | 4635 |
4588 | 4636 |
4589 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { | 4637 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { |
(...skipping 1643 matching lines...) | |
6233 } | 6281 } |
6234 | 6282 |
6235 | 6283 |
6236 // static | 6284 // static |
6237 int Heap::GetStaticVisitorIdForMap(Map* map) { | 6285 int Heap::GetStaticVisitorIdForMap(Map* map) { |
6238 return StaticVisitorBase::GetVisitorId(map); | 6286 return StaticVisitorBase::GetVisitorId(map); |
6239 } | 6287 } |
6240 | 6288 |
6241 } // namespace internal | 6289 } // namespace internal |
6242 } // namespace v8 | 6290 } // namespace v8 |