
Unified Diff: src/spaces.h

Issue 11028027: Revert trunk to bleeding_edge at r12484 (Closed)
Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 8 years, 2 months ago
Index: src/spaces.h
diff --git a/src/spaces.h b/src/spaces.h
index 97bcaa59ab4a6e0d4b31a2a954c292cdbfae5a35..6602c899dfbcbe35024909152ead1034e73078ee 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -284,9 +284,7 @@ class Bitmap {
   bool IsClean() {
     for (int i = 0; i < CellsCount(); i++) {
-      if (cells()[i] != 0) {
-        return false;
-      }
+      if (cells()[i] != 0) return false;
     }
     return true;
   }
@@ -375,11 +373,6 @@ class MemoryChunk {
     return addr >= area_start() && addr <= area_end();
   }
 
-  // Every n write barrier invocations we go to runtime even though
-  // we could have handled it in generated code. This lets us check
-  // whether we have hit the limit and should do some more marking.
-  static const int kWriteBarrierCounterGranularity = 500;
-
   enum MemoryChunkFlags {
     IS_EXECUTABLE,
     ABOUT_TO_BE_FREED,
@@ -400,15 +393,6 @@ class MemoryChunk {
     WAS_SWEPT_PRECISELY,
     WAS_SWEPT_CONSERVATIVELY,
 
-    // Used for large objects only. Indicates that the object has been
-    // partially scanned by the incremental mark-sweep GC. Objects that have
-    // been partially scanned are marked black so that the write barrier
-    // triggers for them, and they are counted as live bytes. If the mutator
-    // writes to them they may be turned grey and subtracted from the live byte
-    // list. They move back to the marking deque either by an iteration over
-    // the large object space or in the write barrier.
-    IS_PARTIALLY_SCANNED,
-
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -429,25 +413,6 @@ class MemoryChunk {
       (1 << IN_FROM_SPACE) |
       (1 << IN_TO_SPACE);
 
-  static const int kIsPartiallyScannedMask = 1 << IS_PARTIALLY_SCANNED;
-
-  void SetPartiallyScannedProgress(int progress) {
-    SetFlag(IS_PARTIALLY_SCANNED);
-    partially_scanned_progress_ = progress;
-  }
-
-  bool IsPartiallyScanned() {
-    return IsFlagSet(IS_PARTIALLY_SCANNED);
-  }
-
-  void SetCompletelyScanned() {
-    ClearFlag(IS_PARTIALLY_SCANNED);
-  }
-
-  int PartiallyScannedProgress() {
-    ASSERT(IsPartiallyScanned());
-    return partially_scanned_progress_;
-  }
 
   void SetFlag(int flag) {
     flags_ |= static_cast<uintptr_t>(1) << flag;
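
Note (for context on the two hunks above): the partially-scanned machinery let the incremental marker scan a large object in bounded steps and resume where it left off. A minimal sketch of the calling pattern; IncrementalScanLargeObject, kStepSize and ScanRegion are illustrative names, not V8 API:

    // Sketch: scan a large object incrementally, kStepSize bytes at a time.
    void IncrementalScanLargeObject(MemoryChunk* chunk, int object_size) {
      int start =
          chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
      int end = start + kStepSize;
      if (end > object_size) end = object_size;
      ScanRegion(chunk, start, end);  // Visit pointers in [start, end).
      if (end < object_size) {
        chunk->SetPartiallyScannedProgress(end);  // Resume here next step.
      } else {
        chunk->SetCompletelyScanned();
      }
    }

This matches the removed enum comment: a partially scanned object stays black so the write barrier still fires for it, and the saved progress lets the marker pick it up again later.
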
@@ -503,15 +468,6 @@ class MemoryChunk {
     return live_byte_count_;
   }
 
-  int write_barrier_counter() {
-    return static_cast<int>(write_barrier_counter_);
-  }
-
-  void set_write_barrier_counter(int counter) {
-    write_barrier_counter_ = counter;
-  }
-
-
   static void IncrementLiveBytesFromGC(Address address, int by) {
     MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }
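
Note: write_barrier_counter_ paired with kWriteBarrierCounterGranularity (removed further up) to bound how often the generated write barrier falls back into the runtime. A minimal sketch of that throttling pattern; RecordWrite and RecordWriteSlow here are hypothetical names, not V8's actual barrier code:

    // Sketch: take the slow path only every kWriteBarrierCounterGranularity
    // invocations, giving the incremental marker periodic chances to work.
    void RecordWrite(MemoryChunk* chunk, Address slot) {
      int counter = chunk->write_barrier_counter() - 1;
      if (counter > 0) {
        chunk->set_write_barrier_counter(counter);  // Fast path, no call.
        return;
      }
      chunk->set_write_barrier_counter(
          MemoryChunk::kWriteBarrierCounterGranularity);
      RecordWriteSlow(slot);  // Slow path: hypothetical runtime hook.
    }
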
@@ -532,16 +488,8 @@ class MemoryChunk {
   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
 
-  static const size_t kWriteBarrierCounterOffset =
+  static const size_t kHeaderSize =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
-  static const size_t kPartiallyScannedProgress =
-      kWriteBarrierCounterOffset + kPointerSize;
-
-  // Actually the partially_scanned_progress_ member is only an int, but on
-  // 64 bit the size of MemoryChunk gets rounded up to a 64 bit size so we
-  // have to have the header start kPointerSize after the
-  // partially_scanned_progress_ member.
-  static const size_t kHeaderSize = kPartiallyScannedProgress + kPointerSize;
 
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
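
Note: the removed layout comment is easier to see with concrete numbers. A worked example for a 64-bit target (kPointerSize == 8, kIntSize == 4); the struct is illustrative only, not part of V8:

    struct OldHeaderTail {
      intptr_t write_barrier_counter_;  // offset 0, 8 bytes
      int partially_scanned_progress_;  // offset 8, 4 bytes
    };
    // sizeof(OldHeaderTail) == 16, not 12: the trailing int is padded out to
    // the struct's 8-byte alignment. That padding is why the old kHeaderSize
    // started kPointerSize (not kIntSize) after kPartiallyScannedProgress.
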
@@ -677,8 +625,6 @@ class MemoryChunk {
   int live_byte_count_;
   SlotsBuffer* slots_buffer_;
   SkipList* skip_list_;
-  intptr_t write_barrier_counter_;
-  int partially_scanned_progress_;
 
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
@@ -844,6 +790,14 @@ class Space : public Malloced {
   virtual void Print() = 0;
 #endif
 
+  // After calling this we can allocate a certain number of bytes using only
+  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
+  // without using freelists or causing a GC. This is used by partial
+  // snapshots. It returns true if space was reserved or false if a GC is
+  // needed. For paged spaces the space requested must include the space
+  // wasted at the end of each page when allocating linearly.
+  virtual bool ReserveSpace(int bytes) = 0;
+
  private:
   Heap* heap_;
   AllocationSpace id_;
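
Note: a minimal sketch of how one space might satisfy the restored ReserveSpace() contract (illustrative only, not the real V8 overrides), assuming a hypothetical bump-pointer space with an allocation region [top_, limit_):

    bool ExampleSpace::ReserveSpace(int bytes) {
      if (limit_ - top_ >= bytes) return true;  // Fits without a GC.
      return Expand();  // Hypothetical: try to commit another page.
    }
    // Returning false tells the caller (the partial snapshot machinery) that
    // a GC is needed before the reservation can succeed.
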
@@ -1364,11 +1318,6 @@ class FreeListNode: public HeapObject {
   inline void Zap();
 
-  static inline FreeListNode* cast(MaybeObject* maybe) {
-    ASSERT(!maybe->IsFailure());
-    return reinterpret_cast<FreeListNode*>(maybe);
-  }
-
  private:
   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
@@ -1431,9 +1380,6 @@ class FreeList BASE_EMBEDDED {
   bool IsVeryLong();
 #endif
 
-  // Used after booting the VM.
-  void RepairLists(Heap* heap);
-
   struct SizeStats {
     intptr_t Total() {
       return small_size_ + medium_size_ + large_size_ + huge_size_;
@@ -1514,10 +1460,6 @@ class PagedSpace : public Space {
   // linear in the number of objects in the page. It may be slow.
   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
 
-  // During boot the free_space_map is created, and afterwards we may need
-  // to write it into the free list nodes that were already created.
-  virtual void RepairFreeListsAfterBoot();
-
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();
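
Note: the two removed Repair* declarations (here and in FreeList above) address the same bootstrapping wrinkle: free-list nodes created before free_space_map exists need that map written into them afterwards. A minimal sketch of the repair loop; the body is a guess at the shape, not the actual src/spaces.cc code:

    // Sketch: stamp the now-existing free_space_map into every node of one
    // free list built during bootstrapping. Assumes FreeListNode::next(),
    // HeapObject::set_map_no_write_barrier() and Heap::free_space_map().
    void RepairFreeList(Heap* heap, FreeListNode* head) {
      for (FreeListNode* node = head; node != NULL; node = node->next()) {
        node->set_map_no_write_barrier(heap->free_space_map());
      }
    }
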