Chromium Code Reviews
Unified Diff: src/heap-inl.h

Issue 435003: Patch for allowing several V8 instances in process:... (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 33 matching lines...)
                                   uint32_t hash_field) {
   unibrow::Utf8InputBuffer<> buffer(str.start(),
                                     static_cast<unsigned>(str.length()));
   return AllocateInternalSymbol(&buffer, chars, hash_field);
 }


 Object* Heap::AllocateRaw(int size_in_bytes,
                           AllocationSpace space,
                           AllocationSpace retry_space) {
-  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+  HeapData& heap_data = v8_context()->heap_data_;
+  ASSERT(heap_data.allocation_allowed_ && heap_data.gc_state_ == NOT_IN_GC);
   ASSERT(space != NEW_SPACE ||
          retry_space == OLD_POINTER_SPACE ||
          retry_space == OLD_DATA_SPACE);
 #ifdef DEBUG
   if (FLAG_gc_interval >= 0 &&
-      !disallow_allocation_failure_ &&
-      Heap::allocation_timeout_-- <= 0) {
+      !heap_data.disallow_allocation_failure_ &&
+      heap_data.allocation_timeout_-- <= 0) {
     return Failure::RetryAfterGC(size_in_bytes, space);
   }
-  Counters::objs_since_last_full.Increment();
-  Counters::objs_since_last_young.Increment();
+  INC_COUNTER(objs_since_last_full);
+  INC_COUNTER(objs_since_last_young);
 #endif
   Object* result;
   if (NEW_SPACE == space) {
-    result = new_space_.AllocateRaw(size_in_bytes);
+    result = heap_data.new_space_.AllocateRaw(size_in_bytes);
     if (always_allocate() && result->IsFailure()) {
       space = retry_space;
     } else {
       return result;
     }
   }

   if (OLD_POINTER_SPACE == space) {
-    result = old_pointer_space_->AllocateRaw(size_in_bytes);
+    result = heap_data.old_pointer_space_->AllocateRaw(size_in_bytes);
   } else if (OLD_DATA_SPACE == space) {
-    result = old_data_space_->AllocateRaw(size_in_bytes);
+    result = heap_data.old_data_space_->AllocateRaw(size_in_bytes);
   } else if (CODE_SPACE == space) {
-    result = code_space_->AllocateRaw(size_in_bytes);
+    result = heap_data.code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {
-    result = lo_space_->AllocateRaw(size_in_bytes);
+    result = heap_data.lo_space_->AllocateRaw(size_in_bytes);
   } else if (CELL_SPACE == space) {
-    result = cell_space_->AllocateRaw(size_in_bytes);
+    result = heap_data.cell_space_->AllocateRaw(size_in_bytes);
   } else {
     ASSERT(MAP_SPACE == space);
-    result = map_space_->AllocateRaw(size_in_bytes);
+    result = heap_data.map_space_->AllocateRaw(size_in_bytes);
   }
-  if (result->IsFailure()) old_gen_exhausted_ = true;
+  if (result->IsFailure()) heap_data.old_gen_exhausted_ = true;
   return result;
 }

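Note on the pattern: every hunk in this file is the same mechanical transformation. Fields that used to be static members of Heap (new_space_, old_pointer_space_, gc_state_, and so on) become members of a per-instance HeapData struct reached through v8_context(), so two V8 instances in one process stop sharing heap state. HeapData and v8_context() are defined elsewhere in this patch, not in this file; the sketch below is only a self-contained illustration of the access pattern, with made-up member lists.

// Self-contained sketch of the transformation this patch applies. The real
// HeapData and v8_context() come from other files in the patch; everything
// here is a stand-in, not V8's actual code.
#include <cassert>

struct HeapData {                  // stand-in for the patch's HeapData
  bool allocation_allowed_ = true;
  int gc_state_ = 0;               // 0 plays the role of NOT_IN_GC
  bool old_gen_exhausted_ = false;
};

struct V8Context {                 // stand-in for the per-instance context
  HeapData heap_data_;
};

static thread_local V8Context current_context;  // one context per instance
V8Context* v8_context() { return &current_context; }

bool AllocationAllowed() {
  // Before the patch this read static Heap state shared by the whole
  // process; after it, each V8 instance gets its own copy.
  HeapData& heap_data = v8_context()->heap_data_;
  return heap_data.allocation_allowed_ && heap_data.gc_state_ == 0;
}

int main() {
  assert(AllocationAllowed());
  v8_context()->heap_data_.allocation_allowed_ = false;
  assert(!AllocationAllowed());
}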
 Object* Heap::NumberFromInt32(int32_t value) {
   if (Smi::IsValid(value)) return Smi::FromInt(value);
   // Bypass NumberFromDouble to avoid various redundant checks.
   return AllocateHeapNumber(FastI2D(value));
 }


 Object* Heap::NumberFromUint32(uint32_t value) {
   if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
     return Smi::FromInt((int32_t)value);
   }
   // Bypass NumberFromDouble to avoid various redundant checks.
   return AllocateHeapNumber(FastUI2D(value));
 }

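These two helpers are untouched by the patch, but the fast path is worth spelling out: values that fit in a small integer (Smi) are returned as tagged immediates and never touch the heap; everything else, including any uint32 with the top bit set, is boxed as a heap number. A hedged sketch of the range check, assuming the 31-bit Smi payload V8 used on 32-bit targets:

// Sketch of the Smi range check on a 32-bit target (31-bit payload, one
// tag bit). This mirrors what Smi::IsValid decides, not its exact code.
#include <cassert>
#include <cstdint>

bool SmiIsValid(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}

int main() {
  assert(SmiIsValid(42));
  assert(!SmiIsValid(1 << 30));            // too large: boxed as HeapNumber
  uint32_t u = 0x80000000u;                // (int32_t)u < 0, so NumberFromUint32
  assert(static_cast<int32_t>(u) < 0);     // takes the FastUI2D path
}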
 Object* Heap::AllocateRawMap() {
 #ifdef DEBUG
-  Counters::objs_since_last_full.Increment();
-  Counters::objs_since_last_young.Increment();
+  INC_COUNTER(objs_since_last_full);
+  INC_COUNTER(objs_since_last_young);
 #endif
-  Object* result = map_space_->AllocateRaw(Map::kSize);
-  if (result->IsFailure()) old_gen_exhausted_ = true;
+  HeapData& heap_data = v8_context()->heap_data_;
+  Object* result = heap_data.map_space_->AllocateRaw(Map::kSize);
+  if (result->IsFailure()) heap_data.old_gen_exhausted_ = true;
   return result;
 }


 Object* Heap::AllocateRawCell() {
 #ifdef DEBUG
-  Counters::objs_since_last_full.Increment();
-  Counters::objs_since_last_young.Increment();
+  INC_COUNTER(objs_since_last_full);
+  INC_COUNTER(objs_since_last_young);
 #endif
-  Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
-  if (result->IsFailure()) old_gen_exhausted_ = true;
+  HeapData& heap_data = v8_context()->heap_data_;
+  Object* result = heap_data.cell_space_->AllocateRaw(
+      JSGlobalPropertyCell::kSize);
+  if (result->IsFailure()) heap_data.old_gen_exhausted_ = true;
   return result;
 }

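INC_COUNTER replaces direct Counters::name.Increment() calls throughout the patch. Its definition is not in this file, so the expansion below is an assumption about its shape: the point implied by the surrounding hunks is that counters move into per-context storage instead of a process-wide static, consistent with the rest of the change.

// Hypothetical expansion of INC_COUNTER -- the real macro is defined
// elsewhere in the patch. Everything except the counter names is made up.
struct CountersData {
  int objs_since_last_full = 0;
  int objs_since_last_young = 0;
  int gc_last_resort_from_handles = 0;
};

struct V8Context { CountersData counters_data_; };

static V8Context current_context;
V8Context* v8_context() { return &current_context; }

#define INC_COUNTER(name) (++v8_context()->counters_data_.name)

int main() {
  INC_COUNTER(objs_since_last_full);
  return v8_context()->counters_data_.objs_since_last_full == 1 ? 0 : 1;
}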
 bool Heap::InNewSpace(Object* object) {
-  return new_space_.Contains(object);
+  return v8_context()->heap_data_.new_space_.Contains(object);
 }


 bool Heap::InFromSpace(Object* object) {
-  return new_space_.FromSpaceContains(object);
+  return v8_context()->heap_data_.new_space_.FromSpaceContains(object);
 }


 bool Heap::InToSpace(Object* object) {
-  return new_space_.ToSpaceContains(object);
+  return v8_context()->heap_data_.new_space_.ToSpaceContains(object);
 }


 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+  HeapData& heap_data = v8_context()->heap_data_;
   // An object should be promoted if:
   // - the object has survived a scavenge operation or
   // - to space is already 25% full.
-  return old_address < new_space_.age_mark()
-      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
+  return old_address < heap_data.new_space_.age_mark()
+      || ((heap_data.new_space_.Size() + object_size) >=
+          (heap_data.new_space_.Capacity() >> 2));
 }

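The promotion predicate reads directly off the comment: anything allocated before the age mark has already survived one scavenge, and otherwise promotion kicks in once to-space would be a quarter full (Capacity() >> 2). A quick worked example with assumed numbers, isolated from the rest of the heap:

// The 25% rule in isolation; the 2 MB capacity is an assumption for the
// example, not a value taken from this patch.
#include <cassert>

bool ShouldBePromoted(int size, int object_size, int capacity) {
  return (size + object_size) >= (capacity >> 2);   // >> 2 == divide by 4
}

int main() {
  const int capacity = 2 * 1024 * 1024;             // assumed 2 MB to-space
  assert(!ShouldBePromoted(100 * 1024, 16 * 1024, capacity));  // well under
  assert(ShouldBePromoted(500 * 1024, 16 * 1024, capacity));   // 516 KB >= 512 KB
}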
 void Heap::RecordWrite(Address address, int offset) {
-  if (new_space_.Contains(address)) return;
-  ASSERT(!new_space_.FromSpaceContains(address));
+  HeapData& heap_data = v8_context()->heap_data_;
+  if (heap_data.new_space_.Contains(address)) return;
+  ASSERT(!heap_data.new_space_.FromSpaceContains(address));
   SLOW_ASSERT(Contains(address + offset));
   Page::SetRSet(address, offset);
 }

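RecordWrite is the write barrier: stores into new-space objects need no bookkeeping (the whole young generation is scanned at scavenge time), while stores into older objects mark the written slot in the page's remembered set so the next scavenge can find old-to-new pointers. Page::SetRSet's implementation is not part of this file; the sketch below shows one page-bitmap scheme in that spirit, with an assumed 8 KB page and 4-byte words, and is not V8's actual layout.

// Schematic remembered-set bit for a slot: one bit per pointer-sized word
// on a page. Page size and word size here are assumptions.
#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 8 * 1024;
const int kPointerSize = 4;

uint32_t rset_bitmap[kPageSize / kPointerSize / 32];

void SetRSet(uintptr_t address, int offset) {
  uintptr_t slot = (address + offset) % kPageSize;   // offset within the page
  uintptr_t word_index = slot / kPointerSize;        // which slot on the page
  rset_bitmap[word_index / 32] |= 1u << (word_index % 32);
}

int main() {
  SetRSet(0x1000, 8);
  assert(rset_bitmap[(0x1008 / kPointerSize) / 32] != 0);
}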
 OldSpace* Heap::TargetSpace(HeapObject* object) {
   InstanceType type = object->map()->instance_type();
   AllocationSpace space = TargetSpaceId(type);
   return (space == OLD_POINTER_SPACE)
-      ? old_pointer_space_
-      : old_data_space_;
+      ? v8_context()->heap_data_.old_pointer_space_
+      : v8_context()->heap_data_.old_data_space_;
 }


 AllocationSpace Heap::TargetSpaceId(InstanceType type) {
   // Heap numbers and sequential strings are promoted to old data space, all
   // other object types are promoted to old pointer space.  We do not use
   // object->IsHeapNumber() and object->IsSeqString() because we already
   // know that object has the heap object tag.
   ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
   bool has_pointers =
(...skipping 39 matching lines...)
     return;
   }

   // Call the slow part of scavenge object.
   return ScavengeObjectSlow(p, object);
 }


 int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
   ASSERT(HasBeenSetup());
-  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
+  HeapData& heap_data = v8_context()->heap_data_;
+  int amount = heap_data.amount_of_external_allocated_memory_ + change_in_bytes;
   if (change_in_bytes >= 0) {
     // Avoid overflow.
-    if (amount > amount_of_external_allocated_memory_) {
-      amount_of_external_allocated_memory_ = amount;
+    if (amount > heap_data.amount_of_external_allocated_memory_) {
+      heap_data.amount_of_external_allocated_memory_ = amount;
     }
     int amount_since_last_global_gc =
-        amount_of_external_allocated_memory_ -
-        amount_of_external_allocated_memory_at_last_global_gc_;
-    if (amount_since_last_global_gc > external_allocation_limit_) {
+        heap_data.amount_of_external_allocated_memory_ -
+        heap_data.amount_of_external_allocated_memory_at_last_global_gc_;
+    if (amount_since_last_global_gc > heap_data.external_allocation_limit_) {
       CollectAllGarbage(false);
     }
   } else {
     // Avoid underflow.
     if (amount >= 0) {
-      amount_of_external_allocated_memory_ = amount;
+      heap_data.amount_of_external_allocated_memory_ = amount;
     }
   }
-  ASSERT(amount_of_external_allocated_memory_ >= 0);
-  return amount_of_external_allocated_memory_;
+  ASSERT(heap_data.amount_of_external_allocated_memory_ >= 0);
+  return heap_data.amount_of_external_allocated_memory_;
 }

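The overflow/underflow guards here are easy to miss: on a positive delta the new total is stored only if it actually grew (a signed-int wraparound would make it smaller), and on a negative delta only if it stayed non-negative. Growing past external_allocation_limit_ since the last full GC forces a collection. A self-contained sketch of the same clamping logic, with an assumed 1 MB limit and the GC reduced to a stub:

// Clamping logic of AdjustAmountOfExternalAllocatedMemory in isolation.
// The limit and the "collect garbage" stand-in are assumptions.
#include <cassert>

static int external_bytes = 0;
static int external_bytes_at_last_gc = 0;
const int kExternalAllocationLimit = 1024 * 1024;

int AdjustExternal(int change_in_bytes) {
  int amount = external_bytes + change_in_bytes;
  if (change_in_bytes >= 0) {
    if (amount > external_bytes) external_bytes = amount;  // skip on overflow
    if (external_bytes - external_bytes_at_last_gc > kExternalAllocationLimit) {
      external_bytes_at_last_gc = external_bytes;          // "collect garbage"
    }
  } else {
    if (amount >= 0) external_bytes = amount;              // skip on underflow
  }
  return external_bytes;
}

int main() {
  assert(AdjustExternal(500) == 500);
  assert(AdjustExternal(-200) == 300);
  assert(AdjustExternal(-1000) == 300);   // would underflow: ignored
}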
 void Heap::SetLastScriptId(Object* last_script_id) {
-  roots_[kLastScriptIdRootIndex] = last_script_id;
+  v8_context()->heap_data_.roots_[kLastScriptIdRootIndex] = last_script_id;
 }


 #define GC_GREEDY_CHECK() \
   ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())


 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.
(...skipping 11 matching lines...)
   }                                                                    \
   if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                     \
   Heap::CollectGarbage(Failure::cast(__object__)->requested(),         \
                        Failure::cast(__object__)->allocation_space()); \
   __object__ = FUNCTION_CALL;                                          \
   if (!__object__->IsFailure()) RETURN_VALUE;                          \
   if (__object__->IsOutOfMemoryFailure()) {                            \
     v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1");     \
   }                                                                    \
   if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                     \
-  Counters::gc_last_resort_from_handles.Increment();                   \
+  INC_COUNTER(gc_last_resort_from_handles);                            \
   Heap::CollectAllGarbage(false);                                      \
   {                                                                    \
     AlwaysAllocateScope __scope__;                                     \
     __object__ = FUNCTION_CALL;                                        \
   }                                                                    \
   if (!__object__->IsFailure()) RETURN_VALUE;                          \
   if (__object__->IsOutOfMemoryFailure() ||                            \
       __object__->IsRetryAfterGC()) {                                  \
     /* TODO(1181417): Fix this. */                                     \
     v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2");     \
   }                                                                    \
   RETURN_EMPTY;                                                        \
 } while (false)


 #define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)                \
   CALL_AND_RETRY(FUNCTION_CALL,                                \
                  return Handle<TYPE>(TYPE::cast(__object__)),  \
                  return Handle<TYPE>())


 #define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
   CALL_AND_RETRY(FUNCTION_CALL, return, return)

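CALL_AND_RETRY is the escalation ladder used by handle-allocating helpers: try the allocation, on RetryAfterGC collect the requested space and retry, then as a last resort run a full collection inside an AlwaysAllocateScope before declaring out-of-memory. A typical call site looks like the following; this shape matches how V8's Factory methods of this era used the macro, but the exact function shown is an illustration, not a line quoted from this patch.

// Hypothetical call site (illustration only; depends on V8 internals):
Handle<String> NewStringFromUtf8(Vector<const char> str) {
  CALL_HEAP_FUNCTION(Heap::AllocateStringFromUtf8(str), String);
}

Note that the macro body returns out of the enclosing function via RETURN_VALUE or RETURN_EMPTY, which is why such call sites have no explicit return of their own.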
 #ifdef DEBUG

 inline bool Heap::allow_allocation(bool new_state) {
-  bool old = allocation_allowed_;
-  allocation_allowed_ = new_state;
+  HeapData& heap_data = v8_context()->heap_data_;
+  bool old = heap_data.allocation_allowed_;
+  heap_data.allocation_allowed_ = new_state;
   return old;
 }

 #endif


 } }  // namespace v8::internal

 #endif  // V8_HEAP_INL_H_