OLD | NEW |
---|---|
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 21 matching lines...) | |
32 #include "v8-counters.h" | 32 #include "v8-counters.h" |
33 | 33 |
34 namespace v8 { namespace internal { | 34 namespace v8 { namespace internal { |
35 | 35 |
36 int Heap::MaxHeapObjectSize() { | 36 int Heap::MaxHeapObjectSize() { |
37 return Page::kMaxHeapObjectSize; | 37 return Page::kMaxHeapObjectSize; |
38 } | 38 } |
39 | 39 |
40 | 40 |
41 Object* Heap::AllocateRaw(int size_in_bytes, | 41 Object* Heap::AllocateRaw(int size_in_bytes, |
42 AllocationSpace space) { | 42 AllocationSpace space, |
43 AllocationSpace retry_space) { | |
Erik Corry 2008/10/30 09:08:43:
This is fine for now. In the long run I think we …
43 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); | 44 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); |
45 ASSERT(space != NEW_SPACE || | |
46 retry_space == OLD_POINTER_SPACE || | |
47 retry_space == OLD_DATA_SPACE); | |
44 #ifdef DEBUG | 48 #ifdef DEBUG |
45 if (FLAG_gc_interval >= 0 && | 49 if (FLAG_gc_interval >= 0 && |
46 !disallow_allocation_failure_ && | 50 !disallow_allocation_failure_ && |
47 Heap::allocation_timeout_-- <= 0) { | 51 Heap::allocation_timeout_-- <= 0) { |
48 return Failure::RetryAfterGC(size_in_bytes, space); | 52 return Failure::RetryAfterGC(size_in_bytes, space); |
49 } | 53 } |
50 Counters::objs_since_last_full.Increment(); | 54 Counters::objs_since_last_full.Increment(); |
51 Counters::objs_since_last_young.Increment(); | 55 Counters::objs_since_last_young.Increment(); |
52 #endif | 56 #endif |
57 Object* result; | |
53 if (NEW_SPACE == space) { | 58 if (NEW_SPACE == space) { |
54 return new_space_.AllocateRaw(size_in_bytes); | 59 result = new_space_.AllocateRaw(size_in_bytes); |
60 if (!always_allocate() || !result->IsFailure()) return result; | |
Mads Ager (chromium) 2008/10/30 08:48:14:
Normally, I like these bail-outs instead of if-els…
61 space = retry_space; | |
55 } | 62 } |
56 | 63 |
57 Object* result; | |
58 if (OLD_POINTER_SPACE == space) { | 64 if (OLD_POINTER_SPACE == space) { |
59 result = old_pointer_space_->AllocateRaw(size_in_bytes); | 65 result = old_pointer_space_->AllocateRaw(size_in_bytes); |
60 } else if (OLD_DATA_SPACE == space) { | 66 } else if (OLD_DATA_SPACE == space) { |
61 result = old_data_space_->AllocateRaw(size_in_bytes); | 67 result = old_data_space_->AllocateRaw(size_in_bytes); |
62 } else if (CODE_SPACE == space) { | 68 } else if (CODE_SPACE == space) { |
63 result = code_space_->AllocateRaw(size_in_bytes); | 69 result = code_space_->AllocateRaw(size_in_bytes); |
64 } else if (LO_SPACE == space) { | 70 } else if (LO_SPACE == space) { |
65 result = lo_space_->AllocateRaw(size_in_bytes); | 71 result = lo_space_->AllocateRaw(size_in_bytes); |
66 } else { | 72 } else { |
67 ASSERT(MAP_SPACE == space); | 73 ASSERT(MAP_SPACE == space); |
(...skipping 57 matching lines...) | |
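The added retry_space parameter is the heart of this change: when a new-space allocation fails inside an AlwaysAllocateScope (always_allocate() returns true), the request now falls through to the caller-chosen old space instead of surfacing a retry failure. A minimal compilable sketch of that fallback pattern, using invented stand-in types (SimpleSpace and a nullptr failure convention, rather than V8's tagged Failure objects):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical bump allocator standing in for a V8 space.
struct SimpleSpace {
  uint8_t buffer[1 << 16];
  size_t top = 0;
  void* AllocateRaw(size_t size) {
    if (top + size > sizeof(buffer)) return nullptr;  // out of room
    void* result = buffer + top;
    top += size;
    return result;
  }
};

SimpleSpace new_space, old_space;
bool always_allocate = true;  // models Heap::always_allocate()

// Mirrors the new control flow: try the requested space first; on
// failure under always-allocate, retry in the caller-chosen old space.
void* AllocateRaw(size_t size, SimpleSpace* space, SimpleSpace* retry_space) {
  void* result = space->AllocateRaw(size);
  if (result != nullptr || !always_allocate) return result;
  return retry_space->AllocateRaw(size);
}

int main() {
  new_space.top = sizeof(new_space.buffer);  // pretend new space is full
  void* p = AllocateRaw(64, &new_space, &old_space);
  std::printf("fallback allocation %s\n", p != nullptr ? "succeeded" : "failed");
}
```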
125 | 131 |
126 void Heap::RecordWrite(Address address, int offset) { | 132 void Heap::RecordWrite(Address address, int offset) { |
127 if (new_space_.Contains(address)) return; | 133 if (new_space_.Contains(address)) return; |
128 ASSERT(!new_space_.FromSpaceContains(address)); | 134 ASSERT(!new_space_.FromSpaceContains(address)); |
129 SLOW_ASSERT(Contains(address + offset)); | 135 SLOW_ASSERT(Contains(address + offset)); |
130 Page::SetRSet(address, offset); | 136 Page::SetRSet(address, offset); |
131 } | 137 } |
132 | 138 |
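RecordWrite is the write barrier's slow path. Stores into new space need no bookkeeping; stores elsewhere set a remembered-set bit for the written slot, so the scavenger can find old-to-new pointers without scanning whole pages. A toy version of that bookkeeping for a single fake page (kPageSize and rset_bits are illustrative, not V8's real Page::SetRSet layout):

```cpp
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize = 8192;
constexpr int kPointerSize = sizeof(void*);

// One remembered-set bit per pointer-sized slot on a single fake page.
uint8_t rset_bits[kPageSize / kPointerSize / 8];

void SetRSet(uintptr_t page_offset) {
  uintptr_t slot = page_offset / kPointerSize;
  rset_bits[slot / 8] |= static_cast<uint8_t>(1u << (slot % 8));
}

// Analogue of Heap::RecordWrite: remember that the slot at
// address + offset may now hold a pointer into new space.
void RecordWrite(uintptr_t address, int offset) {
  SetRSet((address + offset) % kPageSize);
}

int main() {
  RecordWrite(/*address=*/0, /*offset=*/3 * kPointerSize);
  std::printf("rset byte 0: 0x%02x\n", rset_bits[0]);  // bit 3 set -> 0x08
}
```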
133 | 139 |
134 OldSpace* Heap::TargetSpace(HeapObject* object) { | 140 OldSpace* Heap::TargetSpace(HeapObject* object) { |
141 InstanceType type = object->map()->instance_type(); | |
142 AllocationSpace space = TargetSpaceId(type); | |
143 return (space == OLD_POINTER_SPACE) | |
144 ? old_pointer_space_ | |
145 : old_data_space_; | |
146 } | |
147 | |
148 | |
149 AllocationSpace Heap::TargetSpaceId(InstanceType type) { | |
135 // Heap numbers and sequential strings are promoted to old data space, all | 150 // Heap numbers and sequential strings are promoted to old data space, all |
136 // other object types are promoted to old pointer space. We do not use | 151 // other object types are promoted to old pointer space. We do not use |
137 // object->IsHeapNumber() and object->IsSeqString() because we already | 152 // object->IsHeapNumber() and object->IsSeqString() because we already |
138 // know that object has the heap object tag. | 153 // know that object has the heap object tag. |
139 InstanceType type = object->map()->instance_type(); | |
140 ASSERT((type != CODE_TYPE) && (type != MAP_TYPE)); | 154 ASSERT((type != CODE_TYPE) && (type != MAP_TYPE)); |
141 bool has_pointers = | 155 bool has_pointers = |
142 type != HEAP_NUMBER_TYPE && | 156 type != HEAP_NUMBER_TYPE && |
143 (type >= FIRST_NONSTRING_TYPE || | 157 (type >= FIRST_NONSTRING_TYPE || |
144 String::cast(object)->representation_tag() != kSeqStringTag); | 158 (type & kStringRepresentationMask) != kSeqStringTag); |
145 return has_pointers ? old_pointer_space_ : old_data_space_; | 159 return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE; |
146 } | 160 } |
147 | 161 |
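The refactoring splits the promotion decision in two: TargetSpaceId is a pure function of the instance type, and TargetSpace merely maps its answer to a space pointer. The rule itself, restated as a compilable sketch with invented stand-in enums (V8's real type constants differ):

```cpp
#include <cstdio>

// Stand-in enums; V8's actual instance-type values are different.
enum InstanceType {
  HEAP_NUMBER_TYPE,
  SEQ_STRING_TYPE,   // flat character data, no pointers
  CONS_STRING_TYPE,  // holds pointers to its two halves
  JS_OBJECT_TYPE
};
enum AllocationSpace { OLD_POINTER_SPACE, OLD_DATA_SPACE };

AllocationSpace TargetSpaceId(InstanceType type) {
  // Pointer-free objects may be promoted to old data space, which the
  // scavenger never has to scan; everything else goes to pointer space.
  bool has_pointers = type != HEAP_NUMBER_TYPE && type != SEQ_STRING_TYPE;
  return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
}

int main() {
  std::printf("heap number -> %d, cons string -> %d\n",
              TargetSpaceId(HEAP_NUMBER_TYPE),
              TargetSpaceId(CONS_STRING_TYPE));
}
```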
148 | 162 |
149 void Heap::CopyBlock(Object** dst, Object** src, int byte_size) { | 163 void Heap::CopyBlock(Object** dst, Object** src, int byte_size) { |
150 ASSERT(IsAligned(byte_size, kPointerSize)); | 164 ASSERT(IsAligned(byte_size, kPointerSize)); |
151 | 165 |
152 // Use block copying memcpy if the segment we're copying is big | 166 // Use block copying memcpy if the segment we're copying is big |
153 // enough to justify the extra call/setup overhead. | 167 // enough to justify the extra call/setup overhead. |
154 static const int kBlockCopyLimit = 16 * kPointerSize; | 168 static const int kBlockCopyLimit = 16 * kPointerSize; |
155 | 169 |
(...skipping 25 matching lines...) | |
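The cutoff is a classic small-copy heuristic: for a handful of words an inline loop beats memcpy's call and setup cost, while for larger blocks memcpy's bulk path wins. A compilable restatement of the idea, mirroring kBlockCopyLimit from above (the surrounding scaffolding is illustrative):

```cpp
#include <cstring>

constexpr int kPointerSize = sizeof(void*);
constexpr int kBlockCopyLimit = 16 * kPointerSize;

// Small moves stay in an inline word loop; larger ones pay memcpy's
// call/setup cost once and win on bulk throughput.
void CopyBlock(void** dst, void** src, int byte_size) {
  if (byte_size >= kBlockCopyLimit) {
    std::memcpy(dst, src, byte_size);
  } else {
    int remaining = byte_size / kPointerSize;
    while (remaining-- > 0) *dst++ = *src++;
  }
}

int main() {
  void* small_src[4] = {};  void* small_dst[4];
  void* big_src[64] = {};   void* big_dst[64];
  CopyBlock(small_dst, small_src, sizeof(small_src));  // word-loop path
  CopyBlock(big_dst, big_src, sizeof(big_src));        // memcpy path
}
```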
181 | 195 |
182 | 196 |
183 void Heap::ClearKeyedLookupCache() { | 197 void Heap::ClearKeyedLookupCache() { |
184 keyed_lookup_cache_ = undefined_value(); | 198 keyed_lookup_cache_ = undefined_value(); |
185 } | 199 } |
186 | 200 |
187 | 201 |
188 #define GC_GREEDY_CHECK() \ | 202 #define GC_GREEDY_CHECK() \ |
189 ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck()) | 203 ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck()) |
190 | 204 |
191 // Do not use the identifier __object__ in a call to this macro. | 205 |
192 // | 206 // Calls the FUNCTION_CALL function and retries it up to three times |
193 // Call the function FUNCTION_CALL. If it fails with a RetryAfterGC | 207 // to guarantee that any allocations performed during the call will |
194 // failure, call the garbage collector and retry the function. If the | 208 // succeed if there's enough memory. |
195 // garbage collector cannot reclaim the required space or the second | 209 |
196 // call fails with a RetryAfterGC failure, fail with out of memory. | 210 // Warning: Do not use the identifiers __object__ or __scope__ in a |
Erik Corry 2008/10/30 09:08:43:
I don't really feel we need this warning, but if w…
197 // If there is any other failure, return a null handle. If either | 211 // call to this macro. |
198 // call succeeds, return a handle to the functions return value. | 212 |
199 // | 213 #define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \ |
200 // Note that this macro always returns or raises a fatal error. | 214 do { \ |
201 #define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE) \ | 215 GC_GREEDY_CHECK(); \ |
202 do { \ | 216 Object* __object__ = FUNCTION_CALL; \ |
203 GC_GREEDY_CHECK(); \ | 217 if (!__object__->IsFailure()) return RETURN_VALUE; \ |
204 Object* __object__ = FUNCTION_CALL; \ | 218 if (__object__->IsOutOfMemoryFailure()) { \ |
205 if (__object__->IsFailure()) { \ | 219 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0"); \ |
Erik Corry 2008/10/30 09:08:43:
Is there a subtle fall-through here? Comment need…
206 if (__object__->IsRetryAfterGC()) { \ | 220 } \ |
207 if (!Heap::CollectGarbage( \ | 221 if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY; \ |
208 Failure::cast(__object__)->requested(), \ | 222 if (!Heap::CollectGarbage( \ |
209 Failure::cast(__object__)->allocation_space())) { \ | 223 Failure::cast(__object__)->requested(), \ |
210 /* TODO(1181417): Fix this. */ \ | 224 Failure::cast(__object__)->allocation_space())) { \ |
211 v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \ | 225 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1"); \ |
212 } \ | 226 } \ |
213 __object__ = FUNCTION_CALL; \ | 227 __object__ = FUNCTION_CALL; \ |
214 if (__object__->IsFailure()) { \ | 228 if (!__object__->IsFailure()) return RETURN_VALUE; \ |
215 if (__object__->IsRetryAfterGC()) { \ | 229 if (__object__->IsOutOfMemoryFailure()) { \ |
216 /* TODO(1181417): Fix this. */ \ | 230 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2"); \ |
217 v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \ | 231 } \ |
218 } \ | 232 if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY; \ |
219 return Handle<TYPE>(); \ | 233 Counters::gc_last_resort_from_handles.Increment(); \ |
220 } \ | 234 Heap::CollectAllGarbage(); \ |
221 } else { \ | 235 { \ |
222 if (__object__->IsOutOfMemoryFailure()) { \ | 236 AlwaysAllocateScope __scope__; \ |
223 v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \ | 237 __object__ = FUNCTION_CALL; \ |
224 } \ | 238 } \ |
225 return Handle<TYPE>(); \ | 239 if (!__object__->IsFailure()) return RETURN_VALUE; \ |
226 } \ | 240 if (__object__->IsOutOfMemoryFailure()) { \ |
227 } \ | 241 /* TODO(1181417): Fix this. */ \ |
228 return Handle<TYPE>(TYPE::cast(__object__)); \ | 242 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_3"); \ |
243 } \ | |
244 ASSERT(!__object__->IsRetryAfterGC()); \ | |
245 return RETURN_EMPTY; \ | |
229 } while (false) | 246 } while (false) |
230 | 247 |
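Restructured this way, CALL_AND_RETRY makes up to three attempts: the plain call, a retry after collecting the space that signaled failure, and a last resort after collecting all garbage with an AlwaysAllocateScope active so allocations can spill into old space. The ladder, restated as a plain function over hypothetical callbacks (Status and the parameters stand in for V8's Failure objects and GC entry points; in the real macro, out-of-memory is fatal at every rung rather than being returned):

```cpp
#include <cstdio>
#include <functional>

enum class Status { kOk, kRetryAfterGC, kOutOfMemory };

Status CallAndRetry(const std::function<Status()>& function_call,
                    const std::function<void()>& collect_failed_space,
                    const std::function<void()>& collect_all) {
  Status s = function_call();          // attempt 1: plain call
  if (s != Status::kRetryAfterGC) return s;
  collect_failed_space();              // GC the space that signaled retry
  s = function_call();                 // attempt 2: after targeted GC
  if (s != Status::kRetryAfterGC) return s;
  collect_all();                       // attempt 3: after a full GC, run
  return function_call();              // with always-allocate semantics
}

int main() {
  int tries = 0;
  Status s = CallAndRetry(
      [&] { return ++tries < 3 ? Status::kRetryAfterGC : Status::kOk; },
      [] { std::puts("collect failed space"); },
      [] { std::puts("collect all spaces"); });
  std::printf("status %d after %d tries\n", static_cast<int>(s), tries);
}
```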
231 | 248 |
232 // Don't use the following names: __object__, __failure__. | 249 #define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE) \ |
233 #define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \ | 250 CALL_AND_RETRY(FUNCTION_CALL, \ |
234 GC_GREEDY_CHECK(); \ | 251 Handle<TYPE>(TYPE::cast(__object__)), \ |
235 Object* __object__ = FUNCTION_CALL; \ | 252 Handle<TYPE>()) |
236 if (__object__->IsFailure()) { \ | 253 |
237 if (__object__->IsRetryAfterGC()) { \ | 254 |
238 Failure* __failure__ = Failure::cast(__object__); \ | 255 #define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \ |
239 if (!Heap::CollectGarbage(__failure__->requested(), \ | 256 CALL_AND_RETRY(FUNCTION_CALL,,) |
240 __failure__->allocation_space())) { \ | |
241 /* TODO(1181417): Fix this. */ \ | |
242 V8::FatalProcessOutOfMemory("Handles"); \ | |
243 } \ | |
244 __object__ = FUNCTION_CALL; \ | |
245 if (__object__->IsFailure()) { \ | |
246 if (__object__->IsRetryAfterGC()) { \ | |
247 /* TODO(1181417): Fix this. */ \ | |
248 V8::FatalProcessOutOfMemory("Handles"); \ | |
249 } \ | |
250 return; \ | |
251 } \ | |
252 } else { \ | |
253 if (__object__->IsOutOfMemoryFailure()) { \ | |
254 V8::FatalProcessOutOfMemory("Handles"); \ | |
255 } \ | |
256 UNREACHABLE(); \ | |
257 } \ | |
258 } | |
259 | 257 |
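Both wrappers rely on a subtlety worth calling out: the macro expands inside a caller and executes return statements from that caller, which is why RETURN_VALUE and RETURN_EMPTY are bare expressions (or, for the void variant, nothing at all). A self-contained toy showing that shape; none of these definitions are V8's:

```cpp
#include <cstdio>

struct Object { bool failed; };
template <typename T> struct Handle { T* location; };

Object the_string{false};
Object* AllocateString() { return &the_string; }  // never fails in this toy

// Toy expansion of the pattern: the macro runs inside a caller and
// returns *from that caller*, so handle-returning wrappers are one-liners.
#define TOY_CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)        \
  do {                                                     \
    Object* __object__ = FUNCTION_CALL;                    \
    if (!__object__->failed)                               \
      return Handle<TYPE>{static_cast<TYPE*>(__object__)}; \
    return Handle<TYPE>{nullptr};                          \
  } while (false)

Handle<Object> NewString() {
  TOY_CALL_HEAP_FUNCTION(AllocateString(), Object);
}

int main() {
  std::printf("handle location: %p\n",
              static_cast<void*>(NewString().location));
}
```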
260 | 258 |
261 #ifdef DEBUG | 259 #ifdef DEBUG |
262 | 260 |
263 inline bool Heap::allow_allocation(bool new_state) { | 261 inline bool Heap::allow_allocation(bool new_state) { |
264 bool old = allocation_allowed_; | 262 bool old = allocation_allowed_; |
265 allocation_allowed_ = new_state; | 263 allocation_allowed_ = new_state; |
266 return old; | 264 return old; |
267 } | 265 } |
268 | 266 |
269 #endif | 267 #endif |
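Returning the old state makes allow_allocation suitable for scoped save/restore, the idiom debug code typically wants. A minimal sketch of that idiom (DisallowAllocationScope is an invented name and this Heap is a stand-in; V8 provides comparable guards):

```cpp
#include <cassert>

struct Heap {
  static bool allocation_allowed_;
  static bool allow_allocation(bool new_state) {
    bool old = allocation_allowed_;
    allocation_allowed_ = new_state;
    return old;
  }
};
bool Heap::allocation_allowed_ = true;

// RAII guard built on the save/restore idiom the accessor enables:
// nesting works because each scope restores whatever it found.
class DisallowAllocationScope {
 public:
  DisallowAllocationScope() : old_state_(Heap::allow_allocation(false)) {}
  ~DisallowAllocationScope() { Heap::allow_allocation(old_state_); }
 private:
  bool old_state_;
};

int main() {
  {
    DisallowAllocationScope no_alloc;
    assert(!Heap::allocation_allowed_);
    { DisallowAllocationScope nested; }  // restore is nesting-safe
    assert(!Heap::allocation_allowed_);
  }
  assert(Heap::allocation_allowed_);
}
```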
270 | 268 |
271 | 269 |
272 } } // namespace v8::internal | 270 } } // namespace v8::internal |
273 | 271 |
274 #endif // V8_HEAP_INL_H_ | 272 #endif // V8_HEAP_INL_H_ |