Chromium Code Reviews

Unified Diff: src/heap-inl.h

Issue 253293003: Replace heap object access macros with functions and move them to the heap class (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Switched to V8_INLINE, rebase (created 6 years, 7 months ago)
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_

#include <cmath>

#include "heap.h"
(...skipping 160 matching lines...)
  return CopyFixedDoubleArrayWithMap(src, src->map());
}


MaybeObject* Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
  if (src->length() == 0) return src;
  return CopyConstantPoolArrayWithMap(src, src->map());
}


+#define FIELD_ADDR(p, offset) \
+  (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+
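Note: FIELD_ADDR subtracts kHeapObjectTag because V8 stores heap references as tagged pointers: a HeapObject* has its low bit set (kHeapObjectTag is 1), so the raw byte address of a field is pointer + offset - tag. A minimal standalone sketch of that arithmetic, with kTag standing in for V8's constant:

    #include <cstdint>
    #include <cstdio>

    static const intptr_t kTag = 1;  // stands in for V8's kHeapObjectTag

    int main() {
      unsigned char storage[16] = {0};
      // A "tagged pointer": the real address with the tag bit set.
      intptr_t tagged = reinterpret_cast<intptr_t>(storage) + kTag;
      // FIELD_ADDR-style arithmetic: add the offset, drop the tag.
      unsigned char* field =
          reinterpret_cast<unsigned char*>(tagged + 8 - kTag);
      std::printf("field offset = %td\n", field - storage);  // prints 8
      return 0;
    }
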
+Object* Heap::read_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<Object**>(FIELD_ADDR(p, offset));
+}
+
+
+intptr_t Heap::read_intptr_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset));
+}
+
+
+int Heap::read_int_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<int*>(FIELD_ADDR(p, offset));
+}
+
+
+int32_t Heap::read_int32_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset));
+}
+
+
+uint32_t Heap::read_uint32_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset));
+}
+
+
+int64_t Heap::read_int64_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset));
+}
+
+
+int16_t Heap::read_short_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset));
+}
+
+
+byte Heap::read_byte_field(HeapObject* p, int offset) {
+  return *reinterpret_cast<byte*>(FIELD_ADDR(p, offset));
+}
+
+
+double Heap::read_double_field(HeapObject* p, int offset) {
+#ifndef V8_TARGET_ARCH_MIPS
+  return *reinterpret_cast<double*>(FIELD_ADDR(p, offset));
+#else  // V8_TARGET_ARCH_MIPS
+  // Prevent gcc from using load-double (mips ldc1) on (possibly)
+  // non-64-bit aligned HeapNumber::value.
+  union conversion {
+    double d;
+    uint32_t u[2];
+  } c;
+  c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
+  c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
+  return c.d;
+#endif  // V8_TARGET_ARCH_MIPS
+}
+
+
+void Heap::write_field(HeapObject* p,
+                       int offset,
+                       Object* value,
+                       WriteBarrierMode mode) {
+  *reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value;
+  if (mode == UPDATE_WRITE_BARRIER) {
+    Heap* heap = p->GetHeap();
+    heap->incremental_marking()->RecordWrite(
+        p, HeapObject::RawField(p, offset), value);
+    if (heap->InNewSpace(value)) {
+      heap->RecordWrite(p->address(), offset);
+    }
+  }
+}
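
The UPDATE_WRITE_BARRIER branch above performs two recordings: it informs the incremental marker about the store (so an already-marked object that gains a new pointer gets rescanned), and if the value lives in new space it records the slot so the next scavenge can find the old-to-new pointer. A hedged usage sketch (obj, value, and kSlotOffset are hypothetical placeholders):

    // Ordinary pointer store: keep both barriers informed.
    heap->write_field(obj, kSlotOffset, value, UPDATE_WRITE_BARRIER);

    // Smis are immediates, never heap-allocated, so no recording is
    // needed and the barrier may safely be skipped.
    heap->write_field(obj, kSlotOffset, Smi::FromInt(0), SKIP_WRITE_BARRIER);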
+
+void Heap::write_intptr_field(HeapObject* p,
+                              int offset,
+                              intptr_t value) {
+  *reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value;
+}
+
+void Heap::write_int_field(HeapObject* p, int offset, int value) {
+  *reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value;
+}
+
+void Heap::write_int32_field(HeapObject* p, int offset, int32_t value) {
+  *reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value;
+}
+
+void Heap::write_uint32_field(HeapObject* p,
+                              int offset,
+                              uint32_t value) {
+  *reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value;
+}
+
+void Heap::write_int64_field(HeapObject* p, int offset, int64_t value) {
+  *reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value;
+}
+
+void Heap::write_short_field(HeapObject* p, int offset, int16_t value) {
+  *reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value;
+}
+
+void Heap::write_byte_field(HeapObject* p, int offset, byte value) {
+  *reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value;
+}
+
+void Heap::write_double_field(HeapObject* p, int offset, double value) {
+#ifndef V8_TARGET_ARCH_MIPS
+  *reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value;
+#else  // V8_TARGET_ARCH_MIPS
+  // Prevent gcc from using store-double (mips sdc1) on (possibly)
+  // non-64-bit aligned HeapNumber::value.
+  union conversion {
+    double d;
+    uint32_t u[2];
+  } c;
+  c.d = value;
+  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
+  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
+#endif  // V8_TARGET_ARCH_MIPS
+}
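
On MIPS, the ldc1/sdc1 instructions require 8-byte alignment, while a HeapNumber's value field is only guaranteed 4-byte alignment on 32-bit targets, so the read and write paths above go through a union of two uint32_t halves to force word-sized accesses. The same technique as a standalone sketch (names are illustrative):

    #include <stdint.h>

    // Store a double through two aligned 32-bit halves so the compiler
    // cannot emit a single 64-bit store that would fault on a
    // 4-byte-aligned address.
    static inline void StoreDoubleAsWords(uint32_t* addr, double value) {
      union { double d; uint32_t u[2]; } c;
      c.d = value;
      addr[0] = c.u[0];
      addr[1] = c.u[1];
    }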
+
+Address Heap::get_field_address(HeapObject* p, int offset) {
+  return reinterpret_cast<Address>(FIELD_ADDR(p, offset));
+}
+
+Object* Heap::acquire_read_field(HeapObject* p, int offset) {
+  return reinterpret_cast<Object*>(
+      Acquire_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))));
+}
+
+Object* Heap::nobarrier_read_field(HeapObject* p, int offset) {
+  return reinterpret_cast<Object*>(
+      NoBarrier_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))));
+}
+
+void Heap::release_write_field(HeapObject* p,
+                               int offset,
+                               Object* value) {
+  Release_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)),
+                reinterpret_cast<AtomicWord>(value));
+}
+
+void Heap::nobarrier_write_field(HeapObject* p,
+                                 int offset,
+                                 Object* value) {
+  NoBarrier_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)),
+                  reinterpret_cast<AtomicWord>(value));
+}
+
+byte Heap::nobarrier_read_byte_field(HeapObject* p, int offset) {
+  return static_cast<byte>(NoBarrier_Load(
+      reinterpret_cast<Atomic8*>(FIELD_ADDR(p, offset))));
+}
+
+void Heap::nobarrier_write_byte_field(HeapObject* p,
+                                      int offset,
+                                      byte value) {
+  NoBarrier_Store(reinterpret_cast<Atomic8*>(FIELD_ADDR(p, offset)),
+                  static_cast<Atomic8>(value));
+}
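
The acquire/release pair above implements the usual publication idiom: a writer that initializes an object and then release-stores a reference to it guarantees that a reader which acquire-loads the reference also sees the initialized contents. A hedged usage sketch against these helpers (obj, new_value, and kFieldOffset are hypothetical):

    // Writer thread: initialize the object, then publish it.
    heap->release_write_field(obj, kFieldOffset, new_value);

    // Reader thread: the acquire-load pairs with the release-store,
    // so everything written before the publish is visible here.
    Object* v = heap->acquire_read_field(obj, kFieldOffset);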
+
+#undef FIELD_ADDR
+
+
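For context, the point of the CL is to replace the old macro-based object accessors with calls to these functions. A hedged sketch of what a caller in objects-inl.h might then look like (illustrative, not the exact patch):

    double HeapNumber::value() {
      return GetHeap()->read_double_field(this, kValueOffset);
    }

    void HeapNumber::set_value(double value) {
      GetHeap()->write_double_field(this, kValueOffset, value);
    }
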
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
                               AllocationSpace space,
                               AllocationSpace retry_space) {
  ASSERT(AllowHandleAllocation::IsAllowed());
  ASSERT(AllowHeapAllocation::IsAllowed());
  ASSERT(gc_state_ == NOT_IN_GC);
  HeapProfiler* profiler = isolate_->heap_profiler();
#ifdef DEBUG
  if (FLAG_gc_interval >= 0 &&
      AllowAllocationFailure::IsAllowed(isolate_) &&
(...skipping 243 matching lines...)

    while (src_slot != end_slot) {
      *dst_slot++ = *src_slot++;
    }
  } else {
    OS::MemMove(dst, src, static_cast<size_t>(byte_size));
  }
}


-void Heap::ScavengePointer(HeapObject** p) {
-  ScavengeObject(p, *p);
-}
-
-
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
  // Check if there is potentially a memento behind the object. If
  // the last word of the memento is on another page we return
  // immediately.
  Address object_address = object->address();
  Address memento_address = object_address + object->Size();
  Address last_memento_word_address = memento_address + kPointerSize;
  if (!NewSpacePage::OnSamePage(object_address,
                                last_memento_word_address)) {
    return NULL;
(...skipping 318 matching lines...)


double GCTracer::SizeOfHeapObjects() {
  return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}


} }  // namespace v8::internal

#endif  // V8_HEAP_INL_H_