Chromium Code Reviews| Index: src/heap.h |
| diff --git a/src/heap.h b/src/heap.h |
| index 77c0984fa23e69c0f3262e3362015f5701900fb6..c69545f34fa52ed92a9040de0f6606ba27d5e5b9 100644 |
| --- a/src/heap.h |
| +++ b/src/heap.h |
| @@ -682,6 +682,68 @@ class Heap { |
| return old_data_space_->allocation_limit_address(); |
| } |
| + // Heap access methods. |
| + static inline Object* read_field(HeapObject* p, int offset); |
|
Hannes Payer (out of office)
2014/04/30 07:47:25
Our INLINE macro may force inlining of these functions.
|
| + static inline intptr_t read_intptr_field(HeapObject* p, int offset); |
| + static inline int read_int_field(HeapObject* p, int offset); |
| + static inline int32_t read_int32_field(HeapObject* p, int offset); |
| + static inline uint32_t read_uint32_field(HeapObject* p, int offset); |
| + static inline int64_t read_int64_field(HeapObject* p, int offset); |
| + static inline int16_t read_short_field(HeapObject* p, int offset); |
| + static inline byte read_byte_field(HeapObject* p, int offset); |
| + static inline double read_double_field(HeapObject* p, int offset); |
| + |
| + |
| + static inline void write_intptr_field(HeapObject* p, |
| + int offset, |
| + intptr_t value); |
| + static inline void write_int_field(HeapObject* p, |
| + int offset, |
| + int value); |
| + static inline void write_int32_field(HeapObject* p, |
| + int offset, |
| + int32_t value); |
| + static inline void write_uint32_field(HeapObject* p, |
| + int offset, |
| + uint32_t value); |
| + static inline void write_int64_field(HeapObject* p, |
| + int offset, |
| + int64_t value); |
| + static inline void write_short_field(HeapObject* p, |
| + int offset, |
| + int16_t value); |
| + static inline void write_byte_field(HeapObject* p, |
| + int offset, |
| + byte value); |
| + static inline void write_double_field(HeapObject* p, |
| + int offset, |
| + double value); |
| + |
| + // TODO(jarin) expose the barrier reason here |
| + static inline void write_field(HeapObject* p, |
| + int offset, |
| + Object* value, |
| + WriteBarrierMode mode); |
| + |
| + // TODO(jarin) replace this with variants that do not expose |
| + // internal pointers. |
| + static inline Address get_field_address(HeapObject* p, int offset); |
| + |
| + // TODO(jarin) synchronized flavors of the field accesses should be |
| + // wrapped so that we do not expose the sync. |
| + static inline Object* acquire_read_field(HeapObject* p, int offset); |
| + static inline Object* nobarrier_read_field(HeapObject* p, int offset); |
| + static inline void release_write_field(HeapObject* p, |
| + int offset, |
| + Object* value); |
| + static inline void nobarrier_write_field(HeapObject* p, |
| + int offset, |
| + Object* value); |
| + static inline byte nobarrier_read_byte_field(HeapObject* p, int offset); |
| + static inline void nobarrier_write_byte_field(HeapObject* p, |
| + int offset, |
| + byte value); |
| + |
| // Returns a deep copy of the JavaScript object. |
| // Properties and elements are copied too. |
| // Returns failure if allocation failed. |
| @@ -1015,7 +1077,6 @@ class Heap { |
| // necessary, the object might be promoted to an old space. The caller must |
| // ensure the precondition that the object is (a) a heap object and (b) in |
| // the heap's from space. |
| - static inline void ScavengePointer(HeapObject** p); |
| static inline void ScavengeObject(HeapObject** p, HeapObject* object); |
| enum ScratchpadSlotMode { |