Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: src/common/memory.h

Issue 1688743002: Switch the Linux minidump writer to use MDCVInfoELF for CV data. (Closed) Base URL: https://chromium.googlesource.com/breakpad/breakpad.git@master
Patch Set: Rework to handle arbitrary size build ids Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/common/linux/file_id_unittest.cc ('k') | src/common/memory_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2009, Google Inc. 1 // Copyright (c) 2009, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
57 // Thus, it can be used even when the heap may be corrupted. 57 // Thus, it can be used even when the heap may be corrupted.
58 // 58 //
59 // There is no free operation. The pages are only freed when the object is 59 // There is no free operation. The pages are only freed when the object is
60 // destroyed. 60 // destroyed.
61 class PageAllocator { 61 class PageAllocator {
62 public: 62 public:
63 PageAllocator() 63 PageAllocator()
64 : page_size_(getpagesize()), 64 : page_size_(getpagesize()),
65 last_(NULL), 65 last_(NULL),
66 current_page_(NULL), 66 current_page_(NULL),
67 page_offset_(0) { 67 page_offset_(0),
68 pages_allocated_(0) {
68 } 69 }
69 70
70 ~PageAllocator() { 71 ~PageAllocator() {
71 FreeAll(); 72 FreeAll();
72 } 73 }
73 74
74 void *Alloc(size_t bytes) { 75 void *Alloc(size_t bytes) {
75 if (!bytes) 76 if (!bytes)
76 return NULL; 77 return NULL;
77 78
(...skipping 27 matching lines...) Expand all
105 bool OwnsPointer(const void* p) { 106 bool OwnsPointer(const void* p) {
106 for (PageHeader* header = last_; header; header = header->next) { 107 for (PageHeader* header = last_; header; header = header->next) {
107 const char* current = reinterpret_cast<char*>(header); 108 const char* current = reinterpret_cast<char*>(header);
108 if ((p >= current) && (p < current + header->num_pages * page_size_)) 109 if ((p >= current) && (p < current + header->num_pages * page_size_))
109 return true; 110 return true;
110 } 111 }
111 112
112 return false; 113 return false;
113 } 114 }
114 115
  // Returns the cumulative number of pages mmap'd by this allocator;
  // the count only grows, since pages are unmapped solely in FreeAll().
  unsigned long pages_allocated() { return pages_allocated_; }
117
115 private: 118 private:
116 uint8_t *GetNPages(size_t num_pages) { 119 uint8_t *GetNPages(size_t num_pages) {
117 #if defined(__x86_64__) || defined(__aarch64__) || defined(__aarch64__) || \ 120 #if defined(__x86_64__) || defined(__aarch64__) || defined(__aarch64__) || \
118 ((defined(__mips__) && _MIPS_SIM == _ABI64)) 121 ((defined(__mips__) && _MIPS_SIM == _ABI64))
119 void *a = sys_mmap(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE, 122 void *a = sys_mmap(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
120 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 123 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
121 #else 124 #else
122 void *a = sys_mmap2(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE, 125 void *a = sys_mmap2(NULL, page_size_ * num_pages, PROT_READ | PROT_WRITE,
123 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 126 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
124 #endif 127 #endif
125 if (a == MAP_FAILED) 128 if (a == MAP_FAILED)
126 return NULL; 129 return NULL;
127 130
128 #if defined(MEMORY_SANITIZER) 131 #if defined(MEMORY_SANITIZER)
129 // We need to indicate to MSan that memory allocated through sys_mmap is 132 // We need to indicate to MSan that memory allocated through sys_mmap is
130 // initialized, since linux_syscall_support.h doesn't have MSan hooks. 133 // initialized, since linux_syscall_support.h doesn't have MSan hooks.
131 __msan_unpoison(a, page_size_ * num_pages); 134 __msan_unpoison(a, page_size_ * num_pages);
132 #endif 135 #endif
133 136
134 struct PageHeader *header = reinterpret_cast<PageHeader*>(a); 137 struct PageHeader *header = reinterpret_cast<PageHeader*>(a);
135 header->next = last_; 138 header->next = last_;
136 header->num_pages = num_pages; 139 header->num_pages = num_pages;
137 last_ = header; 140 last_ = header;
138 141
142 pages_allocated_ += num_pages;
143
139 return reinterpret_cast<uint8_t*>(a); 144 return reinterpret_cast<uint8_t*>(a);
140 } 145 }
141 146
142 void FreeAll() { 147 void FreeAll() {
143 PageHeader *next; 148 PageHeader *next;
144 149
145 for (PageHeader *cur = last_; cur; cur = next) { 150 for (PageHeader *cur = last_; cur; cur = next) {
146 next = cur->next; 151 next = cur->next;
147 sys_munmap(cur, cur->num_pages * page_size_); 152 sys_munmap(cur, cur->num_pages * page_size_);
148 } 153 }
149 } 154 }
150 155
  // Header placed at the start of each mmap'd region; the regions form a
  // singly linked list so FreeAll() can unmap them all at destruction.
  struct PageHeader {
    PageHeader *next;  // pointer to the start of the next set of pages.
    size_t num_pages;  // the number of pages in this set.
  };
155 160
  const size_t page_size_;  // system page size, cached at construction.
  PageHeader *last_;        // head of the list of mapped regions.
  // presumably the region Alloc() is currently carving allocations from,
  // with page_offset_ tracking bytes already handed out -- Alloc()'s body
  // is not visible in this diff chunk; TODO confirm.
  uint8_t *current_page_;
  size_t page_offset_;
  unsigned long pages_allocated_;  // cumulative pages mapped (never decremented).
160 }; 166 };
161 167
162 // Wrapper to use with STL containers 168 // Wrapper to use with STL containers
163 template <typename T> 169 template <typename T>
164 struct PageStdAllocator : public std::allocator<T> { 170 struct PageStdAllocator : public std::allocator<T> {
165 typedef typename std::allocator<T>::pointer pointer; 171 typedef typename std::allocator<T>::pointer pointer;
166 typedef typename std::allocator<T>::size_type size_type; 172 typedef typename std::allocator<T>::size_type size_type;
167 173
168 explicit PageStdAllocator(PageAllocator& allocator): allocator_(allocator) {} 174 explicit PageStdAllocator(PageAllocator& allocator) : allocator_(allocator),
175 stackdata_(NULL),
176 stackdata_size_(0)
177 {}
178
169 template <class Other> PageStdAllocator(const PageStdAllocator<Other>& other) 179 template <class Other> PageStdAllocator(const PageStdAllocator<Other>& other)
170 : allocator_(other.allocator_) {} 180 : allocator_(other.allocator_),
181 stackdata_(nullptr),
182 stackdata_size_(0)
183 {}
184
185 explicit PageStdAllocator(PageAllocator& allocator,
186 pointer stackdata,
187 size_type stackdata_size) : allocator_(allocator),
188 stackdata_(stackdata),
189 stackdata_size_(stackdata_size)
190 {}
171 191
172 inline pointer allocate(size_type n, const void* = 0) { 192 inline pointer allocate(size_type n, const void* = 0) {
173 return static_cast<pointer>(allocator_.Alloc(sizeof(T) * n)); 193 const size_type size = sizeof(T) * n;
194 if (size <= stackdata_size_) {
195 return stackdata_;
196 }
197 return static_cast<pointer>(allocator_.Alloc(size));
174 } 198 }
175 199
176 inline void deallocate(pointer, size_type) { 200 inline void deallocate(pointer, size_type) {
177 // The PageAllocator doesn't free. 201 // The PageAllocator doesn't free.
178 } 202 }
179 203
180 template <typename U> struct rebind { 204 template <typename U> struct rebind {
181 typedef PageStdAllocator<U> other; 205 typedef PageStdAllocator<U> other;
182 }; 206 };
183 207
184 private: 208 private:
185 // Silly workaround for the gcc from Android's ndk (gcc 4.6), which will 209 // Silly workaround for the gcc from Android's ndk (gcc 4.6), which will
186 // otherwise complain that `other.allocator_` is private in the constructor 210 // otherwise complain that `other.allocator_` is private in the constructor
187 // code. 211 // code.
188 template<typename Other> friend struct PageStdAllocator; 212 template<typename Other> friend struct PageStdAllocator;
189 213
190 PageAllocator& allocator_; 214 PageAllocator& allocator_;
215 pointer stackdata_;
216 size_type stackdata_size_;
191 }; 217 };
192 218
193 // A wasteful vector is a std::vector, except that it allocates memory from a 219 // A wasteful vector is a std::vector, except that it allocates memory from a
194 // PageAllocator. It's wasteful because, when resizing, it always allocates a 220 // PageAllocator. It's wasteful because, when resizing, it always allocates a
195 // whole new array since the PageAllocator doesn't support realloc. 221 // whole new array since the PageAllocator doesn't support realloc.
196 template<class T> 222 template<class T>
197 class wasteful_vector : public std::vector<T, PageStdAllocator<T> > { 223 class wasteful_vector : public std::vector<T, PageStdAllocator<T> > {
198 public: 224 public:
199 wasteful_vector(PageAllocator* allocator, unsigned size_hint = 16) 225 wasteful_vector(PageAllocator* allocator, unsigned size_hint = 16)
200 : std::vector<T, PageStdAllocator<T> >(PageStdAllocator<T>(*allocator)) { 226 : std::vector<T, PageStdAllocator<T> >(PageStdAllocator<T>(*allocator)) {
201 std::vector<T, PageStdAllocator<T> >::reserve(size_hint); 227 std::vector<T, PageStdAllocator<T> >::reserve(size_hint);
202 } 228 }
229 protected:
230 wasteful_vector(PageStdAllocator<T> allocator)
231 : std::vector<T, PageStdAllocator<T> >(allocator) {}
232 };
233
234 // auto_wasteful_vector allocates space on the stack for N entries to avoid
235 // using the PageAllocator for small data, while still allowing for larger data.
236 template<class T, unsigned int N>
237 class auto_wasteful_vector : public wasteful_vector<T> {
238 T stackdata_[N];
239 public:
240 auto_wasteful_vector(PageAllocator* allocator)
241 : wasteful_vector<T>(
242 PageStdAllocator<T>(*allocator,
243 &stackdata_[0],
244 sizeof(stackdata_))) {
245 std::vector<T, PageStdAllocator<T> >::reserve(N);
246 }
203 }; 247 };
204 248
205 } // namespace google_breakpad 249 } // namespace google_breakpad
206 250
207 inline void* operator new(size_t nbytes, 251 inline void* operator new(size_t nbytes,
208 google_breakpad::PageAllocator& allocator) { 252 google_breakpad::PageAllocator& allocator) {
209 return allocator.Alloc(nbytes); 253 return allocator.Alloc(nbytes);
210 } 254 }
211 255
212 #endif // GOOGLE_BREAKPAD_COMMON_MEMORY_H_ 256 #endif // GOOGLE_BREAKPAD_COMMON_MEMORY_H_
OLDNEW
« no previous file with comments | « src/common/linux/file_id_unittest.cc ('k') | src/common/memory_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698