OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Platform-specific code for FreeBSD goes here. For the POSIX-compatible | 5 // Platform-specific code for FreeBSD goes here. For the POSIX-compatible |
6 // parts, the implementation is in platform-posix.cc. | 6 // parts, the implementation is in platform-posix.cc. |
7 | 7 |
8 #include <pthread.h> | 8 #include <pthread.h> |
9 #include <semaphore.h> | 9 #include <semaphore.h> |
10 #include <signal.h> | 10 #include <signal.h> |
(...skipping 170 matching lines...) |
181 | 181 |
182 | 182 |
183 VirtualMemory::VirtualMemory(size_t size, size_t alignment) | 183 VirtualMemory::VirtualMemory(size_t size, size_t alignment) |
184 : address_(NULL), size_(0) { | 184 : address_(NULL), size_(0) { |
185 DCHECK((alignment % OS::AllocateAlignment()) == 0); | 185 DCHECK((alignment % OS::AllocateAlignment()) == 0); |
186 size_t request_size = RoundUp(size + alignment, | 186 size_t request_size = RoundUp(size + alignment, |
187 static_cast<intptr_t>(OS::AllocateAlignment())); | 187 static_cast<intptr_t>(OS::AllocateAlignment())); |
188 void* reservation = mmap(OS::GetRandomMmapAddr(), | 188 void* reservation = mmap(OS::GetRandomMmapAddr(), |
189 request_size, | 189 request_size, |
190 PROT_NONE, | 190 PROT_NONE, |
191 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, | 191 MAP_PRIVATE | MAP_ANON, |
192 kMmapFd, | 192 kMmapFd, |
193 kMmapFdOffset); | 193 kMmapFdOffset); |
194 if (reservation == MAP_FAILED) return; | 194 if (reservation == MAP_FAILED) return; |
195 | 195 |
196 uint8_t* base = static_cast<uint8_t*>(reservation); | 196 uint8_t* base = static_cast<uint8_t*>(reservation); |
197 uint8_t* aligned_base = RoundUp(base, alignment); | 197 uint8_t* aligned_base = RoundUp(base, alignment); |
198 DCHECK_LE(base, aligned_base); | 198 DCHECK_LE(base, aligned_base); |
199 | 199 |
200 // Unmap extra memory reserved before and after the desired block. | 200 // Unmap extra memory reserved before and after the desired block. |
201 if (aligned_base != base) { | 201 if (aligned_base != base) { |
(...skipping 51 matching lines...) |
253 bool VirtualMemory::Guard(void* address) { | 253 bool VirtualMemory::Guard(void* address) { |
254 OS::Guard(address, OS::CommitPageSize()); | 254 OS::Guard(address, OS::CommitPageSize()); |
255 return true; | 255 return true; |
256 } | 256 } |
257 | 257 |
258 | 258 |
259 void* VirtualMemory::ReserveRegion(size_t size) { | 259 void* VirtualMemory::ReserveRegion(size_t size) { |
260 void* result = mmap(OS::GetRandomMmapAddr(), | 260 void* result = mmap(OS::GetRandomMmapAddr(), |
261 size, | 261 size, |
262 PROT_NONE, | 262 PROT_NONE, |
263 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, | 263 MAP_PRIVATE | MAP_ANON, |
264 kMmapFd, | 264 kMmapFd, |
265 kMmapFdOffset); | 265 kMmapFdOffset); |
266 | 266 |
267 if (result == MAP_FAILED) return NULL; | 267 if (result == MAP_FAILED) return NULL; |
268 | 268 |
269 return result; | 269 return result; |
270 } | 270 } |
271 | 271 |
272 | 272 |
273 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { | 273 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
274 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 274 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
275 if (MAP_FAILED == mmap(base, | 275 if (MAP_FAILED == mmap(base, |
276 size, | 276 size, |
277 prot, | 277 prot, |
278 MAP_PRIVATE | MAP_ANON | MAP_FIXED, | 278 MAP_PRIVATE | MAP_ANON | MAP_FIXED, |
279 kMmapFd, | 279 kMmapFd, |
280 kMmapFdOffset)) { | 280 kMmapFdOffset)) { |
281 return false; | 281 return false; |
282 } | 282 } |
283 return true; | 283 return true; |
284 } | 284 } |
285 | 285 |
286 | 286 |
287 bool VirtualMemory::UncommitRegion(void* base, size_t size) { | 287 bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
288 return mmap(base, | 288 return mmap(base, |
289 size, | 289 size, |
290 PROT_NONE, | 290 PROT_NONE, |
291 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, | 291 MAP_PRIVATE | MAP_ANON | MAP_FIXED, |
292 kMmapFd, | 292 kMmapFd, |
293 kMmapFdOffset) != MAP_FAILED; | 293 kMmapFdOffset) != MAP_FAILED; |
294 } | 294 } |
295 | 295 |
296 | 296 |
297 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { | 297 bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
298 return munmap(base, size) == 0; | 298 return munmap(base, size) == 0; |
299 } | 299 } |
300 | 300 |
301 | 301 |
302 bool VirtualMemory::HasLazyCommits() { | 302 bool VirtualMemory::HasLazyCommits() { |
303 // TODO(alph): implement for the platform. | 303 // TODO(alph): implement for the platform. |
304 return false; | 304 return false; |
305 } | 305 } |
306 | 306 |
307 } } // namespace v8::base | 307 } } // namespace v8::base |
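
For context on what the hunk above touches: VirtualMemory reserves an oversized, inaccessible (PROT_NONE) mapping, rounds the base up to the requested alignment, unmaps the slack before and after the aligned block, and later commits pages by re-mmapping them with MAP_FIXED and real protection bits. The sketch below is a minimal standalone illustration of that reserve/align/commit flow using plain POSIX mmap; the helper names (RoundUpTo, ReserveAligned, Commit) and the main() driver are made up for illustration and are not part of V8.

// Standalone sketch of the reserve/align/commit pattern shown in the hunk
// above. Plain POSIX mmap only; no V8 types. Helper names are illustrative.
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

static const int kMmapFd = -1;        // anonymous mapping: no backing file
static const off_t kMmapFdOffset = 0;

// Round 'value' up to an arbitrary multiple, like the RoundUp used above.
static uintptr_t RoundUpTo(uintptr_t value, size_t multiple) {
  uintptr_t rem = value % multiple;
  return rem == 0 ? value : value + (multiple - rem);
}

// Reserve an inaccessible (PROT_NONE) region large enough to contain an
// aligned block of 'size' bytes, then unmap the slack before and after it.
static void* ReserveAligned(size_t size, size_t alignment) {
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t request = static_cast<size_t>(RoundUpTo(size + alignment, page));
  void* reservation = mmap(NULL, request, PROT_NONE,
                           MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
  if (reservation == MAP_FAILED) return NULL;

  uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  uintptr_t aligned = RoundUpTo(base, alignment);
  if (aligned > base) munmap(reservation, aligned - base);
  size_t tail = (base + request) - (aligned + size);
  if (tail > 0) munmap(reinterpret_cast<void*>(aligned + size), tail);
  return reinterpret_cast<void*>(aligned);
}

// Commit reserved pages by remapping them MAP_FIXED with real protections,
// the same shape as VirtualMemory::CommitRegion above.
static bool Commit(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  return mmap(base, size, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}

int main() {
  const size_t size = 1 << 20;         // 1 MB block
  const size_t alignment = 1 << 16;    // 64 KB alignment
  void* p = ReserveAligned(size, alignment);
  if (p != NULL && Commit(p, size, false)) {
    static_cast<char*>(p)[0] = 1;      // committed: now readable/writable
    printf("aligned base: %p\n", p);
    munmap(p, size);                   // release, as in ReleaseRegion
  }
  return 0;
}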