| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_UTILS_H_ | 5 #ifndef V8_UTILS_H_ |
| 6 #define V8_UTILS_H_ | 6 #define V8_UTILS_H_ |
| 7 | 7 |
| 8 #include <limits.h> | 8 #include <limits.h> |
| 9 #include <stdlib.h> | 9 #include <stdlib.h> |
| 10 #include <string.h> | 10 #include <string.h> |
| (...skipping 419 matching lines...) |
| 430 | 430 |
| 431 // Initializes the codegen support that depends on CPU features. | 431 // Initializes the codegen support that depends on CPU features. |
| 432 void init_memcopy_functions(Isolate* isolate); | 432 void init_memcopy_functions(Isolate* isolate); |
| 433 | 433 |
| 434 #if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87) | 434 #if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87) |
| 435 // Limit below which the extra overhead of the MemCopy function is likely | 435 // Limit below which the extra overhead of the MemCopy function is likely |
| 436 // to outweigh the benefits of faster copying. | 436 // to outweigh the benefits of faster copying. |
| 437 const int kMinComplexMemCopy = 64; | 437 const int kMinComplexMemCopy = 64; |
| 438 | 438 |
| 439 // Copy memory area. No restrictions. | 439 // Copy memory area. No restrictions. |
| 440 void MemMove(void* dest, const void* src, size_t size); | 440 V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size); |
| 441 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); | 441 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); |
| 442 | 442 |
| 443 // Keep the distinction of "move" vs. "copy" for the benefit of other | 443 // Keep the distinction of "move" vs. "copy" for the benefit of other |
| 444 // architectures. | 444 // architectures. |
| 445 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 445 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
| 446 MemMove(dest, src, size); | 446 MemMove(dest, src, size); |
| 447 } | 447 } |
| 448 #elif defined(V8_HOST_ARCH_ARM) | 448 #elif defined(V8_HOST_ARCH_ARM) |
| 449 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, | 449 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, |
| 450 size_t size); | 450 size_t size); |
| 451 extern MemCopyUint8Function memcopy_uint8_function; | 451 V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function; |
| 452 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, | 452 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, |
| 453 size_t chars) { | 453 size_t chars) { |
| 454 memcpy(dest, src, chars); | 454 memcpy(dest, src, chars); |
| 455 } | 455 } |
| 456 // For values < 16, the assembler function is slower than the inlined C code. | 456 // For values < 16, the assembler function is slower than the inlined C code. |
| 457 const int kMinComplexMemCopy = 16; | 457 const int kMinComplexMemCopy = 16; |
| 458 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 458 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
| 459 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), | 459 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), |
| 460 reinterpret_cast<const uint8_t*>(src), size); | 460 reinterpret_cast<const uint8_t*>(src), size); |
| 461 } | 461 } |
| 462 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { | 462 V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, |
| | 463 size_t size) { |
| 463 memmove(dest, src, size); | 464 memmove(dest, src, size); |
| 464 } | 465 } |
| 465 | 466 |
| 466 typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src, | 467 typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src, |
| 467 size_t size); | 468 size_t size); |
| 468 extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function; | 469 extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function; |
| 469 void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, | 470 void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, |
| 470 size_t chars); | 471 size_t chars); |
| 471 // For values < 12, the assembler function is slower than the inlined C code. | 472 // For values < 12, the assembler function is slower than the inlined C code. |
| 472 const int kMinComplexConvertMemCopy = 12; | 473 const int kMinComplexConvertMemCopy = 12; |
| 473 V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src, | 474 V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src, |
| 474 size_t size) { | 475 size_t size) { |
| 475 (*memcopy_uint16_uint8_function)(dest, src, size); | 476 (*memcopy_uint16_uint8_function)(dest, src, size); |
| 476 } | 477 } |
| 477 #elif defined(V8_HOST_ARCH_MIPS) | 478 #elif defined(V8_HOST_ARCH_MIPS) |
| 478 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, | 479 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, |
| 479 size_t size); | 480 size_t size); |
| 480 extern MemCopyUint8Function memcopy_uint8_function; | 481 V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function; |
| 481 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, | 482 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, |
| 482 size_t chars) { | 483 size_t chars) { |
| 483 memcpy(dest, src, chars); | 484 memcpy(dest, src, chars); |
| 484 } | 485 } |
| 485 // For values < 16, the assembler function is slower than the inlined C code. | 486 // For values < 16, the assembler function is slower than the inlined C code. |
| 486 const int kMinComplexMemCopy = 16; | 487 const int kMinComplexMemCopy = 16; |
| 487 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 488 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
| 488 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), | 489 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), |
| 489 reinterpret_cast<const uint8_t*>(src), size); | 490 reinterpret_cast<const uint8_t*>(src), size); |
| 490 } | 491 } |
| 491 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { | 492 V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, |
| | 493 size_t size) { |
| 492 memmove(dest, src, size); | 494 memmove(dest, src, size); |
| 493 } | 495 } |
| 494 #else | 496 #else |
| 495 // Copy memory area to disjoint memory area. | 497 // Copy memory area to disjoint memory area. |
| 496 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 498 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
| 497 memcpy(dest, src, size); | 499 memcpy(dest, src, size); |
| 498 } | 500 } |
| 499 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { | 501 V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, |
| | 502 size_t size) { |
| 500 memmove(dest, src, size); | 503 memmove(dest, src, size); |
| 501 } | 504 } |
| 502 const int kMinComplexMemCopy = 16 * kPointerSize; | 505 const int kMinComplexMemCopy = 16 * kPointerSize; |
| 503 #endif // V8_TARGET_ARCH_IA32 | 506 #endif // V8_TARGET_ARCH_IA32 |
| 504 | 507 |
| 505 | 508 |
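Across the branches above, MemCopy/MemMove either forward to memcpy/memmove directly or dispatch through function pointers such as memcopy_uint8_function, which init_memcopy_functions(Isolate*) is expected to install with a CPU-specific routine; kMinComplexMemCopy marks the size below which that indirection is likely not worth its overhead. Below is a minimal self-contained sketch of that dispatch-plus-threshold pattern, not V8's implementation: CopyFunction, CopyWrapper, copy_function, Copy and kSmallCopyLimit are made-up names, and the sketch folds the threshold check into the copy helper for brevity, whereas the header above only exports the constant for callers to consult.

```cpp
#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Hypothetical stand-in for the memcopy_uint8_function machinery above.
typedef void (*CopyFunction)(uint8_t* dest, const uint8_t* src, size_t size);

// Portable fallback, analogous in spirit to MemCopyUint8Wrapper.
static void CopyWrapper(uint8_t* dest, const uint8_t* src, size_t size) {
  memcpy(dest, src, size);
}

// A CPU-specific routine would be swapped in here at startup
// (in V8, by init_memcopy_functions()).
static CopyFunction copy_function = &CopyWrapper;

// Plays the role of kMinComplexMemCopy: below this size, the indirect
// call costs more than it saves.
const size_t kSmallCopyLimit = 16;

inline void Copy(void* dest, const void* src, size_t size) {
  uint8_t* d = reinterpret_cast<uint8_t*>(dest);
  const uint8_t* s = reinterpret_cast<const uint8_t*>(src);
  if (size < kSmallCopyLimit) {
    for (size_t i = 0; i < size; i++) d[i] = s[i];  // simple inline loop
    return;
  }
  (*copy_function)(d, s, size);  // dispatch through the function pointer
}
```

The ARM and MIPS branches follow this same shape; the generic branch at the end simply forwards to memcpy/memmove with kMinComplexMemCopy = 16 * kPointerSize.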
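The ARM branch additionally exposes MemCopyUint16Uint8, a widening uint8_t-to-uint16_t copy dispatched through memcopy_uint16_uint8_function with its own cut-off, kMinComplexConvertMemCopy = 12. A hedged sketch of what a portable fallback for such a widening copy could look like (the function name below is hypothetical; the real wrapper, MemCopyUint16Uint8Wrapper, is only declared in the excerpt):

```cpp
#include <stddef.h>
#include <stdint.h>

// Hypothetical portable fallback for the widening copy: each source byte
// is zero-extended into a 16-bit destination element.
static void CopyUint8ToUint16Fallback(uint16_t* dest, const uint8_t* src,
                                      size_t chars) {
  for (size_t i = 0; i < chars; i++) {
    dest[i] = src[i];  // implicit zero-extension uint8_t -> uint16_t
  }
}
```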
| 506 // ---------------------------------------------------------------------------- | 509 // ---------------------------------------------------------------------------- |
| 507 // Miscellaneous | 510 // Miscellaneous |
| 508 | 511 |
| 509 // A static resource holds a static instance that can be reserved in | 512 // A static resource holds a static instance that can be reserved in |
| (...skipping 1081 matching lines...) |
| 1591 byte* dst = reinterpret_cast<byte*>(p); | 1594 byte* dst = reinterpret_cast<byte*>(p); |
| 1592 for (size_t i = 0; i < sizeof(V); i++) { | 1595 for (size_t i = 0; i < sizeof(V); i++) { |
| 1593 dst[i] = src[sizeof(V) - i - 1]; | 1596 dst[i] = src[sizeof(V) - i - 1]; |
| 1594 } | 1597 } |
| 1595 #endif // V8_TARGET_LITTLE_ENDIAN | 1598 #endif // V8_TARGET_LITTLE_ENDIAN |
| 1596 } | 1599 } |
| 1597 } // namespace internal | 1600 } // namespace internal |
| 1598 } // namespace v8 | 1601 } // namespace v8 |
| 1599 | 1602 |
| 1600 #endif // V8_UTILS_H_ | 1603 #endif // V8_UTILS_H_ |
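The final hunk (old lines 1591-1596) shows the non-little-endian branch of what appears to be a byte-order-aware write: on big-endian targets the bytes of the value are stored in reverse order, guarded by V8_TARGET_LITTLE_ENDIAN. The helper name and signature below are guesses, since the enclosing declaration falls outside the excerpt; only the reversal loop is taken from the diff.

```cpp
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef uint8_t byte;

// Hypothetical sketch: store `value` at `p` in little-endian byte order
// regardless of the host's byte order. V8_TARGET_LITTLE_ENDIAN is defined
// by V8's build configuration, not here.
template <typename V>
static inline void WriteValueLittleEndian(void* p, V value) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
  memcpy(p, &value, sizeof(V));  // host order already matches
#else
  // Big-endian host: emit the bytes of `value` in reverse order,
  // mirroring the loop in the hunk above.
  const byte* src = reinterpret_cast<const byte*>(&value);
  byte* dst = reinterpret_cast<byte*>(p);
  for (size_t i = 0; i < sizeof(V); i++) {
    dst[i] = src[sizeof(V) - i - 1];
  }
#endif
}
```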