OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_UTILS_H_ | 5 #ifndef V8_UTILS_H_ |
6 #define V8_UTILS_H_ | 6 #define V8_UTILS_H_ |
7 | 7 |
8 #include <limits.h> | 8 #include <limits.h> |
9 #include <stdlib.h> | 9 #include <stdlib.h> |
10 #include <string.h> | 10 #include <string.h> |
(...skipping 419 matching lines...) |
430 | 430 |
431 // Initializes the codegen support that depends on CPU features. | 431 // Initializes the codegen support that depends on CPU features. |
432 void init_memcopy_functions(Isolate* isolate); | 432 void init_memcopy_functions(Isolate* isolate); |
433 | 433 |
434 #if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87) | 434 #if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87) |
435 // Limit below which the extra overhead of the MemCopy function is likely | 435 // Limit below which the extra overhead of the MemCopy function is likely |
436 // to outweigh the benefits of faster copying. | 436 // to outweigh the benefits of faster copying. |
437 const int kMinComplexMemCopy = 64; | 437 const int kMinComplexMemCopy = 64; |
438 | 438 |
439 // Copy memory area. No restrictions. | 439 // Copy memory area. No restrictions. |
440 V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size); | 440 void MemMove(void* dest, const void* src, size_t size); |
441 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); | 441 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); |
442 | 442 |
443 // Keep the distinction of "move" vs. "copy" for the benefit of other | 443 // Keep the distinction of "move" vs. "copy" for the benefit of other |
444 // architectures. | 444 // architectures. |
445 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 445 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
446 MemMove(dest, src, size); | 446 MemMove(dest, src, size); |
447 } | 447 } |
448 #elif defined(V8_HOST_ARCH_ARM) | 448 #elif defined(V8_HOST_ARCH_ARM) |
449 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, | 449 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, |
450 size_t size); | 450 size_t size); |
451 extern MemCopyUint8Function memcopy_uint8_function; | 451 extern MemCopyUint8Function memcopy_uint8_function; |
452 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, | 452 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, |
453 size_t chars) { | 453 size_t chars) { |
454 memcpy(dest, src, chars); | 454 memcpy(dest, src, chars); |
455 } | 455 } |
456 // For values < 16, the assembler function is slower than the inlined C code. | 456 // For values < 16, the assembler function is slower than the inlined C code. |
457 const int kMinComplexMemCopy = 16; | 457 const int kMinComplexMemCopy = 16; |
458 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 458 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
459 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), | 459 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), |
460 reinterpret_cast<const uint8_t*>(src), size); | 460 reinterpret_cast<const uint8_t*>(src), size); |
461 } | 461 } |
462 V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, | 462 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { |
463 size_t size) { | |
464 memmove(dest, src, size); | 463 memmove(dest, src, size); |
465 } | 464 } |
466 | 465 |
467 typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src, | 466 typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src, |
468 size_t size); | 467 size_t size); |
469 extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function; | 468 extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function; |
470 void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, | 469 void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, |
471 size_t chars); | 470 size_t chars); |
472 // For values < 12, the assembler function is slower than the inlined C code. | 471 // For values < 12, the assembler function is slower than the inlined C code. |
473 const int kMinComplexConvertMemCopy = 12; | 472 const int kMinComplexConvertMemCopy = 12; |
474 V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src, | 473 V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src, |
475 size_t size) { | 474 size_t size) { |
476 (*memcopy_uint16_uint8_function)(dest, src, size); | 475 (*memcopy_uint16_uint8_function)(dest, src, size); |
477 } | 476 } |
478 #elif defined(V8_HOST_ARCH_MIPS) | 477 #elif defined(V8_HOST_ARCH_MIPS) |
479 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, | 478 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, |
480 size_t size); | 479 size_t size); |
481 extern MemCopyUint8Function memcopy_uint8_function; | 480 extern MemCopyUint8Function memcopy_uint8_function; |
482 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, | 481 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, |
483 size_t chars) { | 482 size_t chars) { |
484 memcpy(dest, src, chars); | 483 memcpy(dest, src, chars); |
485 } | 484 } |
486 // For values < 16, the assembler function is slower than the inlined C code. | 485 // For values < 16, the assembler function is slower than the inlined C code. |
487 const int kMinComplexMemCopy = 16; | 486 const int kMinComplexMemCopy = 16; |
488 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 487 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
489 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), | 488 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), |
490 reinterpret_cast<const uint8_t*>(src), size); | 489 reinterpret_cast<const uint8_t*>(src), size); |
491 } | 490 } |
492 V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, | 491 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { |
493 size_t size) { | |
494 memmove(dest, src, size); | 492 memmove(dest, src, size); |
495 } | 493 } |
496 #else | 494 #else |
497 // Copy memory area to disjoint memory area. | 495 // Copy memory area to disjoint memory area. |
498 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | 496 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { |
499 memcpy(dest, src, size); | 497 memcpy(dest, src, size); |
500 } | 498 } |
501 V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, | 499 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { |
502 size_t size) { | |
503 memmove(dest, src, size); | 500 memmove(dest, src, size); |
504 } | 501 } |
505 const int kMinComplexMemCopy = 16 * kPointerSize; | 502 const int kMinComplexMemCopy = 16 * kPointerSize; |
506 #endif // V8_TARGET_ARCH_IA32 | 503 #endif // V8_TARGET_ARCH_IA32 |
507 | 504 |
508 | 505 |
509 // ---------------------------------------------------------------------------- | 506 // ---------------------------------------------------------------------------- |
510 // Miscellaneous | 507 // Miscellaneous |
511 | 508 |
512 // A static resource holds a static instance that can be reserved in | 509 // A static resource holds a static instance that can be reserved in |
(...skipping 1081 matching lines...) |
1594 byte* dst = reinterpret_cast<byte*>(p); | 1591 byte* dst = reinterpret_cast<byte*>(p); |
1595 for (size_t i = 0; i < sizeof(V); i++) { | 1592 for (size_t i = 0; i < sizeof(V); i++) { |
1596 dst[i] = src[sizeof(V) - i - 1]; | 1593 dst[i] = src[sizeof(V) - i - 1]; |
1597 } | 1594 } |
1598 #endif // V8_TARGET_LITTLE_ENDIAN | 1595 #endif // V8_TARGET_LITTLE_ENDIAN |
1599 } | 1596 } |
1600 } // namespace internal | 1597 } // namespace internal |
1601 } // namespace v8 | 1598 } // namespace v8 |
1602 | 1599 |
1603 #endif // V8_UTILS_H_ | 1600 #endif // V8_UTILS_H_ |
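
Note on kMinComplexMemCopy: the constant marks the copy size below which the call overhead of the specialized MemCopy/MemMove routine is expected to outweigh its benefit (64 bytes on IA32/X87, 16 on ARM and MIPS). The following is a minimal, self-contained sketch of how a caller might use that threshold; the helper name CopySmallOrFast and the standalone MemCopy stand-in are illustrative assumptions, not part of this patch.

#include <cstddef>
#include <cstring>

namespace {

const size_t kMinComplexMemCopy = 64;  // threshold from the IA32/X87 branch above

// Stand-in for v8::internal::MemCopy, which on the IA32/X87 branch simply
// forwards to MemMove.
void MemCopy(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}

// Below the threshold, copy with a trivial byte loop; at or above it, call
// the (potentially faster, but higher-overhead) MemCopy routine.
void CopySmallOrFast(void* dest, const void* src, size_t size) {
  if (size < kMinComplexMemCopy) {
    unsigned char* d = static_cast<unsigned char*>(dest);
    const unsigned char* s = static_cast<const unsigned char*>(src);
    for (size_t i = 0; i < size; i++) d[i] = s[i];
  } else {
    MemCopy(dest, src, size);
  }
}

}  // namespace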
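The tail of the last hunk shows only the non-little-endian path of a byte-reversing store. One plausible reading is a helper that writes a value at p in a fixed byte order; the enclosing declaration is not visible in this hunk, so the name WriteLittleEndianValue and the little-endian fast path below are assumptions, not the file's actual signature.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch: store `value` at `p` in little-endian byte order regardless of the
// host's endianness. On big-endian targets the bytes are written in reverse,
// mirroring the loop visible in the diff above.
template <typename V>
void WriteLittleEndianValue(void* p, V value) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
  memcpy(p, &value, sizeof(V));
#else
  const uint8_t* src = reinterpret_cast<const uint8_t*>(&value);
  uint8_t* dst = reinterpret_cast<uint8_t*>(p);
  for (size_t i = 0; i < sizeof(V); i++) {
    dst[i] = src[sizeof(V) - i - 1];
  }
#endif
}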