| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_UTILS_H_ | 5 #ifndef V8_UTILS_H_ |
| 6 #define V8_UTILS_H_ | 6 #define V8_UTILS_H_ |
| 7 | 7 |
| 8 #include <limits.h> | 8 #include <limits.h> |
| 9 #include <stdlib.h> | 9 #include <stdlib.h> |
| 10 #include <string.h> | 10 #include <string.h> |
| (...skipping 304 matching lines...) | |
| 315 | 315 |
| 316 | 316 |
| 317 inline uint32_t ComputePointerHash(void* ptr) { | 317 inline uint32_t ComputePointerHash(void* ptr) { |
| 318 return ComputeIntegerHash( | 318 return ComputeIntegerHash( |
| 319 static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)), | 319 static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)), |
| 320 v8::internal::kZeroHashSeed); | 320 v8::internal::kZeroHashSeed); |
| 321 } | 321 } |
| 322 | 322 |
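For reference, ComputePointerHash above truncates the address to 32 bits and feeds it through ComputeIntegerHash with the zero seed, so it can be used directly to index pointer-keyed tables. A hypothetical call site follows; the bucket-table names are assumptions, not from this header:

```cpp
#include <stdint.h>

// Map an arbitrary pointer key to one of kNumBuckets slots; with a
// power-of-two bucket count the modulo reduces to a cheap mask.
// kNumBuckets and BucketForPointer are hypothetical names.
uint32_t BucketForPointer(void* key) {
  static const uint32_t kNumBuckets = 64;  // hypothetical table size
  return v8::internal::ComputePointerHash(key) % kNumBuckets;
}
```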
| 323 | 323 |
| 324 // ---------------------------------------------------------------------------- | 324 // ---------------------------------------------------------------------------- |
| 325 // Generated memcpy/memmove | |
| 326 | |
| 327 // Initializes the codegen support that depends on CPU features. This is | |
| 328 // called after CPU initialization. | |
| 329 void init_memcopy_functions(); | |
| 330 | |
| 331 #if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87) | |
| 332 // Limit below which the extra overhead of the MemCopy function is likely | |
| 333 // to outweigh the benefits of faster copying. | |
| 334 const int kMinComplexMemCopy = 64; | |
| 335 | |
| 336 // Copy memory area. No restrictions. | |
| 337 void MemMove(void* dest, const void* src, size_t size); | |
| 338 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size); | |
| 339 | |
| 340 // Keep the distinction of "move" vs. "copy" for the benefit of other | |
| 341 // architectures. | |
| 342 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | |
| 343 MemMove(dest, src, size); | |
| 344 } | |
| 345 #elif defined(V8_HOST_ARCH_ARM) | |
| 346 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, | |
| 347 size_t size); | |
| 348 extern MemCopyUint8Function memcopy_uint8_function; | |
| 349 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, size_t chars) { | |
| 350 memcpy(dest, src, chars); | |
| 351 } | |
| 352 // For values < 16, the assembler function is slower than the inlined C code. | |
| 353 const int kMinComplexMemCopy = 16; | |
| 354 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | |
| 355 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), | |
| 356 reinterpret_cast<const uint8_t*>(src), size); | |
| 357 } | |
| 358 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { | |
| 359 memmove(dest, src, size); | |
| 360 } | |
| 361 | |
| 362 typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src, | |
| 363 size_t size); | |
| 364 extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function; | |
| 365 void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src, | |
| 366 size_t chars); | |
| 367 // For values < 12, the assembler function is slower than the inlined C code. | |
| 368 const int kMinComplexConvertMemCopy = 12; | |
| 369 V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src, | |
| 370 size_t size) { | |
| 371 (*memcopy_uint16_uint8_function)(dest, src, size); | |
| 372 } | |
| 373 #elif defined(V8_HOST_ARCH_MIPS) | |
| 374 typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src, | |
| 375 size_t size); | |
| 376 extern MemCopyUint8Function memcopy_uint8_function; | |
| 377 V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src, | |
| 378 size_t chars) { | |
| 379 memcpy(dest, src, chars); | |
| 380 } | |
| 381 // For values < 16, the assembler function is slower than the inlined C code. | |
| 382 const int kMinComplexMemCopy = 16; | |
| 383 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | |
| 384 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest), | |
| 385 reinterpret_cast<const uint8_t*>(src), size); | |
| 386 } | |
| 387 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { | |
| 388 memmove(dest, src, size); | |
| 389 } | |
| 390 #else | |
| 391 // Copy memory area to disjoint memory area. | |
| 392 V8_INLINE void MemCopy(void* dest, const void* src, size_t size) { | |
| 393 memcpy(dest, src, size); | |
| 394 } | |
| 395 V8_INLINE void MemMove(void* dest, const void* src, size_t size) { | |
| 396 memmove(dest, src, size); | |
| 397 } | |
| 398 const int kMinComplexMemCopy = 16 * kPointerSize; | |
| 399 #endif // V8_TARGET_ARCH_IA32 | |
| 400 | |
| 401 | |
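The block removed on the left declares per-architecture MemCopy/MemMove variants that are selected at runtime through function pointers filled in by init_memcopy_functions(), with kMinComplexMemCopy marking the size below which a plain inline copy is cheaper than the out-of-line routine. Below is a minimal sketch of that dispatch pattern, under assumed names (FastCopyFn, InitCopyFunctions, CopyDispatch and friends are not V8 identifiers):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Function-pointer type for the copy routine, mirroring MemCopyUint8Function.
typedef void (*FastCopyFn)(uint8_t* dest, const uint8_t* src, size_t size);

// Default implementation: plain memcpy, usable before feature detection runs.
static void CopyWithMemcpy(uint8_t* dest, const uint8_t* src, size_t size) {
  memcpy(dest, src, size);
}

static FastCopyFn fast_copy = &CopyWithMemcpy;

// Analogue of init_memcopy_functions(): called once after CPU detection; a
// specialized (generated or SIMD) routine would be installed here.
void InitCopyFunctions(bool have_fast_routine) {
  if (have_fast_routine) {
    // fast_copy = &GeneratedCopy;  // hypothetical assembler/codegen routine
  }
}

// Analogue of kMinComplexMemCopy: below this size a simple inline loop beats
// the setup overhead of the indirect call.
static const size_t kSmallCopyLimit = 16;

void CopyDispatch(uint8_t* dest, const uint8_t* src, size_t size) {
  if (size < kSmallCopyLimit) {
    for (size_t i = 0; i < size; ++i) dest[i] = src[i];
  } else {
    (*fast_copy)(dest, src, size);
  }
}
```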
| 402 // ---------------------------------------------------------------------------- | |
| 403 // Miscellaneous | 325 // Miscellaneous |
| 404 | 326 |
| 405 // A static resource holds a static instance that can be reserved in | 327 // A static resource holds a static instance that can be reserved in |
| 406 // a local scope using an instance of Access. Attempts to re-reserve | 328 // a local scope using an instance of Access. Attempts to re-reserve |
| 407 // the instance will cause an error. | 329 // the instance will cause an error. |
| 408 template <typename T> | 330 template <typename T> |
| 409 class StaticResource { | 331 class StaticResource { |
| 410 public: | 332 public: |
| 411 StaticResource() : is_reserved_(false) {} | 333 StaticResource() : is_reserved_(false) {} |
| 412 | 334 |
| (...skipping 60 matching lines...) | |
| 473 | 395 |
| 474 explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) { | 396 explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) { |
| 475 for (int i = 0; i < kSize; ++i) { | 397 for (int i = 0; i < kSize; ++i) { |
| 476 buffer_[i] = initial_value; | 398 buffer_[i] = initial_value; |
| 477 } | 399 } |
| 478 } | 400 } |
| 479 | 401 |
| 480 // When copying, make the underlying Vector reference our buffer. | 402 // When copying, make the underlying Vector reference our buffer. |
| 481 EmbeddedVector(const EmbeddedVector& rhs) | 403 EmbeddedVector(const EmbeddedVector& rhs) |
| 482 : Vector<T>(rhs) { | 404 : Vector<T>(rhs) { |
| 483 MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize); | 405 OS::MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize); |
| 484 set_start(buffer_); | 406 set_start(buffer_); |
| 485 } | 407 } |
| 486 | 408 |
| 487 EmbeddedVector& operator=(const EmbeddedVector& rhs) { | 409 EmbeddedVector& operator=(const EmbeddedVector& rhs) { |
| 488 if (this == &rhs) return *this; | 410 if (this == &rhs) return *this; |
| 489 Vector<T>::operator=(rhs); | 411 Vector<T>::operator=(rhs); |
| 490 MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize); | 412 OS::MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize); |
| 491 this->set_start(buffer_); | 413 this->set_start(buffer_); |
| 492 return *this; | 414 return *this; |
| 493 } | 415 } |
| 494 | 416 |
| 495 private: | 417 private: |
| 496 T buffer_[kSize]; | 418 T buffer_[kSize]; |
| 497 }; | 419 }; |
| 498 | 420 |
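The copy constructor and assignment operator above copy the inline buffer and then re-point the base Vector at the destination's own buffer_; without that, the copy would keep referencing the source object's storage. A small usage sketch, assuming Vector's usual operator[] accessor from the skipped part of this header (illustrative only, not from this CL):

```cpp
void EmbeddedVectorCopyExample() {
  v8::internal::EmbeddedVector<int, 4> a(0);   // inline buffer_, zero-filled
  a[0] = 42;

  v8::internal::EmbeddedVector<int, 4> b(a);   // copies buffer_, then set_start(buffer_)
  a[0] = 7;

  // b[0] is still 42: the copy owns its own storage instead of aliasing a's buffer_.
}
```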
| 499 | 421 |
| 500 /* | 422 /* |
| (...skipping 710 matching lines...) | |
| 1211 template <typename T> | 1133 template <typename T> |
| 1212 inline void CopyWords(T* dst, const T* src, size_t num_words) { | 1134 inline void CopyWords(T* dst, const T* src, size_t num_words) { |
| 1213 STATIC_ASSERT(sizeof(T) == kPointerSize); | 1135 STATIC_ASSERT(sizeof(T) == kPointerSize); |
| 1214 // TODO(mvstanton): disabled because mac builds are bogus failing on this | 1136 // TODO(mvstanton): disabled because mac builds are bogus failing on this |
| 1215 // assert. They are doing a signed comparison. Investigate in | 1137 // assert. They are doing a signed comparison. Investigate in |
| 1216 // the morning. | 1138 // the morning. |
| 1217 // ASSERT(Min(dst, const_cast<T*>(src)) + num_words <= | 1139 // ASSERT(Min(dst, const_cast<T*>(src)) + num_words <= |
| 1218 // Max(dst, const_cast<T*>(src))); | 1140 // Max(dst, const_cast<T*>(src))); |
| 1219 ASSERT(num_words > 0); | 1141 ASSERT(num_words > 0); |
| 1220 | 1142 |
| 1221 // Use block copying MemCopy if the segment we're copying is large | 1143 // Use block copying OS::MemCopy if the segment we're copying is large |
| 1222 // enough to justify the extra call/setup overhead. | 1144 // enough to justify the extra call/setup overhead. |
| 1223 static const size_t kBlockCopyLimit = 16; | 1145 static const size_t kBlockCopyLimit = 16; |
| 1224 | 1146 |
| 1225 if (num_words < kBlockCopyLimit) { | 1147 if (num_words < kBlockCopyLimit) { |
| 1226 do { | 1148 do { |
| 1227 num_words--; | 1149 num_words--; |
| 1228 *dst++ = *src++; | 1150 *dst++ = *src++; |
| 1229 } while (num_words > 0); | 1151 } while (num_words > 0); |
| 1230 } else { | 1152 } else { |
| 1231 MemCopy(dst, src, num_words * kPointerSize); | 1153 OS::MemCopy(dst, src, num_words * kPointerSize); |
| 1232 } | 1154 } |
| 1233 } | 1155 } |
| 1234 | 1156 |
| 1235 | 1157 |
| 1236 // Copies words from |src| to |dst|. No restrictions. | 1158 // Copies words from |src| to |dst|. No restrictions. |
| 1237 template <typename T> | 1159 template <typename T> |
| 1238 inline void MoveWords(T* dst, const T* src, size_t num_words) { | 1160 inline void MoveWords(T* dst, const T* src, size_t num_words) { |
| 1239 STATIC_ASSERT(sizeof(T) == kPointerSize); | 1161 STATIC_ASSERT(sizeof(T) == kPointerSize); |
| 1240 ASSERT(num_words > 0); | 1162 ASSERT(num_words > 0); |
| 1241 | 1163 |
| 1242 // Use block copying MemCopy if the segment we're copying is large | 1164 // Use block copying OS::MemCopy if the segment we're copying is large |
| 1243 // enough to justify the extra call/setup overhead. | 1165 // enough to justify the extra call/setup overhead. |
| 1244 static const size_t kBlockCopyLimit = 16; | 1166 static const size_t kBlockCopyLimit = 16; |
| 1245 | 1167 |
| 1246 if (num_words < kBlockCopyLimit && | 1168 if (num_words < kBlockCopyLimit && |
| 1247 ((dst < src) || (dst >= (src + num_words * kPointerSize)))) { | 1169 ((dst < src) || (dst >= (src + num_words * kPointerSize)))) { |
| 1248 T* end = dst + num_words; | 1170 T* end = dst + num_words; |
| 1249 do { | 1171 do { |
| 1250 num_words--; | 1172 num_words--; |
| 1251 *dst++ = *src++; | 1173 *dst++ = *src++; |
| 1252 } while (num_words > 0); | 1174 } while (num_words > 0); |
| 1253 } else { | 1175 } else { |
| 1254 MemMove(dst, src, num_words * kPointerSize); | 1176 OS::MemMove(dst, src, num_words * kPointerSize); |
| 1255 } | 1177 } |
| 1256 } | 1178 } |
| 1257 | 1179 |
| 1258 | 1180 |
| 1259 // Copies data from |src| to |dst|. The data spans must not overlap. | 1181 // Copies data from |src| to |dst|. The data spans must not overlap. |
| 1260 template <typename T> | 1182 template <typename T> |
| 1261 inline void CopyBytes(T* dst, const T* src, size_t num_bytes) { | 1183 inline void CopyBytes(T* dst, const T* src, size_t num_bytes) { |
| 1262 STATIC_ASSERT(sizeof(T) == 1); | 1184 STATIC_ASSERT(sizeof(T) == 1); |
| 1263 ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <= | 1185 ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <= |
| 1264 Max(dst, const_cast<T*>(src))); | 1186 Max(dst, const_cast<T*>(src))); |
| 1265 if (num_bytes == 0) return; | 1187 if (num_bytes == 0) return; |
| 1266 | 1188 |
| 1267 // Use block copying MemCopy if the segment we're copying is large | 1189 // Use block copying OS::MemCopy if the segment we're copying is large |
| 1268 // enough to justify the extra call/setup overhead. | 1190 // enough to justify the extra call/setup overhead. |
| 1269 static const int kBlockCopyLimit = kMinComplexMemCopy; | 1191 static const int kBlockCopyLimit = OS::kMinComplexMemCopy; |
| 1270 | 1192 |
| 1271 if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) { | 1193 if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) { |
| 1272 do { | 1194 do { |
| 1273 num_bytes--; | 1195 num_bytes--; |
| 1274 *dst++ = *src++; | 1196 *dst++ = *src++; |
| 1275 } while (num_bytes > 0); | 1197 } while (num_bytes > 0); |
| 1276 } else { | 1198 } else { |
| 1277 MemCopy(dst, src, num_bytes); | 1199 OS::MemCopy(dst, src, num_bytes); |
| 1278 } | 1200 } |
| 1279 } | 1201 } |
| 1280 | 1202 |
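CopyWords and CopyBytes above both use the same threshold trick: a short inline element loop for small counts, the block-copy routine once the size justifies the call overhead. A usage sketch follows; it assumes a build where kPointerSize == sizeof(intptr_t) and is not part of this CL:

```cpp
#include "src/utils.h"

namespace v8 {
namespace internal {

void CopyHelpersExample() {
  intptr_t src_words[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  intptr_t dst_words[8];
  // 8 < kBlockCopyLimit, so this takes the cheap inline word loop.
  CopyWords(dst_words, src_words, 8);

  char src_bytes[256] = {0};
  char dst_bytes[256];
  // 256 is above the complex-copy threshold on all configurations above, so
  // this dispatches to the block-copy routine. The spans must not overlap.
  CopyBytes(dst_bytes, src_bytes, sizeof(src_bytes));
}

}  // namespace internal
}  // namespace v8
```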
| 1281 | 1203 |
| 1282 template <typename T, typename U> | 1204 template <typename T, typename U> |
| 1283 inline void MemsetPointer(T** dest, U* value, int counter) { | 1205 inline void MemsetPointer(T** dest, U* value, int counter) { |
| 1284 #ifdef DEBUG | 1206 #ifdef DEBUG |
| 1285 T* a = NULL; | 1207 T* a = NULL; |
| 1286 U* b = NULL; | 1208 U* b = NULL; |
| 1287 a = b; // Fake assignment to check assignability. | 1209 a = b; // Fake assignment to check assignability. |
| (...skipping 102 matching lines...) | |
| 1390 chars); | 1312 chars); |
| 1391 } | 1313 } |
| 1392 } | 1314 } |
| 1393 } | 1315 } |
| 1394 | 1316 |
| 1395 template <typename sourcechar, typename sinkchar> | 1317 template <typename sourcechar, typename sinkchar> |
| 1396 void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) { | 1318 void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) { |
| 1397 sinkchar* limit = dest + chars; | 1319 sinkchar* limit = dest + chars; |
| 1398 #ifdef V8_HOST_CAN_READ_UNALIGNED | 1320 #ifdef V8_HOST_CAN_READ_UNALIGNED |
| 1399 if (sizeof(*dest) == sizeof(*src)) { | 1321 if (sizeof(*dest) == sizeof(*src)) { |
| 1400 if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) { | 1322 if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) { |
| 1401 MemCopy(dest, src, chars * sizeof(*dest)); | 1323 OS::MemCopy(dest, src, chars * sizeof(*dest)); |
| 1402 return; | 1324 return; |
| 1403 } | 1325 } |
| 1404 // Number of characters in a uintptr_t. | 1326 // Number of characters in a uintptr_t. |
| 1405 static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT | 1327 static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT |
| 1406 ASSERT(dest + kStepSize > dest); // Check for overflow. | 1328 ASSERT(dest + kStepSize > dest); // Check for overflow. |
| 1407 while (dest + kStepSize <= limit) { | 1329 while (dest + kStepSize <= limit) { |
| 1408 *reinterpret_cast<uintptr_t*>(dest) = | 1330 *reinterpret_cast<uintptr_t*>(dest) = |
| 1409 *reinterpret_cast<const uintptr_t*>(src); | 1331 *reinterpret_cast<const uintptr_t*>(src); |
| 1410 dest += kStepSize; | 1332 dest += kStepSize; |
| 1411 src += kStepSize; | 1333 src += kStepSize; |
| (...skipping 50 matching lines...) | |
| 1462 case 13: | 1384 case 13: |
| 1463 memcpy(dest, src, 13); | 1385 memcpy(dest, src, 13); |
| 1464 break; | 1386 break; |
| 1465 case 14: | 1387 case 14: |
| 1466 memcpy(dest, src, 14); | 1388 memcpy(dest, src, 14); |
| 1467 break; | 1389 break; |
| 1468 case 15: | 1390 case 15: |
| 1469 memcpy(dest, src, 15); | 1391 memcpy(dest, src, 15); |
| 1470 break; | 1392 break; |
| 1471 default: | 1393 default: |
| 1472 MemCopy(dest, src, chars); | 1394 OS::MemCopy(dest, src, chars); |
| 1473 break; | 1395 break; |
| 1474 } | 1396 } |
| 1475 } | 1397 } |
| 1476 | 1398 |
| 1477 | 1399 |
| 1478 void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) { | 1400 void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) { |
| 1479 if (chars >= kMinComplexConvertMemCopy) { | 1401 if (chars >= OS::kMinComplexConvertMemCopy) { |
| 1480 MemCopyUint16Uint8(dest, src, chars); | 1402 OS::MemCopyUint16Uint8(dest, src, chars); |
| 1481 } else { | 1403 } else { |
| 1482 MemCopyUint16Uint8Wrapper(dest, src, chars); | 1404 OS::MemCopyUint16Uint8Wrapper(dest, src, chars); |
| 1483 } | 1405 } |
| 1484 } | 1406 } |
| 1485 | 1407 |
| 1486 | 1408 |
| 1487 void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { | 1409 void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { |
| 1488 switch (static_cast<unsigned>(chars)) { | 1410 switch (static_cast<unsigned>(chars)) { |
| 1489 case 0: | 1411 case 0: |
| 1490 break; | 1412 break; |
| 1491 case 1: | 1413 case 1: |
| 1492 *dest = *src; | 1414 *dest = *src; |
| (...skipping 10 matching lines...) | |
| 1503 case 5: | 1425 case 5: |
| 1504 memcpy(dest, src, 10); | 1426 memcpy(dest, src, 10); |
| 1505 break; | 1427 break; |
| 1506 case 6: | 1428 case 6: |
| 1507 memcpy(dest, src, 12); | 1429 memcpy(dest, src, 12); |
| 1508 break; | 1430 break; |
| 1509 case 7: | 1431 case 7: |
| 1510 memcpy(dest, src, 14); | 1432 memcpy(dest, src, 14); |
| 1511 break; | 1433 break; |
| 1512 default: | 1434 default: |
| 1513 MemCopy(dest, src, chars * sizeof(*dest)); | 1435 OS::MemCopy(dest, src, chars * sizeof(*dest)); |
| 1514 break; | 1436 break; |
| 1515 } | 1437 } |
| 1516 } | 1438 } |
| 1517 | 1439 |
| 1518 | 1440 |
| 1519 #elif defined(V8_HOST_ARCH_MIPS) | 1441 #elif defined(V8_HOST_ARCH_MIPS) |
| 1520 void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) { | 1442 void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) { |
| 1521 if (chars < kMinComplexMemCopy) { | 1443 if (chars < OS::kMinComplexMemCopy) { |
| 1522 memcpy(dest, src, chars); | 1444 memcpy(dest, src, chars); |
| 1523 } else { | 1445 } else { |
| 1524 MemCopy(dest, src, chars); | 1446 OS::MemCopy(dest, src, chars); |
| 1525 } | 1447 } |
| 1526 } | 1448 } |
| 1527 | 1449 |
| 1528 void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { | 1450 void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) { |
| 1529 if (chars < kMinComplexMemCopy) { | 1451 if (chars < OS::kMinComplexMemCopy) { |
| 1530 memcpy(dest, src, chars * sizeof(*dest)); | 1452 memcpy(dest, src, chars * sizeof(*dest)); |
| 1531 } else { | 1453 } else { |
| 1532 MemCopy(dest, src, chars * sizeof(*dest)); | 1454 OS::MemCopy(dest, src, chars * sizeof(*dest)); |
| 1533 } | 1455 } |
| 1534 } | 1456 } |
| 1535 #endif | 1457 #endif |
| 1536 | 1458 |
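The char-copy helpers above enumerate small counts in a switch so that every memcpy call has a compile-time-constant size, which compilers expand into a few direct loads and stores; only counts past the complex-copy threshold fall back to the generic routine. A tiny sketch of that idea (the function name below is an assumption, not V8 code):

```cpp
#include <cstring>
#include <cstdint>

// With a constant size, the compiler typically inlines this memcpy into a
// single 64-bit load/store pair instead of emitting a library call.
inline void CopyEightBytes(uint8_t* dest, const uint8_t* src) {
  memcpy(dest, src, 8);
}
```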
| 1537 | 1459 |
| 1538 class StringBuilder : public SimpleStringBuilder { | 1460 class StringBuilder : public SimpleStringBuilder { |
| 1539 public: | 1461 public: |
| 1540 explicit StringBuilder(int size) : SimpleStringBuilder(size) { } | 1462 explicit StringBuilder(int size) : SimpleStringBuilder(size) { } |
| 1541 StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { } | 1463 StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { } |
| 1542 | 1464 |
| 1543 // Add formatted contents to the builder just like printf(). | 1465 // Add formatted contents to the builder just like printf(). |
| 1544 void AddFormatted(const char* format, ...); | 1466 void AddFormatted(const char* format, ...); |
| 1545 | 1467 |
| 1546 // Add formatted contents like printf based on a va_list. | 1468 // Add formatted contents like printf based on a va_list. |
| 1547 void AddFormattedList(const char* format, va_list list); | 1469 void AddFormattedList(const char* format, va_list list); |
| 1548 private: | 1470 private: |
| 1549 DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); | 1471 DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); |
| 1550 }; | 1472 }; |
| 1551 | 1473 |
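StringBuilder adds printf-style formatting on top of SimpleStringBuilder. A hypothetical call site is sketched below; it assumes SimpleStringBuilder's Finalize() and EmbeddedVector's default constructor and accessors from the skipped parts of this header (illustrative only):

```cpp
#include <cstdio>

void BuildMessageExample(int count, double ms) {
  v8::internal::EmbeddedVector<char, 128> buffer;
  v8::internal::StringBuilder builder(buffer.start(), buffer.length());
  builder.AddFormatted("compiled %d functions in %.3f ms", count, ms);
  printf("%s\n", builder.Finalize());  // Finalize() NUL-terminates and returns the buffer
}
```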
| 1552 | 1474 |
| 1553 } } // namespace v8::internal | 1475 } } // namespace v8::internal |
| 1554 | 1476 |
| 1555 #endif // V8_UTILS_H_ | 1477 #endif // V8_UTILS_H_ |