// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stdint.h>

// V8_CC_MSVC and V8_CC_GNU are V8's compiler-detection macros, defined in
// include/v8config.h. Note that the MSVC __asm blocks below are ia32-only;
// MSVC does not support inline assembly when targeting x64.

namespace v8 {
namespace internal {
namespace atomics {
// Load ////////////////////////////////////////////////////////////////////
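// On x86 an aligned plain load is already atomic, and it is sequentially
// consistent in this scheme because every store below goes through a locked
// instruction (xchg or a lock-prefixed read-modify-write); this is the
// standard C++11-to-x86 mapping. The "memory" clobber keeps the compiler
// from caching or reordering memory accesses around the load.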
inline uint8_t LoadSeqCst(uint8_t* p) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  __asm__ __volatile__("movb (%1), %0" : "=q"(result) : "q"(p) : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int8_t LoadSeqCst(int8_t* p) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__("movb (%1), %0" : "=q"(result) : "q"(p) : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t LoadSeqCst(uint16_t* p) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__("movw (%1), %0" : "=r"(result) : "r"(p) : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int16_t LoadSeqCst(int16_t* p) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__("movw (%1), %0" : "=r"(result) : "r"(p) : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t LoadSeqCst(uint32_t* p) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__("movl (%1), %0" : "=r"(result) : "r"(p) : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int32_t LoadSeqCst(int32_t* p) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__("movl (%1), %0" : "=r"(result) : "r"(p) : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


// Store ///////////////////////////////////////////////////////////////////
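// A SeqCst store needs a full barrier on x86. xchg with a memory operand is
// implicitly locked, so it performs the store and acts as the barrier in
// one instruction, avoiding a separate mfence; the previous value it brings
// back into the register is simply discarded.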
inline void StoreSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  __asm {
    mov al, value
    mov ecx, p
    xchg byte ptr [ecx], al
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgb %1, %0" : "+m"(*p), "+q"(value) : : "memory");
#else
#error Unsupported compiler.
#endif
}


inline void StoreSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  __asm {
    mov al, value
    mov ecx, p
    xchg byte ptr [ecx], al
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgb %1, %0" : "+m"(*p), "+q"(value) : : "memory");
#else
#error Unsupported compiler.
#endif
}


inline void StoreSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  __asm {
    mov ax, value
    mov ecx, p
    xchg word ptr [ecx], ax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgw %1, %0" : "+m"(*p), "+r"(value) : : "memory");
#else
#error Unsupported compiler.
#endif
}


inline void StoreSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  __asm {
    mov ax, value
    mov ecx, p
    xchg word ptr [ecx], ax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgw %1, %0" : "+m"(*p), "+r"(value) : : "memory");
#else
#error Unsupported compiler.
#endif
}


inline void StoreSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  __asm {
    mov eax, value
    mov ecx, p
    xchg dword ptr [ecx], eax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgl %1, %0" : "+m"(*p), "+r"(value) : : "memory");
#else
#error Unsupported compiler.
#endif
}


inline void StoreSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  __asm {
    mov eax, value
    mov ecx, p
    xchg dword ptr [ecx], eax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgl %1, %0" : "+m"(*p), "+r"(value) : : "memory");
#else
#error Unsupported compiler.
#endif
}


// Add /////////////////////////////////////////////////////////////////////
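// lock xadd atomically adds the register into the memory operand and writes
// the previous memory contents back into the register, so each function
// returns the value of *p from before the addition.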
inline uint8_t AddSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov al, value
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  __asm__ __volatile__("lock xaddb %2, %1"
                       : "=q"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int8_t AddSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov al, value
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__("lock xaddb %2, %1"
                       : "=q"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t AddSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ax, value
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__("lock xaddw %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int16_t AddSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ax, value
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__("lock xaddw %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t AddSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov eax, value
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__("lock xaddl %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int32_t AddSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov eax, value
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__("lock xaddl %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


// Sub /////////////////////////////////////////////////////////////////////
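// There is no xsub instruction, so the operand is negated first (neg forms
// the two's complement) and the subtraction becomes a lock xadd of -value.
// As with Add, the previous value of *p is returned.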
inline uint8_t SubSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov al, value
    neg al
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  __asm__ __volatile__(
      "negb %2\n\t"
      "lock xaddb %2, %1"
      : "=q"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int8_t SubSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov al, value
    neg al
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__(
      "negb %2\n\t"
      "lock xaddb %2, %1"
      : "=q"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t SubSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ax, value
    neg ax
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__(
      "negw %2\n\t"
      "lock xaddw %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int16_t SubSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ax, value
    neg ax
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__(
      "negw %2\n\t"
      "lock xaddw %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t SubSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov eax, value
    neg eax
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__(
      "negl %2\n\t"
      "lock xaddl %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int32_t SubSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov eax, value
    neg eax
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__(
      "negl %2\n\t"
      "lock xaddl %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


// Exchange ////////////////////////////////////////////////////////////////
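// xchg against a memory operand is implicitly locked, so no lock prefix is
// needed; after the exchange the register holds the previous value of *p,
// which is what gets returned.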
inline uint8_t ExchangeSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov al, value
    mov ecx, p
    xchg byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  __asm__ __volatile__("xchgb %0, %1" : "+q"(value), "+m"(*p) : : "memory");
  return value;
#else
#error Unsupported compiler.
#endif
}


inline int8_t ExchangeSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov al, value
    mov ecx, p
    xchg byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  __asm__ __volatile__("xchgb %0, %1" : "+q"(value), "+m"(*p) : : "memory");
  return value;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t ExchangeSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ax, value
    mov ecx, p
    xchg word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  __asm__ __volatile__("xchgw %0, %1" : "+r"(value), "+m"(*p) : : "memory");
  return value;
#else
#error Unsupported compiler.
#endif
}


inline int16_t ExchangeSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ax, value
    mov ecx, p
    xchg word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  __asm__ __volatile__("xchgw %0, %1" : "+r"(value), "+m"(*p) : : "memory");
  return value;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t ExchangeSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov eax, value
    mov ecx, p
    xchg dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  __asm__ __volatile__("xchgl %0, %1" : "+r"(value), "+m"(*p) : : "memory");
  return value;
#else
#error Unsupported compiler.
#endif
}


inline int32_t ExchangeSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov eax, value
    mov ecx, p
    xchg dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  __asm__ __volatile__("xchgl %0, %1" : "+r"(value), "+m"(*p) : : "memory");
  return value;
#else
#error Unsupported compiler.
#endif
}


// CompareExchange /////////////////////////////////////////////////////////
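// lock cmpxchg compares the accumulator (al/ax/eax) with *p: on a match it
// stores newval to *p, otherwise it loads the current value of *p into the
// accumulator. Either way the accumulator ends up holding the old value of
// *p, which is exactly the CompareExchange result.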
inline uint8_t CompareExchangeSeqCst(uint8_t* p, uint8_t oldval,
                                     uint8_t newval) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov al, oldval
    mov dl, newval
    mov ecx, p
    lock cmpxchg byte ptr [ecx], dl
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  __asm__ __volatile__("lock cmpxchgb %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "q"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int8_t CompareExchangeSeqCst(int8_t* p, int8_t oldval, int8_t newval) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov al, oldval
    mov dl, newval
    mov ecx, p
    lock cmpxchg byte ptr [ecx], dl
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__("lock cmpxchgb %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "q"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t CompareExchangeSeqCst(uint16_t* p, uint16_t oldval,
                                      uint16_t newval) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ax, oldval
    mov dx, newval
    mov ecx, p
    lock cmpxchg word ptr [ecx], dx
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__("lock cmpxchgw %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int16_t CompareExchangeSeqCst(int16_t* p, int16_t oldval,
                                     int16_t newval) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ax, oldval
    mov dx, newval
    mov ecx, p
    lock cmpxchg word ptr [ecx], dx
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__("lock cmpxchgw %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t CompareExchangeSeqCst(uint32_t* p, uint32_t oldval,
                                      uint32_t newval) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov eax, oldval
    mov edx, newval
    mov ecx, p
    lock cmpxchg dword ptr [ecx], edx
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__("lock cmpxchgl %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int32_t CompareExchangeSeqCst(int32_t* p, int32_t oldval,
                                     int32_t newval) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov eax, oldval
    mov edx, newval
    mov ecx, p
    lock cmpxchg dword ptr [ecx], edx
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__("lock cmpxchgl %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


// And /////////////////////////////////////////////////////////////////////
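// x86 has no instruction that atomically ANDs into memory and returns the
// previous value, so these use a cmpxchg retry loop: load *p, compute
// old & value in a scratch register, and lock cmpxchg it back. If another
// thread modified *p in the meantime, cmpxchg fails and leaves the fresh
// value in the accumulator for the next iteration ("1:" / "jnz 1b" is the
// GNU local-label form of the backward branch).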
inline uint8_t AndSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
  L0:
    mov dl, value
    and dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t temp;
  uint8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "andb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int8_t AndSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
  L0:
    mov dl, value
    and dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t temp;
  int8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "andb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t AndSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
  L0:
    mov dx, value
    and dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t temp;
  uint16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "andw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int16_t AndSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
  L0:
    mov dx, value
    and dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t temp;
  int16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "andw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t AndSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
  L0:
    mov edx, value
    and edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t temp;
  uint32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "andl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int32_t AndSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
  L0:
    mov edx, value
    and edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t temp;
  int32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "andl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


// Or //////////////////////////////////////////////////////////////////////
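// Same cmpxchg retry loop as And, with OR as the combining operation.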
inline uint8_t OrSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
  L0:
    mov dl, value
    or dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t temp;
  uint8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "orb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int8_t OrSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
  L0:
    mov dl, value
    or dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t temp;
  int8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "orb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t OrSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
  L0:
    mov dx, value
    or dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t temp;
  uint16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "orw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int16_t OrSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
  L0:
    mov dx, value
    or dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t temp;
  int16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "orw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t OrSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
  L0:
    mov edx, value
    or edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t temp;
  uint32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "orl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int32_t OrSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
  L0:
    mov edx, value
    or edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t temp;
  int32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "orl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


// Xor /////////////////////////////////////////////////////////////////////
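// Same cmpxchg retry loop as And and Or, with XOR as the combining
// operation.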
inline uint8_t XorSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
  L0:
    mov dl, value
    xor dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t temp;
  uint8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "xorb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int8_t XorSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
  L0:
    mov dl, value
    xor dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t temp;
  int8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "xorb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint16_t XorSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
  L0:
    mov dx, value
    xor dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t temp;
  uint16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "xorw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int16_t XorSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
  L0:
    mov dx, value
    xor dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t temp;
  int16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "xorw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline uint32_t XorSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
  L0:
    mov edx, value
    xor edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t temp;
  uint32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "xorl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}


inline int32_t XorSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
  L0:
    mov edx, value
    xor edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t temp;
  int32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "xorl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
}  // namespace atomics
}  // namespace internal
}  // namespace v8