; Copyright 2016 the V8 project authors. All rights reserved.
; Use of this source code is governed by a BSD-style license that can be
; found in the LICENSE file.

.CODE

; func: declare an exported code label.
; Every helper below is the hand-written body of an extern "C++" function;
; the mangled operand is the MSVC decoration of
; v8::internal::atomics::<Name>SeqCst(...).
func MACRO name
    PUBLIC name
name:
endm

;; LOAD ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Sequentially consistent loads. On x86-64 an ordinary MOV load is
; sufficient for seq_cst (the store side pays the fence cost via XCHG
; below). Microsoft x64 ABI: pointer arrives in rcx, result in al/ax/eax.

; uint8_t v8::internal::atomics::LoadSeqCst(uint8_t*);
func ?LoadSeqCst@atomics@internal@v8@@YAEPEAE@Z
        mov     al, [rcx]               ; return *ptr
        ret

; int8_t v8::internal::atomics::LoadSeqCst(int8_t*);
func ?LoadSeqCst@atomics@internal@v8@@YACPEAC@Z
        mov     al, [rcx]
        ret

; uint16_t v8::internal::atomics::LoadSeqCst(uint16_t*);
func ?LoadSeqCst@atomics@internal@v8@@YAGPEAG@Z
        mov     ax, [rcx]
        ret

; int16_t v8::internal::atomics::LoadSeqCst(int16_t*);
func ?LoadSeqCst@atomics@internal@v8@@YAFPEAF@Z
        mov     ax, [rcx]
        ret

; uint32_t v8::internal::atomics::LoadSeqCst(uint32_t*);
func ?LoadSeqCst@atomics@internal@v8@@YAIPEAI@Z
        mov     eax, [rcx]
        ret

; int32_t v8::internal::atomics::LoadSeqCst(int32_t*);
func ?LoadSeqCst@atomics@internal@v8@@YAHPEAH@Z
        mov     eax, [rcx]
        ret

;; STORE ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Sequentially consistent stores. XCHG with a memory operand asserts an
; implicit LOCK, giving the full-barrier semantics seq_cst stores need.
; Microsoft x64 ABI: pointer in rcx, value in dl/dx/edx; no return value.

; void v8::internal::atomics::StoreSeqCst(uint8_t*, uint8_t);
func ?StoreSeqCst@atomics@internal@v8@@YAXPEAEE@Z
        xchg    [rcx], dl               ; *ptr = value, with full barrier
        ret

; void v8::internal::atomics::StoreSeqCst(int8_t*, int8_t);
func ?StoreSeqCst@atomics@internal@v8@@YAXPEACC@Z
        xchg    [rcx], dl
        ret

; void v8::internal::atomics::StoreSeqCst(uint16_t*, uint16_t);
func ?StoreSeqCst@atomics@internal@v8@@YAXPEAGG@Z
        xchg    [rcx], dx
        ret

; void v8::internal::atomics::StoreSeqCst(int16_t*, int16_t);
func ?StoreSeqCst@atomics@internal@v8@@YAXPEAFF@Z
        xchg    [rcx], dx
        ret

; void v8::internal::atomics::StoreSeqCst(uint32_t*, uint32_t);
func ?StoreSeqCst@atomics@internal@v8@@YAXPEAII@Z
        xchg    [rcx], edx
        ret

; void v8::internal::atomics::StoreSeqCst(int32_t*, int32_t);
func ?StoreSeqCst@atomics@internal@v8@@YAXPEAHH@Z
        xchg    [rcx], edx
        ret

;; ADD ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Atomic fetch-add. LOCK XADD adds the register into memory and leaves the
; PREVIOUS memory value in that register; it is then widened into eax
; (movzx for unsigned, movsx for signed) as the return value.

; uint8_t v8::internal::atomics::AddSeqCst(uint8_t*, uint8_t);
func ?AddSeqCst@atomics@internal@v8@@YAEPEAEE@Z
        lock xadd [rcx], dl             ; dl = old *ptr; *ptr += value
        movzx   eax, dl                 ; zero-extend old value for return
        ret

; int8_t v8::internal::atomics::AddSeqCst(int8_t*, int8_t);
func ?AddSeqCst@atomics@internal@v8@@YACPEACC@Z
        lock xadd [rcx], dl
        movsx   eax, dl                 ; sign-extend old value for return
        ret

; uint16_t v8::internal::atomics::AddSeqCst(uint16_t*, uint16_t);
func ?AddSeqCst@atomics@internal@v8@@YAGPEAGG@Z
        lock xadd [rcx], dx
        movzx   eax, dx
        ret

; int16_t v8::internal::atomics::AddSeqCst(int16_t*, int16_t);
func ?AddSeqCst@atomics@internal@v8@@YAFPEAFF@Z
        lock xadd [rcx], dx
        movsx   eax, dx
        ret

; uint32_t v8::internal::atomics::AddSeqCst(uint32_t*, uint32_t);
func ?AddSeqCst@atomics@internal@v8@@YAIPEAII@Z
        lock xadd [rcx], edx
        mov     eax, edx                ; 32-bit move already zero-extends
        ret

; int32_t v8::internal::atomics::AddSeqCst(int32_t*, int32_t);
func ?AddSeqCst@atomics@internal@v8@@YAHPEAHH@Z
        lock xadd [rcx], edx
        mov     eax, edx
        ret

;; SUB ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Atomic fetch-sub, implemented as fetch-add of the negated operand
; (x86 has no XSUB). LOCK XADD leaves the previous memory value in the
; source register, which is widened into eax as the return value.

; uint8_t v8::internal::atomics::SubSeqCst(uint8_t*, uint8_t);
func ?SubSeqCst@atomics@internal@v8@@YAEPEAEE@Z
        neg     dl                      ; dl = -value (two's complement)
        lock xadd [rcx], dl             ; dl = old *ptr; *ptr -= value
        movzx   eax, dl
        ret

; int8_t v8::internal::atomics::SubSeqCst(int8_t*, int8_t);
func ?SubSeqCst@atomics@internal@v8@@YACPEACC@Z
        neg     dl
        lock xadd [rcx], dl
        movsx   eax, dl
        ret

; uint16_t v8::internal::atomics::SubSeqCst(uint16_t*, uint16_t);
func ?SubSeqCst@atomics@internal@v8@@YAGPEAGG@Z
        neg     dx
        lock xadd [rcx], dx
        movzx   eax, dx
        ret

; int16_t v8::internal::atomics::SubSeqCst(int16_t*, int16_t);
func ?SubSeqCst@atomics@internal@v8@@YAFPEAFF@Z
        neg     dx
        lock xadd [rcx], dx
        movsx   eax, dx
        ret

; uint32_t v8::internal::atomics::SubSeqCst(uint32_t*, uint32_t);
func ?SubSeqCst@atomics@internal@v8@@YAIPEAII@Z
        neg     edx
        lock xadd [rcx], edx
        mov     eax, edx
        ret

; int32_t v8::internal::atomics::SubSeqCst(int32_t*, int32_t);
func ?SubSeqCst@atomics@internal@v8@@YAHPEAHH@Z
        neg     edx
        lock xadd [rcx], edx
        mov     eax, edx
        ret

;; EXCHANGE ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Atomic swap. XCHG with memory is implicitly LOCKed; after it, the
; register holds the PREVIOUS memory value, which is widened into eax.

; uint8_t v8::internal::atomics::ExchangeSeqCst(uint8_t*, uint8_t);
func ?ExchangeSeqCst@atomics@internal@v8@@YAEPEAEE@Z
        xchg    [rcx], dl               ; dl = old *ptr; *ptr = value
        movzx   eax, dl
        ret

; int8_t v8::internal::atomics::ExchangeSeqCst(int8_t*, int8_t);
func ?ExchangeSeqCst@atomics@internal@v8@@YACPEACC@Z
        xchg    [rcx], dl
        movsx   eax, dl
        ret

; uint16_t v8::internal::atomics::ExchangeSeqCst(uint16_t*, uint16_t);
func ?ExchangeSeqCst@atomics@internal@v8@@YAGPEAGG@Z
        xchg    [rcx], dx
        movzx   eax, dx
        ret

; int16_t v8::internal::atomics::ExchangeSeqCst(int16_t*, int16_t);
func ?ExchangeSeqCst@atomics@internal@v8@@YAFPEAFF@Z
        xchg    [rcx], dx
        movsx   eax, dx
        ret

; uint32_t v8::internal::atomics::ExchangeSeqCst(uint32_t*, uint32_t);
func ?ExchangeSeqCst@atomics@internal@v8@@YAIPEAII@Z
        xchg    [rcx], edx
        mov     eax, edx
        ret

; int32_t v8::internal::atomics::ExchangeSeqCst(int32_t*, int32_t);
func ?ExchangeSeqCst@atomics@internal@v8@@YAHPEAHH@Z
        xchg    [rcx], edx
        mov     eax, edx
        ret

;; COMPARE EXCHANGE ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Atomic compare-and-swap. CMPXCHG compares memory with the accumulator
; (al/ax/eax): on match it stores the new value (r8b/r8w/r8d); on mismatch
; it loads the current memory value into the accumulator. Either way eax
; ends up holding the previous memory value, which is exactly the return.
; ABI: rcx = ptr, dl/dx/edx = oldval (expected), r8 = newval.

; uint8_t v8::internal::atomics::CompareExchangeSeqCst(
;     uint8_t*, uint8_t oldval, uint8_t newval);
func ?CompareExchangeSeqCst@atomics@internal@v8@@YAEPEAEEE@Z
        mov     al, dl                  ; accumulator = expected value
        lock cmpxchg [rcx], r8b         ; al = previous *ptr
        ret

; int8_t v8::internal::atomics::CompareExchangeSeqCst(
;     int8_t*, int8_t oldval, int8_t newval);
func ?CompareExchangeSeqCst@atomics@internal@v8@@YACPEACCC@Z
        mov     al, dl
        lock cmpxchg [rcx], r8b
        ret

; uint16_t v8::internal::atomics::CompareExchangeSeqCst(
;     uint16_t*, uint16_t oldval, uint16_t newval);
func ?CompareExchangeSeqCst@atomics@internal@v8@@YAGPEAGGG@Z
        mov     ax, dx
        lock cmpxchg [rcx], r8w
        ret

; int16_t v8::internal::atomics::CompareExchangeSeqCst(
;     int16_t*, int16_t oldval, int16_t newval);
func ?CompareExchangeSeqCst@atomics@internal@v8@@YAFPEAFFF@Z
        mov     ax, dx
        lock cmpxchg [rcx], r8w
        ret

; uint32_t v8::internal::atomics::CompareExchangeSeqCst(
;     uint32_t*, uint32_t oldval, uint32_t newval);
func ?CompareExchangeSeqCst@atomics@internal@v8@@YAIPEAIII@Z
        mov     eax, edx
        lock cmpxchg [rcx], r8d
        ret

; int32_t v8::internal::atomics::CompareExchangeSeqCst(
;     int32_t*, int32_t oldval, int32_t newval);
func ?CompareExchangeSeqCst@atomics@internal@v8@@YAHPEAHHH@Z
        mov     eax, edx
        lock cmpxchg [rcx], r8d
        ret

;; AND ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Atomic fetch-and via a CMPXCHG loop: read the old value, compute
; old & value in r8, and try to CAS it in. On failure CMPXCHG reloads the
; current memory value into the accumulator, so the loop simply recomputes.
; Returns the previous value (left in al/ax/eax by CMPXCHG).

; uint8_t v8::internal::atomics::AndSeqCst(uint8_t*, uint8_t);
func ?AndSeqCst@atomics@internal@v8@@YAEPEAEE@Z
        mov     al, [rcx]               ; al = current *ptr
@@:
        mov     r8b, dl
        and     r8b, al                 ; r8b = *ptr & value
        lock cmpxchg [rcx], r8b         ; try to install; al = previous *ptr
        jnz     short @B                ; contended: retry with fresh al
        ret

; int8_t v8::internal::atomics::AndSeqCst(int8_t*, int8_t);
func ?AndSeqCst@atomics@internal@v8@@YACPEACC@Z
        mov     al, [rcx]
@@:
        mov     r8b, dl
        and     r8b, al
        lock cmpxchg [rcx], r8b
        jnz     short @B
        ret

; uint16_t v8::internal::atomics::AndSeqCst(uint16_t*, uint16_t);
func ?AndSeqCst@atomics@internal@v8@@YAGPEAGG@Z
        mov     ax, [rcx]
@@:
        mov     r8w, dx
        and     r8w, ax
        lock cmpxchg [rcx], r8w
        jnz     short @B
        ret

; int16_t v8::internal::atomics::AndSeqCst(int16_t*, int16_t);
func ?AndSeqCst@atomics@internal@v8@@YAFPEAFF@Z
        mov     ax, [rcx]
@@:
        mov     r8w, dx
        and     r8w, ax
        lock cmpxchg [rcx], r8w
        jnz     short @B
        ret

; uint32_t v8::internal::atomics::AndSeqCst(uint32_t*, uint32_t);
; CMPXCHG loop computing *ptr &= value atomically; returns the previous
; value in eax.
func ?AndSeqCst@atomics@internal@v8@@YAIPEAII@Z
        mov     eax, [rcx]              ; eax = current *ptr
@@:
        mov     r8d, edx
        and     r8d, eax                ; r8d = *ptr & value
        lock cmpxchg [rcx], r8d         ; try to install; eax = previous *ptr
        jnz     short @B                ; FIX: retry on CAS failure (jump was
                                        ; missing — under contention the AND
                                        ; was silently dropped)
        ret

; int32_t v8::internal::atomics::AndSeqCst(int32_t*, int32_t);
; CMPXCHG loop computing *ptr &= value atomically; returns the previous
; value in eax.
func ?AndSeqCst@atomics@internal@v8@@YAHPEAHH@Z
        mov     eax, [rcx]              ; eax = current *ptr
@@:
        mov     r8d, edx
        and     r8d, eax                ; r8d = *ptr & value
        lock cmpxchg [rcx], r8d         ; eax = previous *ptr
        jnz     short @B                ; contended: retry
        ret

;; OR ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Atomic fetch-or via a CMPXCHG loop (same shape as AND above). Returns
; the previous value, left in the accumulator by CMPXCHG.

; uint8_t v8::internal::atomics::OrSeqCst(uint8_t*, uint8_t);
func ?OrSeqCst@atomics@internal@v8@@YAEPEAEE@Z
        mov     al, [rcx]               ; al = current *ptr
@@:
        mov     r8b, dl
        or      r8b, al                 ; r8b = *ptr | value
        lock cmpxchg [rcx], r8b         ; al = previous *ptr
        jnz     short @B                ; contended: retry
        ret

; int8_t v8::internal::atomics::OrSeqCst(int8_t*, int8_t);
func ?OrSeqCst@atomics@internal@v8@@YACPEACC@Z
        mov     al, [rcx]
@@:
        mov     r8b, dl
        or      r8b, al
        lock cmpxchg [rcx], r8b
        jnz     short @B
        ret

; uint16_t v8::internal::atomics::OrSeqCst(uint16_t*, uint16_t);
func ?OrSeqCst@atomics@internal@v8@@YAGPEAGG@Z
        mov     ax, [rcx]
@@:
        mov     r8w, dx
        or      r8w, ax
        lock cmpxchg [rcx], r8w
        jnz     short @B
        ret

; int16_t v8::internal::atomics::OrSeqCst(int16_t*, int16_t);
func ?OrSeqCst@atomics@internal@v8@@YAFPEAFF@Z
        mov     ax, [rcx]
@@:
        mov     r8w, dx
        or      r8w, ax
        lock cmpxchg [rcx], r8w
        jnz     short @B
        ret

; uint32_t v8::internal::atomics::OrSeqCst(uint32_t*, uint32_t);
func ?OrSeqCst@atomics@internal@v8@@YAIPEAII@Z
        mov     eax, [rcx]
@@:
        mov     r8d, edx
        or      r8d, eax
        lock cmpxchg [rcx], r8d
        jnz     short @B
        ret

; int32_t v8::internal::atomics::OrSeqCst(int32_t*, int32_t);
func ?OrSeqCst@atomics@internal@v8@@YAHPEAHH@Z
        mov     eax, [rcx]
@@:
        mov     r8d, edx
        or      r8d, eax
        lock cmpxchg [rcx], r8d
        jnz     short @B
        ret

;; XOR ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Atomic fetch-xor via a CMPXCHG loop (same shape as AND/OR above).
; Returns the previous value, left in the accumulator by CMPXCHG.

; uint8_t v8::internal::atomics::XorSeqCst(uint8_t*, uint8_t);
func ?XorSeqCst@atomics@internal@v8@@YAEPEAEE@Z
        mov     al, [rcx]               ; al = current *ptr
@@:
        mov     r8b, dl
        xor     r8b, al                 ; r8b = *ptr ^ value
        lock cmpxchg [rcx], r8b         ; al = previous *ptr
        jnz     short @B                ; contended: retry
        ret

; int8_t v8::internal::atomics::XorSeqCst(int8_t*, int8_t);
func ?XorSeqCst@atomics@internal@v8@@YACPEACC@Z
        mov     al, [rcx]
@@:
        mov     r8b, dl
        xor     r8b, al
        lock cmpxchg [rcx], r8b
        jnz     short @B
        ret

; uint16_t v8::internal::atomics::XorSeqCst(uint16_t*, uint16_t);
func ?XorSeqCst@atomics@internal@v8@@YAGPEAGG@Z
        mov     ax, [rcx]
@@:
        mov     r8w, dx
        xor     r8w, ax
        lock cmpxchg [rcx], r8w
        jnz     short @B
        ret

; int16_t v8::internal::atomics::XorSeqCst(int16_t*, int16_t);
func ?XorSeqCst@atomics@internal@v8@@YAFPEAFF@Z
        mov     ax, [rcx]
@@:
        mov     r8w, dx
        xor     r8w, ax
        lock cmpxchg [rcx], r8w
        jnz     short @B
        ret

; uint32_t v8::internal::atomics::XorSeqCst(uint32_t*, uint32_t);
func ?XorSeqCst@atomics@internal@v8@@YAIPEAII@Z
        mov     eax, [rcx]
@@:
        mov     r8d, edx
        xor     r8d, eax
        lock cmpxchg [rcx], r8d
        jnz     short @B
        ret

; int32_t v8::internal::atomics::XorSeqCst(int32_t*, int32_t);
func ?XorSeqCst@atomics@internal@v8@@YAHPEAHH@Z
        mov     eax, [rcx]
@@:
        mov     r8d, edx
        xor     r8d, eax
        lock cmpxchg [rcx], r8d
        jnz     short @B
        ret

END