/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Sanjay Ghemawat
 */

// Implementation of atomic operations using Windows API
// functions.  This file should not be included directly.  Clients
// should instead include "base/atomicops.h".
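//
// A usage sketch (illustrative only; "counter" below is made up for the
// example): clients include "base/atomicops.h", which pulls in this header
// on Windows, and call the operations through base::subtle, e.g.:
//
//   #include "base/atomicops.h"
//
//   Atomic32 counter = 0;
//   base::subtle::NoBarrier_AtomicIncrement(&counter, 1);        // counter == 1
//   Atomic32 old =
//       base::subtle::NoBarrier_CompareAndSwap(&counter, 1, 2);  // old == 1
//   Atomic32 now = base::subtle::Acquire_Load(&counter);         // now == 2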

#ifndef BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
#define BASE_ATOMICOPS_INTERNALS_WINDOWS_H_

#include <stdio.h>
#include <stdlib.h>
#include <windows.h>          // For LONG, PVOID, and the Interlocked* APIs
#include "base/basictypes.h"  // For COMPILE_ASSERT

typedef int32 Atomic32;

#if defined(_WIN64)
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
#endif

namespace base {
namespace subtle {

typedef int64 Atomic64;

// 32-bit low-level operations on any platform

// MinGW has a bug in the header files where it doesn't indicate the
// first argument is volatile -- they're not up to date.  See
//   http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html
// We have to const_cast away the volatile to avoid compiler warnings.
// TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h
#ifdef __MINGW32__
inline LONG InterlockedCompareExchange(volatile LONG* ptr,
                                       LONG newval, LONG oldval) {
  return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval);
}
inline LONG InterlockedExchange(volatile LONG* ptr, LONG newval) {
  return ::InterlockedExchange(const_cast<LONG*>(ptr), newval);
}
inline LONG InterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
  return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment);
}
#endif  // ifdef __MINGW32__

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  // InterlockedExchangeAdd returns the old value; adding the increment
  // again yields the new value, which is what this function returns.
  return InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
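
// Example (a sketch, not part of the original file): because
// Barrier_AtomicIncrement returns the *new* value, a simple reference
// count can be built directly on these primitives, e.g.
//
//   Atomic32 refcount = 1;
//   Barrier_AtomicIncrement(&refcount, 1);              // take a reference
//   if (Barrier_AtomicIncrement(&refcount, -1) == 0) {  // drop a reference
//     // last reference released; safe to destroy the guarded object
//   }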

}  // namespace base::subtle
}  // namespace base


// In msvc8/vs2005, winnt.h already contains a definition for
// MemoryBarrier in the global namespace.  Add it there for earlier
// versions and forward to it from within the namespace.
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
inline void MemoryBarrier() {
  Atomic32 value = 0;
  // The locked exchange actually acts as a barrier in this implementation.
  base::subtle::NoBarrier_AtomicExchange(&value, 0);
}
#endif

namespace base {
namespace subtle {

inline void MemoryBarrier() {
  ::MemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  // Acts as a barrier in this implementation.
  NoBarrier_AtomicExchange(ptr, value);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
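
// Example of the intended acquire/release pairing (a sketch; the two
// threads and the variables below are hypothetical):
//
//   Atomic32 flag = 0;
//   int data = 0;
//
//   // Thread A (producer)
//   data = 42;                  // plain store
//   Release_Store(&flag, 1);    // publish: data is written before flag
//
//   // Thread B (consumer)
//   if (Acquire_Load(&flag) == 1) {
//     // Thread A's write to data is guaranteed visible here.
//   }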

// 64-bit operations

#if defined(_WIN64) || defined(__MINGW64__)

// 64-bit low-level operations on 64-bit platform.

// On 64-bit Windows, PVOID is 64 bits wide, so the pointer-flavored
// Interlocked* APIs below operate on a full Atomic64.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

// As in the __MINGW32__ case above, this works around a MinGW header
// bug: the declarations are missing 'volatile'.
#ifdef __MINGW64__
inline PVOID InterlockedCompareExchangePointer(volatile PVOID* ptr,
                                               PVOID newval, PVOID oldval) {
  return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
                                             newval, oldval);
}
inline PVOID InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
  return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
inline LONGLONG InterlockedExchangeAdd64(volatile LONGLONG* ptr,
                                         LONGLONG increment) {
  return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
#endif  // ifdef __MINGW64__

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
    reinterpret_cast<volatile PVOID*>(ptr),
    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
    reinterpret_cast<volatile PVOID*>(ptr),
    reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  // Acts as a barrier in this implementation.
  NoBarrier_AtomicExchange(ptr, value);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#else  // defined(_WIN64) || defined(__MINGW64__)

// 64-bit low-level operations on 32-bit platform

// TODO(vchen): The GNU assembly below must be converted to MSVC inline
// assembly.  Then the file should be renamed to ...-x86-msvc.h, probably.

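// One possible MSVC-side direction (a sketch only, not the original code):
// the InterlockedCompareExchange64 API / _InterlockedCompareExchange64
// intrinsic maps to a cmpxchg8b loop on 32-bit x86 (Pentium or later), so
// the 64-bit compare-and-swap below could plausibly be written as:
//
//   inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
//                                            Atomic64 old_value,
//                                            Atomic64 new_value) {
//     return InterlockedCompareExchange64(
//         reinterpret_cast<volatile LONGLONG*>(ptr),
//         static_cast<LONGLONG>(new_value),
//         static_cast<LONGLONG>(old_value));
//   }
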
inline void NotImplementedFatalError(const char *function_name) {
  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
          function_name);
  abort();
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
#if 0 // Not implemented
  Atomic64 prev;
  __asm__ __volatile__("movl (%3), %%ebx\n\t"    // Move 64-bit new_value into
                       "movl 4(%3), %%ecx\n\t"   // ecx:ebx
                       "lock; cmpxchg8b %1\n\t"  // If edx:eax (old_value) same
                       : "=A" (prev)             // as contents of ptr:
                       : "m" (*ptr),             //   ecx:ebx => ptr
                         "0" (old_value),        // else:
                         "r" (&new_value)        //   old *ptr => edx:eax
                       : "memory", "%ebx", "%ecx");
  return prev;
#else
  NotImplementedFatalError("NoBarrier_CompareAndSwap");
  return 0;
#endif
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
#if 0 // Not implemented
  __asm__ __volatile__(
                       "movl (%2), %%ebx\n\t"    // Move 64-bit new_value into
                       "movl 4(%2), %%ecx\n\t"   // ecx:ebx
                       "0:\n\t"
                       "movl %1, %%eax\n\t"      // Read contents of ptr into
                       "movl 4%1, %%edx\n\t"     // edx:eax
                       "lock; cmpxchg8b %1\n\t"  // Attempt cmpxchg; if *ptr
                       "jnz 0b\n\t"              // is no longer edx:eax, loop
                       : "=A" (new_value)
                       : "m" (*ptr),
                         "r" (&new_value)
                       : "memory", "%ebx", "%ecx");
  return new_value;  // Now it's the previous value.
#else
  NotImplementedFatalError("NoBarrier_AtomicExchange");
  return 0;
#endif
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
#if 0 // Not implemented
  Atomic64 temp = increment;
  __asm__ __volatile__(
                       "0:\n\t"
                       "movl (%3), %%ebx\n\t"    // Move 64-bit increment into
                       "movl 4(%3), %%ecx\n\t"   // ecx:ebx
                       "movl (%2), %%eax\n\t"    // Read contents of ptr into
                       "movl 4(%2), %%edx\n\t"   // edx:eax
                       "add %%eax, %%ebx\n\t"    // sum => ecx:ebx
                       "adc %%edx, %%ecx\n\t"    // edx:eax still has old *ptr
                       "lock; cmpxchg8b (%2)\n\t"// Attempt cmpxchg; if *ptr
                       "jnz 0b\n\t"              // is no longer edx:eax, loop
                       : "=A"(temp), "+m"(*ptr)
                       : "D" (ptr), "S" (&increment)
                       : "memory", "%ebx", "%ecx");
  // temp now contains the previous value of *ptr
  return temp + increment;
#else
  NotImplementedFatalError("NoBarrier_AtomicIncrement");
  return 0;
#endif
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
#if 0 // Not implemented
  Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return new_val;
#else
  NotImplementedFatalError("Barrier_AtomicIncrement");
  return 0;
#endif
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
#if 0 // Not implemented
  __asm {
    mov mm0, value;  // Use mmx reg for 64-bit atomic moves
    mov ptr, mm0;
    emms;            // Empty mmx state to enable FP registers
  }
#else
  NotImplementedFatalError("NoBarrier_Store");
#endif
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  // Acts as a barrier in this implementation.
  NoBarrier_AtomicExchange(ptr, value);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
#if 0 // Not implemented
  Atomic64 value;
  __asm {
    mov mm0, ptr;    // Use mmx reg for 64-bit atomic moves
    mov value, mm0;
    emms;            // Empty mmx state to enable FP registers
  }
  return value;
#else
  NotImplementedFatalError("NoBarrier_Load");
  return 0;
#endif
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}

#endif  // defined(_WIN64) || defined(__MINGW64__)


inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_