NVBIO
atomics.cpp
/*
 * nvbio
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *    * Neither the name of the NVIDIA CORPORATION nor the
 *      names of its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <nvbio/basic/atomics.h>
#include <nvbio/basic/threads.h>

#ifdef WIN32

#include <windows.h>

namespace nvbio {

int32 atomic_increment(int32 volatile *value) { return InterlockedIncrement(reinterpret_cast<LONG volatile*>(value)); }
int64 atomic_increment(int64 volatile *value) { return InterlockedIncrement64(value); }

int32 atomic_decrement(int32 volatile *value) { return InterlockedDecrement(reinterpret_cast<LONG volatile*>(value)); }
int64 atomic_decrement(int64 volatile *value) { return InterlockedDecrement64(value); }

} // namespace nvbio

#endif
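
// Typical use of the wrappers above (an illustrative sketch only; 'RefCounted'
// and 'm_count' are hypothetical names, not part of nvbio): a reference count
// that may be updated from multiple threads, where each call returns the
// updated count.
//
//   int32 RefCounted::ref()   { return atomic_increment( &m_count ); }
//   int32 RefCounted::unref() { return atomic_decrement( &m_count ); }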

namespace nvbio {

#if !defined(__GNUC__)
namespace {
    // single mutex shared by all of the lock-based fallback paths below: taking
    // the same lock in every function is what makes concurrent callers exclude
    // each other
    Mutex s_mutex;
}
#endif

void host_release_fence()
{
    #if defined(__GNUC__)
    // make sure the other threads see the reference count before the output is set
    __atomic_thread_fence( __ATOMIC_RELEASE );
    #endif
}

void host_acquire_fence()
{
    #if defined(__GNUC__)
    // make sure this thread sees the writes published by other threads before it reads the shared data
    __atomic_thread_fence( __ATOMIC_ACQUIRE );
    #endif
}

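// Illustrative sketch of how the fences above are meant to pair with the relaxed
// host atomics defined below. The example_* names are hypothetical and the block
// is excluded from compilation; it only shows the intended ordering: write the
// payload, issue a release fence, then raise the flag with a relaxed atomic; the
// reader checks the flag, issues an acquire fence, and only then reads the payload.
#if 0
static int32  example_payload = 0;
static uint32 example_ready   = 0;

void example_publish(const int32 v)
{
    example_payload = v;                    // write the data first
    host_release_fence();                   // make the write visible before the flag is raised
    host_atomic_add( &example_ready, 1u );  // relaxed increment acting as the "ready" flag
}

bool example_consume(int32& out)
{
    if (host_atomic_add( &example_ready, 0u ) == 0) // relaxed atomic read of the flag
        return false;
    host_acquire_fence();                   // order the flag read before the payload read
    out = example_payload;
    return true;
}
#endif
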
// relaxed fetch-and-add: returns the value held before the addition
int32 host_atomic_add(int32* value, const int32 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_add( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const int32 old = *value;
    *value += op;
    return old;
#endif
}
uint32 host_atomic_add(uint32* value, const uint32 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_add( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const uint32 old = *value;
    *value += op;
    return old;
#endif
}
int64 host_atomic_add(int64* value, const int64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_add( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const int64 old = *value;
    *value += op;
    return old;
#endif
}
uint64 host_atomic_add(uint64* value, const uint64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_add( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const uint64 old = *value;
    *value += op;
    return old;
#endif
}

// relaxed fetch-and-subtract: returns the value held before the subtraction
int32 host_atomic_sub(int32* value, const int32 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_sub( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const int32 old = *value;
    *value -= op;
    return old;
#endif
}
uint32 host_atomic_sub(uint32* value, const uint32 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_sub( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const uint32 old = *value;
    *value -= op;
    return old;
#endif
}

int64 host_atomic_sub(int64* value, const int64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_sub( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const int64 old = *value;
    *value -= op;
    return old;
#endif
}
uint64 host_atomic_sub(uint64* value, const uint64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_sub( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const uint64 old = *value;
    *value -= op;
    return old;
#endif
}

// relaxed fetch-and-or: returns the value held before the bitwise OR
uint32 host_atomic_or(uint32* value, const uint32 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_or( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const uint32 old = *value;
    *value |= op;
    return old;
#endif
}
uint64 host_atomic_or(uint64* value, const uint64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_or( value, op, __ATOMIC_RELAXED );
#else
    ScopedLock lock( &s_mutex );

    const uint64 old = *value;
    *value |= op;
    return old;
#endif
}

} // namespace nvbio
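
// Usage sketch for the relaxed host atomics above (a minimal illustration; the
// variables 'stats' and 'flag_mask' are hypothetical and owned by the caller):
//
//   nvbio::uint32 stats     = 0;
//   nvbio::uint32 flag_mask = 0;
//
//   nvbio::host_atomic_add( &stats, 1u );           // count an event; returns the previous value
//   nvbio::host_atomic_or ( &flag_mask, 1u << 3 );  // set bit 3; returns the previous mask
//   nvbio::host_atomic_sub( &stats, 1u );           // undo the count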