//+---------------------------------------------------------------------------
//
// Copyright ( C ) Microsoft, 2002.
//
// File:      shared_any.cpp
//
// Contents:  pool allocator for reference counts
//
// Classes:   ref_count_allocator and helpers
//
// Functions:
//
// Author:    Eric Niebler ( ericne@microsoft.com )
//
//----------------------------------------------------------------------------

#ifdef _MT
# ifndef _WIN32_WINNT
#  define _WIN32_WINNT 0x0403
# endif
# include <windows.h>
# include <unknwn.h>
#endif

#include <cassert>
#include <functional> // for std::less
#include <algorithm>  // for std::swap

#pragma warning(push)
// C4640: construction of local static object is not thread-safe
#pragma warning(disable : 4640)
#include "shared_any.h"
#include "scoped_any.h"
#pragma warning(pop)

namespace detail
{
    struct critsec
    {
#ifdef _MT
        CRITICAL_SECTION m_cs;

        critsec()
        {
            InitializeCriticalSectionAndSpinCount( &m_cs, 4000 );
        }
        ~critsec()
        {
            DeleteCriticalSection( &m_cs );
        }
        void enter()
        {
            EnterCriticalSection( &m_cs );
        }
        void leave()
        {
            LeaveCriticalSection( &m_cs );
        }
#endif
    };
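    // Note: in single-threaded ( non-_MT ) builds, critsec has no members and
    // the lock class below does nothing, so the allocator adds no
    // synchronization overhead.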

    namespace
    {
        critsec g_critsec;
    }

    struct lock
    {
#ifdef _MT
        critsec & m_cs;

        explicit lock( critsec & cs )
            : m_cs(cs)
        {
            m_cs.enter();
        }
        ~lock()
        {
            m_cs.leave();
        }
#else
        explicit lock( critsec & )
        {
        }
#endif
    private:
        lock( lock const & );
        lock & operator=( lock const & );
    };
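    // lock is a scoped ( RAII ) guard: it enters the critical section in its
    // constructor and leaves it in its destructor, so every return path and
    // exception path releases the lock. The private, unimplemented copy
    // constructor and assignment operator are the pre-C++11 way of making the
    // guard non-copyable.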

    struct ref_count_block
    {
        static long const s_sizeBlock = 256;

        short m_free_list;  // offset to start of freelist
        short m_available;  // count of refcounts in this block that are available
        long  m_refcounts[ s_sizeBlock ];

        ref_count_block()
            : m_free_list(0), m_available(s_sizeBlock)
        {
            for( long l=0; l<s_sizeBlock; ++l )
                m_refcounts[l] = l+1;
        }

        bool empty() const // throw()
        {
            return s_sizeBlock == m_available;
        }

        bool full() const // throw()
        {
            return 0 == m_available;
        }

        long volatile *alloc( lock & )
        {
            assert( 0 != m_available );
            long *refcount = m_refcounts + m_free_list;
            m_free_list = static_cast<short>( *refcount );
            --m_available;
            return refcount;
        }

        void free( long volatile *refcount, lock & ) // throw()
        {
            assert( owns( refcount ) );
            *refcount = m_free_list;
            m_free_list = static_cast<short>( refcount - m_refcounts );
            ++m_available;
        }

        bool owns( long volatile *refcount ) const // throw()
        {
            return ! std::less<void*>()( const_cast<long*>( refcount ), const_cast<long*>( m_refcounts ) ) &&
                     std::less<void*>()( const_cast<long*>( refcount ), const_cast<long*>( m_refcounts ) + s_sizeBlock );
        }
    };
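    // How a block works: m_refcounts doubles as storage for live reference
    // counts and as an intrusive free list. Every slot that has not been handed
    // out holds the index of the next free slot, m_free_list holds the index of
    // the first free slot, and m_available counts how many remain. alloc() pops
    // the head of that list and free() pushes a slot back on, so both run in
    // constant time with no bookkeeping memory beyond the array itself.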


    struct ref_count_allocator::node
    {
        node *m_next;
        node *m_prev;
        ref_count_block m_block;
        explicit node( node *next=0, node *prev=0 )
            : m_next(next), m_prev(prev), m_block()
        {
            if( m_next )
                m_next->m_prev = this;
            if( m_prev )
                m_prev->m_next = this;
        }
    };
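    // Each node embeds one ref_count_block and links into the allocator's
    // doubly-linked list of blocks; the constructor splices the new node in
    // ahead of the node passed as "next".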


    ref_count_allocator::ref_count_allocator()
        : m_list_blocks(0), m_last_alloc(0), m_last_free(0)
    {
    }

    ref_count_allocator::~ref_count_allocator()
    {
        // Just leak the blocks. It's ok, really.
        // If you need to clean up the blocks and
        // you are certain that no refcounts are
        // outstanding, you can use the finalize()
        // method to force deallocation.
    }

    void ref_count_allocator::finalize()
    {
        lock l( g_critsec );
        for( node *next; m_list_blocks; m_list_blocks=next )
        {
            next = m_list_blocks->m_next;
            delete m_list_blocks;
        }
        m_last_alloc = 0;
        m_last_free = 0;
    }

    long volatile *ref_count_allocator::alloc()
    {
        lock l( g_critsec );
        if( ! m_last_alloc || m_last_alloc->m_block.full() )
        {
            for( m_last_alloc = m_list_blocks;
                 m_last_alloc && m_last_alloc->m_block.full();
                 m_last_alloc = m_last_alloc->m_next );
            if( ! m_last_alloc )
            {
                m_last_alloc = new( std::nothrow ) node( m_list_blocks );
                if( ! m_last_alloc )
                    return 0;
                m_list_blocks = m_last_alloc;
            }
        }
        return m_last_alloc->m_block.alloc( l );
    }
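    // Allocation strategy: alloc() first tries the block that satisfied the
    // previous request ( m_last_alloc ), then scans the list for any block with
    // a free slot, and only then allocates a fresh node at the head of the
    // list. It returns 0 rather than throwing when new( std::nothrow ) fails,
    // so callers must check the result.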

    long volatile *ref_count_allocator::alloc( long val )
    {
        long volatile *refcount = alloc();
        if( refcount )          // alloc() returns 0 on out-of-memory
            *refcount = val;
        return refcount;
    }

    void ref_count_allocator::free( long volatile *refcount ) // throw()
    {
        // Don't rearrange the order of these locals! Locals are destroyed in
        // reverse order of declaration, so the lock is released before
        // scoped_last_free runs, and any empty node handed to it below is
        // deleted outside the critical section.
        scoped_any<node*,close_delete> scoped_last_free;
        lock l( g_critsec );

        if( ! m_last_free || ! m_last_free->m_block.owns( refcount ) )
        {
            for( m_last_free = m_list_blocks;
                 m_last_free && ! m_last_free->m_block.owns( refcount );
                 m_last_free = m_last_free->m_next );
        }

        assert( m_last_free && m_last_free->m_block.owns( refcount ) );
        m_last_free->m_block.free( refcount, l );

        // If the block just became empty and it isn't the head of the list,
        // unlink it; then either move it to the head ( at most one empty block
        // is kept around as a cache ) or, if the head block is already empty,
        // hand it to scoped_last_free for deletion once the lock is released.
        if( m_last_free != m_list_blocks && m_last_free->m_block.empty() )
        {
            if( 0 != ( m_last_free->m_prev->m_next = m_last_free->m_next ) )
                m_last_free->m_next->m_prev = m_last_free->m_prev;

            if( ! m_list_blocks->m_block.empty() )
            {
                m_last_free->m_next = m_list_blocks;
                m_last_free->m_prev = 0;
                m_list_blocks->m_prev = m_last_free;
                m_list_blocks = m_last_free;
            }
            else
                reset( scoped_last_free, m_last_free ); // deleted after critsec is released

            m_last_free = 0;
        }
    }

    // Here is the global reference count allocator.
    ref_count_allocator ref_count_allocator::instance;
}
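
// A rough usage sketch ( shared_any.h is not reproduced here, so the calls
// below illustrate the idea rather than the actual shared_any interface ):
//
//     long volatile *rc = detail::ref_count_allocator::instance.alloc( 1 );
//     if( rc )
//     {
//         // each additional owner:  InterlockedIncrement( rc );
//         // each owner going away:  if( 0 == InterlockedDecrement( rc ) )
//         //                             detail::ref_count_allocator::instance.free( rc );
//     }
//
// In single-threaded ( non-_MT ) builds plain ++*rc / --*rc would do, since
// the Interlocked* functions come from <windows.h>, which this file only
// includes when _MT is defined.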