| OLD | NEW |
| (Empty) |
| 1 | |
| 2 //---------------------------------------------------------------------------- | |
| 3 // Anti-Grain Geometry - Version 2.3 | |
| 4 // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com) | |
| 5 // | |
| 6 // Permission to copy, use, modify, sell and distribute this software | |
| 7 // is granted provided this copyright notice appears in all copies. | |
| 8 // This software is provided "as is" without express or implied | |
| 9 // warranty, and with no claim as to its suitability for any purpose. | |
| 10 // | |
| 11 //---------------------------------------------------------------------------- | |
| 12 // Contact: mcseem@antigrain.com | |
| 13 // mcseemagg@yahoo.com | |
| 14 // http://www.antigrain.com | |
| 15 //---------------------------------------------------------------------------- | |
| 16 #ifndef AGG_ARRAY_INCLUDED | |
| 17 #define AGG_ARRAY_INCLUDED | |
| 18 #include "agg_basics.h" | |
| 19 namespace agg | |
| 20 { | |
// Dynamically sized array for plain-old-data element types.  Memory is
// managed with FX_Alloc/FX_Free and raw memset/memcpy, so no element
// constructors, destructors, or copy-assignment operators are ever run:
// T must be a POD type.
template<class T> class pod_array : public CFX_Object
{
public:
    typedef T value_type;
    ~pod_array()
    {
        FX_Free(m_array);
    }
    // Empty array; no storage allocated.
    pod_array() : m_size(0), m_capacity(0), m_array(0) {}
    // Pre-allocates cap + extra_tail elements (defined out of line).
    pod_array(unsigned cap, unsigned extra_tail = 0);
    pod_array(const pod_array<T>&);
    const pod_array<T>& operator = (const pod_array<T>&);
    // Reserve at least cap + extra_tail elements; resets size() to 0.
    void capacity(unsigned cap, unsigned extra_tail = 0);
    unsigned capacity() const
    {
        return m_capacity;
    }
    // capacity() followed by marking `size` elements in use; element
    // contents are left uninitialized.
    void allocate(unsigned size, unsigned extra_tail = 0);
    // Grow the buffer (preserving contents) or shrink the logical size.
    void resize(unsigned new_size);
    // Zero-fill the in-use portion of the buffer.
    void zero()
    {
        FXSYS_memset(m_array, 0, sizeof(T) * m_size);
    }
    // Append without bounds checking: the caller must have reserved
    // sufficient capacity beforehand.
    void add(const T& v)
    {
        m_array[m_size++] = v;
    }
    // Claim `size` additional elements (again, no bounds checking).
    void inc_size(unsigned size)
    {
        m_size += size;
    }
    unsigned size() const
    {
        return m_size;
    }
    unsigned byte_size() const
    {
        return m_size * sizeof(T);
    }
    // Unchecked element access, const and mutable flavors.
    const T& operator [] (unsigned i) const
    {
        return m_array[i];
    }
    T& operator [] (unsigned i)
    {
        return m_array[i];
    }
    // NOTE: unlike the std containers, at() performs no range check.
    const T& at(unsigned i) const
    {
        return m_array[i];
    }
    T& at(unsigned i)
    {
        return m_array[i];
    }
    // Element returned by value (copy).
    T value_at(unsigned i) const
    {
        return m_array[i];
    }
    const T* data() const
    {
        return m_array;
    }
    T* data()
    {
        return m_array;
    }
    // Logical clear: size becomes 0, storage is retained.
    void remove_all()
    {
        m_size = 0;
    }
    // Truncate to num elements if currently larger.
    void cut_at(unsigned num)
    {
        if(num < m_size) {
            m_size = num;
        }
    }
private:
    unsigned m_size;      // elements in use
    unsigned m_capacity;  // elements allocated
    T* m_array;           // heap buffer (0 when capacity is 0)
};
| 103 template<class T> | |
| 104 void pod_array<T>::capacity(unsigned cap, unsigned extra_tail) | |
| 105 { | |
| 106 m_size = 0; | |
| 107 unsigned full_cap = cap + extra_tail; | |
| 108 if(full_cap < cap) { | |
| 109 FX_Free(m_array); | |
| 110 m_array = 0; | |
| 111 m_capacity = 0; | |
| 112 } else if(full_cap > m_capacity) { | |
| 113 FX_Free(m_array); | |
| 114 m_array = 0; | |
| 115 m_capacity = 0; | |
| 116 m_array = FX_Alloc(T, full_cap); | |
| 117 if (m_array) { | |
| 118 m_capacity = full_cap; | |
| 119 } | |
| 120 } | |
| 121 } | |
// Reserve room for size + extra_tail elements, then set the logical
// size to `size`.  Element contents are left uninitialized.
template<class T>
void pod_array<T>::allocate(unsigned size, unsigned extra_tail)
{
    capacity(size, extra_tail);
    m_size = size;
}
| 128 template<class T> | |
| 129 void pod_array<T>::resize(unsigned new_size) | |
| 130 { | |
| 131 if(new_size > m_size) { | |
| 132 if(new_size > m_capacity) { | |
| 133 T* data = FX_Alloc(T, new_size); | |
| 134 FXSYS_memcpy(data, m_array, m_size * sizeof(T)); | |
| 135 FX_Free(m_array); | |
| 136 m_array = data; | |
| 137 } | |
| 138 } else { | |
| 139 m_size = new_size; | |
| 140 } | |
| 141 } | |
| 142 template<class T> pod_array<T>::pod_array(unsigned cap, unsigned extra_tail) : | |
| 143 m_size(0), m_capacity(cap + extra_tail), m_array(FX_Alloc(T, m_capacity)) {} | |
| 144 template<class T> pod_array<T>::pod_array(const pod_array<T>& v) : | |
| 145 m_size(v.m_size), | |
| 146 m_capacity(v.m_capacity), | |
| 147 m_array(v.m_capacity ? FX_Alloc(T, v.m_capacity) : 0) | |
| 148 { | |
| 149 FXSYS_memcpy(m_array, v.m_array, sizeof(T) * v.m_size); | |
| 150 } | |
| 151 template<class T> const pod_array<T>& | |
| 152 pod_array<T>::operator = (const pod_array<T>&v) | |
| 153 { | |
| 154 allocate(v.m_size); | |
| 155 if(v.m_size) { | |
| 156 FXSYS_memcpy(m_array, v.m_array, sizeof(T) * v.m_size); | |
| 157 } | |
| 158 return *this; | |
| 159 } | |
// Deque-like container for POD types: elements live in fixed-size
// blocks of 2^S entries addressed through a growable pointer table.
// Blocks are never relocated once allocated (only the pointer table is
// reallocated), so element addresses remain stable as the container
// grows, unlike pod_array.
template<class T, unsigned S = 6> class pod_deque : public CFX_Object
{
public:
    enum block_scale_e {
        block_shift = S,               // log2 of elements per block
        block_size = 1 << block_shift, // elements per block
        block_mask = block_size - 1    // mask for the in-block offset
    };
    typedef T value_type;
    ~pod_deque();
    pod_deque();
    // block_ptr_inc: growth step for the table of block pointers.
    pod_deque(unsigned block_ptr_inc);
    pod_deque(const pod_deque<T, S>& v);
    const pod_deque<T, S>& operator = (const pod_deque<T, S>& v);
    // Logical clear: size becomes 0 but all blocks stay allocated.
    void remove_all()
    {
        m_size = 0;
    }
    // Clear and release every block.
    void free_all()
    {
        free_tail(0);
    }
    // Shrink to `size` elements, freeing blocks that become unused.
    void free_tail(unsigned size);
    void add(const T& val);
    void modify_last(const T& val);
    void remove_last();
    // Reserve num_elements contiguous slots within one block and return
    // the starting index, or -1 when num_elements >= block_size.
    int allocate_continuous_block(unsigned num_elements);
    // Append num_elem elements from ptr, one at a time.
    void add_array(const T* ptr, unsigned num_elem)
    {
        while(num_elem--) {
            add(*ptr++);
        }
    }
    // Append from any accessor exposing size(), operator* and operator++.
    template<class DataAccessor> void add_data(DataAccessor& data)
    {
        while(data.size()) {
            add(*data);
            ++data;
        }
    }
    // Truncate to `size` elements if currently larger (blocks kept).
    void cut_at(unsigned size)
    {
        if(size < m_size) {
            m_size = size;
        }
    }
    unsigned size() const
    {
        return m_size;
    }
    // Unchecked element access: the index is split into a block number
    // (high bits) and an offset within the block (low bits).
    const T& operator [] (unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& operator [] (unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    // NOTE: at() performs no range check here.
    const T& at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& at(unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T value_at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    const T& curr(unsigned idx) const
    {
        return (*this)[idx];
    }
    T& curr(unsigned idx)
    {
        return (*this)[idx];
    }
    // Circular predecessor/successor of idx.  NOTE(review): these take
    // a modulo by m_size, so calling them on an empty deque divides by
    // zero -- callers must ensure m_size > 0.
    const T& prev(unsigned idx) const
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    T& prev(unsigned idx)
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    const T& next(unsigned idx) const
    {
        return (*this)[(idx + 1) % m_size];
    }
    T& next(unsigned idx)
    {
        return (*this)[(idx + 1) % m_size];
    }
    const T& last() const
    {
        return (*this)[m_size - 1];
    }
    T& last()
    {
        return (*this)[m_size - 1];
    }
    unsigned byte_size() const;
    // Raw pointer to block nb (no range check).
    const T* block(unsigned nb) const
    {
        return m_blocks[nb];
    }
public:
    // Internals are public in this copy of the code; treat as private.
    void allocate_block(unsigned nb);
    T* data_ptr();
    unsigned m_size;          // elements in use
    unsigned m_num_blocks;    // blocks currently allocated
    unsigned m_max_blocks;    // capacity of the m_blocks table
    T** m_blocks;             // table of block pointers
    unsigned m_block_ptr_inc; // growth step for the table
};
| 276 template<class T, unsigned S> pod_deque<T, S>::~pod_deque() | |
| 277 { | |
| 278 if(m_num_blocks) { | |
| 279 T** blk = m_blocks + m_num_blocks - 1; | |
| 280 while(m_num_blocks--) { | |
| 281 FX_Free(*blk); | |
| 282 --blk; | |
| 283 } | |
| 284 FX_Free(m_blocks); | |
| 285 } | |
| 286 } | |
| 287 template<class T, unsigned S> | |
| 288 void pod_deque<T, S>::free_tail(unsigned size) | |
| 289 { | |
| 290 if(size < m_size) { | |
| 291 unsigned nb = (size + block_mask) >> block_shift; | |
| 292 while(m_num_blocks > nb) { | |
| 293 FX_Free(m_blocks[--m_num_blocks]); | |
| 294 } | |
| 295 m_size = size; | |
| 296 } | |
| 297 } | |
// Default constructor: empty deque; the block-pointer table will grow
// in steps of block_size entries.
template<class T, unsigned S> pod_deque<T, S>::pod_deque() :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_size)
{
}
// Constructor with an explicit growth step for the block-pointer table.
template<class T, unsigned S>
pod_deque<T, S>::pod_deque(unsigned block_ptr_inc) :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_ptr_inc)
{
}
| 315 template<class T, unsigned S> | |
| 316 pod_deque<T, S>::pod_deque(const pod_deque<T, S>& v) : | |
| 317 m_size(v.m_size), | |
| 318 m_num_blocks(v.m_num_blocks), | |
| 319 m_max_blocks(v.m_max_blocks), | |
| 320 m_blocks(v.m_max_blocks ? FX_Alloc(T*, v.m_max_blocks) : 0), | |
| 321 m_block_ptr_inc(v.m_block_ptr_inc) | |
| 322 { | |
| 323 unsigned i; | |
| 324 for(i = 0; i < v.m_num_blocks; ++i) { | |
| 325 m_blocks[i] = FX_Alloc(T, block_size); | |
| 326 FXSYS_memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T)); | |
| 327 } | |
| 328 } | |
| 329 template<class T, unsigned S> | |
| 330 const pod_deque<T, S>& pod_deque<T, S>::operator = (const pod_deque<T, S>& v) | |
| 331 { | |
| 332 unsigned i; | |
| 333 for(i = m_num_blocks; i < v.m_num_blocks; ++i) { | |
| 334 allocate_block(i); | |
| 335 } | |
| 336 for(i = 0; i < v.m_num_blocks; ++i) { | |
| 337 FXSYS_memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T)); | |
| 338 } | |
| 339 m_size = v.m_size; | |
| 340 return *this; | |
| 341 } | |
| 342 template<class T, unsigned S> | |
| 343 void pod_deque<T, S>::allocate_block(unsigned nb) | |
| 344 { | |
| 345 if(nb >= m_max_blocks) { | |
| 346 T** new_blocks = FX_Alloc(T*, m_max_blocks + m_block_ptr_inc); | |
| 347 if(m_blocks) { | |
| 348 FXSYS_memcpy(new_blocks, | |
| 349 m_blocks, | |
| 350 m_num_blocks * sizeof(T*)); | |
| 351 FX_Free(m_blocks); | |
| 352 } | |
| 353 m_blocks = new_blocks; | |
| 354 m_max_blocks += m_block_ptr_inc; | |
| 355 } | |
| 356 m_blocks[nb] = FX_Alloc(T, block_size); | |
| 357 m_num_blocks++; | |
| 358 } | |
| 359 template<class T, unsigned S> | |
| 360 inline T* pod_deque<T, S>::data_ptr() | |
| 361 { | |
| 362 unsigned nb = m_size >> block_shift; | |
| 363 if(nb >= m_num_blocks) { | |
| 364 allocate_block(nb); | |
| 365 } | |
| 366 return m_blocks[nb] + (m_size & block_mask); | |
| 367 } | |
// Append a copy of val, growing block storage on demand via data_ptr().
template<class T, unsigned S>
inline void pod_deque<T, S>::add(const T& val)
{
    *data_ptr() = val;
    ++m_size;
}
// Drop the last element if any (no destructor runs; storage is kept).
template<class T, unsigned S>
inline void pod_deque<T, S>::remove_last()
{
    if(m_size) {
        --m_size;
    }
}
// Replace the last element with val (appends if the deque is empty,
// since remove_last() is a no-op then).
template<class T, unsigned S>
void pod_deque<T, S>::modify_last(const T& val)
{
    remove_last();
    add(val);
}
| 387 template<class T, unsigned S> | |
| 388 int pod_deque<T, S>::allocate_continuous_block(unsigned num_elements) | |
| 389 { | |
| 390 if(num_elements < block_size) { | |
| 391 data_ptr(); | |
| 392 unsigned rest = block_size - (m_size & block_mask); | |
| 393 unsigned index; | |
| 394 if(num_elements <= rest) { | |
| 395 index = m_size; | |
| 396 m_size += num_elements; | |
| 397 return index; | |
| 398 } | |
| 399 m_size += rest; | |
| 400 data_ptr(); | |
| 401 index = m_size; | |
| 402 m_size += num_elements; | |
| 403 return index; | |
| 404 } | |
| 405 return -1; | |
| 406 } | |
// Bytes logically in use (does not count unused block space).
template<class T, unsigned S>
unsigned pod_deque<T, S>::byte_size() const
{
    return m_size * sizeof(T);
}
// Bump ("arena") allocator: hands out raw byte spans carved from a
// chain of heap blocks.  Individual allocations are never freed; all
// memory is released at once by remove_all() or the destructor.
class pod_allocator : public CFX_Object
{
public:
    // Release every block and the block-pointer table, then reset all
    // bookkeeping to the empty state.
    void remove_all()
    {
        if(m_num_blocks) {
            int8u** blk = m_blocks + m_num_blocks - 1;
            while(m_num_blocks--) {
                FX_Free(*blk);
                --blk;
            }
            FX_Free(m_blocks);
        }
        m_num_blocks = 0;
        m_max_blocks = 0;
        m_blocks = 0;
        m_buf_ptr = 0;
        m_rest = 0;
    }
    ~pod_allocator()
    {
        remove_all();
    }
    // block_size: granularity (minimum size) of the heap blocks.
    // block_ptr_inc: growth step for the table of block pointers.
    pod_allocator(unsigned block_size, unsigned block_ptr_inc = 256 - 8) :
        m_block_size(block_size),
        m_block_ptr_inc(block_ptr_inc),
        m_num_blocks(0),
        m_max_blocks(0),
        m_blocks(0),
        m_buf_ptr(0),
        m_rest(0)
    {
    }
    // Carve `size` bytes, aligned to `alignment` bytes, out of the
    // current block.  Returns 0 when size == 0.  Falls back to
    // allocating a fresh block (recursing once) when the current block
    // cannot satisfy the request.
    int8u* allocate(unsigned size, unsigned alignment = 1)
    {
        if(size == 0) {
            return 0;
        }
        if(size <= m_rest) {
            int8u* ptr = m_buf_ptr;
            if(alignment > 1) {
                // Padding needed to advance ptr to the next address
                // that is a multiple of `alignment`.
                unsigned align = (alignment - unsigned((size_t)ptr) % alignment) % alignment;
                size += align;
                ptr += align;
                if(size <= m_rest) {
                    m_rest -= size;
                    m_buf_ptr += size;
                    return ptr;
                }
                // The padding pushed the request past the end of this
                // block: start a new block and retry with the unpadded
                // size.
                allocate_block(size);
                return allocate(size - align, alignment);
            }
            m_rest -= size;
            m_buf_ptr += size;
            return ptr;
        }
        // Current block too small: reserve enough for the request plus
        // worst-case alignment padding, then retry.
        allocate_block(size + alignment - 1);
        return allocate(size, alignment);
    }
private:
    // Install a fresh heap block of at least m_block_size bytes as the
    // current carve target, growing the pointer table as needed.
    void allocate_block(unsigned size)
    {
        if(size < m_block_size) {
            size = m_block_size;
        }
        if(m_num_blocks >= m_max_blocks) {
            int8u** new_blocks = FX_Alloc(int8u*, m_max_blocks + m_block_ptr_inc);
            if(m_blocks) {
                FXSYS_memcpy(new_blocks,
                             m_blocks,
                             m_num_blocks * sizeof(int8u*));
                FX_Free(m_blocks);
            }
            m_blocks = new_blocks;
            m_max_blocks += m_block_ptr_inc;
        }
        m_blocks[m_num_blocks] = m_buf_ptr = FX_Alloc(int8u, size);
        m_num_blocks++;
        m_rest = size;
    }
    unsigned m_block_size;    // minimum size of each heap block
    unsigned m_block_ptr_inc; // growth step for the m_blocks table
    unsigned m_num_blocks;    // blocks allocated so far
    unsigned m_max_blocks;    // capacity of the m_blocks table
    int8u** m_blocks;         // table of block pointers
    int8u* m_buf_ptr;         // next free byte in the current block
    unsigned m_rest;          // bytes remaining in the current block
};
// Partition-size threshold for quick-sort style algorithms; presumably
// sub-arrays at or below this size switch to a simpler sort -- the
// consumers are not visible in this part of the file.
enum quick_sort_threshold_e {
    quick_sort_threshold = 9
};
// Exchange the values of a and b through a temporary copy.
template<class T> inline void swap_elements(T& a, T& b)
{
    T tmp(a);
    a = b;
    b = tmp;
}
| 509 } | |
| 510 #endif | |
| OLD | NEW |