NVBIO
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
vector_array.h
Go to the documentation of this file.
1 /*
2  * nvbio
3  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  * * Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * * Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  * * Neither the name of the NVIDIA CORPORATION nor the
13  * names of its contributors may be used to endorse or promote products
14  * derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #pragma once
29 
30 #include <nvbio/basic/types.h>
32 #include <nvbio/basic/atomics.h>
33 #include <nvbio/basic/vector.h> // thrust_copy_vector
34 
35 namespace nvbio {
36 
89 
92 
98 
101 
// ----------------------------------------------------------------------------
// NOTE(review): this file is a Doxygen source-listing dump; each code line
// below still carries its original listing number (e.g. "105"), and lines that
// were rendered as hyperlinks were dropped by the extraction: the struct name
// (listing line 106), the constructor name (108-111) and four member
// declarations (158-161) are missing.  Judging from the uses of
// VectorArrayView<T> further down the file, this struct is presumably
// VectorArrayView<T>: a non-owning view over a shared arena of T's that hands
// out per-index sub-arrays.  TODO: confirm against the original nvbio header.
// ----------------------------------------------------------------------------
105 template <typename T>
107 {
// Constructor (name line lost in extraction): binds the view to an external
// arena, the per-index slot and size tables, a shared allocation counter
// `pool`, and the arena capacity `size`.
112  T* arena = NULL,
113  uint32* index = NULL,
114  uint32* sizes = NULL,
115  uint32* pool = NULL,
116  uint32 size = 0u)
117  : m_arena(arena), m_index(index), m_sizes(sizes), m_pool(pool), m_size(size) {}
118 
// Atomically reserve `size` consecutive arena entries for vector `index`.
// Returns a pointer to the reserved storage, or NULL when the arena is
// exhausted.  Safe to call concurrently: the slot is claimed with an atomic
// add on the shared pool counter.
// NOTE(review): the bound check uses `>=`, so a request that would fit
// exactly (slot + size == m_size) is also rejected — looks like an
// off-by-one; confirm intent upstream.  Also note the pool counter is
// advanced even on failure, so the skipped space is never reclaimed.
122  T* alloc(const uint32 index, const uint32 size)
123  {
124  const uint32 slot = atomic_add( m_pool, size );
125  if (slot + size >= m_size)
126  {
127  // mark an out-of-bounds allocation
128  m_index[index] = m_size;
129  m_sizes[index] = 0u;
130  return NULL;
131  }
132  m_index[index] = slot;
133  m_sizes[index] = size;
134  return m_arena + slot;
135  }
136 
// Return the given vector's storage, or NULL if its allocation failed
// (failure is encoded as m_index[index] == m_size, see alloc() above).
140  T* operator[](const uint32 index) const
141  {
142  // for unsuccessful allocations m_index is set to m_size - in that case we return NULL
143  return (m_index[index] < m_size) ? m_arena + m_index[index] : NULL;
144  }
145 
// Arena offset reserved for the given vector (m_size when unallocated).
149  uint32 slot(const uint32 index) const { return m_index[index]; }
150 
// Number of entries reserved for the given vector (0 when unallocated).
154  uint32 size(const uint32 index) const { return m_sizes[index]; }
155 
156 public:
157  T* m_arena;
// NOTE(review): the remaining member declarations (listing lines 158-161)
// were lost in extraction; the constructor's initializer list shows they
// are m_index, m_sizes, m_pool and m_size.
162 };
163 
// ----------------------------------------------------------------------------
// NOTE(review): owning, device-side container whose storage members are
// thrust::device_vector's.  Several hyperlink-rendered lines were dropped by
// the doc extraction: the struct name (presumably DeviceVectorArray<T>),
// typedefs/constructor (listing lines 168-187), the assignment and swap
// signatures (233 / 244), and the two view-accessor signatures together with
// the raw-pointer arguments they pass (253-261 / 265-273).  TODO: confirm
// against the original nvbio header.
// ----------------------------------------------------------------------------
167 template <typename T>
169 {
173 
177 
// Resize the container to hold `size` vectors backed by an arena of `arena`
// entries; returns the number of bytes used.  With do_alloc == false nothing
// is allocated — only the byte count is computed (for memory budgeting).
188  uint64 resize(const uint32 size, const uint32 arena, const bool do_alloc = true)
189  {
190  uint64 bytes = 0;
191  if (do_alloc) m_arena.resize( arena ); bytes += sizeof(T)*arena;
192  if (do_alloc) m_index.resize( size ); bytes += sizeof(uint32)*size;
193  if (do_alloc) m_sizes.resize( size ); bytes += sizeof(uint32)*size;
194  if (do_alloc)
195  {
196  // initialize all slots
// (m_index is filled with `arena`, the out-of-bounds sentinel meaning
// "not yet allocated" — consistent with the view's operator[] check)
197  thrust::fill(
198  m_index.begin(),
199  m_index.begin() + size,
200  arena );
201 
202  // initialize all slots
203  thrust::fill(
204  m_sizes.begin(),
205  m_sizes.begin() + size,
206  uint32(0) );
207  }
208  return bytes;
209  }
210 
// True iff more arena entries were requested than the arena can hold.
// NOTE: m_pool[0] reads a device_vector element, so each of these pool
// accessors implies a device-to-host transfer.
213  bool has_overflown() { return (m_pool[0] > m_arena.size()); }
214 
// Reset the allocation counter, logically discarding all allocations.
217  void clear() { m_pool[0] = 0; }
218 
// Number of vectors the container can index.
221  uint32 size() const { return m_index.size(); }
222 
// Total number of arena entries requested so far.
225  uint32 allocated_size() const { return m_pool[0]; }
226 
// Capacity of the backing arena.
229  uint32 arena_size() const { return m_arena.size(); }
230 
// Assignment (signature line lost in extraction — presumably operator=):
// deep-copies all four backing vectors from `vec`.
234  {
235  cuda::thrust_copy_vector( m_arena, vec.m_arena );
236  cuda::thrust_copy_vector( m_index, vec.m_index );
237  cuda::thrust_copy_vector( m_sizes, vec.m_sizes );
238  cuda::thrust_copy_vector( m_pool, vec.m_pool );
239  return *this;
240  }
241 
// Swap (signature line lost in extraction): O(1) exchange of the four
// backing vectors with `vec`.
245  {
246  m_arena.swap( vec.m_arena );
247  m_index.swap( vec.m_index );
248  m_sizes.swap( vec.m_sizes );
249  m_pool.swap( vec.m_pool );
250  return *this;
251  }
252 
// View accessor (signature and the raw-pointer arguments at listing lines
// 257-261 were lost in extraction): returns a VectorArrayView bound to this
// container's device storage and sized by the arena capacity.
256  {
257  return VectorArrayView<T>(
262  uint32( m_arena.size() ) );
263  }
264 
// Second view accessor (presumably a const/device variant of the above —
// its signature at listing lines 265-273 was lost in extraction).
268  {
269  return VectorArrayView<T>(
274  uint32( m_arena.size() ) );
275  }
276 
277  thrust::device_vector<T> m_arena;
278  thrust::device_vector<uint32> m_index;
279  thrust::device_vector<uint32> m_sizes;
280  thrust::device_vector<uint32> m_pool;
281 };
282 
// ----------------------------------------------------------------------------
// NOTE(review): host-side counterpart of the device container above — its
// storage members are thrust::host_vector's (presumably HostVectorArray<T>;
// the struct name and typedef/constructor lines 287-305, the assignment and
// swap signatures at 347/358, and the view-accessor signature with its
// pointer arguments at 381-389 were dropped by the doc extraction).
// TODO: confirm against the original nvbio header.
// ----------------------------------------------------------------------------
286 template <typename T>
288 {
291 
295 
// Resize the container to hold `size` vectors backed by an arena of `arena`
// entries; returns the number of bytes used.  With do_alloc == false nothing
// is allocated — only the byte count is computed (for memory budgeting).
306  uint64 resize(const uint32 size, const uint32 arena, const bool do_alloc = true)
307  {
308  uint64 bytes = 0;
309  if (do_alloc) m_arena.resize( arena ); bytes += sizeof(T)*arena;
310  if (do_alloc) m_index.resize( size ); bytes += sizeof(uint32)*size;
311  if (do_alloc) m_sizes.resize( size ); bytes += sizeof(uint32)*size;
312  if (do_alloc)
313  {
314  // initialize all slots
// (m_index is filled with `arena`, the out-of-bounds sentinel meaning
// "not yet allocated" — consistent with operator[] below)
315  thrust::fill(
316  m_index.begin(),
317  m_index.begin() + size,
318  arena );
319 
320  // initialize all slots
321  thrust::fill(
322  m_sizes.begin(),
323  m_sizes.begin() + size,
324  uint32(0) );
325  }
326  return bytes;
327  }
328 
// True iff more arena entries were requested than the arena can hold.
331  bool has_overflown() { return (m_pool[0] > m_arena.size()); }
332 
// Reset the allocation counter, logically discarding all allocations.
335  void clear() { m_pool[0] = 0; }
336 
// Number of vectors the container can index.
339  uint32 size() const { return m_index.size(); }
340 
// Total number of arena entries requested so far.
343  uint32 allocated_size() const { return m_pool[0]; }
344 
// Assignment (signature line lost in extraction — presumably operator=):
// deep-copies all four backing vectors from `vec`.
348  {
349  cuda::thrust_copy_vector( m_arena, vec.m_arena );
350  cuda::thrust_copy_vector( m_index, vec.m_index );
351  cuda::thrust_copy_vector( m_sizes, vec.m_sizes );
352  cuda::thrust_copy_vector( m_pool, vec.m_pool );
353  return *this;
354  }
355 
// Swap (signature line lost in extraction): O(1) exchange of the four
// backing vectors with `vec`.
359  {
360  m_arena.swap( vec.m_arena );
361  m_index.swap( vec.m_index );
362  m_sizes.swap( vec.m_sizes );
363  m_pool.swap( vec.m_pool );
364  return *this;
365  }
366 
// Return the given vector's storage, or NULL if its allocation failed
// (failure is encoded as an index >= the arena size).
370  const T* operator[](const uint32 index) const
371  {
372  // for unsuccessful allocations m_index is set to m_size - in that case we return NULL
373  return (m_index[index] < m_arena.size()) ? &m_arena[0] + m_index[index] : NULL;
374  }
375 
// Arena offset reserved for the given vector.
379  uint32 slot(const uint32 index) const { return m_index[index]; }
380 
// View accessor (signature and the raw-pointer arguments at listing lines
// 385-389 were lost in extraction): returns a VectorArrayView bound to this
// container's host storage and sized by the arena capacity.
384  {
385  return VectorArrayView<T>(
390  uint32( m_arena.size() ) );
391  }
392 
393  thrust::host_vector<T> m_arena;
394  thrust::host_vector<uint32> m_index;
395  thrust::host_vector<uint32> m_sizes;
396  thrust::host_vector<uint32> m_pool;
397 };
398 
// NOTE(review): three free-standing template declarations follow whose
// single-line bodies were rendered as hyperlinks and dropped by the doc
// extraction — presumably the free-function view adaptors (e.g.
// plain_view()/device_view() overloads returning VectorArrayView<T> for the
// containers above).  TODO: recover the definitions from the original
// nvbio header; do not guess them here.
402 template <typename T>
404 
408 template <typename T>
410 
414 template <typename T>
416 
419 
420 } // namespace nvbio