OpenVDB 9.0.1
GridBuilder.h
1 // Copyright Contributors to the OpenVDB Project
2 // SPDX-License-Identifier: MPL-2.0
3 
4 /*!
5  \file GridBuilder.h
6 
7  \author Ken Museth
8 
9  \date June 26, 2020
10 
11  \brief Generates a NanoVDB grid from any volume or function.
12 
13  \note This is only intended as a simple tool to generate nanovdb grids without
14  any dependency on openvdb.
15 */
16 
17 #ifndef NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED
18 #define NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED
19 
20 #include "GridHandle.h"
21 #include "GridStats.h"
22 #include "GridChecksum.h"
23 #include "Range.h"
24 #include "Invoke.h"
25 #include "ForEach.h"
26 #include "Reduce.h"
27 #include "DitherLUT.h"// for nanovdb::DitherLUT
28 
29 #include <map>
30 #include <limits>
31 #include <sstream> // for stringstream
32 #include <vector>
33 #include <cstring> // for memcpy
34 
35 namespace nanovdb {
36 
37 /// @brief Compression oracle based on absolute difference
38 class AbsDiff
39 {
40  float mTolerance;// absolute error tolerance
41 public:
42  /// @note The default value of -1 means it is uninitialized!
43  AbsDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
44  AbsDiff(const AbsDiff&) = default;
45  void setTolerance(float tolerance) { mTolerance = tolerance; }
46  float getTolerance() const { return mTolerance; }
47  /// @brief Return true if the approximate value is within the accepted
48  /// absolute error bounds of the exact value.
49  ///
50  /// @details Required member method
51  bool operator()(float exact, float approx) const
52  {
53  return Abs(exact - approx) <= mTolerance;
54  }
55 };// AbsDiff
56 
57 inline std::ostream& operator<<(std::ostream& os, const AbsDiff& diff)
58 {
59  os << "Absolute tolerance: " << diff.getTolerance();
60  return os;
61 }
62 
63 /// @brief Compression oracle based on relative difference
64 class RelDiff
65 {
66  float mTolerance;// relative error tolerance
67 public:
68  /// @note The default value of -1 means it is uninitialized!
69  RelDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
70  RelDiff(const RelDiff&) = default;
71  void setTolerance(float tolerance) { mTolerance = tolerance; }
72  float getTolerance() const { return mTolerance; }
73  /// @brief Return true if the approximate value is within the accepted
74  /// relative error bounds of the exact value.
75  ///
76  /// @details Required member method
77  bool operator()(float exact, float approx) const
78  {
79  return Abs(exact - approx)/Max(Abs(exact), Abs(approx)) <= mTolerance;
80  }
81 };// RelDiff
82 
83 inline std::ostream& operator<<(std::ostream& os, const RelDiff& diff)
84 {
85  os << "Relative tolerance: " << diff.getTolerance();
86  return os;
87 }
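Any type with a copy constructor and a call operator of the form bool(float exact, float approx) can serve as a compression oracle for GridBuilder::getHandle below; the sketch that follows is illustrative only (not part of this header) and accepts an approximation when its squared error stays within a fixed bound:

// Hypothetical user-defined oracle, shown only to illustrate the required interface
class SquaredDiff
{
    float mTolerance2;// squared absolute error tolerance
public:
    SquaredDiff(float tol2 = 0.0f) : mTolerance2(tol2) {}
    SquaredDiff(const SquaredDiff&) = default;
    /// @brief Required call operator: accept @a approx if its squared error w.r.t. @a exact is small enough
    bool operator()(float exact, float approx) const
    {
        const float d = exact - approx;
        return d * d <= mTolerance2;
    }
};// SquaredDiff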
88 
89 /// @brief Allows for the construction of NanoVDB grids without any dependency on OpenVDB
90 template<typename ValueT, typename BuildT = ValueT, typename StatsT = Stats<ValueT>>
91 class GridBuilder
92 {
93  struct BuildLeaf;
94  template<typename ChildT>
95  struct BuildNode;
96  template<typename ChildT>
97  struct BuildRoot;
98  struct ValueAccessor;
99 
100  struct Codec {float min, max; uint16_t log2, size;};// used for adaptive bit-rate quantization
101 
102  using SrcNode0 = BuildLeaf;
103  using SrcNode1 = BuildNode<SrcNode0>;
104  using SrcNode2 = BuildNode<SrcNode1>;
105  using SrcRootT = BuildRoot<SrcNode2>;
106 
107  using DstNode0 = NanoLeaf< BuildT>;// nanovdb::LeafNode<ValueT>; // leaf
108  using DstNode1 = NanoLower<BuildT>;// nanovdb::InternalNode<DstNode0>; // lower
109  using DstNode2 = NanoUpper<BuildT>;// nanovdb::InternalNode<DstNode1>; // upper
110  using DstRootT = NanoRoot< BuildT>;// nanovdb::RootNode<DstNode2>;
111  using DstTreeT = NanoTree< BuildT>;
112  using DstGridT = NanoGrid< BuildT>;
113 
114  ValueT mDelta; // skip node if: node.max < -mDelta || node.min > mDelta
115  uint8_t* mBufferPtr;// pointer to the beginning of the buffer
116  uint64_t mBufferOffsets[9];//grid, tree, root, upper, lower, leafs, meta data, blind data, buffer size
117  int mVerbose;
118  uint64_t mBlindDataSize;
119  SrcRootT mRoot;// this root supports random write
120  std::vector<SrcNode0*> mArray0; // leaf nodes
121  std::vector<SrcNode1*> mArray1; // lower internal nodes
122  std::vector<SrcNode2*> mArray2; // upper internal nodes
123  std::unique_ptr<Codec[]> mCodec;// defines a codec per leaf node
124  GridClass mGridClass;
125  StatsMode mStats;
126  ChecksumMode mChecksum;
127  bool mDitherOn;
128 
129  // Below are private methods used to serialize nodes into NanoVDB
130  template< typename OracleT, typename BufferT>
131  GridHandle<BufferT> initHandle(const OracleT &oracle, const BufferT& buffer);
132 
133  template <typename T, typename OracleT>
134  inline typename std::enable_if<!is_same<T, FpN>::value>::type
135  compression(uint64_t&, OracleT) {}// no-op
136 
137  template <typename T, typename OracleT>
138  inline typename std::enable_if<is_same<T, FpN>::value>::type
139  compression(uint64_t &offset, OracleT oracle);
140 
141  template<typename T>
142  inline typename std::enable_if<!is_same<Fp4,  typename T::BuildType>::value &&
143  !is_same<Fp8,  typename T::BuildType>::value &&
144  !is_same<Fp16, typename T::BuildType>::value &&
145  !is_same<FpN,  typename T::BuildType>::value>::type
146  processLeafs(std::vector<T*>&);
147 
148  template<typename T>
149  inline typename std::enable_if<is_same<Fp4,  typename T::BuildType>::value ||
150  is_same<Fp8,  typename T::BuildType>::value ||
151  is_same<Fp16, typename T::BuildType>::value>::type
152  processLeafs(std::vector<T*>&);
153 
154  template<typename T>
155  inline typename std::enable_if<is_same<FpN, typename T::BuildType>::value>::type
156  processLeafs(std::vector<T*>&);
157 
158  template<typename SrcNodeT>
159  void processNodes(std::vector<SrcNodeT*>&);
160 
161  DstRootT* processRoot();
162 
163  DstTreeT* processTree();
164 
165  DstGridT* processGrid(const Map&, const std::string&);
166 
167  template<typename T, typename FlagT>
168  inline typename std::enable_if<!is_floating_point<T>::value>::type
169  setFlag(const T&, const T&, FlagT& flag) const { flag &= ~FlagT(1); } // unset first bit
170 
171  template<typename T, typename FlagT>
172  inline typename std::enable_if<is_floating_point<T>::value>::type
173  setFlag(const T& min, const T& max, FlagT& flag) const;
174 
175 public:
176  GridBuilder(ValueT background = ValueT(),
177  GridClass gClass = GridClass::Unknown,
178  uint64_t blindDataSize = 0);
179 
180  ValueAccessor getAccessor() { return ValueAccessor(mRoot); }
181 
182  /// @brief Performs multi-threaded bottom-up signed-distance flood-filling and changes GridClass to LevelSet
183  ///
184  /// @warning Only call this method once this GridBuilder contains a valid signed distance field
185  void sdfToLevelSet();
186 
187  /// @brief Performs multi-threaded bottom-up signed-distance flood-filling followed by level-set -> FOG volume
188  /// conversion. It also changes the GridClass to FogVolume
189  ///
190  /// @warning Only call this method once this GridBuilder contains a valid signed distance field
191  void sdfToFog();
192 
193  void setVerbose(int mode = 1) { mVerbose = mode; }
194 
195  void enableDithering(bool on = true) { mDitherOn = on; }
196 
197  void setStats(StatsMode mode = StatsMode::Default) { mStats = mode; }
198 
199  void setChecksum(ChecksumMode mode = ChecksumMode::Default) { mChecksum = mode; }
200 
201  void setGridClass(GridClass mode = GridClass::Unknown) { mGridClass = mode; }
202 
203  /// @brief Return an instance of a GridHandle (invoking move semantics)
204  template<typename OracleT = AbsDiff, typename BufferT = HostBuffer>
205  GridHandle<BufferT> getHandle(double voxelSize = 1.0,
206  const Vec3d& gridOrigin = Vec3d(0),
207  const std::string& name = "",
208  const OracleT& oracle = OracleT(),
209  const BufferT& buffer = BufferT());
210 
211  /// @brief Return an instance of a GridHandle (invoking move semantics)
212  template<typename OracleT = AbsDiff, typename BufferT = HostBuffer>
213  GridHandle<BufferT> getHandle(const Map& map,
214  const std::string& name = "",
215  const OracleT& oracle = OracleT(),
216  const BufferT& buffer = BufferT());
217 
218  /// @brief Sets grid values in the domain of the @a bbox to those returned by the specified @a func with the
219  /// expected signature [](const Coord&)->ValueT.
220  ///
221  /// @note If @a func returns a value equal to the background value (specified in the constructor) at a
222  /// specific voxel coordinate, then the active state of that coordinate is left off. Otherwise the value
223  /// is set and the active state is turned on. This is done to allow for sparse grids to be generated.
224  ///
225  /// @param func Functor used to evaluate the grid values in the @a bbox
226  /// @param bbox Coordinate bounding-box over which the grid values will be set.
227  /// @param delta Specifies a lower threshold value for rendering (optional). Typically equals the voxel size
228  /// for level sets and otherwise it's zero.
229  template<typename Func>
230  void operator()(const Func& func, const CoordBBox& bbox, ValueT delta = ValueT(0));
231 
232 }; // GridBuilder
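A minimal usage sketch of the public interface above, assuming a float level-set grid, the default AbsDiff oracle and HostBuffer, and &lt;cmath&gt; for std::sqrt (illustrative only, not part of this header):

// Build a small sparse level-set sphere and serialize it into a NanoVDB grid
inline nanovdb::GridHandle<> buildSphereExample()
{
    const float  background = 3.0f, radius = 50.0f;
    const double voxelSize = 1.0;
    nanovdb::GridBuilder<float> builder(background, nanovdb::GridClass::LevelSet);
    auto sdf = [&](const nanovdb::Coord& ijk) {
        const float d = std::sqrt(float(ijk[0]*ijk[0] + ijk[1]*ijk[1] + ijk[2]*ijk[2])) - radius;
        return d <= -background ? -background : d >= background ? background : d;// clamp to +/- background
    };
    // voxels where sdf returns the background value are left inactive, which keeps the grid sparse
    builder(sdf, nanovdb::CoordBBox(nanovdb::Coord(-58), nanovdb::Coord(58)), /*delta=*/float(voxelSize));
    builder.sdfToLevelSet();// bottom-up signed flood-fill; sets GridClass to LevelSet
    return builder.getHandle(voxelSize, nanovdb::Vec3d(0.0), "sphere");
}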
233 
234 //================================================================================================
235 
236 template<typename ValueT, typename BuildT, typename StatsT>
237 GridBuilder<ValueT, BuildT, StatsT>::
238 GridBuilder(ValueT background, GridClass gClass, uint64_t blindDataSize)
239  : mDelta(0)
240  , mVerbose(0)
241  , mBlindDataSize(blindDataSize)
242  , mRoot(background)
243  , mGridClass(gClass)
244  , mStats(StatsMode::Default)
245  , mChecksum(ChecksumMode::Default)
246  , mDitherOn(false)
247 {
248 }
249 
250 template<typename ValueT, typename BuildT, typename StatsT>
251 template<typename Func>
252 void GridBuilder<ValueT, BuildT, StatsT>::
253 operator()(const Func& func, const CoordBBox& voxelBBox, ValueT delta)
254 {
255  static_assert(is_same<ValueT, typename std::result_of<Func(const Coord&)>::type>::value, "GridBuilder: mismatched ValueType");
256  mDelta = delta; // delta = voxel size for level sets, else 0
257 
258  using LeafT = BuildLeaf;
259  const CoordBBox leafBBox(voxelBBox[0] >> LeafT::TOTAL, voxelBBox[1] >> LeafT::TOTAL);
260  std::mutex mutex;
261  auto kernel = [&](const CoordBBox& b) {
262  LeafT* leaf = nullptr;
263  for (auto it = b.begin(); it; ++it) {
264  Coord min(*it << LeafT::TOTAL), max(min + Coord(LeafT::DIM - 1));
265  const CoordBBox bbox(min.maxComponent(voxelBBox.min()),
266  max.minComponent(voxelBBox.max()));// crop
267  if (leaf == nullptr) {
268  leaf = new LeafT(bbox[0], mRoot.mBackground, false);
269  } else {
270  leaf->mOrigin = bbox[0] & ~LeafT::MASK;
271  NANOVDB_ASSERT(leaf->mValueMask.isOff());
272  }
273  leaf->mDstOffset = 0;// no prune
274  for (auto ijk = bbox.begin(); ijk; ++ijk) {
275  const auto v = func(*ijk);
276  if (v == mRoot.mBackground) {// don't insert background values
277  continue;
278  }
279  leaf->setValue(*ijk, v);
280  }
281  if (!leaf->mValueMask.isOff()) {// has active values
282  if (leaf->mValueMask.isOn()) {// only active values
283  const auto first = leaf->getFirstValue();
284  int n=1;
285  while (n<512) {// 8^3 = 512
286  if (leaf->mValues[n++] != first) break;
287  }
288  if (n == 512) leaf->mDstOffset = 1;// prune below
289  }
290  std::lock_guard<std::mutex> guard(mutex);
291  NANOVDB_ASSERT(leaf != nullptr);
292  mRoot.addNode(leaf);
293  NANOVDB_ASSERT(leaf == nullptr);
294  }
295  }// loop over sub-part of leafBBox
296  if (leaf) {
297  delete leaf;
298  }
299  }; // kernel
300  forEach(leafBBox, kernel);
301 
302  // Prune leaf and tile nodes
303  for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
304  if (auto *upper = it2->second.child) {//upper level internal node
305  for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
306  auto *lower = upper->mTable[*it1].child;// lower level internal node
307  for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
308  auto *leaf = lower->mTable[*it0].child;// leaf nodes
309  if (leaf->mDstOffset) {
310  lower->mTable[*it0].value = leaf->getFirstValue();
311  lower->mChildMask.setOff(*it0);
312  lower->mValueMask.setOn(*it0);
313  delete leaf;
314  }
315  }// loop over leaf nodes
316  if (lower->mChildMask.isOff()) {//only tiles
317  const auto first = lower->getFirstValue();
318  int n=1;
319  while (n < 4096) {// 16^3 = 4096
320  if (lower->mTable[n++].value != first) break;
321  }
322  if (n == 4096) {// identical tile values so prune
323  upper->mTable[*it1].value = first;
324  upper->mChildMask.setOff(*it1);
325  upper->mValueMask.setOn(*it1);
326  delete lower;
327  }
328  }
329  }// loop over lower internal nodes
330  if (upper->mChildMask.isOff()) {//only tiles
331  const auto first = upper->getFirstValue();
332  int n=1;
333  while (n < 32768) {// 32^3 = 32768
334  if (upper->mTable[n++].value != first) break;
335  }
336  if (n == 32768) {// identical tile values so prune
337  it2->second.value = first;
338  it2->second.state = upper->mValueMask.isOn();
339  it2->second.child = nullptr;
340  delete upper;
341  }
342  }
343  }// is child node of the root
344  }// loop over root table
345 }
346 
347 //================================================================================================
348 
349 template<typename ValueT, typename BuildT, typename StatsT>
350 template<typename OracleT, typename BufferT>
351 GridHandle<BufferT> GridBuilder<ValueT, BuildT, StatsT>::
352 initHandle(const OracleT &oracle, const BufferT& buffer)
353 {
354  mArray0.clear();
355  mArray1.clear();
356  mArray2.clear();
357  mArray0.reserve(mRoot.template nodeCount<SrcNode0>());
358  mArray1.reserve(mRoot.template nodeCount<SrcNode1>());
359  mArray2.reserve(mRoot.template nodeCount<SrcNode2>());
360 
361  uint64_t offset[3] = {0};
362  for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
363  if (SrcNode2 *upper = it2->second.child) {
364  upper->mDstOffset = offset[2];
365  mArray2.emplace_back(upper);
366  offset[2] += DstNode2::memUsage();
367  for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
368  SrcNode1 *lower = upper->mTable[*it1].child;
369  lower->mDstOffset = offset[1];
370  mArray1.emplace_back(lower);
371  offset[1] += DstNode1::memUsage();
372  for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
373  SrcNode0 *leaf = lower->mTable[*it0].child;
374  leaf->mDstOffset = offset[0];
375  mArray0.emplace_back(leaf);
376  offset[0] += DstNode0::memUsage();
377  }// loop over leaf nodes
378  }// loop over lower internal nodes
379  }// is child node of the root
380  }// loop over root table
381 
382  this->template compression<BuildT, OracleT>(offset[0], oracle);
383 
384  mBufferOffsets[0] = 0;// grid is always stored at the start of the buffer!
385  mBufferOffsets[1] = DstGridT::memUsage(); // tree
386  mBufferOffsets[2] = DstTreeT::memUsage(); // root
387  mBufferOffsets[3] = DstRootT::memUsage(mRoot.mTable.size()); // upper internal nodes
388  mBufferOffsets[4] = offset[2]; // lower internal nodes
389  mBufferOffsets[5] = offset[1]; // leaf nodes
390  mBufferOffsets[6] = offset[0]; // blind meta data
391  mBufferOffsets[7] = GridBlindMetaData::memUsage(mBlindDataSize > 0 ? 1 : 0); // blind data
392  mBufferOffsets[8] = mBlindDataSize;// end of buffer
393 
394  // Compute the prefix sum
395  for (int i = 2; i < 9; ++i) {
396  mBufferOffsets[i] += mBufferOffsets[i - 1];
397  }
398 
399  GridHandle<BufferT> handle(BufferT::create(mBufferOffsets[8], &buffer));
400  mBufferPtr = handle.data();
401  return handle;
402 } // GridBuilder::initHandle
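After the prefix sum above, mBufferOffsets[i] holds the byte offset at which each section of the output buffer begins; the resulting layout can be summarized as follows (a descriptive sketch of the offsets computed above):

// mBufferOffsets[0] -> GridData              (grid header)
// mBufferOffsets[1] -> TreeData              (tree header)
// mBufferOffsets[2] -> RootData + root tiles
// mBufferOffsets[3] -> upper internal nodes
// mBufferOffsets[4] -> lower internal nodes
// mBufferOffsets[5] -> leaf nodes
// mBufferOffsets[6] -> GridBlindMetaData     (only if blind data is present)
// mBufferOffsets[7] -> blind data
// mBufferOffsets[8] -> total buffer size in bytes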
403 
404 //================================================================================================
405 
406 template<typename ValueT, typename BuildT, typename StatsT>
407 template <typename T, typename OracleT>
408 inline typename std::enable_if<is_same<T, FpN>::value>::type
409 GridBuilder<ValueT, BuildT, StatsT>::compression(uint64_t &offset, OracleT oracle)
410 {
411  static_assert(is_same<FpN , BuildT>::value, "compression: expected BuildT == FpN");
412  static_assert(is_same<float, ValueT>::value, "compression: expected ValueT == float");
413  if (is_same<AbsDiff, OracleT>::value && oracle.getTolerance() < 0.0f) {// default tolerance for level set and fog volumes
414  if (mGridClass == GridClass::LevelSet) {
415  static const float halfWidth = 3.0f;
416  oracle.setTolerance(0.1f * mRoot.mBackground / halfWidth);// range of ls: [-3dx; 3dx]
417  } else if (mGridClass == GridClass::FogVolume) {
418  oracle.setTolerance(0.01f);// range of FOG volumes: [0;1]
419  } else {
420  oracle.setTolerance(0.0f);
421  }
422  }
423 
424  const size_t size = mArray0.size();
425  mCodec.reset(new Codec[size]);
426 
427  DitherLUT lut(mDitherOn);
428  auto kernel = [&](const Range1D &r) {
429  for (auto i=r.begin(); i!=r.end(); ++i) {
430  const float *data = mArray0[i]->mValues;
431  float min = std::numeric_limits<float>::max(), max = -min;
432  for (int j=0; j<512; ++j) {
433  float v = data[j];
434  if (v<min) min = v;
435  if (v>max) max = v;
436  }
437  mCodec[i].min = min;
438  mCodec[i].max = max;
439  const float range = max - min;
440  uint16_t logBitWidth = 0;// 0,1,2,3,4 => 1,2,4,8,16 bits
441  while (range > 0.0f && logBitWidth < 4u) {
442  const uint32_t mask = (uint32_t(1) << (uint32_t(1) << logBitWidth)) - 1u;
443  const float encode = mask/range;
444  const float decode = range/mask;
445  int j = 0;
446  do {
447  const float exact = data[j];// exact value
448  const uint32_t code = uint32_t(encode*(exact - min) + lut(j));
449  const float approx = code * decode + min;// approximate value
450  j += oracle(exact, approx) ? 1 : 513;
451  } while(j < 512);
452  if (j == 512) break;
453  ++logBitWidth;
454  }
455  mCodec[i].log2 = logBitWidth;
456  mCodec[i].size = DstNode0::DataType::memUsage(1u << logBitWidth);
457  }
458  };// kernel
459  forEach(0, size, 4, kernel);
460 
461  if (mVerbose) {
462  uint32_t counters[5+1] = {0};
463  ++counters[mCodec[0].log2];
464  for (size_t i=1; i<size; ++i) {
465  ++counters[mCodec[i].log2];
466  mArray0[i]->mDstOffset = mArray0[i-1]->mDstOffset + mCodec[i-1].size;
467  }
468  std::cout << "\n" << oracle << std::endl;
469  std::cout << "Dithering: " << (mDitherOn ? "enabled" : "disabled") << std::endl;
470  float avg = 0.0f;
471  for (uint32_t i=0; i<=5; ++i) {
472  if (uint32_t n = counters[i]) {
473  avg += n * float(1 << i);
474  printf("%2i bits: %6u leaf nodes, i.e. %4.1f%%\n",1<<i, n, 100.0f*n/float(size));
475  }
476  }
477  printf("%4.1f bits per value on average\n", avg/float(size));
478  } else {
479  for (size_t i=1; i<size; ++i) {
480  mArray0[i]->mDstOffset = mArray0[i-1]->mDstOffset + mCodec[i-1].size;
481  }
482  }
483  offset = mArray0[size-1]->mDstOffset + mCodec[size-1].size;
484 }// GridBuilder::compression
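Ignoring dithering, the worst-case quantization error for a leaf whose values span a range r encoded in b bits is roughly 0.5*r/(2^b - 1), which is what the per-value oracle test above effectively bounds; the helper below is a hedged sketch for reasoning about the resulting bit width (illustrative only, not part of this header):

// Smallest supported bit width (1, 2, 4, 8 or 16) whose worst-case quantization error
// stays within an absolute tolerance - mirrors the search performed by the kernel above
inline uint32_t estimateBitWidth(float range, float tolerance)
{
    for (uint32_t logBitWidth = 0; logBitWidth < 4u; ++logBitWidth) {
        const uint32_t mask = (uint32_t(1) << (uint32_t(1) << logBitWidth)) - 1u;
        if (0.5f * range / float(mask) <= tolerance) return 1u << logBitWidth;
    }
    return 16u;// like the loop above, fall back to 16 bits per value
}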
485 
486 //================================================================================================
487 
488 template<typename ValueT, typename BuildT, typename StatsT>
489 void GridBuilder<ValueT, BuildT, StatsT>::
490 sdfToLevelSet()
491 {
492  mArray0.clear();
493  mArray1.clear();
494  mArray2.clear();
495  mArray0.reserve(mRoot.template nodeCount<SrcNode0>());
496  mArray1.reserve(mRoot.template nodeCount<SrcNode1>());
497  mArray2.reserve(mRoot.template nodeCount<SrcNode2>());
498 
499  for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
500  if (SrcNode2 *upper = it2->second.child) {
501  mArray2.emplace_back(upper);
502  for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
503  SrcNode1 *lower = upper->mTable[*it1].child;
504  mArray1.emplace_back(lower);
505  for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
506  mArray0.emplace_back(lower->mTable[*it0].child);
507  }// loop over leaf nodes
508  }// loop over lower internal nodes
509  }// is child node of the root
510  }// loop over root table
511 
512  // Note that the bottom-up flood filling is essential
513  const ValueT outside = mRoot.mBackground;
514  forEach(mArray0, 8, [&](const Range1D& r) {
515  for (auto i = r.begin(); i != r.end(); ++i)
516  mArray0[i]->signedFloodFill(outside);
517  });
518  forEach(mArray1, 1, [&](const Range1D& r) {
519  for (auto i = r.begin(); i != r.end(); ++i)
520  mArray1[i]->signedFloodFill(outside);
521  });
522  forEach(mArray2, 1, [&](const Range1D& r) {
523  for (auto i = r.begin(); i != r.end(); ++i)
524  mArray2[i]->signedFloodFill(outside);
525  });
526  mRoot.signedFloodFill(outside);
527  mGridClass = GridClass::LevelSet;
528 } // GridBuilder::sdfToLevelSet
529 
530 //================================================================================================
531 
532 template<typename ValueT, typename BuildT, typename StatsT>
533 template<typename OracleT, typename BufferT>
534 GridHandle<BufferT> GridBuilder<ValueT, BuildT, StatsT>::
535  getHandle(double dx, //voxel size
536  const Vec3d& p0, // origin
537  const std::string& name,
538  const OracleT& oracle,
539  const BufferT& buffer)
540 {
541  if (dx <= 0) {
542  throw std::runtime_error("GridBuilder: voxel size is zero or negative");
543  }
544  Map map; // affine map
545  const double Tx = p0[0], Ty = p0[1], Tz = p0[2];
546  const double mat[4][4] = {
547  {dx, 0.0, 0.0, 0.0}, // row 0
548  {0.0, dx, 0.0, 0.0}, // row 1
549  {0.0, 0.0, dx, 0.0}, // row 2
550  {Tx, Ty, Tz, 1.0}, // row 3
551  };
552  const double invMat[4][4] = {
553  {1 / dx, 0.0, 0.0, 0.0}, // row 0
554  {0.0, 1 / dx, 0.0, 0.0}, // row 1
555  {0.0, 0.0, 1 / dx, 0.0}, // row 2
556  {-Tx, -Ty, -Tz, 1.0}, // row 3
557  };
558  map.set(mat, invMat, 1.0);
559  return this->getHandle(map, name, oracle, buffer);
560 } // GridBuilder::getHandle
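The row-major matrices above encode a uniform scale by the voxel size dx followed by a translation to the grid origin p0, i.e. world = dx * ijk + p0 for an index-space coordinate ijk; a small illustration of that convention (not part of this header):

// Forward index-to-world transform implied by the map constructed above
inline nanovdb::Vec3d indexToWorldExample(const nanovdb::Coord& ijk, double dx, const nanovdb::Vec3d& p0)
{
    return nanovdb::Vec3d(dx * ijk[0] + p0[0],
                          dx * ijk[1] + p0[1],
                          dx * ijk[2] + p0[2]);
}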
561 
562 //================================================================================================
563 
564 template<typename ValueT, typename BuildT, typename StatsT>
565 template< typename OracleT, typename BufferT>
566 GridHandle<BufferT> GridBuilder<ValueT, BuildT, StatsT>::
567  getHandle(const Map& map,
568  const std::string& name,
569  const OracleT& oracle,
570  const BufferT& buffer)
571 {
572  if (mGridClass == GridClass::LevelSet && !is_floating_point<ValueT>::value) {
573  throw std::runtime_error("Level sets are expected to be floating point types");
574  } else if (mGridClass == GridClass::FogVolume && !is_floating_point<ValueT>::value) {
575  throw std::runtime_error("Fog volumes are expected to be floating point types");
576  }
577 
578  auto handle = this->template initHandle<OracleT, BufferT>(oracle, buffer);// initialize the arrays of nodes
579 
580  this->processLeafs(mArray0);
581 
582  this->processNodes(mArray1);
583 
584  this->processNodes(mArray2);
585 
586  auto *grid = this->processGrid(map, name);
587 
588  gridStats(*grid, mStats);
589 
590  updateChecksum(*grid, mChecksum);
591 
592  return handle;
593 } // GridBuilder::getHandle
594 
595 //================================================================================================
596 
597 template<typename ValueT, typename BuildT, typename StatsT>
598 template<typename T, typename FlagT>
599 inline typename std::enable_if<is_floating_point<T>::value>::type
600 GridBuilder<ValueT, BuildT, StatsT>::
601  setFlag(const T& min, const T& max, FlagT& flag) const
602 {
603  if (mDelta > 0 && (min > mDelta || max < -mDelta)) {
604  flag |= FlagT(1); // set first bit
605  } else {
606  flag &= ~FlagT(1); // unset first bit
607  }
608 }
609 
610 //================================================================================================
611 
612 template<typename ValueT, typename BuildT, typename StatsT>
613 void GridBuilder<ValueT, BuildT, StatsT>::
614 sdfToFog()
615 {
616  this->sdfToLevelSet(); // performs signed flood fill
617 
618  const ValueT d = -mRoot.mBackground, w = 1.0f / d;
619  auto op = [&](ValueT& v) -> bool {
620  if (v > ValueT(0)) {
621  v = ValueT(0);
622  return false;
623  }
624  v = v > d ? v * w : ValueT(1);
625  return true;
626  };
627  auto kernel0 = [&](const Range1D& r) {
628  for (auto i = r.begin(); i != r.end(); ++i) {
629  SrcNode0* node = mArray0[i];
630  for (uint32_t i = 0; i < SrcNode0::SIZE; ++i)
631  node->mValueMask.set(i, op(node->mValues[i]));
632  }
633  };
634  auto kernel1 = [&](const Range1D& r) {
635  for (auto i = r.begin(); i != r.end(); ++i) {
636  SrcNode1* node = mArray1[i];
637  for (uint32_t i = 0; i < SrcNode1::SIZE; ++i) {
638  if (node->mChildMask.isOn(i)) {
639  SrcNode0* leaf = node->mTable[i].child;
640  if (leaf->mValueMask.isOff()) {
641  node->mTable[i].value = leaf->getFirstValue();
642  node->mChildMask.setOff(i);
643  delete leaf;
644  }
645  } else {
646  node->mValueMask.set(i, op(node->mTable[i].value));
647  }
648  }
649  }
650  };
651  auto kernel2 = [&](const Range1D& r) {
652  for (auto i = r.begin(); i != r.end(); ++i) {
653  SrcNode2* node = mArray2[i];
654  for (uint32_t i = 0; i < SrcNode2::SIZE; ++i) {
655  if (node->mChildMask.isOn(i)) {
656  SrcNode1* child = node->mTable[i].child;
657  if (child->mChildMask.isOff() && child->mValueMask.isOff()) {
658  node->mTable[i].value = child->getFirstValue();
659  node->mChildMask.setOff(i);
660  delete child;
661  }
662  } else {
663  node->mValueMask.set(i, op(node->mTable[i].value));
664  }
665  }
666  }
667  };
668  forEach(mArray0, 8, kernel0);
669  forEach(mArray1, 1, kernel1);
670  forEach(mArray2, 1, kernel2);
671 
672  for (auto it = mRoot.mTable.begin(); it != mRoot.mTable.end(); ++it) {
673  SrcNode2* child = it->second.child;
674  if (child == nullptr) {
675  it->second.state = op(it->second.value);
676  } else if (child->mChildMask.isOff() && child->mValueMask.isOff()) {
677  it->second.value = child->getFirstValue();
678  it->second.state = false;
679  it->second.child = nullptr;
680  delete child;
681  }
682  }
683  mGridClass = GridClass::FogVolume;
684 } // GridBuilder::sdfToFog
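In other words, for a background (half-width) value b the remapping applied by sdfToFog is: exterior values (v > 0) become 0 and are deactivated, interior values in (-b, 0] become v / -b, and values at or below -b saturate at 1; for example, with b = 3 a signed distance of -1.5 maps to a fog density of 0.5.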
685 
686 //================================================================================================
687 
688 template<typename ValueT, typename BuildT, typename StatsT>
689 template<typename T>
690 inline typename std::enable_if<!is_same<Fp4,  typename T::BuildType>::value &&
691  !is_same<Fp8,  typename T::BuildType>::value &&
692  !is_same<Fp16, typename T::BuildType>::value &&
693  !is_same<FpN,  typename T::BuildType>::value>::type
694 GridBuilder<ValueT, BuildT, StatsT>::
695  processLeafs(std::vector<T*>& srcLeafs)
696 {
697  static_assert(!is_same<bool, ValueT>::value, "Does not yet support bool leafs");
698  static_assert(!is_same<ValueMask, ValueT>::value, "Does not yet support mask leafs");
699  auto kernel = [&](const Range1D& r) {
700  auto *ptr = mBufferPtr + mBufferOffsets[5];
701  for (auto i = r.begin(); i != r.end(); ++i) {
702  auto *srcLeaf = srcLeafs[i];
703  auto *dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
704  auto *data = dstLeaf->data();
705  srcLeaf->mDstNode = dstLeaf;
706  data->mBBoxMin = srcLeaf->mOrigin; // copy origin of node
707  data->mBBoxDif[0] = 0u;
708  data->mBBoxDif[1] = 0u;
709  data->mBBoxDif[2] = 0u;
710  data->mFlags = 0u;
711  data->mValueMask = srcLeaf->mValueMask; // copy value mask
712  const ValueT* src = srcLeaf->mValues;
713  for (ValueT *dst = data->mValues, *end = dst + SrcNode0::SIZE; dst != end; dst += 4, src += 4) {
714  dst[0] = src[0]; // copy *all* voxel values in sets of four, i.e. loop-unrolling
715  dst[1] = src[1];
716  dst[2] = src[2];
717  dst[3] = src[3];
718  }
719  }
720  };
721  forEach(srcLeafs, 8, kernel);
722 } // GridBuilder::processLeafs<T>
723 
724 //================================================================================================
725 
726 template<typename ValueT, typename BuildT, typename StatsT>
727 template<typename T>
728 inline typename std::enable_if<is_same<Fp4,  typename T::BuildType>::value ||
729  is_same<Fp8,  typename T::BuildType>::value ||
730  is_same<Fp16, typename T::BuildType>::value>::type
731 GridBuilder<ValueT, BuildT, StatsT>::
732  processLeafs(std::vector<T*>& srcLeafs)
733 {
734  static_assert(is_same<float, ValueT>::value, "Expected ValueT == float");
735  using ArrayT = typename DstNode0::DataType::ArrayType;
736  using FloatT = typename std::conditional<DstNode0::DataType::bitWidth()>=16, double, float>::type;// 16-bit compression and higher requires double
737  static constexpr FloatT UNITS = FloatT((1 << DstNode0::DataType::bitWidth()) - 1);// # of unique non-zero values
738  DitherLUT lut(mDitherOn);
739 
740  auto kernel = [&](const Range1D& r) {
741  uint8_t* ptr = mBufferPtr + mBufferOffsets[5];
742  for (auto i = r.begin(); i != r.end(); ++i) {
743  auto *srcLeaf = srcLeafs[i];
744  auto *dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
745  srcLeaf->mDstNode = dstLeaf;
746  auto *data = dstLeaf->data();
747  data->mBBoxMin = srcLeaf->mOrigin; // copy origin of node
748  data->mBBoxDif[0] = 0u;
749  data->mBBoxDif[1] = 0u;
750  data->mBBoxDif[2] = 0u;
751  data->mFlags = 0u;
752  data->mValueMask = srcLeaf->mValueMask; // copy value mask
753  const float* src = srcLeaf->mValues;
754  // compute extrema values
755  float min = std::numeric_limits<float>::max(), max = -min;
756  for (int i=0; i<512; ++i) {
757  const float v = src[i];
758  if (v < min) min = v;
759  if (v > max) max = v;
760  }
761  data->init(min, max, DstNode0::DataType::bitWidth());
762  // perform quantization relative to the values in the current leaf node
763  const FloatT encode = UNITS/(max-min);
764  auto *code = reinterpret_cast<ArrayT*>(data->mCode);
765  int offset = 0;
766  if (is_same<Fp4, BuildT>::value) {// resolved at compile-time
767  for (int j=0; j<128; ++j) {
768  auto tmp = ArrayT(encode * (*src++ - min) + lut(offset++));
769  *code++ = ArrayT(encode * (*src++ - min) + lut(offset++)) << 4 | tmp;
770  tmp = ArrayT(encode * (*src++ - min) + lut(offset++));
771  *code++ = ArrayT(encode * (*src++ - min) + lut(offset++)) << 4 | tmp;
772  }
773  } else {
774  for (int j=0; j<128; ++j) {
775  *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
776  *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
777  *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
778  *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
779  }
780  }
781  }
782  };
783  forEach(srcLeafs, 8, kernel);
784 } // GridBuilder::processLeafs<Fp4, Fp8, Fp16>
785 
786 //================================================================================================
787 
788 template<typename ValueT, typename BuildT, typename StatsT>
789 template<typename T>
790 inline typename std::enable_if<is_same<FpN, typename T::BuildType>::value>::type
791 GridBuilder<ValueT, BuildT, StatsT>::
792  processLeafs(std::vector<T*>& srcLeafs)
793 {
794  static_assert(is_same<float, ValueT>::value, "Expected ValueT == float");
795 
796  DitherLUT lut(mDitherOn);
797  auto kernel = [&](const Range1D& r) {
798  uint8_t* ptr = mBufferPtr + mBufferOffsets[5];
799  for (auto i = r.begin(); i != r.end(); ++i) {
800  auto *srcLeaf = srcLeafs[i];
801  auto *dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
802  auto *data = dstLeaf->data();
803  data->mBBoxMin = srcLeaf->mOrigin; // copy origin of node
804  data->mBBoxDif[0] = 0u;
805  data->mBBoxDif[1] = 0u;
806  data->mBBoxDif[2] = 0u;
807  srcLeaf->mDstNode = dstLeaf;
808  const uint8_t logBitWidth = uint8_t(mCodec[i].log2);
809  data->mFlags = logBitWidth << 5;// pack logBitWidth into 3 MSB of mFlags
810  data->mValueMask = srcLeaf->mValueMask; // copy value mask
811  const float* src = srcLeaf->mValues;
812  const float min = mCodec[i].min, max = mCodec[i].max;
813  data->init(min, max, uint8_t(1) << logBitWidth);
814  // perform quantization relative to the values in the current leaf node
815  int offset = 0;
816  switch (logBitWidth) {
817  case 0u: {// 1 bit
818  auto *dst = reinterpret_cast<uint8_t*>(data+1);
819  const float encode = 1.0f/(max - min);
820  for (int j=0; j<64; ++j) {
821  uint8_t a = 0;
822  for (int k=0; k<8; ++k) {
823  a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << k;
824  }
825  *dst++ = a;
826  }
827  }
828  break;
829  case 1u: {// 2 bits
830  auto *dst = reinterpret_cast<uint8_t*>(data+1);
831  const float encode = 3.0f/(max - min);
832  for (int j=0; j<128; ++j) {
833  auto a = uint8_t(encode * (*src++ - min) + lut(offset++));
834  a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << 2;
835  a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << 4;
836  *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 6 | a;
837  }
838  }
839  break;
840  case 2u: {// 4 bits
841  auto *dst = reinterpret_cast<uint8_t*>(data+1);
842  const float encode = 15.0f/(max - min);
843  for (int j=0; j<128; ++j) {
844  auto a = uint8_t(encode * (*src++ - min) + lut(offset++));
845  *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 4 | a;
846  a = uint8_t(encode * (*src++ - min) + lut(offset++));
847  *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 4 | a;
848  }
849  }
850  break;
851  case 3u: {// 8 bits
852  auto *dst = reinterpret_cast<uint8_t*>(data+1);
853  const float encode = 255.0f/(max - min);
854  for (int j=0; j<128; ++j) {
855  *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
856  *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
857  *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
858  *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
859  }
860  }
861  break;
862  default: {// 16 bits
863  auto *dst = reinterpret_cast<uint16_t*>(data+1);
864  const double encode = 65535.0/(max - min);// note that double is required!
865  for (int j=0; j<128; ++j) {
866  *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
867  *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
868  *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
869  *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
870  }
871  }
872  }// end switch
873  }
874  };// kernel
875  forEach(srcLeafs, 8, kernel);
876 } // GridBuilder::processLeafs<FpN>
877 
878 //================================================================================================
879 
880 template<typename ValueT, typename BuildT, typename StatsT>
881 template<typename SrcNodeT>
882 void GridBuilder<ValueT, BuildT, StatsT>::
883  processNodes(std::vector<SrcNodeT*>& srcNodes)
884 {
885  using DstNodeT = typename SrcNodeT::NanoNodeT;
886  static_assert(DstNodeT::LEVEL == 1 || DstNodeT::LEVEL == 2, "Expected internal node");
887  auto kernel = [&](const Range1D& r) {
888  uint8_t* ptr = mBufferPtr + mBufferOffsets[5 - DstNodeT::LEVEL];// 3 or 4
889  for (auto i = r.begin(); i != r.end(); ++i) {
890  SrcNodeT *srcNode = srcNodes[i];
891  DstNodeT *dstNode = PtrAdd<DstNodeT>(ptr, srcNode->mDstOffset);
892  auto *data = dstNode->data();
893  srcNode->mDstNode = dstNode;
894  data->mBBox[0] = srcNode->mOrigin; // copy origin of node
895  data->mValueMask = srcNode->mValueMask; // copy value mask
896  data->mChildMask = srcNode->mChildMask; // copy child mask
897  for (uint32_t j = 0; j != SrcNodeT::SIZE; ++j) {
898  if (data->mChildMask.isOn(j)) {
899  data->setChild(j, srcNode->mTable[j].child->mDstNode);
900  } else
901  data->setValue(j, srcNode->mTable[j].value);
902  }
903  }
904  };
905  forEach(srcNodes, 4, kernel);
906 } // GridBuilder::processNodes
907 
908 //================================================================================================
909 
910 template<typename ValueT, typename BuildT, typename StatsT>
911 NanoRoot<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::processRoot()
912 {
913  auto *dstRoot = reinterpret_cast<DstRootT*>(mBufferPtr + mBufferOffsets[2]);
914  auto *data = dstRoot->data();
915  data->mTableSize = uint32_t(mRoot.mTable.size());
916  data->mMinimum = data->mMaximum = data->mBackground = mRoot.mBackground;
917  data->mBBox = CoordBBox(); // set to an empty bounding box
918 
919  uint32_t tileID = 0;
920  for (auto iter = mRoot.mTable.begin(); iter != mRoot.mTable.end(); ++iter) {
921  auto *dstTile = data->tile(tileID++);
922  if (auto* srcChild = iter->second.child) {
923  dstTile->setChild(srcChild->mOrigin, srcChild->mDstNode, data);
924  } else {
925  dstTile->setValue(iter->first, iter->second.state, iter->second.value);
926  }
927  }
928  return dstRoot;
929 } // GridBuilder::processRoot
930 
931 //================================================================================================
932 
933 template<typename ValueT, typename BuildT, typename StatsT>
934 NanoTree<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::processTree()
935 {
936  auto *dstTree = reinterpret_cast<DstTreeT*>(mBufferPtr + mBufferOffsets[1]);
937  auto *data = dstTree->data();
938  data->setRoot( this->processRoot() );
939 
940  DstNode2 *node2 = mArray2.empty() ? nullptr : reinterpret_cast<DstNode2*>(mBufferPtr + mBufferOffsets[3]);
941  data->setFirstNode(node2);
942 
943  DstNode1 *node1 = mArray1.empty() ? nullptr : reinterpret_cast<DstNode1*>(mBufferPtr + mBufferOffsets[4]);
944  data->setFirstNode(node1);
945 
946  DstNode0 *node0 = mArray0.empty() ? nullptr : reinterpret_cast<DstNode0*>(mBufferPtr + mBufferOffsets[5]);
947  data->setFirstNode(node0);
948 
949  data->mNodeCount[0] = mArray0.size();
950  data->mNodeCount[1] = mArray1.size();
951  data->mNodeCount[2] = mArray2.size();
952 
953  // Count number of active leaf level tiles
954  data->mTileCount[0] = reduce(mArray1, uint32_t(0), [&](Range1D &r, uint32_t sum){
955  for (auto i=r.begin(); i!=r.end(); ++i) sum += mArray1[i]->mValueMask.countOn();
956  return sum;}, std::plus<uint32_t>());
957 
958  // Count number of active lower internal node tiles
959  data->mTileCount[1] = reduce(mArray2, uint32_t(0), [&](Range1D &r, uint32_t sum){
960  for (auto i=r.begin(); i!=r.end(); ++i) sum += mArray2[i]->mValueMask.countOn();
961  return sum;}, std::plus<uint32_t>());
962 
963  // Count number of active upper internal node tiles
964  uint32_t sum = 0;
965  for (auto &tile : mRoot.mTable) {
966  if (tile.second.child==nullptr && tile.second.state) ++sum;
967  }
968  data->mTileCount[2] = sum;
969 
970  // Count number of active voxels
971  data->mVoxelCount = reduce(mArray0, uint64_t(0), [&](Range1D &r, uint64_t sum){
972  for (auto i=r.begin(); i!=r.end(); ++i) sum += mArray0[i]->mValueMask.countOn();
973  return sum;}, std::plus<uint64_t>());
974 
975  data->mVoxelCount += data->mTileCount[0]*DstNode0::NUM_VALUES;
976  data->mVoxelCount += data->mTileCount[1]*DstNode1::NUM_VALUES;
977  data->mVoxelCount += data->mTileCount[2]*DstNode2::NUM_VALUES;
978 
979  return dstTree;
980 } // GridBuilder::processTree
981 
982 //================================================================================================
983 
984 template<typename ValueT, typename BuildT, typename StatsT>
985 NanoGrid<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::
986 processGrid(const Map& map,
987  const std::string& name)
988 {
989  auto *dstGrid = reinterpret_cast<DstGridT*>(mBufferPtr + mBufferOffsets[0]);
990  this->processTree();
991  auto* data = dstGrid->data();
992  data->mMagic = NANOVDB_MAGIC_NUMBER;
993  data->mChecksum = 0u;
994  data->mVersion = Version();
995  data->mFlags = static_cast<uint32_t>(GridFlags::IsBreadthFirst);
996  data->mGridIndex = 0;
997  data->mGridCount = 1;
998  data->mGridSize = mBufferOffsets[8];
999  data->mWorldBBox = BBox<Vec3R>();
1000  data->mBlindMetadataOffset = 0;
1001  data->mBlindMetadataCount = 0;
1002  data->mGridClass = mGridClass;
1003  data->mGridType = mapToGridType<BuildT>();
1004 
1005  if (!isValid(data->mGridType, data->mGridClass)) {
1006  std::stringstream ss;
1007  ss << "Invalid combination of GridType("<<int(data->mGridType)
1008  << ") and GridClass("<<int(data->mGridClass)<<"). See NanoVDB.h for details!";
1009  throw std::runtime_error(ss.str());
1010  }
1011 
1012  strncpy(data->mGridName, name.c_str(), GridData::MaxNameSize-1);
1013  if (name.length() >= GridData::MaxNameSize) {// currently we don't support long grid names
1014  std::stringstream ss;
1015  ss << "Grid name \"" << name << "\" is more then " << GridData::MaxNameSize << " characters";
1016  throw std::runtime_error(ss.str());
1017  }
1018 
1019  data->mVoxelSize = map.applyMap(Vec3d(1)) - map.applyMap(Vec3d(0));
1020  data->mMap = map;
1021 
1022  if (mBlindDataSize>0) {
1023  auto *metaData = reinterpret_cast<GridBlindMetaData*>(mBufferPtr + mBufferOffsets[6]);
1024  data->mBlindMetadataOffset = PtrDiff(metaData, dstGrid);
1025  data->mBlindMetadataCount = 1u;// we currently support only 1 set of blind data
1026  auto *blindData = reinterpret_cast<char*>(mBufferPtr + mBufferOffsets[7]);
1027  metaData->setBlindData(blindData);
1028  }
1029 
1030  return dstGrid;
1031 } // GridBuilder::processGrid
1032 
1033 //================================================================================================
1034 
1035 template<typename ValueT, typename BuildT, typename StatsT>
1036 template<typename ChildT>
1037 struct GridBuilder<ValueT, BuildT, StatsT>::BuildRoot
1038 {
1039  using ValueType = typename ChildT::ValueType;
1040  using ChildType = ChildT;
1041  static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL; // level 0 = leaf
1042  struct Tile
1043  {
1044  Tile(ChildT* c = nullptr)
1045  : child(c)
1046  {
1047  }
1048  Tile(const ValueT& v, bool s)
1049  : child(nullptr)
1050  , value(v)
1051  , state(s)
1052  {
1053  }
1054  ChildT* child;
1055  ValueT value;
1056  bool state;
1057  };
1058  using MapT = std::map<Coord, Tile>;
1059  MapT mTable;
1060  ValueT mBackground;
1061 
1062  BuildRoot(const ValueT& background)
1063  : mBackground(background)
1064  {
1065  }
1066  BuildRoot(const BuildRoot&) = delete; // disallow copy-construction
1067  BuildRoot(BuildRoot&&) = default; // allow move construction
1068  BuildRoot& operator=(const BuildRoot&) = delete; // disallow copy assignment
1069  BuildRoot& operator=(BuildRoot&&) = default; // allow move assignment
1070 
1071  ~BuildRoot() { this->clear(); }
1072 
1073  bool empty() const { return mTable.empty(); }
1074 
1075  void clear()
1076  {
1077  for (auto iter = mTable.begin(); iter != mTable.end(); ++iter)
1078  delete iter->second.child;
1079  mTable.clear();
1080  }
1081 
1082  static Coord CoordToKey(const Coord& ijk) { return ijk & ~ChildT::MASK; }
1083 
1084  template<typename AccT>
1085  bool isActiveAndCache(const Coord& ijk, AccT& acc) const
1086  {
1087  auto iter = mTable.find(CoordToKey(ijk));
1088  if (iter == mTable.end())
1089  return false;
1090  if (iter->second.child) {
1091  acc.insert(ijk, iter->second.child);
1092  return iter->second.child->isActiveAndCache(ijk, acc);
1093  }
1094  return iter->second.state;
1095  }
1096 
1097  const ValueT& getValue(const Coord& ijk) const
1098  {
1099  auto iter = mTable.find(CoordToKey(ijk));
1100  if (iter == mTable.end()) {
1101  return mBackground;
1102  } else if (iter->second.child) {
1103  return iter->second.child->getValue(ijk);
1104  } else {
1105  return iter->second.value;
1106  }
1107  }
1108 
1109  template<typename AccT>
1110  const ValueT& getValueAndCache(const Coord& ijk, AccT& acc) const
1111  {
1112  auto iter = mTable.find(CoordToKey(ijk));
1113  if (iter == mTable.end())
1114  return mBackground;
1115  if (iter->second.child) {
1116  acc.insert(ijk, iter->second.child);
1117  return iter->second.child->getValueAndCache(ijk, acc);
1118  }
1119  return iter->second.value;
1120  }
1121 
1122  template<typename AccT>
1123  void setValueAndCache(const Coord& ijk, const ValueT& value, AccT& acc)
1124  {
1125  ChildT* child = nullptr;
1126  const Coord key = CoordToKey(ijk);
1127  auto iter = mTable.find(key);
1128  if (iter == mTable.end()) {
1129  child = new ChildT(ijk, mBackground, false);
1130  mTable[key] = Tile(child);
1131  } else if (iter->second.child != nullptr) {
1132  child = iter->second.child;
1133  } else {
1134  child = new ChildT(ijk, iter->second.value, iter->second.state);
1135  iter->second.child = child;
1136  }
1137  if (child) {
1138  acc.insert(ijk, child);
1139  child->setValueAndCache(ijk, value, acc);
1140  }
1141  }
1142 
1143  template<typename NodeT>
1144  uint32_t nodeCount() const
1145  {
1146  static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Root::getNodes: Invalid type");
1147  static_assert(NodeT::LEVEL < LEVEL, "Root::getNodes: LEVEL error");
1148  uint32_t sum = 0;
1149  for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
1150  if (iter->second.child == nullptr)
1151  continue; // skip tiles
1152  if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1153  ++sum;
1154  } else {
1155  sum += iter->second.child->template nodeCount<NodeT>();
1156  }
1157  }
1158  return sum;
1159  }
1160 
1161  template<typename NodeT>
1162  void getNodes(std::vector<NodeT*>& array)
1163  {
1164  static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Root::getNodes: Invalid type");
1165  static_assert(NodeT::LEVEL < LEVEL, "Root::getNodes: LEVEL error");
1166  for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
1167  if (iter->second.child == nullptr)
1168  continue;
1169  if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1170  array.push_back(reinterpret_cast<NodeT*>(iter->second.child));
1171  } else {
1172  iter->second.child->getNodes(array);
1173  }
1174  }
1175  }
1176 
1177  void addChild(ChildT*& child)
1178  {
1179  NANOVDB_ASSERT(child);
1180  const Coord key = CoordToKey(child->mOrigin);
1181  auto iter = mTable.find(key);
1182  if (iter != mTable.end() && iter->second.child != nullptr) { // existing child node
1183  delete iter->second.child;
1184  iter->second.child = child;
1185  } else {
1186  mTable[key] = Tile(child);
1187  }
1188  child = nullptr;
1189  }
1190 
1191  template<typename NodeT>
1192  void addNode(NodeT*& node)
1193  {
1194  if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1195  this->addChild(reinterpret_cast<ChildT*&>(node));
1196  } else {
1197  ChildT* child = nullptr;
1198  const Coord key = CoordToKey(node->mOrigin);
1199  auto iter = mTable.find(key);
1200  if (iter == mTable.end()) {
1201  child = new ChildT(node->mOrigin, mBackground, false);
1202  mTable[key] = Tile(child);
1203  } else if (iter->second.child != nullptr) {
1204  child = iter->second.child;
1205  } else {
1206  child = new ChildT(node->mOrigin, iter->second.value, iter->second.state);
1207  iter->second.child = child;
1208  }
1209  child->addNode(node);
1210  }
1211  }
1212 
1213  template<typename T>
1214  typename std::enable_if<is_floating_point<T>::value>::type
1215  signedFloodFill(T outside);
1216 
1217  template<typename T>
1218  typename std::enable_if<!is_floating_point<T>::value>::type
1219  signedFloodFill(T) {} // no-op for non-floating-point values
1220 }; // GridBuilder::BuildRoot
1221 
1222 //================================================================================================
1223 
1224 template<typename ValueT, typename BuildT, typename StatsT>
1225 template<typename ChildT>
1226 template<typename T>
1227 inline typename std::enable_if<is_floating_point<T>::value>::type
1228 GridBuilder<ValueT, BuildT, StatsT>::BuildRoot<ChildT>::
1229  signedFloodFill(T outside)
1230 {
1231  std::map<Coord, ChildT*> nodeKeys;
1232  for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
1233  if (iter->second.child == nullptr)
1234  continue;
1235  nodeKeys.insert(std::pair<Coord, ChildT*>(iter->first, iter->second.child));
1236  }
1237 
1238  // We employ a simple z-scanline algorithm that inserts inactive tiles with
1239  // the inside value if they are sandwiched between inside child nodes only!
1240  auto b = nodeKeys.begin(), e = nodeKeys.end();
1241  if (b == e)
1242  return;
1243  for (auto a = b++; b != e; ++a, ++b) {
1244  Coord d = b->first - a->first; // delta of neighboring coordinates
1245  if (d[0] != 0 || d[1] != 0 || d[2] == int(ChildT::DIM))
1246  continue; // not same z-scanline or neighbors
1247  const ValueT fill[] = {a->second->getLastValue(), b->second->getFirstValue()};
1248  if (!(fill[0] < 0) || !(fill[1] < 0))
1249  continue; // scanline isn't inside
1250  Coord c = a->first + Coord(0u, 0u, ChildT::DIM);
1251  for (; c[2] != b->first[2]; c[2] += ChildT::DIM) {
1252  const Coord key = SrcRootT::CoordToKey(c);
1253  mTable[key] = typename SrcRootT::Tile(-outside, false); // inactive tile
1254  }
1255  }
1256 } // Root::signedFloodFill
1257 
1258 //================================================================================================
1259 
1260 template<typename ValueT, typename BuildT, typename StatsT>
1261 template<typename ChildT>
1262 struct GridBuilder<ValueT, BuildT, StatsT>::
1263  BuildNode
1264 {
1265  using ValueType = ValueT;
1266  using BuildType = BuildT;
1267  using ChildType = ChildT;
1268  static constexpr uint32_t LOG2DIM = ChildT::LOG2DIM + 1;
1269  static constexpr uint32_t TOTAL = LOG2DIM + ChildT::TOTAL; //dimension in index space
1270  static constexpr uint32_t DIM = 1u << TOTAL;
1271  static constexpr uint32_t SIZE = 1u << (3 * LOG2DIM); //number of tile values (or child pointers)
1272  static constexpr int32_t MASK = DIM - 1;
1273  static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL; // level 0 = leaf
1274  static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node
1275 
1276  using NanoNodeT = typename NanoNode<BuildT, LEVEL>::Type;
1277 
1278  struct Tile
1279  {
1280  Tile(ChildT* c = nullptr)
1281  : child(c)
1282  {
1283  }
1284  union
1285  {
1286  ChildT* child;
1287  ValueT value;
1288  };
1289  };
1290  Coord mOrigin;
1291  Mask<LOG2DIM> mValueMask;
1292  Mask<LOG2DIM> mChildMask;
1293  Tile mTable[SIZE];
1294 
1295  union {
1296  NanoNodeT *mDstNode;
1297  uint64_t mDstOffset;
1298  };
1299 
1300  BuildNode(const Coord& origin, const ValueT& value, bool state)
1301  : mOrigin(origin & ~MASK)
1302  , mValueMask(state)
1303  , mChildMask()
1304  , mDstOffset(0)
1305  {
1306  for (uint32_t i = 0; i < SIZE; ++i) {
1307  mTable[i].value = value;
1308  }
1309  }
1310  BuildNode(const BuildNode&) = delete; // disallow copy-construction
1311  BuildNode(BuildNode&&) = delete; // disallow move construction
1312  BuildNode& operator=(const BuildNode&) = delete; // disallow copy assignment
1313  BuildNode& operator=(BuildNode&&) = delete; // disallow move assignment
1314  ~BuildNode()
1315  {
1316  for (auto iter = mChildMask.beginOn(); iter; ++iter) {
1317  delete mTable[*iter].child;
1318  }
1319  }
1320 
1321  static uint32_t CoordToOffset(const Coord& ijk)
1322  {
1323  return (((ijk[0] & MASK) >> ChildT::TOTAL) << (2 * LOG2DIM)) +
1324  (((ijk[1] & MASK) >> ChildT::TOTAL) << (LOG2DIM)) +
1325  ((ijk[2] & MASK) >> ChildT::TOTAL);
1326  }
1327 
1328  static Coord OffsetToLocalCoord(uint32_t n)
1329  {
1330  NANOVDB_ASSERT(n < SIZE);
1331  const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1);
1332  return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & ((1 << LOG2DIM) - 1));
1333  }
1334 
1335  void localToGlobalCoord(Coord& ijk) const
1336  {
1337  ijk <<= ChildT::TOTAL;
1338  ijk += mOrigin;
1339  }
1340 
1341  Coord offsetToGlobalCoord(uint32_t n) const
1342  {
1343  Coord ijk = BuildNode::OffsetToLocalCoord(n);
1344  this->localToGlobalCoord(ijk);
1345  return ijk;
1346  }
1347 
1348  template<typename AccT>
1349  bool isActiveAndCache(const Coord& ijk, AccT& acc) const
1350  {
1351  const uint32_t n = CoordToOffset(ijk);
1352  if (mChildMask.isOn(n)) {
1353  acc.insert(ijk, const_cast<ChildT*>(mTable[n].child));
1354  return mTable[n].child->isActiveAndCache(ijk, acc);
1355  }
1356  return mValueMask.isOn(n);
1357  }
1358 
1359  ValueT getFirstValue() const { return mChildMask.isOn(0) ? mTable[0].child->getFirstValue() : mTable[0].value; }
1360  ValueT getLastValue() const { return mChildMask.isOn(SIZE - 1) ? mTable[SIZE - 1].child->getLastValue() : mTable[SIZE - 1].value; }
1361 
1362  const ValueT& getValue(const Coord& ijk) const
1363  {
1364  const uint32_t n = CoordToOffset(ijk);
1365  if (mChildMask.isOn(n)) {
1366  return mTable[n].child->getValue(ijk);
1367  }
1368  return mTable[n].value;
1369  }
1370 
1371  template<typename AccT>
1372  const ValueT& getValueAndCache(const Coord& ijk, AccT& acc) const
1373  {
1374  const uint32_t n = CoordToOffset(ijk);
1375  if (mChildMask.isOn(n)) {
1376  acc.insert(ijk, const_cast<ChildT*>(mTable[n].child));
1377  return mTable[n].child->getValueAndCache(ijk, acc);
1378  }
1379  return mTable[n].value;
1380  }
1381 
1382  void setValue(const Coord& ijk, const ValueT& value)
1383  {
1384  const uint32_t n = CoordToOffset(ijk);
1385  ChildT* child = nullptr;
1386  if (mChildMask.isOn(n)) {
1387  child = mTable[n].child;
1388  } else {
1389  child = new ChildT(ijk, mTable[n].value, mValueMask.isOn(n));
1390  mTable[n].child = child;
1391  mChildMask.setOn(n);
1392  }
1393  child->setValue(ijk, value);
1394  }
1395 
1396  template<typename AccT>
1397  void setValueAndCache(const Coord& ijk, const ValueT& value, AccT& acc)
1398  {
1399  const uint32_t n = CoordToOffset(ijk);
1400  ChildT* child = nullptr;
1401  if (mChildMask.isOn(n)) {
1402  child = mTable[n].child;
1403  } else {
1404  child = new ChildT(ijk, mTable[n].value, mValueMask.isOn(n));
1405  mTable[n].child = child;
1406  mChildMask.setOn(n);
1407  }
1408  acc.insert(ijk, child);
1409  child->setValueAndCache(ijk, value, acc);
1410  }
1411 
1412  template<typename NodeT>
1413  uint32_t nodeCount() const
1414  {
1415  static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Node::getNodes: Invalid type");
1416  NANOVDB_ASSERT(NodeT::LEVEL < LEVEL);
1417  uint32_t sum = 0;
1418  if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1419  sum += mChildMask.countOn();
1420  } else {
1421  for (auto iter = mChildMask.beginOn(); iter; ++iter) {
1422  sum += mTable[*iter].child->template nodeCount<NodeT>();
1423  }
1424  }
1425  return sum;
1426  }
1427 
1428  template<typename NodeT>
1429  void getNodes(std::vector<NodeT*>& array)
1430  {
1431  static_assert(is_same<ValueT, typename NodeT::ValueType>::value, "Node::getNodes: Invalid type");
1432  NANOVDB_ASSERT(NodeT::LEVEL < LEVEL);
1433  for (auto iter = mChildMask.beginOn(); iter; ++iter) {
1434  if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1435  array.push_back(reinterpret_cast<NodeT*>(mTable[*iter].child));
1436  } else {
1437  mTable[*iter].child->getNodes(array);
1438  }
1439  }
1440  }
1441 
1442  void addChild(ChildT*& child)
1443  {
1444  NANOVDB_ASSERT(child && (child->mOrigin & ~MASK) == this->mOrigin);
1445  const uint32_t n = CoordToOffset(child->mOrigin);
1446  if (mChildMask.isOn(n)) {
1447  delete mTable[n].child;
1448  } else {
1449  mChildMask.setOn(n);
1450  }
1451  mTable[n].child = child;
1452  child = nullptr;
1453  }
1454 
1455  template<typename NodeT>
1456  void addNode(NodeT*& node)
1457  {
1458  if (is_same<NodeT, ChildT>::value) { //resolved at compile-time
1459  this->addChild(reinterpret_cast<ChildT*&>(node));
1460  } else {
1461  const uint32_t n = CoordToOffset(node->mOrigin);
1462  ChildT* child = nullptr;
1463  if (mChildMask.isOn(n)) {
1464  child = mTable[n].child;
1465  } else {
1466  child = new ChildT(node->mOrigin, mTable[n].value, mValueMask.isOn(n));
1467  mTable[n].child = child;
1468  mChildMask.setOn(n);
1469  }
1470  child->addNode(node);
1471  }
1472  }
1473 
1474  template<typename T>
1475  typename std::enable_if<is_floating_point<T>::value>::type
1476  signedFloodFill(T outside);
1477  template<typename T>
1478  typename std::enable_if<!is_floating_point<T>::value>::type
1479  signedFloodFill(T) {} // no-op for non-floating-point values
1480 }; // GridBuilder::BuildNode
1481 
1482 //================================================================================================
1483 
1484 template<typename ValueT, typename BuildT, typename StatsT>
1485 template<typename ChildT>
1486 template<typename T>
1487 inline typename std::enable_if<is_floating_point<T>::value>::type
1488 GridBuilder<ValueT, BuildT, StatsT>::BuildNode<ChildT>::
1489  signedFloodFill(T outside)
1490 {
1491  const uint32_t first = *mChildMask.beginOn();
1492  if (first < NUM_VALUES) {
1493  bool xInside = mTable[first].child->getFirstValue() < 0;
1494  bool yInside = xInside, zInside = xInside;
1495  for (uint32_t x = 0; x != (1 << LOG2DIM); ++x) {
1496  const uint32_t x00 = x << (2 * LOG2DIM); // offset for block(x, 0, 0)
1497  if (mChildMask.isOn(x00)) {
1498  xInside = mTable[x00].child->getLastValue() < 0;
1499  }
1500  yInside = xInside;
1501  for (uint32_t y = 0; y != (1u << LOG2DIM); ++y) {
1502  const uint32_t xy0 = x00 + (y << LOG2DIM); // offset for block(x, y, 0)
1503  if (mChildMask.isOn(xy0))
1504  yInside = mTable[xy0].child->getLastValue() < 0;
1505  zInside = yInside;
1506  for (uint32_t z = 0; z != (1 << LOG2DIM); ++z) {
1507  const uint32_t xyz = xy0 + z; // offset for block(x, y, z)
1508  if (mChildMask.isOn(xyz)) {
1509  zInside = mTable[xyz].child->getLastValue() < 0;
1510  } else {
1511  mTable[xyz].value = zInside ? -outside : outside;
1512  }
1513  }
1514  }
1515  }
1516  }
1517 } // Node::signedFloodFill
1518 
1519 //================================================================================================
1520 
1521 template<typename ValueT, typename BuildT, typename StatsT>
1522 struct GridBuilder<ValueT, BuildT, StatsT>::
1523  BuildLeaf
1524 {
1525  using ValueType = ValueT;
1526  using BuildType = BuildT;
1527  static constexpr uint32_t LOG2DIM = 3;
1528  static constexpr uint32_t TOTAL = LOG2DIM; // needed by parent nodes
1529  static constexpr uint32_t DIM = 1u << TOTAL;
1530  static constexpr uint32_t SIZE = 1u << 3 * LOG2DIM; // total number of voxels represented by this node
1531  static constexpr int32_t MASK = DIM - 1; // mask for bit operations
1532  static constexpr uint32_t LEVEL = 0; // level 0 = leaf
1533  static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node
1534 
1535  using NanoLeafT = typename NanoNode<BuildT, 0>::Type;
1536 
1537  Coord mOrigin;
1538  Mask<LOG2DIM> mValueMask;
1539  ValueT mValues[SIZE];
1540  union {
1541  NanoLeafT *mDstNode;
1542  uint64_t mDstOffset;
1543  };
1544 
1545  BuildLeaf(const Coord& ijk, const ValueT& value, bool state)
1546  : mOrigin(ijk & ~MASK)
1547  , mValueMask(state) //invalid
1548  , mDstOffset(0)
1549  {
1550  ValueT* target = mValues;
1551  uint32_t n = SIZE;
1552  while (n--)
1553  *target++ = value;
1554  }
1555  BuildLeaf(const BuildLeaf&) = delete; // disallow copy-construction
1556  BuildLeaf(BuildLeaf&&) = delete; // disallow move construction
1557  BuildLeaf& operator=(const BuildLeaf&) = delete; // disallow copy assignment
1558  BuildLeaf& operator=(BuildLeaf&&) = delete; // disallow move assignment
1559  ~BuildLeaf() = default;
1560 
1561  /// @brief Return the linear offset corresponding to the given coordinate
1562  static uint32_t CoordToOffset(const Coord& ijk)
1563  {
1564  return ((ijk[0] & MASK) << (2 * LOG2DIM)) + ((ijk[1] & MASK) << LOG2DIM) + (ijk[2] & MASK);
1565  }
1566 
1567  static Coord OffsetToLocalCoord(uint32_t n)
1568  {
1569  NANOVDB_ASSERT(n < SIZE);
1570  const int32_t m = n & ((1 << 2 * LOG2DIM) - 1);
1571  return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & MASK);
1572  }
1573 
1574  void localToGlobalCoord(Coord& ijk) const
1575  {
1576  ijk += mOrigin;
1577  }
1578 
1579  Coord offsetToGlobalCoord(uint32_t n) const
1580  {
1581  Coord ijk = BuildLeaf::OffsetToLocalCoord(n);
1582  this->localToGlobalCoord(ijk);
1583  return ijk;
1584  }
1585 
1586  template<typename AccT>
1587  bool isActiveAndCache(const Coord& ijk, const AccT&) const
1588  {
1589  return mValueMask.isOn(CoordToOffset(ijk));
1590  }
1591 
1592  ValueT getFirstValue() const { return mValues[0]; }
1593  ValueT getLastValue() const { return mValues[SIZE - 1]; }
1594 
1595  const ValueT& getValue(const Coord& ijk) const
1596  {
1597  return mValues[CoordToOffset(ijk)];
1598  }
1599 
1600  template<typename AccT>
1601  const ValueT& getValueAndCache(const Coord& ijk, const AccT&) const
1602  {
1603  return mValues[CoordToOffset(ijk)];
1604  }
1605 
1606  template<typename AccT>
1607  void setValueAndCache(const Coord& ijk, const ValueT& value, const AccT&)
1608  {
1609  const uint32_t n = CoordToOffset(ijk);
1610  mValueMask.setOn(n);
1611  mValues[n] = value;
1612  }
1613 
1614  void setValue(const Coord& ijk, const ValueT& value)
1615  {
1616  const uint32_t n = CoordToOffset(ijk);
1617  mValueMask.setOn(n);
1618  mValues[n] = value;
1619  }
1620 
1621  template<typename NodeT>
1622  void getNodes(std::vector<NodeT*>&) { NANOVDB_ASSERT(false); }
1623 
1624  template<typename NodeT>
1625  void addNode(NodeT*&) {}
1626 
1627  template<typename NodeT>
1628  uint32_t nodeCount() const
1629  {
1630  NANOVDB_ASSERT(false);// should never get called
1631  return 1;
1632  }
1633 
1634  template<typename T>
1635  typename std::enable_if<std::is_floating_point<T>::value>::type
1636  signedFloodFill(T outside);
1637  template<typename T>
1638  typename std::enable_if<!std::is_floating_point<T>::value>::type
1639  signedFloodFill(T) {} // no-op for non-floating-point values
1640 }; // BuildLeaf
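// Added summary (not part of the original source): a BuildLeaf holds a dense 8 x 8 x 8 = 512
// voxel block, one value plus one activity bit (mValueMask) per voxel, addressed in z-major
// order by CoordToOffset. Judging from the mDst* names, the trailing union appears to hold
// either a pointer to, or a byte offset of, the corresponding destination NanoVDB leaf while
// the build tree is converted into a NanoVDB grid.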
1641 
1642 //================================================================================================
1643 
1644 template<typename ValueT, typename BuildT, typename StatsT>
1645 template<typename T>
1646 inline typename std::enable_if<std::is_floating_point<T>::value>::type
1647 GridBuilder<ValueT, BuildT, StatsT>::BuildLeaf::
1648  signedFloodFill(T outside)
1649 {
1650  const uint32_t first = *mValueMask.beginOn();
1651  if (first < SIZE) {
1652  bool xInside = mValues[first] < 0, yInside = xInside, zInside = xInside;
1653  for (uint32_t x = 0; x != DIM; ++x) {
1654  const uint32_t x00 = x << (2 * LOG2DIM);
1655  if (mValueMask.isOn(x00))
1656  xInside = mValues[x00] < 0; // element(x, 0, 0)
1657  yInside = xInside;
1658  for (uint32_t y = 0; y != DIM; ++y) {
1659  const uint32_t xy0 = x00 + (y << LOG2DIM);
1660  if (mValueMask.isOn(xy0))
1661  yInside = mValues[xy0] < 0; // element(x, y, 0)
1662  zInside = yInside;
1663  for (uint32_t z = 0; z != (1 << LOG2DIM); ++z) {
1664  const uint32_t xyz = xy0 + z; // element(x, y, z)
1665  if (mValueMask.isOn(xyz)) {
1666  zInside = mValues[xyz] < 0;
1667  } else {
1668  mValues[xyz] = zInside ? -outside : outside;
1669  }
1670  }
1671  }
1672  }
1673  }
1674 } // BuildLeaf::signedFloodFill
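// Worked 1D illustration (added, assuming a positive 'outside' value): for one z-row whose
// voxels are [-1*, -2*, ?, ?, +3*, ?], where '*' marks active voxels and '?' inactive ones,
// the inner loop leaves the active entries untouched and fills the inactive ones with the sign
// carried from the last active voxel, yielding [-1, -2, -outside, -outside, +3, +outside].
// The xInside/yInside variables seed that carry across rows and slices in the same way.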
1675 
1676 //================================================================================================
1677 
1678 template<typename ValueT, typename BuildT, typename StatsT>
1679 struct GridBuilder<ValueT, BuildT, StatsT>::
1680  ValueAccessor
1681 {
1682  ValueAccessor(SrcRootT& root)
1683  : mKeys{Coord(Maximum<int>::value()), Coord(Maximum<int>::value()), Coord(Maximum<int>::value())}
1684  , mNode{nullptr, nullptr, nullptr, &root}
1685  {
1686  }
1687  template<typename NodeT>
1688  bool isCached(const Coord& ijk) const
1689  {
1690  return (ijk[0] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][0] &&
1691  (ijk[1] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][1] &&
1692  (ijk[2] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][2];
1693  }
1694  const ValueT& getValue(const Coord& ijk)
1695  {
1696  if (this->isCached<SrcNode0>(ijk)) {
1697  return ((SrcNode0*)mNode[0])->getValueAndCache(ijk, *this);
1698  } else if (this->isCached<SrcNode1>(ijk)) {
1699  return ((SrcNode1*)mNode[1])->getValueAndCache(ijk, *this);
1700  } else if (this->isCached<SrcNode2>(ijk)) {
1701  return ((SrcNode2*)mNode[2])->getValueAndCache(ijk, *this);
1702  }
1703  return ((SrcRootT*)mNode[3])->getValueAndCache(ijk, *this);
1704  }
1705  /// @brief Sets value in a leaf node and returns it.
1706  SrcNode0* setValue(const Coord& ijk, const ValueT& value)
1707  {
1708  if (this->isCached<SrcNode0>(ijk)) {
1709  ((SrcNode0*)mNode[0])->setValueAndCache(ijk, value, *this);
1710  } else if (this->isCached<SrcNode1>(ijk)) {
1711  ((SrcNode1*)mNode[1])->setValueAndCache(ijk, value, *this);
1712  } else if (this->isCached<SrcNode2>(ijk)) {
1713  ((SrcNode2*)mNode[2])->setValueAndCache(ijk, value, *this);
1714  } else {
1715  ((SrcRootT*)mNode[3])->setValueAndCache(ijk, value, *this);
1716  }
1717  NANOVDB_ASSERT(this->isCached<SrcNode0>(ijk));
1718  return (SrcNode0*)mNode[0];
1719  }
1720  bool isActive(const Coord& ijk)
1721  {
1722  if (this->isCached<SrcNode0>(ijk)) {
1723  return ((SrcNode0*)mNode[0])->isActiveAndCache(ijk, *this);
1724  } else if (this->isCached<SrcNode1>(ijk)) {
1725  return ((SrcNode1*)mNode[1])->isActiveAndCache(ijk, *this);
1726  } else if (this->isCached<SrcNode2>(ijk)) {
1727  return ((SrcNode2*)mNode[2])->isActiveAndCache(ijk, *this);
1728  }
1729  return ((SrcRootT*)mNode[3])->isActiveAndCache(ijk, *this);
1730  }
1731  bool isValueOn(const Coord& ijk) { return this->isActive(ijk); }
1732  template<typename NodeT>
1733  void insert(const Coord& ijk, NodeT* node)
1734  {
1735  mKeys[NodeT::LEVEL] = ijk & ~NodeT::MASK;
1736  mNode[NodeT::LEVEL] = node;
1737  }
1738  Coord mKeys[3];
1739  void* mNode[4];
1740 }; // ValueAccessor
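// Minimal usage sketch (added, not part of the original source; assumes a float grid and the
// GridBuilder::getAccessor() factory defined earlier in this header):
//
//     nanovdb::GridBuilder<float> builder(0.0f);           // background value of 0
//     auto acc = builder.getAccessor();
//     acc.setValue(nanovdb::Coord(1, 2, 3), 1.0f);          // creates and caches the leaf
//     assert(acc.isActive(nanovdb::Coord(1, 2, 3)));
//     assert(acc.getValue(nanovdb::Coord(1, 2, 3)) == 1.0f);
//
// The three mKeys entries and the first three mNode slots cache the most recently visited node
// per tree level (leaf, lower and upper internal node), while mNode[3] always points at the
// root, so repeated access to nearby coordinates skips the root-to-leaf traversal.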
1741 
1742 } // namespace nanovdb
1743 
1744 #endif // NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED