OpenVDB  9.0.1
GridOperators.h
1 // Copyright Contributors to the OpenVDB Project
2 // SPDX-License-Identifier: MPL-2.0
3 
4 /// @file tools/GridOperators.h
5 ///
6 /// @brief Apply an operator to an input grid to produce an output grid
7 /// with the same active voxel topology but a potentially different value type.
8 
9 #ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
10 #define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
11 
12 #include "openvdb/Grid.h"
13 #include "openvdb/math/Operators.h"
14 #include "openvdb/util/NullInterrupter.h"
15 #include "openvdb/thread/Threading.h"
16 #include "openvdb/tree/LeafManager.h"
17 #include "openvdb/tree/ValueAccessor.h"
18 #include "ValueTransformer.h" // for tools::foreach()
19 #include <openvdb/openvdb.h>
20 
21 #include <tbb/parallel_for.h>
22 
23 namespace openvdb {
24 OPENVDB_USE_VERSION_NAMESPACE
25 namespace OPENVDB_VERSION_NAME {
26 namespace tools {
27 
28 /// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
29 /// having the same tree configuration as VectorGridType but a scalar value type, T,
30 /// where T is the type of the original vector components.
31 /// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
32 template<typename VectorGridType> struct VectorToScalarConverter {
33  typedef typename VectorGridType::ValueType::value_type VecComponentValueT;
34  typedef typename VectorGridType::template ValueConverter<VecComponentValueT>::Type Type;
35 };
36 
37 /// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid
38 /// having the same tree configuration as ScalarGridType but value type Vec3<T>
39 /// where T is ScalarGridType::ValueType.
40 /// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid.
41 template<typename ScalarGridType> struct ScalarToVectorConverter {
42  typedef math::Vec3<typename ScalarGridType::ValueType> VectorValueT;
43  typedef typename ScalarGridType::template ValueConverter<VectorValueT>::Type Type;
44 };
45 
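For reference, both converters simply rebuild the same tree configuration with a new value type. A minimal illustrative sketch (the static_asserts use the standard grid typedefs from openvdb.h):

```cpp
#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>
#include <type_traits>

// Vec3DGrid (Vec3d voxels) maps to DoubleGrid, as the @details comment above states.
static_assert(std::is_same<
    openvdb::tools::VectorToScalarConverter<openvdb::Vec3DGrid>::Type,
    openvdb::DoubleGrid>::value, "VectorToScalarConverter<Vec3DGrid> should be DoubleGrid");

// FloatGrid maps to Vec3SGrid (Vec3<float> voxels) in the other direction.
static_assert(std::is_same<
    openvdb::tools::ScalarToVectorConverter<openvdb::FloatGrid>::Type,
    openvdb::Vec3SGrid>::value, "ScalarToVectorConverter<FloatGrid> should be Vec3SGrid");
```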
46 
47 /// @brief Compute the Closest-Point Transform (CPT) from a distance field.
48 /// @return a new vector-valued grid with the same numerical precision as the input grid
49 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
50 /// @details When a mask grid is specified, the solution is calculated only in
51 /// the intersection of the mask active topology and the input active topology
52 /// independent of the transforms associated with either grid.
53 template<typename GridType, typename InterruptT>
54 typename ScalarToVectorConverter<GridType>::Type::Ptr
55 cpt(const GridType& grid, bool threaded, InterruptT* interrupt);
56 
57 template<typename GridType, typename MaskT, typename InterruptT>
58 typename ScalarToVectorConverter<GridType>::Type::Ptr
59 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
60 
61 template<typename GridType>
62 typename ScalarToVectorConverter<GridType>::Type::Ptr
63 cpt(const GridType& grid, bool threaded = true)
64 {
65  return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr);
66 }
67 
68 template<typename GridType, typename MaskT>
69 typename ScalarToVectorConverter<GridType>::Type::Ptr
70 cpt(const GridType& grid, const MaskT& mask, bool threaded = true)
71 {
72  return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
73 }
74 
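As a usage sketch for the overloads above, the following computes the closest-point transform of a narrow-band level set (tools::createLevelSetSphere comes from openvdb/tools/LevelSetSphere.h; the sphere parameters are arbitrary and only for illustration):

```cpp
#include <openvdb/openvdb.h>
#include <openvdb/tools/GridOperators.h>
#include <openvdb/tools/LevelSetSphere.h>

int main()
{
    openvdb::initialize();

    // Narrow-band level set sphere: radius 10, centered at the origin, voxel size 0.5.
    openvdb::FloatGrid::Ptr sphere =
        openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(
            /*radius=*/10.0f, /*center=*/openvdb::Vec3f(0.0f), /*voxelSize=*/0.5f);

    // Closest-point transform: a FloatGrid input produces a Vec3SGrid of closest points.
    openvdb::Vec3SGrid::Ptr cptGrid = openvdb::tools::cpt(*sphere);

    // Single-threaded variant, e.g. when debugging.
    openvdb::Vec3SGrid::Ptr cptSerial = openvdb::tools::cpt(*sphere, /*threaded=*/false);
    return 0;
}
```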
75 
76 /// @brief Compute the curl of the given vector-valued grid.
77 /// @return a new vector-valued grid
78 /// @details When a mask grid is specified, the solution is calculated only in
79 /// the intersection of the mask active topology and the input active topology
80 /// independent of the transforms associated with either grid.
81 template<typename GridType, typename InterruptT>
82 typename GridType::Ptr
83 curl(const GridType& grid, bool threaded, InterruptT* interrupt);
84 
85 template<typename GridType, typename MaskT, typename InterruptT>
86 typename GridType::Ptr
87 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
88 
89 template<typename GridType>
90 typename GridType::Ptr
91 curl(const GridType& grid, bool threaded = true)
92 {
93  return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr);
94 }
95 
96 template<typename GridType, typename MaskT>
97 typename GridType::Ptr
98 curl(const GridType& grid, const MaskT& mask, bool threaded = true)
99 {
100  return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
101 }
102 
103 
104 /// @brief Compute the divergence of the given vector-valued grid.
105 /// @return a new scalar-valued grid with the same numerical precision as the input grid
106 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
107 /// @details When a mask grid is specified, the solution is calculated only in
108 /// the intersection of the mask active topology and the input active topology
109 /// independent of the transforms associated with either grid.
110 template<typename GridType, typename InterruptT>
111 typename VectorToScalarConverter<GridType>::Type::Ptr
112 divergence(const GridType& grid, bool threaded, InterruptT* interrupt);
113 
114 template<typename GridType, typename MaskT, typename InterruptT>
115 typename VectorToScalarConverter<GridType>::Type::Ptr
116 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
117 
118 template<typename GridType>
119 typename VectorToScalarConverter<GridType>::Type::Ptr
120 divergence(const GridType& grid, bool threaded = true)
121 {
122  return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr);
123 }
124 
125 template<typename GridType, typename MaskT>
126 typename VectorToScalarConverter<GridType>::Type::Ptr
127 divergence(const GridType& grid, const MaskT& mask, bool threaded = true)
128 {
129  return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
130 }
131 
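A sketch of the masked overload: the velocity field and box dimensions below are hypothetical; the point is that the output's active voxels are the intersection of the mask's and the input's active topology:

```cpp
// Hypothetical velocity field (constant, so its divergence is zero; the API is the point here).
openvdb::Vec3SGrid::Ptr velocity = openvdb::Vec3SGrid::create(openvdb::Vec3s(0.0f));
velocity->fill(openvdb::CoordBBox(openvdb::Coord(0), openvdb::Coord(63)),
    openvdb::Vec3s(1.0f, 0.0f, 0.0f), /*active=*/true);

// Boolean mask restricting the computation to a smaller box.
openvdb::BoolGrid::Ptr mask = openvdb::BoolGrid::create(false);
mask->fill(openvdb::CoordBBox(openvdb::Coord(16), openvdb::Coord(47)), true, /*active=*/true);

// Divergence of a Vec3SGrid is a FloatGrid; values are computed only inside the mask.
openvdb::FloatGrid::Ptr div = openvdb::tools::divergence(*velocity, *mask);
```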
132 
133 /// @brief Compute the gradient of the given scalar grid.
134 /// @return a new vector-valued grid with the same numerical precision as the input grid
135 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
136 /// @details When a mask grid is specified, the solution is calculated only in
137 /// the intersection of the mask active topology and the input active topology
138 /// independent of the transforms associated with either grid.
139 template<typename GridType, typename InterruptT>
140 typename ScalarToVectorConverter<GridType>::Type::Ptr
141 gradient(const GridType& grid, bool threaded, InterruptT* interrupt);
142 
143 template<typename GridType, typename MaskT, typename InterruptT>
144 typename ScalarToVectorConverter<GridType>::Type::Ptr
145 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
146 
147 template<typename GridType>
148 typename ScalarToVectorConverter<GridType>::Type::Ptr
149 gradient(const GridType& grid, bool threaded = true)
150 {
151  return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr);
152 }
153 
154 template<typename GridType, typename MaskT>
155 typename ScalarToVectorConverter<GridType>::Type::Ptr
156 gradient(const GridType& grid, const MaskT& mask, bool threaded = true)
157 {
158  return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
159 }
160 
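Continuing the hypothetical level-set sphere and mask from the sketches above: the gradient of a FloatGrid is a Vec3SGrid, one vector per active voxel, which for a signed distance field approximates the surface normal direction:

```cpp
// Gradient of the scalar level set; the Gradient functor tags the result VEC_COVARIANT.
openvdb::Vec3SGrid::Ptr grad = openvdb::tools::gradient(*sphere);

// The masked form behaves exactly as for the other operators.
openvdb::Vec3SGrid::Ptr gradMasked = openvdb::tools::gradient(*sphere, *mask);
```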
161 
162 /// @brief Compute the Laplacian of the given scalar grid.
163 /// @return a new scalar grid
164 /// @details When a mask grid is specified, the solution is calculated only in
165 /// the intersection of the mask active topology and the input active topology
166 /// independent of the transforms associated with either grid.
167 template<typename GridType, typename InterruptT>
168 typename GridType::Ptr
169 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt);
170 
171 template<typename GridType, typename MaskT, typename InterruptT>
172 typename GridType::Ptr
173 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
174 
175 template<typename GridType>
176 typename GridType::Ptr
177 laplacian(const GridType& grid, bool threaded = true)
178 {
179  return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr);
180 }
181 
182 template<typename GridType, typename MaskT>
183 typename GridType::Ptr
184 laplacian(const GridType& grid, const MaskT& mask, bool threaded = true)
185 {
186  return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
187 }
188 
189 
190 /// @brief Compute the mean curvature of the given grid.
191 /// @return a new grid
192 /// @details When a mask grid is specified, the solution is calculated only in
193 /// the intersection of the mask active topology and the input active topology
194 /// independent of the transforms associated with either grid.
195 template<typename GridType, typename InterruptT>
196 typename GridType::Ptr
197 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt);
198 
199 template<typename GridType, typename MaskT, typename InterruptT>
200 typename GridType::Ptr
201 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
202 
203 template<typename GridType>
204 typename GridType::Ptr
205 meanCurvature(const GridType& grid, bool threaded = true)
206 {
207  return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr);
208 }
209 
210 template<typename GridType, typename MaskT>
211 typename GridType::Ptr
212 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true)
213 {
214  return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
215 }
216 
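A brief sketch, again using the hypothetical level-set sphere from above: mean curvature of a scalar grid returns a grid of the same type, and for a sphere the values near the zero crossing are roughly constant (on the order of 1/radius, up to the sign and normalization convention of the underlying math::MeanCurvature operator):

```cpp
// Mean curvature of the level set; same grid type in and out (FloatGrid here).
openvdb::FloatGrid::Ptr curvature = openvdb::tools::meanCurvature(*sphere);

// The Laplacian follows the same pattern.
openvdb::FloatGrid::Ptr lap = openvdb::tools::laplacian(*sphere);
```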
217 
218 /// @brief Compute the magnitudes of the vectors of the given vector-valued grid.
219 /// @return a new scalar-valued grid with the same numerical precision as the input grid
220 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
221 /// @details When a mask grid is specified, the solution is calculated only in
222 /// the intersection of the mask active topology and the input active topology
223 /// independent of the transforms associated with either grid.
224 template<typename GridType, typename InterruptT>
225 typename VectorToScalarConverter<GridType>::Type::Ptr
226 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt);
227 
228 template<typename GridType, typename MaskT, typename InterruptT>
229 typename VectorToScalarConverter<GridType>::Type::Ptr
230 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
231 
232 template<typename GridType>
233 typename VectorToScalarConverter<GridType>::Type::Ptr
234 magnitude(const GridType& grid, bool threaded = true)
235 {
236  return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr);
237 }
238 
239 template<typename GridType, typename MaskT>
240 typename VectorToScalarConverter<GridType>::Type::Ptr
241 magnitude(const GridType& grid, const MaskT& mask, bool threaded = true)
242 {
243  return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
244 }
245 
246 
247 /// @brief Normalize the vectors of the given vector-valued grid.
248 /// @return a new vector-valued grid
249 /// @details When a mask grid is specified, the solution is calculated only in
250 /// the intersection of the mask active topology and the input active topology
251 /// independent of the transforms associated with either grid.
252 template<typename GridType, typename InterruptT>
253 typename GridType::Ptr
254 normalize(const GridType& grid, bool threaded, InterruptT* interrupt);
255 
256 template<typename GridType, typename MaskT, typename InterruptT>
257 typename GridType::Ptr
258 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
259 
260 template<typename GridType>
261 typename GridType::Ptr
262 normalize(const GridType& grid, bool threaded = true)
263 {
264  return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr);
265 }
266 
267 template<typename GridType, typename MaskT>
268 typename GridType::Ptr
269 normalize(const GridType& grid, const MaskT& mask, bool threaded = true)
270 {
271  return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
272 }
273 
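These two compose naturally with gradient(); a sketch that derives gradient-magnitude and unit-normal grids from the hypothetical sphere above (vectors that cannot be normalized are set to zero by the Normalize functor defined later in this file):

```cpp
// |grad(phi)|: close to 1 throughout the narrow band of a signed distance field.
openvdb::FloatGrid::Ptr gradMag =
    openvdb::tools::magnitude(*openvdb::tools::gradient(*sphere));

// Unit surface normals: normalize the gradient field.
openvdb::Vec3SGrid::Ptr normals =
    openvdb::tools::normalize(*openvdb::tools::gradient(*sphere));
```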
274 
275 ////////////////////////////////////////
276 
277 
278 namespace gridop {
279 
280 /// @brief ToMaskGrid<T>::Type is the type of a grid having the same
281 /// tree hierarchy as grid type T but a value equal to its active state.
282 /// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid.
283 template<typename GridType>
284 struct ToMaskGrid {
285  typedef Grid<typename GridType::TreeType::template ValueConverter<ValueMask>::Type> Type;
286 };
287 
288 
289 /// @brief Apply an operator to an input grid to produce an output grid
290 /// with the same active voxel topology but a potentially different value type.
291 /// @details To facilitate inlining, this class is also templated on a Map type.
292 ///
293 /// @note This is a helper class and should never be used directly.
294 template<
295  typename InGridT,
296  typename MaskGridType,
297  typename OutGridT,
298  typename MapT,
299  typename OperatorT,
300  typename InterruptT = util::NullInterrupter>
301 class GridOperator
302 {
303 public:
304  typedef typename OutGridT::TreeType OutTreeT;
305  typedef typename OutTreeT::LeafNodeType OutLeafT;
306  typedef tree::LeafManager<OutTreeT> LeafManagerT;
307 
308  GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map,
309  InterruptT* interrupt = nullptr, bool densify = true)
310  : mAcc(grid.getConstAccessor())
311  , mMap(map)
312  , mInterrupt(interrupt)
313  , mMask(mask)
314  , mDensify(densify) ///< @todo consider adding a "NeedsDensification" operator trait
315  {
316  }
317  GridOperator(const GridOperator&) = default;
318  GridOperator& operator=(const GridOperator&) = default;
319  virtual ~GridOperator() = default;
320 
321  typename OutGridT::Ptr process(bool threaded = true)
322  {
323  if (mInterrupt) mInterrupt->start("Processing grid");
324 
325  // Derive background value of the output grid
326  typename InGridT::TreeType tmp(mAcc.tree().background());
327  typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0));
328 
329  // The output tree is a topology copy, optionally densified, of the input tree.
330  // (Densification is necessary for some operators because applying the operator to
331  // a constant tile produces distinct output values, particularly along tile borders.)
332  /// @todo Can tiles be handled correctly without densification, or by densifying
333  /// only to the width of the operator stencil?
334  typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy()));
335  if (mDensify) tree->voxelizeActiveTiles();
336 
337  // create grid with output tree and unit transform
338  typename OutGridT::Ptr result(new OutGridT(tree));
339 
340  // Modify the solution area if a mask was supplied.
341  if (mMask) {
342  result->topologyIntersection(*mMask);
343  }
344 
345  // transform of output grid = transform of input grid
346  result->setTransform(math::Transform::Ptr(new math::Transform( mMap.copy() )));
347 
348  LeafManagerT leafManager(*tree);
349 
350  if (threaded) {
351  tbb::parallel_for(leafManager.leafRange(), *this);
352  } else {
353  (*this)(leafManager.leafRange());
354  }
355 
356  // If the tree wasn't densified, it might have active tiles that need to be processed.
357  if (!mDensify) {
358  using TileIter = typename OutTreeT::ValueOnIter;
359 
360  TileIter tileIter = tree->beginValueOn();
361  tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf values (i.e., voxels)
362 
363  AccessorT inAcc = mAcc; // each thread needs its own accessor, captured by value
364  auto tileOp = [this, inAcc](const TileIter& it) {
365  // Apply the operator to the input grid's tile value at the iterator's
366  // current coordinates, and set the output tile's value to the result.
367  it.setValue(OperatorT::result(this->mMap, inAcc, it.getCoord()));
368  };
369 
370  // Apply the operator to tile values, optionally in parallel.
371  // (But don't share the functor; each thread needs its own accessor.)
372  tools::foreach(tileIter, tileOp, threaded, /*shareFunctor=*/false);
373  }
374 
375  if (mDensify) tree->prune();
376 
377  if (mInterrupt) mInterrupt->end();
378  return result;
379  }
380 
381  /// @brief Iterate sequentially over LeafNodes and voxels in the output
382  /// grid and apply the operator using a value accessor for the input grid.
383  ///
384  /// @note Never call this public method directly - it is called by
385  /// TBB threads only!
386  void operator()(const typename LeafManagerT::LeafRange& range) const
387  {
388  if (util::wasInterrupted(mInterrupt)) {
389  thread::cancelGroupExecution();
390  }
391 
392  for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) {
393  for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) {
394  value.setValue(OperatorT::result(mMap, mAcc, value.getCoord()));
395  }
396  }
397  }
398 
399 protected:
400  typedef typename InGridT::ConstAccessor AccessorT;
401  mutable AccessorT mAcc;
402  const MapT& mMap;
403  InterruptT* mInterrupt;
404  const MaskGridType* mMask;
405  const bool mDensify;
406 }; // end of GridOperator class
407 
408 } // namespace gridop
409 
410 
411 ////////////////////////////////////////
412 
413 
414 /// @brief Compute the closest-point transform of a scalar grid.
415 template<
416  typename InGridT,
417  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
418  typename InterruptT = util::NullInterrupter>
419 class Cpt
420 {
421 public:
422  typedef InGridT InGridType;
423  typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
424 
425  Cpt(const InGridType& grid, InterruptT* interrupt = nullptr):
426  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
427  {
428  }
429 
430  Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
431  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
432  {
433  }
434 
435  typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true)
436  {
437  Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt);
438  processTypedMap(mInputGrid.transform(), functor);
439  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE);
440  return functor.mOutputGrid;
441  }
442 
443 private:
444  struct IsOpT
445  {
446  template<typename MapT, typename AccT>
447  static typename OutGridType::ValueType
448  result(const MapT& map, const AccT& acc, const Coord& xyz)
449  {
450  return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz);
451  }
452  };
453  struct WsOpT
454  {
455  template<typename MapT, typename AccT>
456  static typename OutGridType::ValueType
457  result(const MapT& map, const AccT& acc, const Coord& xyz)
458  {
459  return math::CPT_RANGE<MapT, math::CD_2ND>::result(map, acc, xyz);
460  }
461  };
462  struct Functor
463  {
464  Functor(const InGridType& grid, const MaskGridType* mask,
465  bool threaded, bool worldspace, InterruptT* interrupt)
466  : mThreaded(threaded)
467  , mWorldSpace(worldspace)
468  , mInputGrid(grid)
469  , mInterrupt(interrupt)
470  , mMask(mask)
471  {}
472 
473  template<typename MapT>
474  void operator()(const MapT& map)
475  {
476  if (mWorldSpace) {
477  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT>
478  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
479  mOutputGrid = op.process(mThreaded); // cache the result
480  } else {
481  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT>
482  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
483  mOutputGrid = op.process(mThreaded); // cache the result
484  }
485  }
486  const bool mThreaded;
487  const bool mWorldSpace;
488  const InGridType& mInputGrid;
489  typename OutGridType::Ptr mOutputGrid;
490  InterruptT* mInterrupt;
491  const MaskGridType* mMask;
492  };
493  const InGridType& mInputGrid;
494  InterruptT* mInterrupt;
495  const MaskGridType* mMask;
496 }; // end of Cpt class
497 
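Unlike the cpt() convenience functions, the Cpt class exposes the useWorldTransform flag directly; a sketch reusing the hypothetical sphere grid from above:

```cpp
openvdb::tools::Cpt<openvdb::FloatGrid> cptOp(*sphere);

// World-space closest points (what tools::cpt() returns).
openvdb::Vec3SGrid::Ptr worldCpt = cptOp.process(/*threaded=*/true, /*useWorldTransform=*/true);

// Index-space closest points instead.
openvdb::Vec3SGrid::Ptr indexCpt = cptOp.process(/*threaded=*/true, /*useWorldTransform=*/false);
```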
498 
499 ////////////////////////////////////////
500 
501 
502 /// @brief Compute the curl of a vector grid.
503 template<
504  typename GridT,
505  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
506  typename InterruptT = util::NullInterrupter>
507 class Curl
508 {
509 public:
510  typedef GridT InGridType;
511  typedef GridT OutGridType;
512 
513  Curl(const GridT& grid, InterruptT* interrupt = nullptr):
514  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
515  {
516  }
517 
518  Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
519  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
520  {
521  }
522 
523  typename GridT::Ptr process(bool threaded = true)
524  {
525  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
526  processTypedMap(mInputGrid.transform(), functor);
527  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
528  return functor.mOutputGrid;
529  }
530 
531 private:
532  struct Functor
533  {
534  Functor(const GridT& grid, const MaskGridType* mask,
535  bool threaded, InterruptT* interrupt):
536  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
537 
538  template<typename MapT>
539  void operator()(const MapT& map)
540  {
541  typedef math::Curl<MapT, math::CD_2ND> OpT;
542  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
543  op(mInputGrid, mMask, map, mInterrupt);
544  mOutputGrid = op.process(mThreaded); // cache the result
545  }
546 
547  const bool mThreaded;
548  const GridT& mInputGrid;
549  typename GridT::Ptr mOutputGrid;
550  InterruptT* mInterrupt;
551  const MaskGridType* mMask;
552  }; // Private Functor
553 
554  const GridT& mInputGrid;
555  InterruptT* mInterrupt;
556  const MaskGridType* mMask;
557 }; // end of Curl class
558 
559 
560 ////////////////////////////////////////
561 
562 
563 /// @brief Compute the divergence of a vector grid.
564 template<
565  typename InGridT,
566  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
567  typename InterruptT = util::NullInterrupter>
568 class Divergence
569 {
570 public:
571  typedef InGridT InGridType;
572  typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
573 
574  Divergence(const InGridT& grid, InterruptT* interrupt = nullptr):
575  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
576  {
577  }
578 
579  Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
580  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
581  {
582  }
583 
584  typename OutGridType::Ptr process(bool threaded = true)
585  {
586  if (mInputGrid.getGridClass() == GRID_STAGGERED) {
587  Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt);
588  processTypedMap(mInputGrid.transform(), functor);
589  return functor.mOutputGrid;
590  } else {
591  Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt);
592  processTypedMap(mInputGrid.transform(), functor);
593  return functor.mOutputGrid;
594  }
595  }
596 
597 protected:
598  template<math::DScheme DiffScheme>
599  struct Functor
600  {
601  Functor(const InGridT& grid, const MaskGridType* mask,
602  bool threaded, InterruptT* interrupt):
603  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
604 
605  template<typename MapT>
606  void operator()(const MapT& map)
607  {
608  typedef math::Divergence<MapT, DiffScheme> OpT;
609  gridop::GridOperator<InGridT, MaskGridType, OutGridType, MapT, OpT, InterruptT>
610  op(mInputGrid, mMask, map, mInterrupt);
611  mOutputGrid = op.process(mThreaded); // cache the result
612  }
613 
614  const bool mThreaded;
615  const InGridType& mInputGrid;
616  typename OutGridType::Ptr mOutputGrid;
617  InterruptT* mInterrupt;
618  const MaskGridType* mMask;
619  }; // Private Functor
620 
621  const InGridType& mInputGrid;
622  InterruptT* mInterrupt;
623  const MaskGridType* mMask;
624 }; // end of Divergence class
625 
626 
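As Divergence::process() above shows, the operator switches from second-order central differencing (CD_2ND) to first-order differencing (FD_1ST) when the input grid is classified as staggered; a caller opts into that path by tagging the grid, e.g. for a MAC velocity field (reusing the hypothetical velocity grid from the divergence() sketch above):

```cpp
// Mark the velocity grid as staggered so divergence() selects the FD_1ST scheme.
velocity->setGridClass(openvdb::GRID_STAGGERED);
openvdb::FloatGrid::Ptr divStaggered = openvdb::tools::divergence(*velocity);
```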
627 ////////////////////////////////////////
628 
629 
630 /// @brief Compute the gradient of a scalar grid.
631 template<
632  typename InGridT,
633  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
634  typename InterruptT = util::NullInterrupter>
635 class Gradient
636 {
637 public:
638  typedef InGridT InGridType;
639  typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
640 
641  Gradient(const InGridT& grid, InterruptT* interrupt = nullptr):
642  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
643  {
644  }
645 
646  Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
647  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
648  {
649  }
650 
651  typename OutGridType::Ptr process(bool threaded = true)
652  {
653  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
654  processTypedMap(mInputGrid.transform(), functor);
655  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
656  return functor.mOutputGrid;
657  }
658 
659 protected:
660  struct Functor
661  {
662  Functor(const InGridT& grid, const MaskGridType* mask,
663  bool threaded, InterruptT* interrupt):
664  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
665 
666  template<typename MapT>
667  void operator()(const MapT& map)
668  {
669  typedef math::Gradient<MapT, math::CD_2ND> OpT;
670  gridop::GridOperator<InGridT, MaskGridType, OutGridType, MapT, OpT, InterruptT>
671  op(mInputGrid, mMask, map, mInterrupt);
672  mOutputGrid = op.process(mThreaded); // cache the result
673  }
674 
675  const bool mThreaded;
676  const InGridT& mInputGrid;
677  typename OutGridType::Ptr mOutputGrid;
678  InterruptT* mInterrupt;
679  const MaskGridType* mMask;
680  }; // Private Functor
681 
682  const InGridT& mInputGrid;
683  InterruptT* mInterrupt;
684  const MaskGridType* mMask;
685 }; // end of Gradient class
686 
687 
688 ////////////////////////////////////////
689 
690 
691 template<
692  typename GridT,
693  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
694  typename InterruptT = util::NullInterrupter>
695 class Laplacian
696 {
697 public:
698  typedef GridT InGridType;
699  typedef GridT OutGridType;
700 
701  Laplacian(const GridT& grid, InterruptT* interrupt = nullptr):
702  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
703  {
704  }
705 
706  Laplacian(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
707  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
708  {
709  }
710 
711  typename GridT::Ptr process(bool threaded = true)
712  {
713  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
714  processTypedMap(mInputGrid.transform(), functor);
715  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
716  return functor.mOutputGrid;
717  }
718 
719 protected:
720  struct Functor
721  {
722  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
723  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
724 
725  template<typename MapT>
726  void operator()(const MapT& map)
727  {
728  typedef math::Laplacian<MapT, math::CD_SECOND> OpT;
729  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
730  op(mInputGrid, mMask, map, mInterrupt);
731  mOutputGrid = op.process(mThreaded); // cache the result
732  }
733 
734  const bool mThreaded;
735  const GridT& mInputGrid;
736  typename GridT::Ptr mOutputGrid;
737  InterruptT* mInterrupt;
738  const MaskGridType* mMask;
739  }; // Private Functor
740 
741  const GridT& mInputGrid;
742  InterruptT* mInterrupt;
743  const MaskGridType* mMask;
744 }; // end of Laplacian class
745 
746 
747 ////////////////////////////////////////
748 
749 
750 template<
751  typename GridT,
752  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
753  typename InterruptT = util::NullInterrupter>
754 class MeanCurvature
755 {
756 public:
757  typedef GridT InGridType;
758  typedef GridT OutGridType;
759 
760  MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr):
761  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
762  {
763  }
764 
765  MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
766  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
767  {
768  }
769 
770  typename GridT::Ptr process(bool threaded = true)
771  {
772  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
773  processTypedMap(mInputGrid.transform(), functor);
774  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
775  return functor.mOutputGrid;
776  }
777 
778 protected:
779  struct Functor
780  {
781  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
782  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
783 
784  template<typename MapT>
785  void operator()(const MapT& map)
786  {
787  typedef math::MeanCurvature<MapT, math::CD_SECOND, math::CD_2ND> OpT;
788  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
789  op(mInputGrid, mMask, map, mInterrupt);
790  mOutputGrid = op.process(mThreaded); // cache the result
791  }
792 
793  const bool mThreaded;
794  const GridT& mInputGrid;
795  typename GridT::Ptr mOutputGrid;
796  InterruptT* mInterrupt;
797  const MaskGridType* mMask;
798  }; // Private Functor
799 
800  const GridT& mInputGrid;
801  InterruptT* mInterrupt;
802  const MaskGridType* mMask;
803 }; // end of MeanCurvature class
804 
805 
806 ////////////////////////////////////////
807 
808 
809 template<
810  typename InGridT,
811  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
812  typename InterruptT = util::NullInterrupter>
813 class Magnitude
814 {
815 public:
816  typedef InGridT InGridType;
817  typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
818 
819  Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr):
820  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
821  {
822  }
823 
824  Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
825  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
826  {
827  }
828 
829  typename OutGridType::Ptr process(bool threaded = true)
830  {
831  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
832  processTypedMap(mInputGrid.transform(), functor);
833  return functor.mOutputGrid;
834  }
835 
836 protected:
837  struct OpT
838  {
839  template<typename MapT, typename AccT>
840  static typename OutGridType::ValueType
841  result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length();}
842  };
843  struct Functor
844  {
845  Functor(const InGridT& grid, const MaskGridType* mask,
846  bool threaded, InterruptT* interrupt):
847  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
848 
849  template<typename MapT>
850  void operator()(const MapT& map)
851  {
852  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
853  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
854  mOutputGrid = op.process(mThreaded); // cache the result
855  }
856 
857  const bool mThreaded;
858  const InGridType& mInputGrid;
859  typename OutGridType::Ptr mOutputGrid;
860  InterruptT* mInterrupt;
861  const MaskGridType* mMask;
862  }; // Private Functor
863 
864  const InGridType& mInputGrid;
865  InterruptT* mInterrupt;
866  const MaskGridType* mMask;
867 }; // end of Magnitude class
868 
869 
870 ////////////////////////////////////////
871 
872 
873 template<
874  typename GridT,
875  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
876  typename InterruptT = util::NullInterrupter>
877 class Normalize
878 {
879 public:
880  typedef GridT InGridType;
881  typedef GridT OutGridType;
882 
883  Normalize(const GridT& grid, InterruptT* interrupt = nullptr):
884  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
885  {
886  }
887 
888  Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
889  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
890  {
891  }
892 
893  typename GridT::Ptr process(bool threaded = true)
894  {
895  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
896  processTypedMap(mInputGrid.transform(), functor);
897  if (typename GridT::Ptr outGrid = functor.mOutputGrid) {
898  const VecType vecType = mInputGrid.getVectorType();
899  if (vecType == VEC_COVARIANT) {
900  outGrid->setVectorType(VEC_COVARIANT_NORMALIZE);
901  } else {
902  outGrid->setVectorType(vecType);
903  }
904  }
905  return functor.mOutputGrid;
906  }
907 
908 protected:
909  struct OpT
910  {
911  template<typename MapT, typename AccT>
912  static typename OutGridType::ValueType
913  result(const MapT&, const AccT& acc, const Coord& xyz)
914  {
915  typename OutGridType::ValueType vec = acc.getValue(xyz);
916  if ( !vec.normalize() ) vec.setZero();
917  return vec;
918  }
919  };
920  struct Functor
921  {
922  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
923  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
924 
925  template<typename MapT>
926  void operator()(const MapT& map)
927  {
928  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
929  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
930  mOutputGrid = op.process(mThreaded); // cache the result
931  }
932 
933  const bool mThreaded;
934  const GridT& mInputGrid;
935  typename GridT::Ptr mOutputGrid;
936  InterruptT* mInterrupt;
937  const MaskGridType* mMask;
938  }; // Private Functor
939 
940  const GridT& mInputGrid;
941  InterruptT* mInterrupt;
942  const MaskGridType* mMask;
943 }; // end of Normalize class
944 
945 
946 ////////////////////////////////////////
947 
948 
949 template<typename GridType, typename InterruptT>
950 typename ScalarToVectorConverter<GridType>::Type::Ptr
951 cpt(const GridType& grid, bool threaded, InterruptT* interrupt)
952 {
953  Cpt<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
954  return op.process(threaded);
955 }
956 
957 template<typename GridType, typename MaskT, typename InterruptT>
958 typename ScalarToVectorConverter<GridType>::Type::Ptr
959 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
960 {
961  Cpt<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
962  return op.process(threaded);
963 }
964 
965 template<typename GridType, typename InterruptT>
966 typename GridType::Ptr
967 curl(const GridType& grid, bool threaded, InterruptT* interrupt)
968 {
969  Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
970  return op.process(threaded);
971 }
972 
973 template<typename GridType, typename MaskT, typename InterruptT>
974 typename GridType::Ptr
975 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
976 {
977  Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
978  return op.process(threaded);
979 }
980 
981 template<typename GridType, typename InterruptT>
982 typename VectorToScalarConverter<GridType>::Type::Ptr
983 divergence(const GridType& grid, bool threaded, InterruptT* interrupt)
984 {
985  Divergence<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
986  op(grid, interrupt);
987  return op.process(threaded);
988 }
989 
990 template<typename GridType, typename MaskT, typename InterruptT>
991 typename VectorToScalarConverter<GridType>::Type::Ptr
992 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
993 {
994  Divergence<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
995  return op.process(threaded);
996 }
997 
998 template<typename GridType, typename InterruptT>
999 typename ScalarToVectorConverter<GridType>::Type::Ptr
1000 gradient(const GridType& grid, bool threaded, InterruptT* interrupt)
1001 {
1002  Gradient<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1003  op(grid, interrupt);
1004  return op.process(threaded);
1005 }
1006 
1007 template<typename GridType, typename MaskT, typename InterruptT>
1008 typename ScalarToVectorConverter<GridType>::Type::Ptr
1009 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1010 {
1011  Gradient<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1012  return op.process(threaded);
1013 }
1014 
1015 template<typename GridType, typename InterruptT>
1016 typename GridType::Ptr
1017 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt)
1018 {
1019  Laplacian<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1020  op(grid, interrupt);
1021  return op.process(threaded);
1022 }
1023 
1024 template<typename GridType, typename MaskT, typename InterruptT>
1025 typename GridType::Ptr
1026 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1027 {
1028  Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1029  return op.process(threaded);
1030 }
1031 
1032 template<typename GridType, typename InterruptT>
1033 typename GridType::Ptr
1034 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt)
1035 {
1036  MeanCurvature<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1037  op(grid, interrupt);
1038  return op.process(threaded);
1039 }
1040 
1041 template<typename GridType, typename MaskT, typename InterruptT>
1042 typename GridType::Ptr
1043 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1044 {
1045  MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1046  return op.process(threaded);
1047 }
1048 
1049 template<typename GridType, typename InterruptT>
1050 typename VectorToScalarConverter<GridType>::Type::Ptr
1051 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt)
1052 {
1053  Magnitude<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1054  op(grid, interrupt);
1055  return op.process(threaded);
1056 }
1057 
1058 template<typename GridType, typename MaskT, typename InterruptT>
1059 typename VectorToScalarConverter<GridType>::Type::Ptr
1060 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1061 {
1062  Magnitude<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1063  return op.process(threaded);
1064 }
1065 
1066 template<typename GridType, typename InterruptT>
1067 typename GridType::Ptr
1068 normalize(const GridType& grid, bool threaded, InterruptT* interrupt)
1069 {
1070  Normalize<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1071  op(grid, interrupt);
1072  return op.process(threaded);
1073 }
1074 
1075 template<typename GridType, typename MaskT, typename InterruptT>
1076 typename GridType::Ptr
1077 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1078 {
1079  Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1080  return op.process(threaded);
1081 }
1082 
1083 ////////////////////////////////////////
1084 
1085 
1086 // Explicit Template Instantiation
1087 
1088 #ifdef OPENVDB_USE_EXPLICIT_INSTANTIATION
1089 
1090 #ifdef OPENVDB_INSTANTIATE_GRIDOPERATORS
1091 #include <openvdb/util/ExplicitInstantiation.h>
1092 #endif
1093 
1094 #define _FUNCTION(TreeT) \
1095  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr cpt(const Grid<TreeT>&, bool, util::NullInterrupter*)
1096 OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
1097 #undef _FUNCTION
1098 
1099 #define _FUNCTION(TreeT) \
1100  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr cpt(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1101 OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
1102 #undef _FUNCTION
1103 
1104 #define _FUNCTION(TreeT) \
1105  Grid<TreeT>::Ptr curl(const Grid<TreeT>&, bool, util::NullInterrupter*)
1106 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1107 #undef _FUNCTION
1108 
1109 #define _FUNCTION(TreeT) \
1110  Grid<TreeT>::Ptr curl(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1111 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1112 #undef _FUNCTION
1113 
1114 #define _FUNCTION(TreeT) \
1115  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr divergence(const Grid<TreeT>&, bool, util::NullInterrupter*)
1116 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1117 #undef _FUNCTION
1118 
1119 #define _FUNCTION(TreeT) \
1120  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr divergence(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1121 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1122 #undef _FUNCTION
1123 
1124 #define _FUNCTION(TreeT) \
1125  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr gradient(const Grid<TreeT>&, bool, util::NullInterrupter*)
1127 #undef _FUNCTION
1128 
1129 #define _FUNCTION(TreeT) \
1130  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr gradient(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1132 #undef _FUNCTION
1133 
1134 #define _FUNCTION(TreeT) \
1135  Grid<TreeT>::Ptr laplacian(const Grid<TreeT>&, bool, util::NullInterrupter*)
1137 #undef _FUNCTION
1138 
1139 #define _FUNCTION(TreeT) \
1140  Grid<TreeT>::Ptr laplacian(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1142 #undef _FUNCTION
1143 
1144 #define _FUNCTION(TreeT) \
1145  Grid<TreeT>::Ptr meanCurvature(const Grid<TreeT>&, bool, util::NullInterrupter*)
1147 #undef _FUNCTION
1148 
1149 #define _FUNCTION(TreeT) \
1150  Grid<TreeT>::Ptr meanCurvature(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1152 #undef _FUNCTION
1153 
1154 #define _FUNCTION(TreeT) \
1155  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr magnitude(const Grid<TreeT>&, bool, util::NullInterrupter*)
1156 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1157 #undef _FUNCTION
1158 
1159 #define _FUNCTION(TreeT) \
1160  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr magnitude(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1161 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1162 #undef _FUNCTION
1163 
1164 #define _FUNCTION(TreeT) \
1165  Grid<TreeT>::Ptr normalize(const Grid<TreeT>&, bool, util::NullInterrupter*)
1166 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1167 #undef _FUNCTION
1168 
1169 #define _FUNCTION(TreeT) \
1170  Grid<TreeT>::Ptr normalize(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1171 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1172 #undef _FUNCTION
1173 
1174 #endif // OPENVDB_USE_EXPLICIT_INSTANTIATION
1175 
1176 
1177 } // namespace tools
1178 } // namespace OPENVDB_VERSION_NAME
1179 } // namespace openvdb
1180 
1181 #endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED