Point Cloud Library (PCL)  1.15.1-dev
texture_mapping.hpp
1 /*
2  * Software License Agreement (BSD License)
3  *
4  * Copyright (c) 2010, Willow Garage, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above
14  * copyright notice, this list of conditions and the following
15  * disclaimer in the documentation and/or other materials provided
16  * with the distribution.
17  * * Neither the name of Willow Garage, Inc. nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $Id$
35  *
36  */
37 
38 #ifndef PCL_SURFACE_IMPL_TEXTURE_MAPPING_HPP_
39 #define PCL_SURFACE_IMPL_TEXTURE_MAPPING_HPP_
40 
41 #include <pcl/common/distances.h>
42 #include <pcl/kdtree/kdtree_flann.h>
43 #include <pcl/surface/texture_mapping.h>
44 #include <unordered_set>
45 
46 ///////////////////////////////////////////////////////////////////////////////////////////////
47 template<typename PointInT> std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> >
49  const Eigen::Vector3f &p1,
50  const Eigen::Vector3f &p2,
51  const Eigen::Vector3f &p3)
52 {
53  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > tex_coordinates;
54  // process for each face
55  Eigen::Vector3f p1p2 (p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]);
56  Eigen::Vector3f p1p3 (p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]);
57  Eigen::Vector3f p2p3 (p3[0] - p2[0], p3[1] - p2[1], p3[2] - p2[2]);
58 
59  // Normalize
60  p1p2 /= std::sqrt (p1p2.dot (p1p2));
61  p1p3 /= std::sqrt (p1p3.dot (p1p3));
62  p2p3 /= std::sqrt (p2p3.dot (p2p3));
63 
64  // compute vector normal of a face
65  Eigen::Vector3f f_normal = p1p2.cross (p1p3);
66  f_normal /= std::sqrt (f_normal.dot (f_normal));
67 
68  // project vector field onto the face: vector v1_projected = v1 - Dot(v1, n) * n;
69  Eigen::Vector3f f_vector_field = vector_field_ - vector_field_.dot (f_normal) * f_normal;
70 
71  // Normalize
72  f_vector_field /= std::sqrt (f_vector_field.dot (f_vector_field));
73 
74  // texture coordinates
75  Eigen::Vector2f tp1, tp2, tp3;
76 
77  double alpha = std::acos (f_vector_field.dot (p1p2));
78 
79  // distance between 3 vertices of triangles
80  double e1 = (p2 - p3).norm () / f_;
81  double e2 = (p1 - p3).norm () / f_;
82  double e3 = (p1 - p2).norm () / f_;
83 
84  // initialize
85  tp1[0] = 0.0;
86  tp1[1] = 0.0;
87 
88  tp2[0] = static_cast<float> (e3);
89  tp2[1] = 0.0;
90 
91  // determine texture coordinate tp3;
92  double cos_p1 = (e2 * e2 + e3 * e3 - e1 * e1) / (2 * e2 * e3);
93  double sin_p1 = sqrt (1 - (cos_p1 * cos_p1));
94 
95  tp3[0] = static_cast<float> (cos_p1 * e2);
96  tp3[1] = static_cast<float> (sin_p1 * e2);
97 
98  // rotating by alpha (angle between V and pp1 & pp2)
99  Eigen::Vector2f r_tp2, r_tp3;
100  r_tp2[0] = static_cast<float> (tp2[0] * std::cos (alpha) - tp2[1] * std::sin (alpha));
101  r_tp2[1] = static_cast<float> (tp2[0] * std::sin (alpha) + tp2[1] * std::cos (alpha));
102 
103  r_tp3[0] = static_cast<float> (tp3[0] * std::cos (alpha) - tp3[1] * std::sin (alpha));
104  r_tp3[1] = static_cast<float> (tp3[0] * std::sin (alpha) + tp3[1] * std::cos (alpha));
105 
106  // shifting
107  tp1[0] = tp1[0];
108  tp2[0] = r_tp2[0];
109  tp3[0] = r_tp3[0];
110  tp1[1] = tp1[1];
111  tp2[1] = r_tp2[1];
112  tp3[1] = r_tp3[1];
113 
114  float min_x = tp1[0];
115  float min_y = tp1[1];
116  if (min_x > tp2[0])
117  min_x = tp2[0];
118  if (min_x > tp3[0])
119  min_x = tp3[0];
120  if (min_y > tp2[1])
121  min_y = tp2[1];
122  if (min_y > tp3[1])
123  min_y = tp3[1];
124 
125  if (min_x < 0)
126  {
127  tp1[0] -= min_x;
128  tp2[0] -= min_x;
129  tp3[0] -= min_x;
130  }
131  if (min_y < 0)
132  {
133  tp1[1] -= min_y;
134  tp2[1] -= min_y;
135  tp3[1] -= min_y;
136  }
137 
138  tex_coordinates.push_back (tp1);
139  tex_coordinates.push_back (tp2);
140  tex_coordinates.push_back (tp3);
141  return (tex_coordinates);
142 }
143 
144 ///////////////////////////////////////////////////////////////////////////////////////////////
145 template<typename PointInT> void
147 {
148  // mesh information
149  int nr_points = tex_mesh.cloud.width * tex_mesh.cloud.height;
150  int point_size = static_cast<int> (tex_mesh.cloud.data.size ()) / nr_points;
151 
152  // temporary PointXYZ
153  float x, y, z;
154  // temporary face
155  Eigen::Vector3f facet[3];
156 
157  // texture coordinates for each mesh
158  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > >texture_map;
159 
160  for (std::size_t m = 0; m < tex_mesh.tex_polygons.size (); ++m)
161  {
162  // texture coordinates for each mesh
163  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
164 
165  // processing for each face
166  for (std::size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
167  {
168  // get facet information
169  for (std::size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
170  {
171  std::size_t idx = tex_mesh.tex_polygons[m][i].vertices[j];
172  memcpy (&x, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
173  memcpy (&y, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
174  memcpy (&z, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
175  facet[j][0] = x;
176  facet[j][1] = y;
177  facet[j][2] = z;
178  }
179 
180  // get texture coordinates of each face
181  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > tex_coordinates = mapTexture2Face (facet[0], facet[1], facet[2]);
182  for (const auto &tex_coordinate : tex_coordinates)
183  texture_map_tmp.push_back (tex_coordinate);
184  }// end faces
185 
186  // texture materials
187  tex_material_.tex_name = "material_" + std::to_string(m);
188  tex_material_.tex_file = tex_files_[m];
189  tex_mesh.tex_materials.push_back (tex_material_);
190 
191  // texture coordinates
192  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
193  }// end meshes
194 }
195 
196 ///////////////////////////////////////////////////////////////////////////////////////////////
197 template<typename PointInT> void
199 {
200  // mesh information
201  int nr_points = tex_mesh.cloud.width * tex_mesh.cloud.height;
202  int point_size = static_cast<int> (tex_mesh.cloud.data.size ()) / nr_points;
203 
204  float x_lowest = 100000;
205  float x_highest = 0;
206  float y_lowest = 100000;
207  //float y_highest = 0 ;
208  float z_lowest = 100000;
209  float z_highest = 0;
210  float x_, y_, z_;
211 
212  for (int i = 0; i < nr_points; ++i)
213  {
214  memcpy (&x_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
215  memcpy (&y_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
216  memcpy (&z_, &tex_mesh.cloud.data[i * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
217  // x
218  if (x_ <= x_lowest)
219  x_lowest = x_;
220  if (x_ > x_lowest)
221  x_highest = x_;
222 
223  // y
224  if (y_ <= y_lowest)
225  y_lowest = y_;
226  //if (y_ > y_lowest) y_highest = y_;
227 
228  // z
229  if (z_ <= z_lowest)
230  z_lowest = z_;
231  if (z_ > z_lowest)
232  z_highest = z_;
233  }
234  // x
235  float x_range = (x_lowest - x_highest) * -1;
236  float x_offset = 0 - x_lowest;
237  // x
238  // float y_range = (y_lowest - y_highest)*-1;
239  // float y_offset = 0 - y_lowest;
240  // z
241  float z_range = (z_lowest - z_highest) * -1;
242  float z_offset = 0 - z_lowest;
243 
244  // texture coordinates for each mesh
245  std::vector<std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > >texture_map;
246 
247  for (std::size_t m = 0; m < tex_mesh.tex_polygons.size (); ++m)
248  {
249  // texture coordinates for each mesh
250  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
251 
252  // processing for each face
253  for (std::size_t i = 0; i < tex_mesh.tex_polygons[m].size (); ++i)
254  {
255  Eigen::Vector2f tmp_VT;
256  for (std::size_t j = 0; j < tex_mesh.tex_polygons[m][i].vertices.size (); ++j)
257  {
258  std::size_t idx = tex_mesh.tex_polygons[m][i].vertices[j];
259  memcpy (&x_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[0].offset], sizeof(float));
260  memcpy (&y_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[1].offset], sizeof(float));
261  memcpy (&z_, &tex_mesh.cloud.data[idx * point_size + tex_mesh.cloud.fields[2].offset], sizeof(float));
262 
263  // calculate uv coordinates
264  tmp_VT[0] = (x_ + x_offset) / x_range;
265  tmp_VT[1] = (z_ + z_offset) / z_range;
266  texture_map_tmp.push_back (tmp_VT);
267  }
268  }// end faces
269 
270  // texture materials
271  tex_material_.tex_name = "material_" + std::to_string(m);
272  tex_material_.tex_file = tex_files_[m];
273  tex_mesh.tex_materials.push_back (tex_material_);
274 
275  // texture coordinates
276  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
277  }// end meshes
278 }
279 
280 ///////////////////////////////////////////////////////////////////////////////////////////////
281 template<typename PointInT> void
// mapMultipleTexturesToMeshUV: computes per-camera UV coordinates for a mesh
// that has been split into one sub-mesh per camera plus one final sub-mesh of
// faces seen by no camera. One material is attached per camera, plus a dummy
// "occluded" material (and (-1,-1) UVs) for the unseen faces.
// NOTE(review): the function-name line is not visible in this dump.
283 {
284 
285  if (tex_mesh.tex_polygons.size () != cams.size () + 1)
286  {
287  PCL_ERROR ("The mesh should be divided into nbCamera+1 sub-meshes.\n");
// NOTE(review): %d with std::size_t arguments is undefined on LP64 platforms —
// should be %zu (or cast to int). Same for the PCL_INFO below.
288  PCL_ERROR ("You provided %d cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());
289  return;
290  }
291 
292  PCL_INFO ("You provided %d cameras and a mesh containing %d sub-meshes.\n", cams.size (), tex_mesh.tex_polygons.size ());
293 
294  typename pcl::PointCloud<PointInT>::Ptr originalCloud (new pcl::PointCloud<PointInT>);
295  typename pcl::PointCloud<PointInT>::Ptr camera_transformed_cloud (new pcl::PointCloud<PointInT>);
296 
297  // convert mesh's cloud to pcl format for ease
298  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *originalCloud);
299 
300  for (std::size_t m = 0; m < cams.size (); ++m)
301  {
302  // get current camera parameters
303  Camera current_cam = cams[m];
304 
305  // get camera transform
306  Eigen::Affine3f cam_trans = current_cam.pose;
307 
// inverse of the camera pose brings world points into the camera frame
308  // transform cloud into current camera frame
309  pcl::transformPointCloud (*originalCloud, *camera_transformed_cloud, cam_trans.inverse ());
310 
311  // vector of texture coordinates for each face
312  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
313 
314  // processing each face visible by this camera
315  for (const auto &tex_polygon : tex_mesh.tex_polygons[m])
316  {
317  Eigen::Vector2f tmp_VT;
318  // for each point of this face
319  for (const auto &vertex : tex_polygon.vertices)
320  {
321  // get point
322  PointInT pt = (*camera_transformed_cloud)[vertex];
323 
// NOTE(review): the return value of getPointUVCoordinates is ignored here —
// a vertex outside the camera frustum still pushes whatever tmp_VT holds.
324  // compute UV coordinates for this point
325  getPointUVCoordinates (pt, current_cam, tmp_VT);
326  texture_map_tmp.push_back (tmp_VT);
327  }// end points
328  }// end faces
329 
330  // texture materials
331  tex_material_.tex_name = "material_" + std::to_string(m);
332  tex_material_.tex_file = current_cam.texture_file;
333  tex_mesh.tex_materials.push_back (tex_material_);
334 
335  // texture coordinates
336  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
337  }// end cameras
338 
339  // push on extra empty UV map (for unseen faces) so that obj writer does not crash!
340  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > texture_map_tmp;
341  for (const auto &tex_polygon : tex_mesh.tex_polygons[cams.size ()])
342  for (std::size_t j = 0; j < tex_polygon.vertices.size (); ++j)
343  {
// (-1,-1) is the sentinel UV used throughout this file for unseen vertices
344  Eigen::Vector2f tmp_VT;
345  tmp_VT[0] = -1;
346  tmp_VT[1] = -1;
347  texture_map_tmp.push_back (tmp_VT);
348  }
349 
350  tex_mesh.tex_coordinates.push_back (texture_map_tmp);
351 
352  // push on an extra dummy material for the same reason
353  tex_material_.tex_name = "material_" + std::to_string(cams.size());
354  tex_material_.tex_file = "occluded.jpg";
355  tex_mesh.tex_materials.push_back (tex_material_);
356 }
357 
358 ///////////////////////////////////////////////////////////////////////////////////////////////
359 template<typename PointInT> bool
361 {
362  Eigen::Vector3f direction;
363  direction (0) = pt.x;
364  direction (1) = pt.y;
365  direction (2) = pt.z;
366 
367  pcl::Indices indices;
368 
369  PointCloudConstPtr cloud (new PointCloud());
370  cloud = octree->getInputCloud();
371 
372  double distance_threshold = octree->getResolution();
373 
374  // raytrace
375  octree->getIntersectedVoxelIndices(direction, -direction, indices);
376 
377  int nbocc = static_cast<int> (indices.size ());
378  for (const auto &index : indices)
379  {
380  // if intersected point is on the over side of the camera
381  if (pt.z * (*cloud)[index].z < 0)
382  {
383  nbocc--;
384  continue;
385  }
386 
387  if (std::fabs ((*cloud)[index].z - pt.z) <= distance_threshold)
388  {
389  // points are very close to each-other, we do not consider the occlusion
390  nbocc--;
391  }
392  }
393 
394  return (nbocc != 0);
395 }
396 
397 ///////////////////////////////////////////////////////////////////////////////////////////////
398 template<typename PointInT> void
400  PointCloudPtr &filtered_cloud,
401  const double octree_voxel_size, pcl::Indices &visible_indices,
402  pcl::Indices &occluded_indices)
403 {
404  // variable used to filter occluded points by depth
405  double maxDeltaZ = octree_voxel_size;
406 
407  // create an octree to perform rayTracing
408  Octree octree (octree_voxel_size);
409  // create octree structure
410  octree.setInputCloud (input_cloud);
411  // update bounding box automatically
412  octree.defineBoundingBox ();
413  // add points in the tree
414  octree.addPointsFromInputCloud ();
415 
416  visible_indices.clear ();
417 
418  // for each point of the cloud, raycast toward camera and check intersected voxels.
419  Eigen::Vector3f direction;
420  pcl::Indices indices;
421  for (std::size_t i = 0; i < input_cloud->size (); ++i)
422  {
423  direction (0) = (*input_cloud)[i].x;
424  direction (1) = (*input_cloud)[i].y;
425  direction (2) = (*input_cloud)[i].z;
426 
427  // if point is not occluded
428  octree.getIntersectedVoxelIndices (direction, -direction, indices);
429 
430  int nbocc = static_cast<int> (indices.size ());
431  for (const auto &index : indices)
432  {
433  // if intersected point is on the over side of the camera
434  if ((*input_cloud)[i].z * (*input_cloud)[index].z < 0)
435  {
436  nbocc--;
437  continue;
438  }
439 
440  if (std::fabs ((*input_cloud)[index].z - (*input_cloud)[i].z) <= maxDeltaZ)
441  {
442  // points are very close to each-other, we do not consider the occlusion
443  nbocc--;
444  }
445  }
446 
447  if (nbocc == 0)
448  {
449  // point is added in the filtered mesh
450  filtered_cloud->points.push_back ((*input_cloud)[i]);
451  visible_indices.push_back (static_cast<pcl::index_t> (i));
452  }
453  else
454  {
455  occluded_indices.push_back (static_cast<pcl::index_t> (i));
456  }
457  }
458 
459 }
460 
461 ///////////////////////////////////////////////////////////////////////////////////////////////
462 template<typename PointInT> void
463 pcl::TextureMapping<PointInT>::removeOccludedPoints (const pcl::TextureMesh &tex_mesh, pcl::TextureMesh &cleaned_mesh, const double octree_voxel_size)
464 {
465  // copy mesh
466  cleaned_mesh = tex_mesh;
467 
469  typename pcl::PointCloud<PointInT>::Ptr filtered_cloud (new pcl::PointCloud<PointInT>);
470 
471  // load points into a PCL format
472  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
473 
474  pcl::Indices visible, occluded;
475  removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);
476 
477  // Now that we know which points are visible, let's iterate over each face.
478  // if the face has one invisible point => out!
479  for (std::size_t polygons = 0; polygons < cleaned_mesh.tex_polygons.size (); ++polygons)
480  {
481  // remove all faces from cleaned mesh
482  cleaned_mesh.tex_polygons[polygons].clear ();
483  // iterate over faces
484  for (std::size_t faces = 0; faces < tex_mesh.tex_polygons[polygons].size (); ++faces)
485  {
486  // check if all the face's points are visible
487  bool faceIsVisible = true;
488 
489  // iterate over face's vertex
490  for (const auto &vertex : tex_mesh.tex_polygons[polygons][faces].vertices)
491  {
492  if (find (occluded.begin (), occluded.end (), vertex) == occluded.end ())
493  {
494  // point is not in the occluded vector
495  // PCL_INFO (" VISIBLE!\n");
496  }
497  else
498  {
499  // point was occluded
500  // PCL_INFO(" OCCLUDED!\n");
501  faceIsVisible = false;
502  }
503  }
504 
505  if (faceIsVisible)
506  {
507  cleaned_mesh.tex_polygons[polygons].push_back (tex_mesh.tex_polygons[polygons][faces]);
508  }
509 
510  }
511  }
512 }
513 
514 ///////////////////////////////////////////////////////////////////////////////////////////////
515 template<typename PointInT> void
// Overload: removes occluded points from a TextureMesh's cloud and returns only
// the filtered point cloud. The per-index visibility lists required by the
// worker overload are computed and then discarded.
// NOTE(review): the first line of the signature (mesh + output-cloud
// parameters) is not visible in this dump.
517  const double octree_voxel_size)
518 {
519  PointCloudPtr cloud (new PointCloud);
520 
521  // load points into a PCL format
522  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
523 
// index lists are required by the worker overload but unused by this caller
524  pcl::Indices visible, occluded;
525  removeOccludedPoints (cloud, filtered_cloud, octree_voxel_size, visible, occluded);
526 
527 }
528 
529 ///////////////////////////////////////////////////////////////////////////////////////////////
530 template<typename PointInT> int
// sortFacesByCamera: redistributes the faces of a single-sub-mesh TextureMesh
// into one sub-mesh per camera (faces fully visible from that camera) plus one
// trailing sub-mesh of faces seen by no camera. Earlier cameras take priority:
// a face claimed by camera N is erased from the pool before camera N+1 runs.
// Returns 0 on success, -1 on invalid input.
// NOTE(review): the function-name line is not visible in this dump.
532  const pcl::texture_mapping::CameraVector &cameras, const double octree_voxel_size,
533  PointCloud &visible_pts)
534 {
535  if (tex_mesh.tex_polygons.size () != 1)
536  {
537  PCL_ERROR ("The mesh must contain only 1 sub-mesh!\n");
538  return (-1);
539  }
540 
541  if (cameras.empty ())
542  {
543  PCL_ERROR ("Must provide at least one camera info!\n");
544  return (-1);
545  }
546 
547  // copy mesh
548  sorted_mesh = tex_mesh;
549  // clear polygons from cleaned_mesh
550  sorted_mesh.tex_polygons.clear ();
551 
552  typename pcl::PointCloud<PointInT>::Ptr original_cloud (new pcl::PointCloud<PointInT>);
553  typename pcl::PointCloud<PointInT>::Ptr transformed_cloud (new pcl::PointCloud<PointInT>);
554  typename pcl::PointCloud<PointInT>::Ptr filtered_cloud (new pcl::PointCloud<PointInT>);
555 
556  // load points into a PCL format
557  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *original_cloud);
558 
559  // for each camera
560  for (const auto &camera : cameras)
561  {
562  // get camera pose as transform
563  Eigen::Affine3f cam_trans = camera.pose;
564 
565  // transform original cloud in camera coordinates
566  pcl::transformPointCloud (*original_cloud, *transformed_cloud, cam_trans.inverse ());
567 
568  // find occlusions on transformed cloud
569  pcl::Indices visible, occluded;
570  removeOccludedPoints (transformed_cloud, filtered_cloud, octree_voxel_size, visible, occluded);
// NOTE(review): visible_pts is overwritten on every iteration, so on return it
// holds only the LAST camera's filtered cloud — confirm this is intended.
571  visible_pts = *filtered_cloud;
572 
573  // pushing occluded idxs into a set for faster lookup
574  std::unordered_set<index_t> occluded_set(occluded.cbegin(), occluded.cend());
575 
576  // find visible faces => add them to polygon N for camera N
577  // add polygon group for current camera in clean
578  std::vector<pcl::Vertices> visibleFaces_currentCam;
579  // iterate over the faces of the current mesh
580  for (std::size_t faces = 0; faces < tex_mesh.tex_polygons[0].size (); ++faces)
581  {
582  // check if all the face's points are visible
583  // iterate over face's vertex
584  const auto faceIsVisible = std::all_of(tex_mesh.tex_polygons[0][faces].vertices.cbegin(),
585  tex_mesh.tex_polygons[0][faces].vertices.cend(),
586  [&](const auto& vertex)
587  {
588  if (occluded_set.find(vertex) != occluded_set.cend()) {
589  return false; // point is occluded
590  }
591  // is the point visible to the camera?
592  Eigen::Vector2f dummy_UV;
593  return this->getPointUVCoordinates ((*transformed_cloud)[vertex], camera, dummy_UV);
594  });
595 
596  if (faceIsVisible)
597  {
598  // push current visible face into the sorted mesh
599  visibleFaces_currentCam.push_back (tex_mesh.tex_polygons[0][faces]);
// erase-and-decrement keeps the index valid after removal: the next face has
// shifted into position `faces`, so the loop must revisit the same index
600  // remove it from the unsorted mesh
601  tex_mesh.tex_polygons[0].erase (tex_mesh.tex_polygons[0].begin () + faces);
602  faces--;
603  }
604 
605  }
606  sorted_mesh.tex_polygons.push_back (visibleFaces_currentCam);
607  }
608 
609  // we should only have occluded and non-visible faces left in tex_mesh.tex_polygons[0]
610  // we need to add them as an extra polygon in the sorted mesh
611  sorted_mesh.tex_polygons.push_back (tex_mesh.tex_polygons[0]);
612  return (0);
613 }
614 
615 ///////////////////////////////////////////////////////////////////////////////////////////////
616 template<typename PointInT> void
// showOcclusions (point-cloud overload): produces a PointXYZI copy of the input
// cloud whose intensity channel encodes occlusion — either the clamped number
// of occluders per point (show_nb_occlusions = true) or a binary 0/1 flag.
// NOTE(review): the first two signature lines (input cloud + colored output
// cloud parameters) are not visible in this dump.
619  const double octree_voxel_size, const bool show_nb_occlusions,
620  const int max_occlusions)
621  {
622  // variable used to filter occluded points by depth
623  double maxDeltaZ = octree_voxel_size * 2.0;
624 
625  // create an octree to perform rayTracing
626  Octree octree (octree_voxel_size);
627  // create octree structure
628  octree.setInputCloud (input_cloud);
629  // update bounding box automatically
630  octree.defineBoundingBox ();
631  // add points in the tree
632  octree.addPointsFromInputCloud ();
633 
634  // ray direction
635  Eigen::Vector3f direction;
636 
637  pcl::Indices indices;
638  // point from where we ray-trace
639  pcl::PointXYZI pt;
640 
// NOTE(review): zDist/ptDist are filled and sorted below but never read —
// dead code, presumably left over from debugging.
641  std::vector<double> zDist;
642  std::vector<double> ptDist;
643  // for each point of the cloud, ray-trace toward the camera and check intersected voxels.
644  for (const auto& point: *input_cloud)
645  {
// chained assignment: copies the xyz of the input point into pt AND direction
646  direction = pt.getVector3fMap() = point.getVector3fMap();
647 
648  // get number of occlusions for that point
649  indices.clear ();
// the return value is immediately overwritten by indices.size () below
650  int nbocc = octree.getIntersectedVoxelIndices (direction, -direction, indices);
651 
652  nbocc = static_cast<int> (indices.size ());
653 
654  // TODO need to clean this up and find tricks to remove aliasing effect on planes
655  for (const auto &index : indices)
656  {
657  // if intersected point is on the other side of the camera
658  if (pt.z * (*input_cloud)[index].z < 0)
659  {
660  nbocc--;
661  }
662  else if (std::fabs ((*input_cloud)[index].z - pt.z) <= maxDeltaZ)
663  {
664  // points are very close to each-other, we do not consider the occlusion
665  nbocc--;
666  }
667  else
668  {
669  zDist.push_back (std::fabs ((*input_cloud)[index].z - pt.z));
670  ptDist.push_back (pcl::euclideanDistance ((*input_cloud)[index], pt));
671  }
672  }
673 
// intensity encodes either the (clamped) occluder count or a binary flag
674  if (show_nb_occlusions)
675  (nbocc <= max_occlusions) ? (pt.intensity = static_cast<float> (nbocc)) : (pt.intensity = static_cast<float> (max_occlusions));
676  else
677  (nbocc == 0) ? (pt.intensity = 0) : (pt.intensity = 1);
678 
679  colored_cloud->points.push_back (pt);
680  }
681 
682  if (zDist.size () >= 2)
683  {
684  std::sort (zDist.begin (), zDist.end ());
685  std::sort (ptDist.begin (), ptDist.end ());
686  }
687 }
688 
689 ///////////////////////////////////////////////////////////////////////////////////////////////
690 template<typename PointInT> void
// showOcclusions (mesh overload): converts the mesh's cloud to PCL format and
// delegates to the point-cloud overload above.
// NOTE(review): the first signature line (mesh + colored-cloud parameters) and
// the declaration of `cloud` are not visible in this dump.
692  double octree_voxel_size, bool show_nb_occlusions, int max_occlusions)
693 {
694  // load points into a PCL format
696  pcl::fromPCLPointCloud2 (tex_mesh.cloud, *cloud);
697 
698  showOcclusions (cloud, colored_cloud, octree_voxel_size, show_nb_occlusions, max_occlusions);
699 }
700 
701 ///////////////////////////////////////////////////////////////////////////////////////////////
702 template<typename PointInT> void
// textureMeshwithMultipleCameras: full texturing pipeline for a single-sub-mesh
// TextureMesh. For each camera, faces are projected into the image plane, faces
// occluded by previously-textured or closer faces are rejected via a 2-D
// kd-tree over the projected UV points, visible faces get their UV coordinates,
// and occluded faces are pushed to the next camera. Faces left after the last
// camera receive (-1,-1) sentinel UVs.
// NOTE(review): the function-name line and the declarations of `mesh_cloud`,
// `projections` (UV point cloud) and `kdtree` are not visible in this dump.
704 {
705 
706  if (mesh.tex_polygons.size () != 1)
707  return;
708 
710 
711  pcl::fromPCLPointCloud2 (mesh.cloud, *mesh_cloud);
712 
713  std::vector<pcl::Vertices> faces;
714 
715  for (int current_cam = 0; current_cam < static_cast<int> (cameras.size ()); ++current_cam)
716  {
// NOTE(review): %d with a std::size_t argument (cameras.size ()) — should be %zu.
717  PCL_INFO ("Processing camera %d of %d.\n", current_cam+1, cameras.size ());
718 
719  // transform mesh into camera's frame
720  typename pcl::PointCloud<PointInT>::Ptr camera_cloud (new pcl::PointCloud<PointInT>);
721  pcl::transformPointCloud (*mesh_cloud, *camera_cloud, cameras[current_cam].pose.inverse ());
722 
723  // CREATE UV MAP FOR CURRENT FACES
725  std::vector<bool> visibility;
726  visibility.resize (mesh.tex_polygons[current_cam].size ());
727  std::vector<UvIndex> indexes_uv_to_points;
728  // for each current face
729 
730  //TODO change this
// sentinels used for faces that do not project into the camera's FOV
731  pcl::PointXY nan_point;
732  nan_point.x = std::numeric_limits<float>::quiet_NaN ();
733  nan_point.y = std::numeric_limits<float>::quiet_NaN ();
734  UvIndex u_null;
735  u_null.idx_cloud = -1;
736  u_null.idx_face = -1;
737 
738  int cpt_invisible=0;
739  for (int idx_face = 0; idx_face < static_cast<int> (mesh.tex_polygons[current_cam].size ()); ++idx_face)
740  {
741  //project each vertice, if one is out of view, stop
742  pcl::PointXY uv_coord1;
743  pcl::PointXY uv_coord2;
744  pcl::PointXY uv_coord3;
745 
746  if (isFaceProjected (cameras[current_cam],
747  (*camera_cloud)[mesh.tex_polygons[current_cam][idx_face].vertices[0]],
748  (*camera_cloud)[mesh.tex_polygons[current_cam][idx_face].vertices[1]],
749  (*camera_cloud)[mesh.tex_polygons[current_cam][idx_face].vertices[2]],
750  uv_coord1,
751  uv_coord2,
752  uv_coord3))
753  {
754  // face is in the camera's FOV
755 
// three consecutive UV points per face; this 3-per-face layout is what allows
// the idx_face*3 indexing when UVs are copied out further below
756  // add UV coordinates
757  projections->points.push_back (uv_coord1);
758  projections->points.push_back (uv_coord2);
759  projections->points.push_back (uv_coord3);
760 
761  // remember corresponding face
762  UvIndex u1, u2, u3;
763  u1.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[0];
764  u2.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[1];
765  u3.idx_cloud = mesh.tex_polygons[current_cam][idx_face].vertices[2];
766  u1.idx_face = idx_face; u2.idx_face = idx_face; u3.idx_face = idx_face;
767  indexes_uv_to_points.push_back (u1);
768  indexes_uv_to_points.push_back (u2);
769  indexes_uv_to_points.push_back (u3);
770 
771  //keep track of visibility
772  visibility[idx_face] = true;
773  }
774  else
775  {
// keep the 3-per-face layout even for unprojected faces (NaN placeholders)
776  projections->points.push_back (nan_point);
777  projections->points.push_back (nan_point);
778  projections->points.push_back (nan_point);
779  indexes_uv_to_points.push_back (u_null);
780  indexes_uv_to_points.push_back (u_null);
781  indexes_uv_to_points.push_back (u_null);
782  //keep track of visibility
783  visibility[idx_face] = false;
784  cpt_invisible++;
785  }
786  }
787 
788  // projections contains all UV points of the current faces
789  // indexes_uv_to_points links a uv point to its point in the camera cloud
790  // visibility tells if a face was in the camera FOV (false = skip)
791 
792  // TODO handle case where no face could be projected
793  if (visibility.size () - cpt_invisible !=0)
794  {
795  //create kdtree
797  kdtree.setInputCloud (projections);
798 
799  pcl::Indices idxNeighbors;
800  std::vector<float> neighborsSquaredDistance;
801  // at first (idx_pcam < current_cam), check if some of the faces attached to previous cameras occlude the current faces
802  // then (idx_pcam == current_cam), check for self occlusions. At this stage, we skip faces that were already marked as occluded
803  cpt_invisible = 0;
804  for (int idx_pcam = 0 ; idx_pcam <= current_cam ; ++idx_pcam)
805  {
806  // project all faces
807  for (int idx_face = 0; idx_face < static_cast<int> (mesh.tex_polygons[idx_pcam].size ()); ++idx_face)
808  {
809 
810  if (idx_pcam == current_cam && !visibility[idx_face])
811  {
812  // we are now checking for self occlusions within the current faces
813  // the current face was already declared as occluded.
814  // therefore, it cannot occlude another face anymore => we skip it
815  continue;
816  }
817 
818  // project each vertice, if one is out of view, stop
819  pcl::PointXY uv_coord1;
820  pcl::PointXY uv_coord2;
821  pcl::PointXY uv_coord3;
822 
823  if (isFaceProjected (cameras[current_cam],
824  (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]],
825  (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]],
826  (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]],
827  uv_coord1,
828  uv_coord2,
829  uv_coord3))
830  {
831  // face is in the camera's FOV
832  //get its circumscribed circle
833  double radius;
834  pcl::PointXY center;
835  // getTriangleCircumcenterAndSize (uv_coord1, uv_coord2, uv_coord3, center, radius);
836  getTriangleCircumcscribedCircleCentroid(uv_coord1, uv_coord2, uv_coord3, center, radius); // this function yields faster results than getTriangleCircumcenterAndSize
837 
838  // get points inside circ.circle
839  if (kdtree.radiusSearch (center, radius, idxNeighbors, neighborsSquaredDistance) > 0 )
840  {
841  // for each neighbor
842  for (const auto &idxNeighbor : idxNeighbors)
843  {
// compare the neighbor's depth against the face's farthest vertex: only a
// neighbor strictly behind the whole face can be occluded by it
844  if (std::max ((*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
845  std::max ((*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z,
846  (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]].z))
847  < (*camera_cloud)[indexes_uv_to_points[idxNeighbor].idx_cloud].z)
848  {
849  // neighbor is farther than all the face's points. Check if it falls into the triangle
850  if (checkPointInsideTriangle(uv_coord1, uv_coord2, uv_coord3, (*projections)[idxNeighbor]))
851  {
852  // current neighbor is inside triangle and is closer => the corresponding face
853  visibility[indexes_uv_to_points[idxNeighbor].idx_face] = false;
854  cpt_invisible++;
855  //TODO we could remove the projections of this face from the kd-tree cloud, but I found it slower, and I need the point to keep ordered to query UV coordinates later
856  }
857  }
858  }
859  }
860  }
861  }
862  }
863  }
864 
865  // now, visibility is true for each face that belongs to the current camera
866  // if a face is not visible, we push it into the next one.
867 
868  if (static_cast<int> (mesh.tex_coordinates.size ()) <= current_cam)
869  {
870  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > dummy_container;
871  mesh.tex_coordinates.push_back (dummy_container);
872  }
873  mesh.tex_coordinates[current_cam].resize (3 * visibility.size ());
874 
// compact the visible/occluded faces into separate vectors; sized to the upper
// bound now, shrunk to the actual counts below
875  std::vector<pcl::Vertices> occluded_faces;
876  occluded_faces.resize (visibility.size ());
877  std::vector<pcl::Vertices> visible_faces;
878  visible_faces.resize (visibility.size ());
879 
880  int cpt_occluded_faces = 0;
881  int cpt_visible_faces = 0;
882 
883  for (std::size_t idx_face = 0 ; idx_face < visibility.size () ; ++idx_face)
884  {
885  if (visibility[idx_face])
886  {
// idx_face*3 relies on the 3-UV-points-per-face layout established above
887  // face is visible by the current camera copy UV coordinates
888  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](0) = (*projections)[idx_face*3].x;
889  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](1) = (*projections)[idx_face*3].y;
890 
891  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](0) = (*projections)[idx_face*3 + 1].x;
892  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](1) = (*projections)[idx_face*3 + 1].y;
893 
894  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](0) = (*projections)[idx_face*3 + 2].x;
895  mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](1) = (*projections)[idx_face*3 + 2].y;
896 
897  visible_faces[cpt_visible_faces] = mesh.tex_polygons[current_cam][idx_face];
898 
899  cpt_visible_faces++;
900  }
901  else
902  {
903  // face is occluded copy face into temp vector
904  occluded_faces[cpt_occluded_faces] = mesh.tex_polygons[current_cam][idx_face];
905  cpt_occluded_faces++;
906  }
907  }
908  mesh.tex_coordinates[current_cam].resize (cpt_visible_faces*3);
909 
// occluded faces become the next camera's input sub-mesh
910  occluded_faces.resize (cpt_occluded_faces);
911  mesh.tex_polygons.push_back (occluded_faces);
912 
913  visible_faces.resize (cpt_visible_faces);
914  mesh.tex_polygons[current_cam].clear ();
915  mesh.tex_polygons[current_cam] = visible_faces;
916  }
917 
918  // we have been through all the cameras.
919  // if any faces are left, they were not visible by any camera
920  // we still need to produce uv coordinates for them
921 
922  if (mesh.tex_coordinates.size() <= cameras.size ())
923  {
924  std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > dummy_container;
925  mesh.tex_coordinates.push_back(dummy_container);
926  }
927 
928 
// faces seen by no camera receive the (-1,-1) sentinel UV used elsewhere
929  for(std::size_t idx_face = 0 ; idx_face < mesh.tex_polygons[cameras.size()].size() ; ++idx_face)
930  {
931  Eigen::Vector2f UV1, UV2, UV3;
932  UV1(0) = -1.0; UV1(1) = -1.0;
933  UV2(0) = -1.0; UV2(1) = -1.0;
934  UV3(0) = -1.0; UV3(1) = -1.0;
935  mesh.tex_coordinates[cameras.size()].push_back(UV1);
936  mesh.tex_coordinates[cameras.size()].push_back(UV2);
937  mesh.tex_coordinates[cameras.size()].push_back(UV3);
938  }
939 
940 }
941 
942 ///////////////////////////////////////////////////////////////////////////////////////////////
943 template<typename PointInT> inline void
945 {
946  // we simplify the problem by translating the triangle's origin to its first point
947  pcl::PointXY ptB, ptC;
948  ptB.x = p2.x - p1.x; ptB.y = p2.y - p1.y; // B'=B-A
949  ptC.x = p3.x - p1.x; ptC.y = p3.y - p1.y; // C'=C-A
950 
951  double D = 2.0*(ptB.x*ptC.y - ptB.y*ptC.x); // D'=2(B'x*C'y - B'y*C'x)
952 
953  // Safety check to avoid division by zero
954  if(D == 0)
955  {
956  circomcenter.x = p1.x;
957  circomcenter.y = p1.y;
958  }
959  else
960  {
961  // compute squares once
962  double bx2 = ptB.x * ptB.x; // B'x^2
963  double by2 = ptB.y * ptB.y; // B'y^2
964  double cx2 = ptC.x * ptC.x; // C'x^2
965  double cy2 = ptC.y * ptC.y; // C'y^2
966 
967  // compute circomcenter's coordinates (translate back to original coordinates)
968  circomcenter.x = static_cast<float> (p1.x + (ptC.y*(bx2 + by2) - ptB.y*(cx2 + cy2)) / D);
969  circomcenter.y = static_cast<float> (p1.y + (ptB.x*(cx2 + cy2) - ptC.x*(bx2 + by2)) / D);
970  }
971 
972  radius = std::sqrt( (circomcenter.x - p1.x)*(circomcenter.x - p1.x) + (circomcenter.y - p1.y)*(circomcenter.y - p1.y));//2.0* (p1.x*(p2.y - p3.y) + p2.x*(p3.y - p1.y) + p3.x*(p1.y - p2.y));
973 }
974 
975 ///////////////////////////////////////////////////////////////////////////////////////////////
976 template<typename PointInT> inline void
978 {
979  // compute centroid's coordinates (translate back to original coordinates)
980  circumcenter.x = static_cast<float> (p1.x + p2.x + p3.x ) / 3;
981  circumcenter.y = static_cast<float> (p1.y + p2.y + p3.y ) / 3;
982  double r1 = (circumcenter.x - p1.x) * (circumcenter.x - p1.x) + (circumcenter.y - p1.y) * (circumcenter.y - p1.y) ;
983  double r2 = (circumcenter.x - p2.x) * (circumcenter.x - p2.x) + (circumcenter.y - p2.y) * (circumcenter.y - p2.y) ;
984  double r3 = (circumcenter.x - p3.x) * (circumcenter.x - p3.x) + (circumcenter.y - p3.y) * (circumcenter.y - p3.y) ;
985 
986  // radius
987  radius = std::sqrt( std::max( r1, std::max( r2, r3) )) ;
988 }
989 
990 
991 ///////////////////////////////////////////////////////////////////////////////////////////////
992 template<typename PointInT> inline bool
993 pcl::TextureMapping<PointInT>::getPointUVCoordinates(const PointInT &pt, const Camera &cam, pcl::PointXY &UV_coordinates)
994 {
995  if (pt.z > 0)
996  {
997  // compute image center and dimension
998  double sizeX = cam.width;
999  double sizeY = cam.height;
1000  double cx, cy;
1001  if (cam.center_w > 0)
1002  cx = cam.center_w;
1003  else
1004  cx = sizeX / 2.0;
1005  if (cam.center_h > 0)
1006  cy = cam.center_h;
1007  else
1008  cy = sizeY / 2.0;
1009 
1010  double focal_x, focal_y;
1011  if (cam.focal_length_w > 0)
1012  focal_x = cam.focal_length_w;
1013  else
1014  focal_x = cam.focal_length;
1015  if (cam.focal_length_h > 0)
1016  focal_y = cam.focal_length_h;
1017  else
1018  focal_y = cam.focal_length;
1019 
1020  // project point on camera's image plane
1021  UV_coordinates.x = static_cast<float> ((focal_x * (pt.x / pt.z) + cx) / sizeX); //horizontal
1022  UV_coordinates.y = 1.0f - static_cast<float> ((focal_y * (pt.y / pt.z) + cy) / sizeY); //vertical
1023 
1024  // point is visible!
1025  if (UV_coordinates.x >= 0.0 && UV_coordinates.x <= 1.0 && UV_coordinates.y >= 0.0 && UV_coordinates.y <= 1.0)
1026  return (true); // point was visible by the camera
1027  }
1028 
1029  // point is NOT visible by the camera
1030  UV_coordinates.x = -1.0f;
1031  UV_coordinates.y = -1.0f;
1032  return (false); // point was not visible by the camera
1033 }
1034 
1035 ///////////////////////////////////////////////////////////////////////////////////////////////
1036 template<typename PointInT> inline bool
1038 {
1039  // Compute vectors
1040  Eigen::Vector2d v0, v1, v2;
1041  v0(0) = p3.x - p1.x; v0(1) = p3.y - p1.y; // v0= C - A
1042  v1(0) = p2.x - p1.x; v1(1) = p2.y - p1.y; // v1= B - A
1043  v2(0) = pt.x - p1.x; v2(1) = pt.y - p1.y; // v2= P - A
1044 
1045  // Compute dot products
1046  double dot00 = v0.dot(v0); // dot00 = dot(v0, v0)
1047  double dot01 = v0.dot(v1); // dot01 = dot(v0, v1)
1048  double dot02 = v0.dot(v2); // dot02 = dot(v0, v2)
1049  double dot11 = v1.dot(v1); // dot11 = dot(v1, v1)
1050  double dot12 = v1.dot(v2); // dot12 = dot(v1, v2)
1051 
1052  // Compute barycentric coordinates
1053  double invDenom = 1.0 / (dot00*dot11 - dot01*dot01);
1054  double u = (dot11*dot02 - dot01*dot12) * invDenom;
1055  double v = (dot00*dot12 - dot01*dot02) * invDenom;
1056 
1057  // Check if point is in triangle
1058  return ((u >= 0) && (v >= 0) && (u + v < 1));
1059 }
1060 
1061 ///////////////////////////////////////////////////////////////////////////////////////////////
1062 template<typename PointInT> inline bool
1063 pcl::TextureMapping<PointInT>::isFaceProjected (const Camera &camera, const PointInT &p1, const PointInT &p2, const PointInT &p3, pcl::PointXY &proj1, pcl::PointXY &proj2, pcl::PointXY &proj3)
1064 {
1065  return (getPointUVCoordinates(p1, camera, proj1)
1066  &&
1067  getPointUVCoordinates(p2, camera, proj2)
1068  &&
1069  getPointUVCoordinates(p3, camera, proj3)
1070  );
1071 }
1072 
1073 #define PCL_INSTANTIATE_TextureMapping(T) \
1074  template class PCL_EXPORTS pcl::TextureMapping<T>;
1075 
1076 #endif /* TEXTURE_MAPPING_HPP_ */
KdTreeFLANN is a generic type of 3D spatial locator using kD-tree structures.
Definition: kdtree_flann.h:132
int radiusSearch(const PointT &point, double radius, Indices &k_indices, std::vector< float > &k_sqr_distances, unsigned int max_nn=0) const override
Search for all the nearest neighbors of the query point in a given radius.
void setInputCloud(const PointCloudConstPtr &cloud, const IndicesConstPtr &indices=IndicesConstPtr()) override
Provide a pointer to the input dataset.
shared_ptr< PointCloud< PointT > > Ptr
Definition: point_cloud.h:414
std::vector< PointT, Eigen::aligned_allocator< PointT > > points
The point data.
Definition: point_cloud.h:396
bool getPointUVCoordinates(const PointInT &pt, const Camera &cam, Eigen::Vector2f &UV_coordinates)
computes UV coordinates of point, observed by one particular camera
void mapTexture2MeshUV(pcl::TextureMesh &tex_mesh)
Map texture to a mesh UV mapping.
void mapTexture2Mesh(pcl::TextureMesh &tex_mesh)
Map texture to a mesh synthesis algorithm.
void getTriangleCircumcscribedCircleCentroid(const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, pcl::PointXY &circumcenter, double &radius)
Returns the centroid of a triangle and the corresponding circumscribed circle's radius.
typename PointCloud::Ptr PointCloudPtr
typename Octree::Ptr OctreePtr
bool isPointOccluded(const PointInT &pt, const OctreePtr octree)
Check if a point is occluded using raycasting on octree.
bool checkPointInsideTriangle(const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, const pcl::PointXY &pt)
Returns True if a point lays within a triangle.
bool isFaceProjected(const Camera &camera, const PointInT &p1, const PointInT &p2, const PointInT &p3, pcl::PointXY &proj1, pcl::PointXY &proj2, pcl::PointXY &proj3)
Returns true if all the vertices of one face are projected on the camera's image plane.
void removeOccludedPoints(const PointCloudPtr &input_cloud, PointCloudPtr &filtered_cloud, const double octree_voxel_size, pcl::Indices &visible_indices, pcl::Indices &occluded_indices)
Remove occluded points from a point cloud.
std::vector< Eigen::Vector2f, Eigen::aligned_allocator< Eigen::Vector2f > > mapTexture2Face(const Eigen::Vector3f &p1, const Eigen::Vector3f &p2, const Eigen::Vector3f &p3)
Map texture to a face.
int sortFacesByCamera(pcl::TextureMesh &tex_mesh, pcl::TextureMesh &sorted_mesh, const pcl::texture_mapping::CameraVector &cameras, const double octree_voxel_size, PointCloud &visible_pts)
Segment faces by camera visibility.
void getTriangleCircumcenterAndSize(const pcl::PointXY &p1, const pcl::PointXY &p2, const pcl::PointXY &p3, pcl::PointXY &circumcenter, double &radius)
Returns the circumcenter of a triangle and the circle's radius.
void showOcclusions(const PointCloudPtr &input_cloud, pcl::PointCloud< pcl::PointXYZI >::Ptr &colored_cloud, const double octree_voxel_size, const bool show_nb_occlusions=true, const int max_occlusions=4)
Colors a point cloud, depending on its occlusions.
typename PointCloud::ConstPtr PointCloudConstPtr
void mapMultipleTexturesToMeshUV(pcl::TextureMesh &tex_mesh, pcl::texture_mapping::CameraVector &cams)
Map textures acquired from a set of cameras onto a mesh.
void textureMeshwithMultipleCameras(pcl::TextureMesh &mesh, const pcl::texture_mapping::CameraVector &cameras)
Segment and texture faces by camera visibility.
void defineBoundingBox()
Investigate dimensions of pointcloud data set and define corresponding bounding box for octree.
void setInputCloud(const PointCloudConstPtr &cloud_arg, const IndicesConstPtr &indices_arg=IndicesConstPtr())
Provide a pointer to the input data set.
void addPointsFromInputCloud()
Add points from input point cloud to octree.
Octree pointcloud search class
Definition: octree_search.h:58
uindex_t getIntersectedVoxelIndices(Eigen::Vector3f origin, Eigen::Vector3f direction, Indices &k_indices, uindex_t max_voxel_count=0) const
Get indices of all voxels that are intersected by a ray (origin, direction).
Define standard C methods to do distance calculations.
void transformPointCloud(const pcl::PointCloud< PointT > &cloud_in, pcl::PointCloud< PointT > &cloud_out, const Eigen::Matrix< Scalar, 4, 4 > &transform, bool copy_all_fields)
Apply a rigid transform defined by a 4x4 matrix.
Definition: transforms.hpp:221
std::vector< Camera, Eigen::aligned_allocator< Camera > > CameraVector
detail::int_type_t< detail::index_type_size, detail::index_type_signed > index_t
Type used for an index in PCL.
Definition: types.h:112
void fromPCLPointCloud2(const pcl::PCLPointCloud2 &msg, pcl::PointCloud< PointT > &cloud, const MsgFieldMap &field_map, const std::uint8_t *msg_data)
Convert a PCLPointCloud2 binary data blob into a pcl::PointCloud<T> object using a field_map.
Definition: conversions.h:229
float euclideanDistance(const PointType1 &p1, const PointType2 &p2)
Calculate the euclidean distance between the two given points.
Definition: distances.h:204
IndicesAllocator<> Indices
Type used for indices in PCL.
Definition: types.h:133
std::vector<::pcl::PCLPointField > fields
std::vector< std::uint8_t > data
A 2D point structure representing Euclidean xy coordinates.
std::vector< std::vector< pcl::Vertices > > tex_polygons
polygon which is mapped with specific texture defined in TexMaterial
Definition: TextureMesh.h:95
std::vector< std::vector< Eigen::Vector2f, Eigen::aligned_allocator< Eigen::Vector2f > > > tex_coordinates
UV coordinates.
Definition: TextureMesh.h:97
std::vector< pcl::TexMaterial > tex_materials
define texture material
Definition: TextureMesh.h:99
pcl::PCLPointCloud2 cloud
Definition: TextureMesh.h:90
Structure to store camera pose and focal length.
Structure that links a uv coordinate to its 3D point and face.