/*=========================================================================
This file is part of CustusX, an Image Guided Therapy Application.

Copyright (c) SINTEF Department of Medical Technology.
All rights reserved.

CustusX is released under a BSD 3-Clause license.

See License.txt (https://github.com/SINTEFMedtek/CustusX/blob/master/License.txt) for details.
=========================================================================*/

#include "cxReconstructPreprocessor.h"

#include "cxLogger.h"
#include "cxReconstructCore.h"
#include <vtkImageData.h>
#include <QFileInfo>
#include "cxTime.h"
#include "cxTypeConversions.h"
#include "cxVolumeHelpers.h"
#include "cxTimeKeeper.h"
#include "cxUSFrameData.h"

#include "cxPatientModelService.h"

namespace cx
{

ReconstructPreprocessor::ReconstructPreprocessor(PatientModelServicePtr patientModelService) :
    mInput(ReconstructCore::InputParams()),
    mPatientModelService(patientModelService)
{
    mMaxTimeDiff = 250; // TODO: Change default value for max allowed time difference between tracking and image time tags
    // TODO: Add the other reconstruction parameters?
}

ReconstructPreprocessor::~ReconstructPreprocessor()
{
}

std::vector<ProcessedUSInputDataPtr> ReconstructPreprocessor::createProcessedInput(std::vector<bool> angio)
{
    std::vector<std::vector<vtkImageDataPtr> > frames = mFileData.mUsRaw->initializeFrames(angio);

    std::vector<ProcessedUSInputDataPtr> retval;

    for (unsigned i=0; i<angio.size(); ++i)
    {
        ProcessedUSInputDataPtr input;
        input.reset(new ProcessedUSInputData(frames[i],
                                             mFileData.mFrames,
                                             mFileData.getMask(),
                                             mFileData.mFilename,
                                             QFileInfo(mFileData.mFilename).completeBaseName() ));
        CX_ASSERT(Eigen::Array3i(frames[i][0]->GetDimensions()).isApprox(Eigen::Array3i(mFileData.getMask()->GetDimensions())));
        retval.push_back(input);
    }
    return retval;
}

namespace
{
bool within(int x, int min, int max)
{
    return (x >= min) && (x <= max);
}
}

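/** Apply a linear calibration to the tracking time stamps:
 *  t' = scale * t + offset, for every position in mFileData.mPositions.
 */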
void ReconstructPreprocessor::calibrateTimeStamps(double offset, double scale)
{
    for (unsigned int i = 0; i < mFileData.mPositions.size(); i++)
    {
        mFileData.mPositions[i].mTime = scale * mFileData.mPositions[i].mTime + offset;
    }
}

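/** Estimate a time calibration directly from the input time stamps, assuming the
 *  frame and position sequences cover the same interval:
 *      scale  = (t_frame_last - t_frame_first) / (t_pos_last - t_pos_first)
 *      offset = t_frame_first - scale * t_pos_first
 *  and apply it to the position time stamps via calibrateTimeStamps().
 */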
void ReconstructPreprocessor::alignTimeSeries()
{
    report("Generate time calibration based on input time stamps.");
    double framesSpan = mFileData.mFrames.back().mTime - mFileData.mFrames.front().mTime;
    double positionsSpan = mFileData.mPositions.back().mTime - mFileData.mPositions.front().mTime;
    double scale = framesSpan / positionsSpan;

    double offset = mFileData.mFrames.front().mTime - scale * mFileData.mPositions.front().mTime;

    this->calibrateTimeStamps(offset, scale);
}

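/** Set a crop box on the raw US data based on the probe sector clip rect (clamped
 *  to the image size), and shift the probe definition (clip rect, origin, size)
 *  so that it still matches the cropped images.
 */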
void ReconstructPreprocessor::cropInputData()
{
    ProbeDefinition sector = mFileData.mProbeDefinition.mData;
    IntBoundingBox3D cropbox(sector.getClipRect_p().begin());
    cropbox = this->reduceCropboxToImageSize(cropbox, sector.getSize());
    Eigen::Vector3i shift = cropbox.corner(0,0,0).cast<int>();
    Eigen::Vector3i size = cropbox.range().cast<int>() + Eigen::Vector3i(1,1,0); // convert from extent format to size format by adding 1
    mFileData.mUsRaw->setCropBox(cropbox);

    DoubleBoundingBox3D clipRect_p = sector.getClipRect_p();
    Vector3D origin_p = sector.getOrigin_p();

    for (unsigned i=0; i<3; ++i)
    {
        clipRect_p[2*i] -= shift[i];
        clipRect_p[2*i+1] -= shift[i];
        origin_p[i] -= shift[i];
    }

    sector.setClipRect_p(clipRect_p);
    sector.setOrigin_p(origin_p);
    sector.setSize(QSize(size[0], size[1]));
    mFileData.mProbeDefinition.setData(sector);
}

IntBoundingBox3D ReconstructPreprocessor::reduceCropboxToImageSize(IntBoundingBox3D cropbox, QSize size)
{
    cropbox[0] = std::max(cropbox[0], 0);
    cropbox[2] = std::max(cropbox[2], 0);
    cropbox[4] = std::max(cropbox[4], 0);

    cropbox[1] = std::min(cropbox[1], size.width() - 1);
    cropbox[3] = std::min(cropbox[3], size.height() - 1);

    return cropbox;
}

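/** Apply the extra time calibration from the reconstruction settings
 *  (mInput.mExtraTimeCalibration, in ms) as an offset on the tracking time stamps.
 *  If mInput.mAlignTimestamps is set, the two time series are then re-aligned from
 *  the data (see alignTimeSeries()), which overrides the calibration.
 */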
void ReconstructPreprocessor::applyTimeCalibration()
{
    double timeshift = mInput.mExtraTimeCalibration;
    // The shift is on frames. The calibrate function applies to tracker positions,
    // hence the positive sign. (real use: subtract from frame data)
    // std::cout << "TIMESHIFT " << timeshift << std::endl;
    // timeshift = -timeshift;
    if (!similar(0.0, timeshift))
        report("Applying reconstruction-time calibration to tracking data: " + qstring_cast(timeshift) + "ms");
    this->calibrateTimeStamps(timeshift, 1.0);

    // align the two time series, ignoring any existing calibration
    if (mInput.mAlignTimestamps)
    {
        this->alignTimeSeries();
    }
}

struct RemoveDataType
{
    RemoveDataType() : count(0), err(-1) {}
    void add(double _err) { ++count; err=((err<0)?_err:std::min(_err, err)); }
    int count;
    double err;
};

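/** Smooth the tracking positions with a simple moving-average FIR filter of length
 *  2*mPosFilterStrength+1. Each pose is converted to a quaternion plus translation
 *  (7 values), averaged over the window with edge padding, and converted back to a
 *  transform. Runs only if the filter is enabled and the position sequence is
 *  longer than the filter.
 */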
void ReconstructPreprocessor::filterPositions()
{
    int filterStrength = mInput.mPosFilterStrength;

    if (filterStrength > 0) // Position filter enabled
    {
        int filterLength(1+2*filterStrength);
        int nPositions(mFileData.mPositions.size());
        if (nPositions > filterLength) // Position sequence sufficiently long
        {
            // Define quaternion array
            Eigen::ArrayXXd qPosArray(7,(nPositions+(2*filterStrength))); // Add room for FIR-filtering
            Transform3D localTx;
            Eigen::Quaterniond qA;

            unsigned int sourceIdx(0);
            for (unsigned int i = 0; i < (nPositions+(2*filterStrength)); i++)
            {
                // Convert to quaternions
                sourceIdx = (i > filterStrength) ? (i-filterStrength) : 0; // Pad array with edge elements
                sourceIdx = (sourceIdx < nPositions) ? sourceIdx : (nPositions-1);

                localTx = mFileData.mPositions[sourceIdx].mPos;

                qPosArray.block<3,1>(4,i) = localTx.matrix().block<3, 1>(0,3); // Translation part
                qA = Eigen::Quaterniond(localTx.matrix().block<3, 3>(0,0)); // Convert rotation to quaternion
                qPosArray.block<4,1>(0,i) = qA.coeffs(); // Rotation parameters
            }

            // Filter quaternion arrays (simple averaging filter)
            Eigen::ArrayXXd qPosFiltered = Eigen::ArrayXXd::Zero(7,nPositions); // Fill with zeros
            for (unsigned int i = 0; i < (1+2*filterStrength); i++)
            {
                qPosFiltered = qPosFiltered + qPosArray.block(0,i,7,nPositions);
            }
            qPosFiltered = qPosFiltered / (1+2*filterStrength);

            for (unsigned int i = 0; i < mFileData.mPositions.size(); i++)
            {
                // Convert back to position data
                qA.coeffs() = qPosFiltered.block<4,1>(0,i);
                // Normalize before converting back: the averaged quaternion is no longer unit length
                localTx.matrix().block<3, 3>(0,0) = qA.normalized().toRotationMatrix();
                localTx.matrix().block<3, 1>(0,3) = qPosFiltered.block<3,1>(4,i);
                mFileData.mPositions[i].mPos = localTx;
            }
        }
    }
}

void ReconstructPreprocessor::positionThinning()
{
    // If enabled, try to remove "suspect" data (large jumps etc.)
    // Replace tracking positions that deviate greatly from neighbours with an interpolated value
}

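/** Interpolate a tracking position for each US frame by slerp between the two
 *  tracking positions bracketing the frame time stamp. Frames lying more than
 *  mMaxTimeDiff from either bracketing position are removed, and the removals are
 *  reported.
 */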
void ReconstructPreprocessor::interpolatePositions()
{
    mFileData.mUsRaw->resetRemovedFrames();
    int startFrames = mFileData.mFrames.size();

    std::map<int,RemoveDataType> removedData;

    for (unsigned i_frame = 0; i_frame < mFileData.mFrames.size();)
    {
        std::vector<TimedPosition>::iterator posIter;
        posIter = lower_bound(mFileData.mPositions.begin(), mFileData.mPositions.end(), mFileData.mFrames[i_frame]);

        unsigned i_pos = distance(mFileData.mPositions.begin(), posIter);
        if (i_pos != 0)
            i_pos--;

        if (i_pos >= mFileData.mPositions.size() - 1)
            i_pos = mFileData.mPositions.size() - 2;

        // Remove frames too far from the positions
        // Don't increment frame index since the index now points to the next element
        double timeToPos1 = timeToPosition(i_frame, i_pos);
        double timeToPos2 = timeToPosition(i_frame, i_pos+1);
        if ((timeToPos1 > mMaxTimeDiff) || (timeToPos2 > mMaxTimeDiff))
        {
            removedData[i_frame].add(std::max(timeToPos1, timeToPos2));

            mFileData.mFrames.erase(mFileData.mFrames.begin() + i_frame);
            mFileData.mUsRaw->removeFrame(i_frame);
        }
        else
        {
            double t_delta_tracking = mFileData.mPositions[i_pos + 1].mTime - mFileData.mPositions[i_pos].mTime;
            double t = 0;
            if (!similar(t_delta_tracking, 0))
                t = (mFileData.mFrames[i_frame].mTime - mFileData.mPositions[i_pos].mTime) / t_delta_tracking;
            // mFileData.mFrames[i_frame].mPos = interpolate(mFileData.mPositions[i_pos].mPos, mFileData.mPositions[i_pos + 1].mPos, t);
            mFileData.mFrames[i_frame].mPos = cx::USReconstructInputDataAlgorithm::slerpInterpolate(mFileData.mPositions[i_pos].mPos, mFileData.mPositions[i_pos + 1].mPos, t);
            i_frame++; // Only increment if we didn't delete the frame
        }
    }

    int removeCount=0;
    for (std::map<int,RemoveDataType>::iterator iter=removedData.begin(); iter!=removedData.end(); ++iter)
    {
        int first = iter->first+removeCount;
        int last = first + iter->second.count-1;
        report(QString("Removed input frame [%1-%2]. Time diff=%3").arg(first).arg(last).arg(iter->second.err, 0, 'f', 1));
        removeCount += iter->second.count;
    }

    double removed = double(startFrames - mFileData.mFrames.size()) / double(startFrames);
    if (removed > 0.02)
    {
        double percent = removed * 100;
        if (percent > 1)
        {
            reportWarning("Removed " + QString::number(percent, 'f', 1) + "% of the "+ qstring_cast(startFrames) + " frames.");
        }
        else
        {
            report("Removed " + QString::number(percent, 'f', 1) + "% of the " + qstring_cast(startFrames) + " frames.");
        }
    }
}

double ReconstructPreprocessor::timeToPosition(unsigned i_frame, unsigned i_pos)
{
    return fabs(mFileData.mFrames[i_frame].mTime - mFileData.mPositions[i_pos].mTime);
}

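/** Compute the rectangle (in image coordinates scaled by pixel spacing) enclosing
 *  the nonzero region of the probe mask, shrunk by mInput.mMaskReduce percent on
 *  each side. Returns the four corners as 3D points with z=0.
 */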
std::vector<Vector3D> ReconstructPreprocessor::generateInputRectangle()
{
    std::vector<Vector3D> retval(4);
    vtkImageDataPtr mask = mFileData.getMask();
    if (!mask)
    {
        reportError("ReconstructPreprocessor::generateInputRectangle() requires a mask");
        return retval;
    }
    Eigen::Array3i dims = mFileData.mUsRaw->getDimensions();
    Vector3D spacing = mFileData.mUsRaw->getSpacing();

    Eigen::Array3i maskDims(mask->GetDimensions());

    if (( maskDims[0]<dims[0] )||( maskDims[1]<dims[1] ))
        reportError(QString("input data (%1) and mask (%2) dim mismatch")
                    .arg(qstring_cast(dims))
                    .arg(qstring_cast(maskDims)));

    int xmin = maskDims[0];
    int xmax = 0;
    int ymin = maskDims[1];
    int ymax = 0;

    unsigned char* ptr = static_cast<unsigned char*> (mask->GetScalarPointer());
    for (int x = 0; x < maskDims[0]; x++)
        for (int y = 0; y < maskDims[1]; y++)
        {
            unsigned char val = ptr[x + y * maskDims[0]];
            if (val != 0)
            {
                xmin = std::min(xmin, x);
                ymin = std::min(ymin, y);
                xmax = std::max(xmax, x);
                ymax = std::max(ymax, y);
            }
        }

    // Reduce the output volume by shrinking the mask rectangle when determining output volume size
    double red = mInput.mMaskReduce;
    int reduceX = (xmax - xmin) * (red / 100);
    int reduceY = (ymax - ymin) * (red / 100);

    xmin += reduceX;
    xmax -= reduceX;
    ymin += reduceY;
    ymax -= reduceY;

    retval[0] = Vector3D(xmin * spacing[0], ymin * spacing[1], 0);
    retval[1] = Vector3D(xmax * spacing[0], ymin * spacing[1], 0);
    retval[2] = Vector3D(xmin * spacing[0], ymax * spacing[1], 0);
    retval[3] = Vector3D(xmax * spacing[0], ymax * spacing[1], 0);

    return retval;
}

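/** Determine the output volume orientation (prMdd) from the mOrientation setting:
 *  identity for "PatientReference", or the pose of the middle US frame for
 *  "MiddleFrame". All frame positions are then premultiplied by its inverse.
 */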
Transform3D ReconstructPreprocessor::applyOutputOrientation()
{
    Transform3D prMdd = Transform3D::Identity();

    if (mInput.mOrientation == "PatientReference")
    {
        // identity
    }
    else if (mInput.mOrientation == "MiddleFrame")
    {
        prMdd = mFileData.mFrames[mFileData.mFrames.size() / 2].mPos;
    }
    else
    {
        reportError("no orientation algorithm selected in reconstruction");
    }

    // apply the selected orientation to the frames.
    Transform3D ddMpr = prMdd.inv();
    for (unsigned i = 0; i < mFileData.mFrames.size(); i++)
    {
        // mPos = prMu
        mFileData.mFrames[i].mPos = ddMpr * mFileData.mFrames[i].mPos;
        // mPos = ddMu
    }

    return prMdd;
}

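/** Find the extent of the output volume by transforming the input rectangle of
 *  every frame into the oriented output space and taking the bounding box of the
 *  resulting point cloud. The frame positions are translated so that the volume
 *  origin lies in the bounding-box corner, and mOutputVolumeParams is initialized
 *  with a spacing derived from the input US pixel spacing.
 */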
void ReconstructPreprocessor::findExtentAndOutputTransform()
{
    if (mFileData.mFrames.empty())
        return;
    // A first guess for d'Mu with correct orientation
    Transform3D prMdd = this->applyOutputOrientation();
    //mFrames[i].mPos = d'Mu, d' = only rotation

    // Find extent of all frames as a point cloud
    std::vector<Vector3D> inputRect = this->generateInputRectangle();
    std::vector<Vector3D> outputRect;
    for (unsigned slice = 0; slice < mFileData.mFrames.size(); slice++)
    {
        Transform3D dMu = mFileData.mFrames[slice].mPos;
        for (unsigned i = 0; i < inputRect.size(); i++)
        {
            outputRect.push_back(dMu.coord(inputRect[i]));
        }
    }

    DoubleBoundingBox3D extent = DoubleBoundingBox3D::fromCloud(outputRect);

    // Translate dMu to output volume origo
    Transform3D T_origo = createTransformTranslate(extent.corner(0, 0, 0));
    Transform3D prMd = prMdd * T_origo; // transform from output space to patref, used when storing the volume
    for (unsigned i = 0; i < mFileData.mFrames.size(); i++)
    {
        mFileData.mFrames[i].mPos = T_origo.inv() * mFileData.mFrames[i].mPos;
    }

    // Calculate optimal output image spacing and dimensions based on US frame spacing
    double inputSpacing = std::min(mFileData.mUsRaw->getSpacing()[0], mFileData.mUsRaw->getSpacing()[1]);
    mOutputVolumeParams = OutputVolumeParams(extent, inputSpacing, mInput.mMaxOutputVolumeSize);

    mOutputVolumeParams.set_rMd((mPatientModelService->get_rMpr()) * prMd);
}

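/** Run the preprocessing pipeline on the stored input data: cropping, time
 *  calibration, position filtering, transformation of tracking positions to prMu,
 *  interpolation of one position per frame, and computation of the output volume
 *  extent and transform.
 */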
void ReconstructPreprocessor::updateFromOriginalFileData()
{
    // Crop input data before reconstructing
    this->cropInputData();

    // Only use this if the time stamps have different formats
    // The function assumes that both lists of time stamps start at the same time,
    // and that is not completely correct.
    //this->calibrateTimeStamps();
    // Use the time calibration from the acquisition module
    //this->calibrateTimeStamps(0.0, 1.0);
    this->applyTimeCalibration();

    // Smooth tracking data before further processing
    // User preferences apply
    //this->positionThinning(); // Do thinning before filtering if enabled
    this->filterPositions();

    cx::USReconstructInputDataAlgorithm::transformTrackingPositionsTo_prMu(&mFileData);
    //mPos (in mPositions) is now prMu

    this->interpolatePositions();
    // mFrames: mPos is now prMu

    if (mFileData.mFrames.empty()) // if all positions are filtered out
        return;

    this->findExtentAndOutputTransform();
    //mPos in mFrames is now dMu
}

void ReconstructPreprocessor::initialize(ReconstructCore::InputParams input, USReconstructInputData fileData)
{
    mInput = input;
    mFileData = fileData;
    this->updateFromOriginalFileData();
}


} /* namespace cx */