Fraxinus  18.10
An IGT application
cxReconstructPreprocessor.cpp
Go to the documentation of this file.
1 /*=========================================================================
2 This file is part of CustusX, an Image Guided Therapy Application.
3 
4 Copyright (c) SINTEF Department of Medical Technology.
5 All rights reserved.
6 
7 CustusX is released under a BSD 3-Clause license.
8 
9 See License.txt (https://github.com/SINTEFMedtek/CustusX/blob/master/License.txt) for details.
10 =========================================================================*/
11 
12 
14 
15 #include "cxLogger.h"
16 #include "cxReconstructCore.h"
17 #include <vtkImageData.h>
18 #include <QFileInfo>
19 #include "cxTime.h"
20 #include "cxTypeConversions.h"
22 #include "cxVolumeHelpers.h"
24 #include "cxTimeKeeper.h"
25 #include "cxUSFrameData.h"
26 
28 #include "cxPatientModelService.h"
29 #include "cxMathUtils.h"
30 
31 #include <QThread>
32 
33 namespace cx
34 {
35 
// NOTE(review): the constructor signature line (source line 36) is not visible
// in this extract; what follows is its initializer list and body.
37  mInput(ReconstructCore::InputParams()),
38  mPatientModelService(patientModelService)
39 {
40  mMaxTimeDiff = 250; // TODO: Change default value for max allowed time difference between tracking and image time tags
41  // TODO: Add the other reconstruction parameters here?
42  //
43  //
44 }
45 
47 {
48 }
49 
// Split the raw US stream into one ProcessedUSInputData per entry in 'angio'
// (initializeFrames performs the angio/grayscale extraction), packaging each
// frame set with the shared frame positions, probe mask and filename.
50 std::vector<ProcessedUSInputDataPtr> ReconstructPreprocessor::createProcessedInput(std::vector<bool> angio)
51 {
52 
53  std::vector<std::vector<vtkImageDataPtr> > frames = mFileData.mUsRaw->initializeFrames(angio);
54 
55  std::vector<ProcessedUSInputDataPtr> retval;
56 
// NOTE(review): the declaration of 'input' (source line 59,
// ProcessedUSInputDataPtr) is not visible in this extract.
57  for (unsigned i=0; i<angio.size(); ++i)
58  {
60  input.reset(new ProcessedUSInputData(frames[i],
61  mFileData.mFrames,
62  mFileData.getMask(),
63  mFileData.mFilename,
64  QFileInfo(mFileData.mFilename).completeBaseName() ));
// Sanity check: each extracted frame must have the same 2D dimensions as the mask.
65  CX_ASSERT(Eigen::Array3i(frames[i][0]->GetDimensions()).isApprox(Eigen::Array3i(mFileData.getMask()->GetDimensions())));
66  retval.push_back(input);
67  }
68  return retval;
69 }
70 
namespace
{
/** Inclusive range check: true when min <= x <= max. */
bool within(int x, int min, int max)
{
	if (x < min)
		return false;
	return x <= max;
}
}
78 
88 void ReconstructPreprocessor::calibrateTimeStamps(double offset, double scale)
89 {
90  for (unsigned int i = 0; i < mFileData.mPositions.size(); i++)
91  {
92  mFileData.mPositions[i].mTime = scale * mFileData.mPositions[i].mTime + offset;
93  }
94 }
95 
102 void ReconstructPreprocessor::alignTimeSeries()
103 {
104  report("Generate time calibration based on input time stamps.");
105  double framesSpan = mFileData.mFrames.back().mTime - mFileData.mFrames.front().mTime;
106  double positionsSpan = mFileData.mPositions.back().mTime - mFileData.mPositions.front().mTime;
107  double scale = framesSpan / positionsSpan;
108 
109  double offset = mFileData.mFrames.front().mTime - scale * mFileData.mPositions.front().mTime;
110 
111  this->calibrateTimeStamps(offset, scale);
112 }
113 
117 void ReconstructPreprocessor::cropInputData()
118 {
119  //IntBoundingBox3D
120  ProbeDefinition sector = mFileData.mProbeDefinition.mData;
121  IntBoundingBox3D cropbox(sector.getClipRect_p().begin());
122  cropbox = this->reduceCropboxToImageSize(cropbox, sector.getSize());
123  Eigen::Vector3i shift = cropbox.corner(0,0,0).cast<int>();
124  Eigen::Vector3i size = cropbox.range().cast<int>() + Eigen::Vector3i(1,1,0); // convert from extent format to size format by adding 1
125  mFileData.mUsRaw->setCropBox(cropbox);
126 
127  DoubleBoundingBox3D clipRect_p = sector.getClipRect_p();
128  Vector3D origin_p = sector.getOrigin_p();
129 
130  for (unsigned i=0; i<3; ++i)
131  {
132  clipRect_p[2*i] -= shift[i];
133  clipRect_p[2*i+1] -= shift[i];
134  origin_p[i] -= shift[i];
135  }
136 
137  sector.setClipRect_p(clipRect_p);
138  sector.setOrigin_p(origin_p);
139  sector.setSize(QSize(size[0], size[1]));
140  mFileData.mProbeDefinition.setData(sector);
141 }
142 
143 IntBoundingBox3D ReconstructPreprocessor::reduceCropboxToImageSize(IntBoundingBox3D cropbox, QSize size)
144 {
145  cropbox[0] = std::max(cropbox[0], 0);
146  cropbox[2] = std::max(cropbox[2], 0);
147  cropbox[4] = std::max(cropbox[4], 0);
148 
149  cropbox[1] = std::min(cropbox[1], size.width() - 1);
150  cropbox[3] = std::min(cropbox[3], size.height() - 1);
151 
152  return cropbox;
153 }
154 
164 void ReconstructPreprocessor::applyTimeCalibration()
165 {
166  double timeshift = mInput.mExtraTimeCalibration;
167  // The shift is on frames. The calibrate function applies to tracker positions,
168  // hence the positive sign. (real use: subtract from frame data)
169  // std::cout << "TIMESHIFT " << timeshift << std::endl;
170  // timeshift = -timeshift;
171  if (!similar(0.0, timeshift))
172  report("Applying reconstruction-time calibration to tracking data: " + qstring_cast(
173  timeshift) + "ms");
174  this->calibrateTimeStamps(timeshift, 1.0);
175 
176  // ignore calibrations
177  if (mInput.mAlignTimestamps)
178  {
179  this->alignTimeSeries();
180  }
181 }
182 
183 
// Accumulates statistics for one contiguous run of removed frames: how many
// frames were dropped and the smallest time-difference error among them.
// NOTE(review): the 'struct RemoveDataType' declaration line (source line 184)
// is not visible in this extract.
185 {
186  RemoveDataType() : count(0), err(-1) {}
// add(): count one removed frame; err tracks the minimum error seen, with -1
// meaning "no sample yet".
187  void add(double _err) { ++count; err=((err<0)?_err:std::min(_err, err)); }
188  int count;
189  double err;
190 };
191 
192 void ReconstructPreprocessor::filterPositions()
193 {
194  int filterStrength = mInput.mPosFilterStrength;
195 
196  if (filterStrength > 0) //Position filter enabled
197  {
198  int filterLength(1+2*filterStrength);
199  int nPositions(mFileData.mPositions.size());
200  if (nPositions > filterLength) //Position sequence sufficient long?
201  {
202  // Init array to hold positions converted to quaternions:
203  int nQuaternions = nPositions+(2*filterStrength); // Add room for FIR-filtering
204  Eigen::ArrayXXd qPosArray(7,nQuaternions);
205 
206  // Convert to quaternions:
207  for (unsigned int i = 0; i < nQuaternions; i++) //For each pose (Tx), with edge padding
208  {
209  unsigned int sourceIdx = (i > filterStrength) ? (i-filterStrength) : 0; // Index in Tx array, pad with edge elements
210  sourceIdx = (sourceIdx < nPositions) ? sourceIdx : (nPositions-1);
211  qPosArray.col(i) = matrixToQuaternion(mFileData.mPositions[sourceIdx].mPos); // Convert each Tx to quaternions
212  }
213 
214  // Filter quaternion arrays (simple averaging filter):
215  Eigen::ArrayXXd qPosFiltered = Eigen::ArrayXXd::Zero(7,nPositions); // Fill with zeros
216  for (unsigned int i = 0; i < filterLength; i++)
217  {
218  qPosFiltered = qPosFiltered + qPosArray.block(0,i,7,nPositions);
219  }
220  qPosFiltered = qPosFiltered / filterLength;
221 
222  // Convert back to Tx:
223  for (unsigned int i = 0; i < mFileData.mPositions.size(); i++) //For each pose after filtering
224  {
225  // Convert back to position data
226  mFileData.mPositions[i].mPos = quaternionToMatrix(qPosFiltered.col(i));
227  }
228  }
229  }
230 
231 }
232 
233 void ReconstructPreprocessor::positionThinning()
234 {
// Intentionally empty: placeholder for future tracking-outlier removal.
235  // If enabled, try to remove "suspect" data (large jumps etc.)
236  // Replace tracking positions that deviate greatly from neighbours with an interpolated value
237 
238 }
239 
240 
247 void ReconstructPreprocessor::interpolatePositions()
248 {
249  mFileData.mUsRaw->resetRemovedFrames();
250  int startFrames = mFileData.mFrames.size();
251 
252  std::map<int,RemoveDataType> removedData;
253 
254  for (unsigned i_frame = 0; i_frame < mFileData.mFrames.size();)
255  {
256  std::vector<TimedPosition>::iterator posIter;
257  posIter = lower_bound(mFileData.mPositions.begin(), mFileData.mPositions.end(), mFileData.mFrames[i_frame]);
258 
259  unsigned i_pos = distance(mFileData.mPositions.begin(), posIter);
260  if (i_pos != 0)
261  i_pos--;
262 
263  if (i_pos >= mFileData.mPositions.size() - 1)
264  i_pos = mFileData.mPositions.size() - 2;
265 
266  // Remove frames too far from the positions
267  // Don't increment frame index since the index now points to the next element
268  double timeToPos1 = timeToPosition(i_frame, i_pos);
269  double timeToPos2 = timeToPosition(i_frame, i_pos+1);
270  if ((timeToPos1 > mMaxTimeDiff) || (timeToPos2 > mMaxTimeDiff))
271  {
272  removedData[i_frame].add(std::max(timeToPos1, timeToPos2));
273 
274  mFileData.mFrames.erase(mFileData.mFrames.begin() + i_frame);
275  mFileData.mUsRaw->removeFrame(i_frame);
276  }
277  else
278  {
279  double t_delta_tracking = mFileData.mPositions[i_pos + 1].mTime - mFileData.mPositions[i_pos].mTime;
280  double t = 0;
281  if (!similar(t_delta_tracking, 0))
282  t = (mFileData.mFrames[i_frame].mTime - mFileData.mPositions[i_pos].mTime) / t_delta_tracking;
283  // mFileData.mFrames[i_frame].mPos = interpolate(mFileData.mPositions[i_pos].mPos, mFileData.mPositions[i_pos + 1].mPos, t);
284  mFileData.mFrames[i_frame].mPos = cx::USReconstructInputDataAlgorithm::slerpInterpolate(mFileData.mPositions[i_pos].mPos, mFileData.mPositions[i_pos + 1].mPos, t);
285  i_frame++;// Only increment if we didn't delete the frame
286  }
287  }
288 
289  int removeCount=0;
290  for (std::map<int,RemoveDataType>::iterator iter=removedData.begin(); iter!=removedData.end(); ++iter)
291  {
292  int first = iter->first+removeCount;
293  int last = first + iter->second.count-1;
294  report(QString("Removed input frame [%1-%2]. Time diff=%3").arg(first).arg(last).arg(iter->second.err, 0, 'f', 1));
295  removeCount += iter->second.count;
296  }
297 
298  double removed = double(startFrames - mFileData.mFrames.size()) / double(startFrames);
299  if (removed > 0.02)
300  {
301  double percent = removed * 100;
302  if (percent > 1)
303  {
304  reportWarning("Removed " + QString::number(percent, 'f', 1) + "% of the "+ qstring_cast(startFrames) + " frames.");
305  }
306  else
307  {
308  report("Removed " + QString::number(percent, 'f', 1) + "% of the " + qstring_cast(startFrames) + " frames.");
309  }
310  }
311 }
312 
313 
314 double ReconstructPreprocessor::timeToPosition(unsigned i_frame, unsigned i_pos)
315 {
316  return fabs(mFileData.mFrames[i_frame].mTime - mFileData.mPositions[i_pos].mTime);
317 }
318 
322 std::vector<Vector3D> ReconstructPreprocessor::generateInputRectangle()
323 {
324  std::vector<Vector3D> retval(4);
325  vtkImageDataPtr mask = mFileData.getMask();
326  if (!mask)
327  {
328  reportError("Reconstructer::generateInputRectangle() + requires mask");
329  return retval;
330  }
331  Eigen::Array3i dims = mFileData.mUsRaw->getDimensions();
332  Vector3D spacing = mFileData.mUsRaw->getSpacing();
333 
334  Eigen::Array3i maskDims(mask->GetDimensions());
335 
336  if (( maskDims[0]<dims[0] )||( maskDims[1]<dims[1] ))
337  reportError(QString("input data (%1) and mask (%2) dim mimatch")
338  .arg(qstring_cast(dims))
339  .arg(qstring_cast(maskDims)));
340 
341  int xmin = maskDims[0];
342  int xmax = 0;
343  int ymin = maskDims[1];
344  int ymax = 0;
345 
346  unsigned char* ptr = static_cast<unsigned char*> (mask->GetScalarPointer());
347  for (int x = 0; x < maskDims[0]; x++)
348  for (int y = 0; y < maskDims[1]; y++)
349  {
350  unsigned char val = ptr[x + y * maskDims[0]];
351  if (val != 0)
352  {
353  xmin = std::min(xmin, x);
354  ymin = std::min(ymin, y);
355  xmax = std::max(xmax, x);
356  ymax = std::max(ymax, y);
357  }
358  }
359 
360  //Test: reduce the output volume by reducing the mask when determining
361  // output volume size
362  double red = mInput.mMaskReduce;
363  int reduceX = (xmax - xmin) * (red / 100);
364  int reduceY = (ymax - ymin) * (red / 100);
365 
366  xmin += reduceX;
367  xmax -= reduceX;
368  ymin += reduceY;
369  ymax -= reduceY;
370 
371  retval[0] = Vector3D(xmin * spacing[0], ymin * spacing[1], 0);
372  retval[1] = Vector3D(xmax * spacing[0], ymin * spacing[1], 0);
373  retval[2] = Vector3D(xmin * spacing[0], ymax * spacing[1], 0);
374  retval[3] = Vector3D(xmax * spacing[0], ymax * spacing[1], 0);
375 
376  return retval;
377 }
378 
385 Transform3D ReconstructPreprocessor::applyOutputOrientation()
386 {
387  Transform3D prMdd = Transform3D::Identity();
388 
389  if (mInput.mOrientation == "PatientReference")
390  {
391  // identity
392  }
393  else if (mInput.mOrientation == "MiddleFrame")
394  {
395  prMdd = mFileData.mFrames[mFileData.mFrames.size() / 2].mPos;
396  }
397  else
398  {
399  reportError("no orientation algorithm selected in reconstruction");
400  }
401 
402  // apply the selected orientation to the frames.
403  Transform3D ddMpr = prMdd.inv();
404  for (unsigned i = 0; i < mFileData.mFrames.size(); i++)
405  {
406  // mPos = prMu
407  mFileData.mFrames[i].mPos = ddMpr * mFileData.mFrames[i].mPos;
408  // mPos = ddMu
409  }
410 
411  return prMdd;
412 }
413 
// Compute the output volume's extent and its transform to patient reference:
// orient the frames, gather all transformed frame-rectangle corners as a
// point cloud, derive the enclosing extent, translate to the volume origo,
// and store the result in mOutputVolumeParams.
423 void ReconstructPreprocessor::findExtentAndOutputTransform()
424 {
425  if (mFileData.mFrames.empty())
426  return;
427  // A first guess for d'Mu with correct orientation
428  Transform3D prMdd = this->applyOutputOrientation();
429  //mFrames[i].mPos = d'Mu, d' = only rotation
430 
431  // Find extent of all frames as a point cloud
432  std::vector<Vector3D> inputRect = this->generateInputRectangle();
433  std::vector<Vector3D> outputRect;
434  for (unsigned slice = 0; slice < mFileData.mFrames.size(); slice++)
435  {
436  Transform3D dMu = mFileData.mFrames[slice].mPos;
437  for (unsigned i = 0; i < inputRect.size(); i++)
438  {
439  outputRect.push_back(dMu.coord(inputRect[i]));
440  }
441  }
442 
// NOTE(review): 'extent' is presumably computed here from the outputRect
// point cloud (source line 443, e.g. DoubleBoundingBox3D::fromCloud) — the
// line is missing from this extract; verify against the repository.
444 
445  // Translate dMu to output volume origo
446  Transform3D T_origo = createTransformTranslate(extent.corner(0, 0, 0));
447  Transform3D prMd = prMdd * T_origo; // transform from output space to patref, use when storing volume.
448  for (unsigned i = 0; i < mFileData.mFrames.size(); i++)
449  {
450  mFileData.mFrames[i].mPos = T_origo.inv() * mFileData.mFrames[i].mPos;
451  }
452 
453  // Calculate optimal output image spacing and dimensions based on US frame spacing
454  double inputSpacing = std::min(mFileData.mUsRaw->getSpacing()[0], mFileData.mUsRaw->getSpacing()[1]);
455  mOutputVolumeParams = OutputVolumeParams(extent, inputSpacing, mInput.mMaxOutputVolumeSize);
456 
457  mOutputVolumeParams.set_rMd((mPatientModelService->get_rMpr()) * prMd);
458 }
459 
// Run the full preprocessing pipeline on the stored input data: crop to the
// probe sector, apply time calibration, smooth tracking data, interpolate a
// pose per frame, and compute the output volume extent/transform.
463 void ReconstructPreprocessor::updateFromOriginalFileData()
464 {
465  // Crop the input data to the probe sector before reconstructing.
466  this->cropInputData();
467 
468  // Only use this if the time stamps have different formats
469  // The function assumes that both lists of time stamps start at the same time,
470  // and that is not completely correct.
471  //this->calibrateTimeStamps();
472  // Use the time calibration from the acquisition module
473  //this->calibrateTimeStamps(0.0, 1.0);
474  this->applyTimeCalibration();
475 
476  // Smooth tracking data before further processing
477  // User preferences apply
478  //this->positionThinning(); //Do thinning before filtering if enabled
479  this->filterPositions();
480 
// NOTE(review): a call transforming the tracking positions to prMu (source
// line 481, presumably USReconstructInputDataAlgorithm::
// transformTrackingPositionsTo_prMu) is missing from this extract — the
// comment below refers to its effect; verify against the repository.
482  //mPos (in mPositions) is now prMu
483 
484  this->interpolatePositions();
485  // mFrames: now mPos as prMu
486 
487  if (mFileData.mFrames.empty()) // if all positions are filtered out
488  return;
489 
490  this->findExtentAndOutputTransform();
491  //mPos in mFrames is now dMu
492 }
493 
// initialize(): store the reconstruction parameters and US input data, then
// run the full preprocessing pipeline on them. NOTE(review): the signature
// line (source line 494) is not visible in this extract.
495 {
496  mInput = input;
497  mFileData = fileData;
498  this->updateFromOriginalFileData();
499 }
500 
501 
502 } /* namespace cx */
QString qstring_cast(const T &val)
Transform3D quaternionToMatrix(Eigen::ArrayXd qArray)
Definition: cxMathUtils.cpp:47
void reportError(QString msg)
Definition: cxLogger.cpp:71
Vector3D corner(int x, int y, int z) const
#define CX_ASSERT(statement)
Definition: cxLogger.h:116
Transform3D Transform3D
Transform3D is a representation of an affine 3D transform.
std::vector< TimedPosition > mFrames
Helper struct for sending and controlling output volume properties.
static void transformTrackingPositionsTo_prMu(USReconstructInputData *data)
Eigen::ArrayXd matrixToQuaternion(Transform3D Tx)
Definition: cxMathUtils.cpp:34
static DoubleBoundingBox3D fromCloud(std::vector< Vector3D > cloud)
Vector3D getOrigin_p() const
void setOrigin_p(Vector3D origin_p)
ReconstructPreprocessor(PatientModelServicePtr patientModelService)
void reportWarning(QString msg)
Definition: cxLogger.cpp:70
void setClipRect_p(DoubleBoundingBox3D clipRect_p)
boost::shared_ptr< class PatientModelService > PatientModelServicePtr
Transform3D createTransformTranslate(const Vector3D &translation)
std::vector< ProcessedUSInputDataPtr > createProcessedInput(std::vector< bool > angio)
Representation of an integer bounding box in 3D. The data are stored as {xmin,xmax,ymin,ymax,zmin,zmax}, in order to simplify communication with vtk.
Representation of a floating-point bounding box in 3D. The data are stored as {xmin,xmax,ymin,ymax,zmin,zmax}, in order to simplify communication with vtk.
std::vector< TimedPosition > mPositions
ProbeDefinition mData
Definition: cxProbeSector.h:54
Eigen::Vector3d Vector3D
Vector3D is a representation of a point or vector in 3D.
Definition: cxVector3D.h:42
Definition of characteristics for an Ultrasound Probe Sector.
Eigen::Vector3i corner(int x, int y, int z) const
void report(QString msg)
Definition: cxLogger.cpp:69
bool similar(const CameraInfo &lhs, const CameraInfo &rhs, double tol)
DoubleBoundingBox3D getClipRect_p() const
static Transform3D slerpInterpolate(const Transform3D &a, const Transform3D &b, double t)
QString mFilename
filename used for current data read
boost::shared_ptr< class ProcessedUSInputData > ProcessedUSInputDataPtr
vtkSmartPointer< class vtkImageData > vtkImageDataPtr
USFrameDataPtr mUsRaw
All imported US data frames with pointers to each frame.
void initialize(ReconstructCore::InputParams input, USReconstructInputData fileData)
void setSize(QSize size)
Algorithm part of reconstruction - no dependencies on parameter classes.
void setData(ProbeDefinition data)
Namespace for all CustusX production code.