2D Fusion

<< Click to Display Table of Contents >>

Navigation:  XStream® HDVR® SDK > Advanced Functionality >

2D Fusion

Previous page | Return to chapter overview | Next page

C++ | Java | .NET

Summary

 

When displaying volume data, it may be beneficial to merge the visualizations of two different datasets into a single image. A use case example is the fusion of PET and CT datasets to better visualize tumor locations in the body. The XStream® HDVR® SDK provides utility functions to support rendering images from separate datasets, and blending images from these datasets into a fused image. This feature is referred to as 2D Fusion because it produces a 2D image from separate 3D datasets. The datasets to be used for the fused image do not need to be registered, have the same voxel size, or the same initial orientation.

 

FusionCT_MPR

CT MPR Image

FusionPET

PET Image

2D_Fusion

PET/CT Fusion

 

 

 

 

2D Fusion

 

The first step in producing a 2D Fusion image is to load each of the two separate datasets. The rendering parameters for these two datasets must be configured so that the view position and relative orientation is the same in each. This is to ensure that the object of study is being viewed from the same perspective. Appropriate transfer functions should be set for each dataset to highlight the features of interest.

 

// Load and apply data from dataset 1.

pServer->LoadDicomDirectory(&pVolumePET, "/Datasets/PET_CT/PET");

pServer->CreateOctree(&pOctreePET, pVolumePET);

pEngineCT->SetVolumeData(pVolumePET, pOctreePET);

 

// Load and apply a preset file to dataset 1.

pPresetUtils->ReadParamsFromXml("/Presets/set_pt_fusion_heat_map.xml", &rpPET);

pEngine->SetRenderParams(&rpPET);

 

// Load and apply data from dataset 2.

pServer->LoadDicomDirectory(&pVolumeCT, "/Datasets/PET_CT/CT");

pServer->CreateOctree(&pOctreeCT, pVolumeCT);

pEnginePET->SetVolumeData(pVolumeCT, pOctreeCT);

 

// Load and apply a preset file to dataset 2.

pPresetUtils->ReadParamsFromXml("/Presets/set_ct_fusion_mip.xml", &rpCT);

pEngine->SetRenderParams(&rpCT);

 

// Load and apply data from dataset 1 (PET).

volumePET = server.loadDicomDirectory("/Datasets/PET_CT/PET");

octreePET = server.createOctree(volumePET.getId());

enginePET.setVolumeData(volumePET, octreePET);

 

// Load and apply a preset file to dataset 1 (PET).

rpPET.loadFromXML("/Presets/set_pt_fusion_heat_map.xml");

enginePET.setRenderParams(rpPET);

 

// Load and apply data from dataset 2 (CT).

volumeCT = server.loadDicomDirectory("/Datasets/PET_CT/CT");

octreeCT = server.createOctree(volumeCT.getId());

engineCT.setVolumeData(volumeCT, octreeCT);

 

// Load and apply a preset file to dataset 2 (CT).

rpCT.loadFromXML("/Presets/set_ct_fusion_mip.xml");

engineCT.setRenderParams(rpCT);

 

The second step is to calculate an appropriate value for the RENDER_PARAMS.Zoom field to be applied to the second image, relative to the first. This is to ensure that the number of rendered pixels per voxel is the same in both images. The viewpoint position and orientation should be the same for each image. The result will be two images with the same apparent voxel size, regardless of the actual original dataset voxel size.

 

// Get volume data params for alignment calculations

VOLUME_DATA_PARAMS vdpPET, vdpCT;

pVolumePET->GetVolumeDataParams(&vdpPET);

pVolumeCT->GetVolumeDataParams(&vdpCT);

 

// Calculate appropriate zoom value so two images have the same relative size.

C3DHelpers helpers;

double srcPixPerMilli;

double targetZoom;

 

// Match transform and zooms so the CT and PET images line up.

helpers.GetPixelsPerMillimeter(&srcPixPerMilli, vdp1.Spacing.x, rpPET.Zoom);

helpers.GetZoomForPixelsPerMillimeter(&targetZoom, vdp2.Spacing.x, srcPixPerMilli);

rpCT.Zoom = (float)targetZoom;

 

// Transform values must be equal for images to line up correctly.

rpCT.Transform = rpPET.Transform;

 

// Apply the modified RENDER_PARAMS to the engine.

rr = pEngineCT->SetRenderParams(&rpCT);

 

// Match transform and zooms so the CT and PET images line up.

VOLUME_DATA_PARAMS vdpPET = new VOLUME_DATA_PARAMS();

VOLUME_DATA_PARAMS vdpCT = new VOLUME_DATA_PARAMS();

vdpPET = volumePET.getVolumeDataParams();

vdpCT = volumeCT.getVolumeDataParams();

 

// Calculate appropriate zoom value so two images have the same relative size.

double srcPixPerMilli = hdrc3DHelpers.getPixelsPerMillimeter(vdpPET.Spacing.x, rpPET.Zoom); ;

double targetZoom = hdrc3DHelpers.getZoomForPixelsPerMillimeter(vdpCT.Spacing.x, srcPixPerMilli);

rpCT.Zoom = (float)targetZoom;

 

// Transform values must be equal for images to line up correctly.

rpCT.Transform = rpPET.Transform;

 

// Apply the loaded RENDER_PARAMS to the engine.

engine.setRenderParams(rpCT);

 

The final step is to render an image from each of the separate datasets, and then blend the two images into a single fused image.

 

VOLVISIMAGE imReq,imRes;

BCOZEROMEMORY(imReq);

BCOZEROMEMORY(imRes);

 

// Render an interactive quality image first.

imReq.Stage = RENDER_STAGE_PROGR0;

pEngineCT->Render(&imReq, &imRes);

 

// Render a final quality image.

imReq.Stage = RENDER_STAGE_FINAL;

pEngineCT->Render(&imReq, &imRes);

 

// Allocate an array for the fusion image.

unsigned char *pFusionImage = new unsigned char[imRes.DataSize];

 

// Copy the CT image to the fusion buffer.

memcpy(pFusionImage, imRes.Data, imRes.DataSize);

 

// Render an interactive quality image first.

pEnginePET->Render(&imReq, &imRes);

 

// Render a final quality image.

imReq.Stage = RENDER_STAGE_FINAL;

pEnginePET->Render(&imReq, &imRes);

 

// Perform an additive blend on the CT and PET images and copy to the fusion buffer.

for(int i=0; i < imRes.DataSize; i++) {

   int val0 = (pFusionImage[i] < 0) ? ((int)pFusionImage[i])+256 : pFusionImage[i];

   int val1 = (imRes.Data[i] < 0) ? ((int)imRes.Data[i])+256 : imRes.Data[i];

                 

   if(val0 + val1 < 255)

      pFusionImage[i] = val0 + val1;

   else

      pFusionImage[i] = 255;

}

 

VOLVISIMAGE imReq = new VOLVISIMAGE();

VOLVISIMAGE imRes = new VOLVISIMAGE();

 

// Render an interactive quality image first.

imReq.Stage = hdrcDefines.RENDER_STAGE_PROGR0;

engineCT.render(imReq, imRes);

 

// Render a final quality image.

imReq.Stage = hdrcDefines.RENDER_STAGE_FINAL;

engineCT.render(imReq, imRes);

 

// Allocate an array for the fusion image.

byte[] fusionImage = new byte[imRes.DataSize];

 

// Copy the CT image to the fusion buffer.

fusionImage = Arrays.copyOf(imRes.Data, imRes.DataSize);

 

// Render an interactive quality image first.

imReq.Stage = hdrcDefines.RENDER_STAGE_PROGR0;

enginePT.render(imReq, imRes);

 

// Render a final quality image.

imReq.Stage = hdrcDefines.RENDER_STAGE_FINAL;

enginePT.render(imReq, imRes);

 

// Perform an additive blend on the CT and PET images and copy to the fusion buffer.

for(int i=0; i < imRes.DataSize; i++) {

   int val0 = (fusionImage[i] < 0) ? ((int)fusionImage[i])+256 : fusionImage[i];

   int val1 = (imRes.Data[i] < 0) ? ((int)imRes.Data[i])+256 : imRes.Data[i];

                 

   if(val0 + val1 < 255)

      fusionImage[i] = (byte)(val0 + val1);

   else

      fusionImage[i] = 255;

}

 

VOLVISIMAGE imReq = new VOLVISIMAGE();

VOLVISIMAGE imRes = new VOLVISIMAGE();

 

// Render an interactive quality CT image first.

imReq.Stage = hdrcDefines.__Fields.RENDER_STAGE_PROGR0;

engineCT.render(imReq, imRes);

 

// Render a final quality CT image.

imReq.Stage = hdrcDefines.__Fields.RENDER_STAGE_FINAL;

engineCT.render(imReq, imRes);

 

// Allocate an array for the fusion image.

byte[] fusionImage = new byte[imRes.DataSize];

 

// Copy the CT image to the fusion buffer.

Array.Copy(imRes.Data, fusionImage, imRes.DataSize);

 

// Render an interactive quality PET image first.

imReq.Stage = hdrcDefines.__Fields.RENDER_STAGE_PROGR0;

enginePET.render(imReq, imRes);

 

// Render a final quality PET image.

imReq.Stage = hdrcDefines.__Fields.RENDER_STAGE_FINAL;

enginePET.render(imReq, imRes);

 

// Perform an additive blend on the CT and PET images, clamped to 255, and
// store the result in the fusion buffer.

for(int i=0; i < imRes.DataSize; i++) {

   // .NET byte is unsigned, so fusionImage values are already in [0, 255].
   int val0 = fusionImage[i];

   // imRes.Data may hold signed bytes; map negative values into [0, 255].
   // NOTE(review): no-op if Data's element type is byte — confirm.
   int val1 = (imRes.Data[i] < 0) ? ((int)imRes.Data[i])+256 : imRes.Data[i];

   int sum = val0 + val1;

   fusionImage[i] = (byte)((sum < 255) ? sum : 255);

}