Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 70 additions & 32 deletions merlin/analysis/decode.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,16 @@ def get_codebook(self) -> Codebook:
self.parameters['preprocess_task'])
return preprocessTask.get_codebook()

def _get_decoder(self, codebook, scaleFactors, backgrounds):
    """Build the pixel-based decoder and a ready-to-call decode function.

    Args:
        codebook: the Codebook used to decode pixel traces.
        scaleFactors: per-bit intensity scale factors passed to
            decode_pixels.
        backgrounds: per-bit background offsets passed to decode_pixels.
    Returns:
        A two-element list ``[decoder, decodeFn]`` where ``decodeFn``
        takes an image set and runs ``decoder.decode_pixels`` with the
        task's configured lowpass sigma and distance threshold.
    """
    decoder = decoding.PixelBasedDecoder(codebook)

    def decodeFn(imageSet):
        # Bind the optimization results and task parameters so callers
        # only need to supply the image stack.
        return decoder.decode_pixels(
            imageSet, scaleFactors, backgrounds,
            lowPassSigma=self.parameters['lowpass_sigma'],
            distanceThreshold=self.parameters['distance_threshold'])

    return [decoder, decodeFn]

def _run_analysis(self, fragmentIndex):
"""This function decodes the barcodes in a fov and saves them to the
barcode database.
Expand All @@ -102,9 +112,11 @@ def _run_analysis(self, fragmentIndex):
lowPassSigma = self.parameters['lowpass_sigma']

codebook = self.get_codebook()
decoder = decoding.PixelBasedDecoder(codebook)
scaleFactors = optimizeTask.get_scale_factors()
backgrounds = optimizeTask.get_backgrounds()
[decoder, decodeFn] = self._get_decoder(codebook,
scaleFactors,
backgrounds)
chromaticCorrector = optimizeTask.get_chromatic_corrector()

zPositionCount = len(self.dataSet.get_z_positions())
Expand All @@ -118,9 +130,8 @@ def _run_analysis(self, fragmentIndex):
if not decode3d:
for zIndex in range(zPositionCount):
di, pm, d = self._process_independent_z_slice(
fragmentIndex, zIndex, chromaticCorrector, scaleFactors,
backgrounds, preprocessTask, decoder
)
fragmentIndex, zIndex, chromaticCorrector,
preprocessTask, decoder, decodeFn)

decodedImages[zIndex, :, :] = di
magnitudeImages[zIndex, :, :] = pm
Expand All @@ -145,10 +156,7 @@ def _run_analysis(self, fragmentIndex):
(imageSet.shape[0], imageSet.shape[-2],
imageSet.shape[-1]))

di, pm, npt, d = decoder.decode_pixels(
imageSet, scaleFactors, backgrounds,
lowPassSigma=lowPassSigma,
distanceThreshold=self.parameters['distance_threshold'])
di, pm, npt, d = decodeFn(imageSet)

normalizedPixelTraces[zIndex, :, :, :] = npt
decodedImages[zIndex, :, :] = di
Expand All @@ -174,20 +182,17 @@ def _run_analysis(self, fragmentIndex):
bcDB.empty_database(fragmentIndex)
bcDB.write_barcodes(bc, fov=fragmentIndex)


def _process_independent_z_slice(
self, fov: int, zIndex: int, chromaticCorrector, scaleFactors,
backgrounds, preprocessTask, decoder):
self, fov: int, zIndex: int, chromaticCorrector, preprocessTask,
decoder, decoderFn):

imageSet = preprocessTask.get_processed_image_set(
fov, zIndex, chromaticCorrector)
imageSet = imageSet.reshape(
(imageSet.shape[0], imageSet.shape[-2], imageSet.shape[-1]))

di, pm, npt, d = decoder.decode_pixels(
imageSet, scaleFactors, backgrounds,
lowPassSigma=self.parameters['lowpass_sigma'],
distanceThreshold=self.parameters['distance_threshold'])
di, pm, npt, d = decoderFn(imageSet)

self._extract_and_save_barcodes(
decoder, di, pm, npt, d, fov, zIndex)

Expand All @@ -197,25 +202,26 @@ def _save_decoded_images(self, fov: int, zPositionCount: int,
decodedImages: np.ndarray,
magnitudeImages: np.ndarray,
distanceImages: np.ndarray) -> None:
imageDescription = self.dataSet.analysis_tiff_description(
zPositionCount, 3)
with self.dataSet.writer_for_analysis_images(
self, 'decoded', fov) as outputTif:
for i in range(zPositionCount):
outputTif.save(decodedImages[i].astype(np.float32),
photometric='MINISBLACK',
metadata=imageDescription)
outputTif.save(magnitudeImages[i].astype(np.float32),
photometric='MINISBLACK',
metadata=imageDescription)
outputTif.save(distanceImages[i].astype(np.float32),
photometric='MINISBLACK',
metadata=imageDescription)
imageDescription = self.dataSet.analysis_tiff_description(
zPositionCount, 3)
with self.dataSet.writer_for_analysis_images(
self, 'decoded', fov) as outputTif:
for i in range(zPositionCount):
outputTif.save(decodedImages[i].astype(np.float32),
photometric='MINISBLACK',
metadata=imageDescription)
outputTif.save(magnitudeImages[i].astype(np.float32),
photometric='MINISBLACK',
metadata=imageDescription)
outputTif.save(distanceImages[i].astype(np.float32),
photometric='MINISBLACK',
metadata=imageDescription)

def _extract_and_save_barcodes(
self, decoder: decoding.PixelBasedDecoder, decodedImage: np.ndarray,
pixelMagnitudes: np.ndarray, pixelTraces: np.ndarray,
distances: np.ndarray, fov: int, zIndex: int=None) -> None:
self, decoder: decoding.PixelBasedDecoder,
decodedImage: np.ndarray, pixelMagnitudes: np.ndarray,
pixelTraces: np.ndarray, distances: np.ndarray,
fov: int, zIndex: int = None) -> None:

globalTask = self.dataSet.load_analysis_task(
self.parameters['global_align_task'])
Expand All @@ -235,3 +241,35 @@ def _remove_z_duplicate_barcodes(self, bc):
self.parameters['z_duplicate_xy_pixel_threshold'],
self.dataSet.get_z_positions())
return bc


class DecodeSNB(Decode):

    """Decode variant for shot-noise-based (SNB) analysis.

    A significance_threshold value of at least 5.0 is recommended.
    A value of 3.0 or 4.0 will return lots of false positives.
    """

    def __init__(self, dataSet: dataset.MERFISHDataSet,
                 parameters=None, analysisName=None):
        super().__init__(dataSet, parameters, analysisName)

        # Check self.parameters (populated by the base constructor) rather
        # than the raw ``parameters`` argument: the argument defaults to
        # None, and ``'key' not in None`` raises TypeError. This also
        # matches the significance_threshold check below.
        if 'lowpass_sigma' not in self.parameters:
            # -1 signals that no lowpass filtering should be applied.
            self.parameters['lowpass_sigma'] = -1
        if 'significance_threshold' not in self.parameters:
            self.parameters['significance_threshold'] = 6

    def _get_decoder(self, codebook, scaleFactors, backgrounds):
        """Build the SNB decoder and a ready-to-call decode function.

        ``scaleFactors`` and ``backgrounds`` are accepted to keep the
        signature of Decode._get_decoder but are ignored: the SNB decoder
        thresholds on statistical significance instead of rescaled
        intensities.

        Returns:
            A two-element list ``[decoder, decodeFn]``.
        """
        decoder = decoding.PixelBasedDecoderSNB(codebook)
        return [decoder,
                lambda x: decoder.decode_pixels(
                    x,
                    significanceThreshold=self.parameters[
                        'significance_threshold'],
                    lowPassSigma=self.parameters['lowpass_sigma'],
                    distanceThreshold=self.parameters[
                        'distance_threshold'])]
78 changes: 72 additions & 6 deletions merlin/analysis/optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,9 +332,10 @@ def get_scale_factors(self) -> np.ndarray:
# Don't rescale bits that were never seen
refactors[refactors == 0] = 1

previousFactors = np.array([self.dataSet.load_numpy_analysis_result(
'previous_scale_factors', self.analysisName, resultIndex=i)
for i in range(self.parameters['fov_per_iteration'])])
previousFactors = np.array(
[self.dataSet.load_numpy_analysis_result(
'previous_scale_factors', self.analysisName, resultIndex=i)
for i in range(self.parameters['fov_per_iteration'])])

scaleFactors = np.nanmedian(
np.multiply(refactors, previousFactors), axis=0)
Expand Down Expand Up @@ -364,9 +365,10 @@ def get_backgrounds(self) -> np.ndarray:
'previous_backgrounds', self.analysisName, resultIndex=i)
for i in range(self.parameters['fov_per_iteration'])])

previousFactors = np.array([self.dataSet.load_numpy_analysis_result(
'previous_scale_factors', self.analysisName, resultIndex=i)
for i in range(self.parameters['fov_per_iteration'])])
previousFactors = np.array(
[self.dataSet.load_numpy_analysis_result(
'previous_scale_factors', self.analysisName, resultIndex=i)
for i in range(self.parameters['fov_per_iteration'])])

backgrounds = np.nanmedian(np.add(
previousBackgrounds, np.multiply(refactors, previousFactors)),
Expand Down Expand Up @@ -414,3 +416,67 @@ def get_barcode_count_history(self) -> np.ndarray:
self.parameters['previous_iteration']
).get_barcode_count_history()
return np.append(previousHistory, [countsMean], axis=0)


class OptimizeIterationChromaticCorrection(OptimizeIteration):

    """Optimize-iteration variant that refines only the chromatic
    correction.

    Used by the shot-noise-based analysis pathway, where optimizing
    per-bit scale factors and background offsets is not relevant.
    """

    def __init__(self, dataSet, parameters=None, analysisName=None):
        super().__init__(dataSet, parameters, analysisName)

        if 'significance_threshold' not in self.parameters:
            self.parameters['significance_threshold'] = 10

    def get_backgrounds(self) -> np.ndarray:
        # Backgrounds are not optimized by this variant.
        return None

    def get_scale_factors(self) -> np.ndarray:
        # Scale factors are not optimized by this variant.
        return None

    def _run_analysis(self, fragmentIndex):
        """Decode one selected (fov, z) frame with the SNB decoder and
        record the barcodes used to refine the chromatic correction."""
        if not self.parameters['optimize_chromatic_correction']:
            return

        preprocessTask = self.dataSet.load_analysis_task(
            self.parameters['preprocess_task'])
        codebook = self.get_codebook()
        fovIndex, zIndex = self.parameters['fov_index'][fragmentIndex]

        # Carry the previous iteration's transformations forward and
        # remember which frame this fragment corresponds to.
        transformations = self._get_previous_chromatic_transformations()
        self.dataSet.save_pickle_analysis_result(
            transformations, 'previous_chromatic_corrections',
            self.analysisName, resultIndex=fragmentIndex)
        self.dataSet.save_numpy_analysis_result(
            np.array([fovIndex, zIndex]), 'select_frame', self.analysisName,
            resultIndex=fragmentIndex)

        corrector = aberration.RigidChromaticCorrector(
            transformations, self.get_reference_color())
        warpedImages = preprocessTask.get_processed_image_set(
            fovIndex, zIndex=zIndex, chromaticCorrector=corrector)

        areaThreshold = self.parameters['area_threshold']
        decoder = decoding.PixelBasedDecoderSNB(codebook)
        decoder.refactorAreaThreshold = areaThreshold
        di, pm, npt, d = decoder.decode_pixels(
            warpedImages,
            significanceThreshold=self.parameters['significance_threshold'])

        # TODO this saves the barcodes under fragment instead of fov
        # the barcodedb should be made more general
        cropWidth = self.parameters['crop_width']
        barcodes = pandas.concat(
            [decoder.extract_barcodes_with_index(
                bit, di, pm, npt, d, fovIndex, cropWidth,
                zIndex, minimumArea=areaThreshold)
             for bit in range(codebook.get_barcode_count())])
        self.get_barcode_database().write_barcodes(
            barcodes, fov=fragmentIndex)
Loading