diff --git a/CHANGES.rst b/CHANGES.rst
index 48d9eda8..094c6404 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,7 +1,10 @@
 1.5.3 (unreleased)
 ==================
 
--
+jump
+----
+- Fix shower flagging so that the group containing the shower and the requested
+  number of groups after the primary shower are always flagged. [#237]
 
 1.5.2 (2023-12-13)
 ==================
diff --git a/src/stcal/jump/jump.py b/src/stcal/jump/jump.py
index d6aeb3ad..01afaa2d 100644
--- a/src/stcal/jump/jump.py
+++ b/src/stcal/jump/jump.py
@@ -1,11 +1,13 @@
 import logging
 import multiprocessing
 import time
-
-import cv2 as cv
 import numpy as np
-from astropy import stats
-from astropy.convolution import Ring2DKernel, convolve
+import cv2 as cv
+import astropy.stats as stats
+
+from astropy.convolution import Ring2DKernel
+from astropy.convolution import convolve
+
 from . import constants
 from . import twopoint_difference as twopt
 
@@ -285,7 +287,7 @@ def detect_jumps(
                 dqflags['SATURATED']
        # This is the flag that controls the flagging of snowballs.
        if expand_large_events:
-            total_snowballs = flag_large_events(
+            gdq, total_snowballs = flag_large_events(
                gdq,
                jump_flag,
                sat_flag,
@@ -443,7 +445,7 @@ def detect_jumps(
 
        # This is the flag that controls the flagging of snowballs.
        if expand_large_events:
-            total_snowballs = flag_large_events(
+            gdq, total_snowballs = flag_large_events(
                gdq,
                jump_flag,
                sat_flag,
@@ -485,7 +487,6 @@ def detect_jumps(
        data /= gain_2d
        err /= gain_2d
        readnoise_2d /= gain_2d
-
    # Return the updated data quality arrays
    return gdq, pdq, total_primary_crs, number_extended_events, stddev
 
@@ -592,9 +593,10 @@ def flag_large_events(
            sat_flag,
            jump_flag,
            expansion=expand_factor,
+            num_grps_masked=0,
            max_extended_radius=max_extended_radius,
        )
-    return total_snowballs
+    return gdq, total_snowballs
 
 
 def extend_saturation(
@@ -645,7 +647,8 @@ def extend_ellipses(
    # For a given DQ plane it will use the list of ellipses to create
    # expanded ellipses of pixels with
    # the jump flag set.
-    plane = gdq_cube[intg, grp, :, :]
+    out_gdq_cube = gdq_cube.copy()
+    plane = gdq_cube[intg, grp, :, :].copy()
    ncols = plane.shape[1]
    nrows = plane.shape[0]
    image = np.zeros(shape=(nrows, ncols, 3), dtype=np.uint8)
@@ -685,15 +688,38 @@ def extend_ellipses(
    )
    jump_ellipse = image[:, :, 2]
    ngrps = gdq_cube.shape[1]
-    last_grp = min(grp + num_grps_masked, ngrps)
+    last_grp = find_last_grp(grp, ngrps, num_grps_masked)
    # This loop will flag the number of groups
    for flg_grp in range(grp, last_grp):
        sat_pix = np.bitwise_and(gdq_cube[intg, flg_grp, :, :], sat_flag)
        saty, satx = np.where(sat_pix == sat_flag)
        jump_ellipse[saty, satx] = 0
-        gdq_cube[intg, flg_grp, :, :] = np.bitwise_or(gdq_cube[intg, flg_grp, :, :], jump_ellipse)
-    return gdq_cube, num_ellipses
+        out_gdq_cube[intg, flg_grp, :, :] = np.bitwise_or(gdq_cube[intg, flg_grp, :, :], jump_ellipse)
+    return out_gdq_cube, num_ellipses
+
+
+def find_last_grp(grp, ngrps, num_grps_masked):
+    """
+    Parameters
+    ----------
+    grp : int
+        The group index of the shower
+
+    ngrps : int
+        The number of groups in the integration
+
+    num_grps_masked : int
+        The requested number of groups to be flagged after the shower
+
+    Returns
+    -------
+    last_grp : int
+        The index of the last group to flag for the shower
+    """
+    num_grps_masked += 1
+    last_grp = min(grp + num_grps_masked, ngrps)
+    return last_grp
 
 
 def find_circles(dqplane, bitmask, min_area):
    # Using an input DQ plane this routine will find the groups of pixels with at least the minimum
@@ -848,7 +874,7 @@ def find_faint_extended(
        Total number of showers detected.
 
    """
-    read_noise_2 = readnoise_2d**2
+    read_noise_2d_sqr = readnoise_2d**2
    data = indata.copy()
    data[gdq == sat_flag] = np.nan
    data[gdq == 1] = np.nan
@@ -863,12 +889,11 @@ def find_faint_extended(
        # calculate sigma for each pixel
        if nints <= minimum_sigclip_groups:
            median_diffs = np.nanmedian(first_diffs_masked[intg], axis=0)
-            sigma = np.sqrt(np.abs(median_diffs) + read_noise_2 / nframes)
+            sigma = np.sqrt(np.abs(median_diffs) + read_noise_2d_sqr / nframes)
            # The difference from the median difference for each group
            e_jump = first_diffs_masked[intg] - median_diffs[np.newaxis, :, :]
            # SNR ratio of each diff.
            ratio = np.abs(e_jump) / sigma[np.newaxis, :, :]
-
    # The convolution kernel creation
    ring_2D_kernel = Ring2DKernel(inner, outer)
    ngrps = data.shape[1]
@@ -895,6 +920,7 @@ def find_faint_extended(
        extended_emission = np.zeros(shape=(nrows, ncols), dtype=np.uint8)
        exty, extx = np.where(masked_smoothed_ratio > snr_threshold)
        extended_emission[exty, extx] = 1
+
        # find the contours of the extended emission
        contours, hierarchy = cv.findContours(extended_emission, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        # get the contours that are above the minimum size
@@ -902,7 +928,6 @@ def find_faint_extended(
        # get the minimum enclosing rectangle which is the same as the
        # minimum enclosing ellipse
        ellipses = [cv.minAreaRect(con) for con in bigcontours]
-
        expand_by_ratio = True
        expansion = 1.0
        plane = gdq[intg, grp, :, :]
@@ -947,6 +972,7 @@ def find_faint_extended(
        if len(ellipses) > 0:
            # add all the showers for this integration to the list
            all_ellipses.append([intg, grp, ellipses])
+    total_showers = 0
    if all_ellipses:
        # Now we actually do the flagging of the pixels inside showers.
        # This is deferred until all showers are detected, because the showers
@@ -956,6 +982,7 @@ def find_faint_extended(
            intg = showers[0]
            grp = showers[1]
            ellipses = showers[2]
+            total_showers += len(ellipses)
            gdq, num = extend_ellipses(
                gdq,
                intg,
@@ -966,9 +993,9 @@ def find_faint_extended(
                expansion=ellipse_expand,
                expand_by_ratio=True,
                num_grps_masked=num_grps_masked,
                max_extended_radius=max_extended_radius,
            )
-    return gdq, len(all_ellipses)
+    return gdq, total_showers
 
 
 def calc_num_slices(n_rows, max_cores, max_available):
diff --git a/tests/test_jump.py b/tests/test_jump.py
index a85eef51..9443c949 100644
--- a/tests/test_jump.py
+++ b/tests/test_jump.py
@@ -9,6 +9,7 @@
    flag_large_events,
    point_inside_ellipse,
    detect_jumps,
+    find_last_grp,
 )
 
 DQFLAGS = {"JUMP_DET": 4, "SATURATED": 2, "DO_NOT_USE": 1, "GOOD": 0, "NO_GAIN_VALUE": 8}
@@ -220,7 +221,7 @@ def test_flag_large_events_withsnowball():
    cube[0, 2, 5, 1:6] = DQFLAGS["JUMP_DET"]
    cube[0, 2, 1:6, 1] = DQFLAGS["JUMP_DET"]
    cube[0, 2, 1:6, 5] = DQFLAGS["JUMP_DET"]
-    flag_large_events(
+    cube, total_snowballs = flag_large_events(
        cube,
        DQFLAGS["JUMP_DET"],
        DQFLAGS["SATURATED"],
@@ -253,7 +254,7 @@ def test_flag_large_events_groupedsnowball():
    cube[0, 2, 5, 1:6] = DQFLAGS["JUMP_DET"]
    cube[0, 2, 1:6, 1] = DQFLAGS["JUMP_DET"]
    cube[0, 2, 1:6, 5] = DQFLAGS["JUMP_DET"]
-    flag_large_events(
+    cube, total_snowballs = flag_large_events(
        cube,
        DQFLAGS["JUMP_DET"],
        DQFLAGS["SATURATED"],
@@ -284,7 +285,7 @@ def test_flag_large_events_withsnowball_noextension():
    cube[0, 2, 5, 1:6] = DQFLAGS["JUMP_DET"]
    cube[0, 2, 1:6, 1] = DQFLAGS["JUMP_DET"]
    cube[0, 2, 1:6, 5] = DQFLAGS["JUMP_DET"]
-    flag_large_events(
+    cube, num_snowballs = flag_large_events(
        cube,
        DQFLAGS["JUMP_DET"],
        DQFLAGS["SATURATED"],
@@ -446,3 +447,15 @@ def test_calc_num_slices():
    assert calc_num_slices(n_rows, "3/4", max_available_cores) == 1
    n_rows = 9
    assert calc_num_slices(n_rows, "21", max_available_cores) == 9
+
+
+def test_find_last_grp():
+    assert find_last_grp(grp=5, ngrps=7, num_grps_masked=0) == 6
+    assert find_last_grp(grp=5, ngrps=7, num_grps_masked=2) == 7
+    assert find_last_grp(grp=5, ngrps=7, num_grps_masked=3) == 7
+    assert find_last_grp(grp=5, ngrps=6, num_grps_masked=1) == 6
+    assert find_last_grp(grp=5, ngrps=6, num_grps_masked=0) == 6
+    assert find_last_grp(grp=5, ngrps=6, num_grps_masked=2) == 6
+    assert find_last_grp(grp=5, ngrps=8, num_grps_masked=0) == 6
+    assert find_last_grp(grp=5, ngrps=8, num_grps_masked=1) == 7
+    assert find_last_grp(grp=5, ngrps=8, num_grps_masked=2) == 8
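
Note on the core fix: before this change, extend_ellipses computed last_grp = min(grp + num_grps_masked, ngrps) and then flagged range(grp, last_grp), so a request of num_grps_masked=0 flagged nothing at all, not even the group containing the shower. find_last_grp adds one to the requested count so the shower group itself is always included, clamped to the number of groups in the integration. A minimal standalone sketch of the before/after boundary behavior (not part of the patch; the _old/_new names are illustrative):

def find_last_grp_old(grp, ngrps, num_grps_masked):
    # Old behavior: with num_grps_masked=0, range(grp, last_grp) is empty,
    # so the group containing the shower was never flagged.
    return min(grp + num_grps_masked, ngrps)


def find_last_grp_new(grp, ngrps, num_grps_masked):
    # New behavior: the +1 guarantees the shower group is flagged, plus the
    # requested trailing groups, clamped to the groups in the integration.
    return min(grp + num_grps_masked + 1, ngrps)


for num_masked in (0, 1, 2):
    old = list(range(5, find_last_grp_old(5, 7, num_masked)))
    new = list(range(5, find_last_grp_new(5, 7, num_masked)))
    print(f"num_grps_masked={num_masked}: old={old} new={new}")
# num_grps_masked=0: old=[] new=[5]
# num_grps_masked=1: old=[5] new=[5, 6]
# num_grps_masked=2: old=[5, 6] new=[5, 6]  (clamped at ngrps=7)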
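
The snowball path also changes shape: extend_ellipses now ORs the ellipse flags into a copy of the input cube (out_gdq_cube = gdq_cube.copy()) and returns it, and flag_large_events now returns the updated cube as well, which is why every caller, including the tests above, reassigns the result (gdq, total_snowballs = flag_large_events(...)). A toy sketch of that out-of-place pattern, under the same assumption; or_flags_out_of_place is an illustrative name, not stcal API:

import numpy as np

JUMP_DET = 4


def or_flags_out_of_place(gdq_cube, intg, grp, ellipse_mask):
    # Mirrors the extend_ellipses change: flags are OR'd into a copy, so the
    # input cube is left untouched and the caller must capture the return.
    out = gdq_cube.copy()
    out[intg, grp] = np.bitwise_or(gdq_cube[intg, grp], ellipse_mask)
    return out


gdq = np.zeros((1, 3, 4, 4), dtype=np.uint8)
mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = JUMP_DET

updated = or_flags_out_of_place(gdq, 0, 1, mask)
assert gdq.sum() == 0                  # original cube is unmodified
assert (updated[0, 1] == mask).all()   # flags landed only in the copy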