tickets/SP-1600: updates to pass ruff checks #429

Merged: 4 commits, Jan 16, 2025
3 changes: 0 additions & 3 deletions rubin_sim/maf/batches/science_radar_batch.py
@@ -570,9 +570,6 @@ def science_radar_batch(
sqlconstraint = "night <= %s" % (yr_cut * 365.25 + 0.5)
sqlconstraint += ' and scheduler_note not like "DD%"'
info_label = f"{bandpass} band non-DD year {yr_cut}"
ThreebyTwoSummary_simple = metrics.StaticProbesFoMEmulatorMetricSimple(
nside=nside, year=yr_cut, metric_name="3x2ptFoM_simple"
)
ThreebyTwoSummary = maf.StaticProbesFoMEmulatorMetric(nside=nside, metric_name="3x2ptFoM")

m = metrics.ExgalM5WithCuts(
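For a quick illustration (not part of the diff), the constraint assembled above evaluates as follows for yr_cut = 1:

```
yr_cut = 1
sqlconstraint = "night <= %s" % (yr_cut * 365.25 + 0.5)
sqlconstraint += ' and scheduler_note not like "DD%"'
# -> 'night <= 365.75 and scheduler_note not like "DD%"'
```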
18 changes: 10 additions & 8 deletions rubin_sim/maf/maf_contrib/star_counts/coords.py
@@ -8,9 +8,9 @@
# Two different functions are present that do the conversion, and a third
# that uses ephem package, for redundancy purposes.
# For use with Field Star Count metric


import astropy.units as u
import numpy as np
from astropy.coordinates import SkyCoord
from scipy.optimize import fsolve

rad1 = np.radians(282.25)
@@ -76,12 +76,14 @@ def eq_gal2(eq_ra, eq_dec):


def eq_gal3(eq_ra, eq_dec):
coordset = ephem.Equatorial(np.radians(eq_ra), np.radians(eq_dec), epoch="2000")
g = ephem.Galactic(coordset)
templon, templat = float(g.lon), float(g.lat)
l_deg = np.degrees(templon)
b_deg = np.degrees(templat)
return b_deg, l_deg
# assume input ra, dec are in deg
coordset = SkyCoord(ra=eq_ra * u.deg, dec=eq_dec * u.deg, frame="icrs")
# convert to galactic
galactic = coordset.galactic
# return b, l in deg
return galactic.b.value, galactic.l.value


def gal_cyn(b_deg, l_deg, dist):
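For reference, a minimal standalone sketch of the astropy conversion eq_gal3 now performs; the input coordinates here are illustrative:

```
import astropy.units as u
from astropy.coordinates import SkyCoord

# ICRS (ra, dec) in degrees -> Galactic (b, l) in degrees,
# matching eq_gal3's return order.
c = SkyCoord(ra=282.25 * u.deg, dec=-30.0 * u.deg, frame="icrs")
b_deg, l_deg = c.galactic.b.value, c.galactic.l.value
```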
13 changes: 7 additions & 6 deletions rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py
@@ -58,12 +58,13 @@ def noise_calc(band):
m5 = {"u": 23.9, "g": 25.0, "r": 24.7, "i": 24.0, "z": 23.3, "y": 22.1}
sigma = 0.03
sigma_sys = 0.005
fun = (
lambda x: sigma_sys**2
+ (0.04 - gamma[band]) * 10 ** (0.4 * (x - m5[band]))
+ gamma[band] * 10 ** (0.8 * (x - m5[band]))
- sigma**2
)

def fun(x):
    return (
        sigma_sys**2
        + (0.04 - gamma[band]) * 10 ** (0.4 * (x - m5[band]))
        + gamma[band] * 10 ** (0.8 * (x - m5[band]))
        - sigma**2
    )

return newton(fun, 25)


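The root being solved above is the standard LSST photometric-error model, sigma_sys^2 + (0.04 - gamma) * x + gamma * x^2 = sigma^2 with x = 10^(0.4 * (m - m5)). A standalone sketch; the r-band gamma below is an assumed value, since the module's gamma dict is folded out of this diff:

```
from scipy.optimize import newton

gamma_r, m5_r = 0.039, 24.7      # gamma_r assumed; m5_r from the m5 dict above
sigma, sigma_sys = 0.03, 0.005   # target total error and systematic floor


def err_model(m):
    x = 10 ** (0.4 * (m - m5_r))
    rand2 = (0.04 - gamma_r) * x + gamma_r * x**2  # sigma_rand**2
    return sigma_sys**2 + rand2 - sigma**2


mag_limit = newton(err_model, 25)  # ~22.5: magnitude where total error reaches 0.03 mag
```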
22 changes: 13 additions & 9 deletions rubin_sim/maf/metrics/star_density.py
@@ -11,12 +11,13 @@ class StarDensityMetric(BaseMetric):
"""Interpolate the stellar luminosity function to return the number of
stars per square arcsecond brighter than the mag_limit.
Note that the map is built from CatSim stars in the range 20 < r < 28.
mag_limit values outside that the range of the map's starMapBins will return self.badval
mag_limit values outside the range of the map's starMapBins will
return self.badval

The stellar density maps are available in any bandpass, but bandpasses other
than r band must use a pre-configured StellarDensityMap (not just the default).
In other words, when setting up the metric bundle for an i-band stellar density
using (as an example) a HealpixSlicer:
The stellar density maps are available in any bandpass, but bandpasses
other than r band must use a pre-configured StellarDensityMap (not just the
default). In other words, when setting up the metric bundle for an i-band
stellar density using (as an example) a HealpixSlicer:
```
map = maf.StellarDensityMap(filtername='i')
metric = maf.StarDensityMetric(filtername='i', mag_limit=25.0)
@@ -31,8 +32,9 @@ class StarDensityMetric(BaseMetric):
Returns number of stars per square arcsecond brighter than this limit.
Default 25.
filtername : `str`, opt
Which filter to evaluate the luminosity function in; Note that using bands other than r
will require setting up a custom (rather than default) version of the stellar density map.
Which filter to evaluate the luminosity function in; note that using
bands other than r will require setting up a custom (rather than
default) version of the stellar density map.
Default r.
units : `str`, opt
Units for the output values. Default "stars/sq arcsec".
@@ -42,7 +44,8 @@
Returns
-------
result : `float`
Number of stars brighter than mag_limit in filtername, based on the stellar density map.
Number of stars brighter than mag_limit in filtername, based on the
stellar density map.
"""

def __init__(
@@ -68,6 +71,7 @@ def run(self, data_slice, slice_point=None):
try:
result = interp(self.mag_limit) / (3600.0**2)
except ValueError:
# This probably means the interpolation went out of range (magLimit <15 or >28)
# This probably means the interpolation went out of range
# (magLimit <15 or >28)
return self.badval
return result
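The docstring's i-band example is truncated by the fold above; a hedged sketch of how such a bundle is typically assembled (the nside value, empty constraint, and maps_list keyword are assumptions about the current maf API, not taken from this diff):

```
import rubin_sim.maf as maf

stellar_map = maf.StellarDensityMap(filtername="i")
metric = maf.StarDensityMetric(filtername="i", mag_limit=25.0)
slicer = maf.HealpixSlicer(nside=64)
bundle = maf.MetricBundle(metric, slicer, constraint="", maps_list=[stellar_map])
```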
9 changes: 6 additions & 3 deletions rubin_sim/maf/metrics/summary_metrics.py
@@ -173,7 +173,8 @@ def run(self, data_slice, slice_point=None):

class NormalizeMetric(BaseMetric):
"""
Return a metric values divided by 'norm_val'. Useful for turning summary statistics into fractions.
Return metric values divided by 'norm_val'.
Useful for turning summary statistics into fractions.
"""

def __init__(self, col="metricdata", norm_val=1, **kwargs):
@@ -190,7 +191,8 @@ def run(self, data_slice, slice_point=None):

class ZeropointMetric(BaseMetric):
"""
Return a metric values with the addition of 'zp'. Useful for altering the zeropoint for summary statistics.
Return metric values with the addition of 'zp'.
Useful for altering the zeropoint for summary statistics.
"""

def __init__(self, col="metricdata", zp=0, **kwargs):
@@ -271,7 +273,8 @@ def run(self, data_slice, slice_point=None):
Returns:
float: Interpolated static-probe statistical Figure-of-Merit.
Raises:
ValueError: If year is not one of the 4 for which a FoM is calculated
ValueError: If year is not one of the 4 for which a FoM is
calculated
"""
# Chop off any outliers
good_pix = np.where(data_slice[self.col] > 0)[0]
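A hedged usage sketch for the two metrics above when applied as summary statistics; the numeric values are illustrative:

```
import rubin_sim.maf as maf

summary_metrics = [
    maf.NormalizeMetric(norm_val=18000.0),  # report a summary value as a fraction of 18000
    maf.ZeropointMetric(zp=-0.5),           # shift a summary value by a constant offset
]
```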
10 changes: 6 additions & 4 deletions rubin_sim/maf/metrics/surfb_metric.py
@@ -21,7 +21,8 @@ def surface_brightness_limit_approx(
tot_area=100.0,
mag_diff_warn=0.1,
):
"""Compute surface brightness limit in 3 limiting cases, return the brightest.
"""Compute surface brightness limit in 3 limiting cases, return the
brightest.

Algebra worked out in this technote:
https://github.com/lsst-sims/smtn-016
@@ -52,8 +53,8 @@

Returns
-------
surface brightness limit in mags/sq arcsec
aka the surface brightness that reaches SNR=nsigma when measured over tot_area.
surface brightness limit in mags/sq arcsec, aka the surface brightness that
reaches SNR=nsigma when measured over tot_area.
"""

a_pix = pixscale**2
@@ -76,7 +77,8 @@

if np.min([d1, d2, d3]) < mag_diff_warn:
warnings.warn(
"Limiting magnitudes in different cases are within %.3f mags, result may be too optimistic by up 0.38 mags/sq arcsec."
"Limiting magnitudes in different cases are within %.3f mags, \
result may be too optimistic by up 0.38 mags/sq arcsec."
% mag_diff_warn
)

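A quick sketch of the conversion the Returns section implies: a total magnitude measured over tot_area square arcseconds corresponds to a mean surface brightness in mags/sq arcsec:

```
import numpy as np


def mag_to_surface_brightness(m_tot, tot_area=100.0):
    # Spreading m_tot over tot_area sq arcsec dims each
    # sq arcsec by 2.5 * log10(tot_area).
    return m_tot + 2.5 * np.log10(tot_area)


mu = mag_to_surface_brightness(27.0)  # 32.0 mags/sq arcsec over 100 sq arcsec
```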
59 changes: 37 additions & 22 deletions rubin_sim/maf/metrics/transient_metrics.py
@@ -8,7 +8,8 @@

class TransientMetric(BaseMetric):
"""
Calculate what fraction of the transients would be detected. Best paired with a spatial slicer.
Calculate what fraction of the transients would be detected. Best paired
with a spatial slicer.
We are assuming simple light curves with no color evolution.

Parameters
@@ -19,7 +20,8 @@ class TransientMetric(BaseMetric):
How long it takes to reach the peak magnitude (days). Default 5.
rise_slope : float, optional
Slope of the light curve before peak time (mags/day).
This should be negative since mags are backwards (magnitudes decrease towards brighter fluxes).
This should be negative since mags are backwards (magnitudes decrease
towards brighter fluxes).
Default 0.
decline_slope : float, optional
Slope of the light curve after peak time (mags/day).
@@ -43,25 +45,31 @@ class TransientMetric(BaseMetric):
MJD for the survey start date.
Default None (uses the time of the first observation).
detect_m5_plus : float, optional
An observation will be used if the light curve magnitude is brighter than m5+detect_m5_plus.
An observation will be used if the light curve magnitude is brighter
than m5+detect_m5_plus.
Default 0.
n_pre_peak : int, optional
Number of observations (in any filter(s)) to demand before peak_time,
before saying a transient has been detected.
Default 0.
n_per_lc : int, optional
Number of sections of the light curve that must be sampled above the detect_m5_plus theshold
Number of sections of the light curve that must be sampled above the
detect_m5_plus threshold
(in a single filter) for the light curve to be counted.
For example, setting n_per_lc = 2 means a light curve is only considered detected if there
is at least 1 observation in the first half of the LC, and at least one in the second half of the LC.
n_per_lc = 4 means each quarter of the light curve must be detected to count.
For example, setting n_per_lc = 2 means a light curve is only
considered detected if there is at least 1 observation in the first
half of the LC, and at least one in the second half of the LC.
n_per_lc = 4 means each quarter of the light curve must be detected to
count.
Default 1.
n_filters : int, optional
Number of filters that need to be observed for an object to be counted as detected.
Number of filters that need to be observed for an object to be counted
as detected.
Default 1.
n_phase_check : int, optional
Sets the number of phases that should be checked.
One can imagine pathological cadences where many objects pass the detection criteria,
One can imagine pathological cadences where many objects pass the
detection criteria,
but would not if the observations were offset by a phase-shift.
Default 1.
count_method : {'full', 'partialLC'}, defaults to 'full'
@@ -154,15 +162,18 @@ def light_curve(self, time, filters):
return lc_mags
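The light_curve body is folded out of this diff; below is a toy sketch of the piecewise shape that the peak_time, rise_slope, and decline_slope parameters describe, not the module's actual implementation:

```
import numpy as np


def toy_light_curve(t, peak_time=5.0, rise_slope=-1.0, decline_slope=0.5):
    # Magnitudes relative to peak: 0 at peak_time, positive (fainter) elsewhere.
    t = np.asarray(t, dtype=float)
    return np.where(
        t < peak_time,
        rise_slope * (t - peak_time),     # rise_slope < 0, so pre-peak mags are fainter
        decline_slope * (t - peak_time),  # decline_slope > 0, so the source fades
    )
```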

def run(self, data_slice, slice_point=None):
""" "
Calculate the detectability of a transient with the specified lightcurve.
"""
Calculate the detectability of a transient with the specified
lightcurve.

Parameters
----------
data_slice : numpy.array
Numpy structured array containing the data related to the visits provided by the slicer.
Numpy structured array containing the data related to the visits
provided by the slicer.
slice_point : dict, optional
Dictionary containing information about the slice_point currently active in the slicer.
Dictionary containing information about the slice_point currently
active in the slicer.

Returns
-------
@@ -178,8 +189,8 @@ def run(self, data_slice, slice_point=None):
n_detected = 0
n_trans_max = 0
for tshift in tshifts:
# Compute the total number of back-to-back transients are possible to detect
# given the survey duration and the transient duration.
# Compute the total number of back-to-back transients that could be
# detected given the survey duration and the transient duration.
n_trans_max += _n_trans_max
if tshift != 0:
n_trans_max -= 1
@@ -211,17 +222,19 @@ def run(self, data_slice, slice_point=None):
ulc_number = np.unique(lc_number)
left = np.searchsorted(lc_number, ulc_number)
right = np.searchsorted(lc_number, ulc_number, side="right")
# Note here I'm using np.searchsorted to basically do a 'group by'
# might be clearer to use scipy.ndimage.measurements.find_objects or pandas, but
# this numpy function is known for being efficient.
# Note here I'm using np.searchsorted to basically do a 'group by'.
# It might be clearer to use
# scipy.ndimage.measurements.find_objects or pandas, but this
# numpy function is known for being efficient.
for le, ri in zip(left, right):
# Number of points where there are a detection
good = np.where(time[le:ri] < self.peak_time)
nd = np.sum(detected[le:ri][good])
if nd >= self.n_pre_peak:
detected[le:ri] += 1

# Check if we need multiple points per light curve or multiple filters
# Check if we need multiple points per light curve
# or multiple filters
if (self.n_per_lc > 1) | (self.n_filters > 1):
# make sure things are sorted by time
ord = np.argsort(data_slice[self.mjd_col])
@@ -243,11 +256,13 @@
if np.size(np.unique(phase_sections[good])) >= self.n_per_lc:
detected[le:ri] += 1

# Find the unique number of light curves that passed the required number of conditions
# Find the unique number of light curves that passed the required
# number of conditions
n_detected += np.size(np.unique(lc_number[np.where(detected >= detect_thresh)]))

# Rather than keeping a single "detected" variable, maybe make a mask for each criteria, then
# reduce functions like: reduce_singleDetect, reduce_NDetect, reduce_PerLC, reduce_perFilter.
# Rather than keeping a single "detected" variable, maybe make a mask
# for each criterion, then reduce functions like: reduce_singleDetect,
# reduce_NDetect, reduce_PerLC, reduce_perFilter.
# The way I'm running now it would speed things up.

return float(n_detected) / n_trans_max
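A self-contained sketch of the np.searchsorted 'group by' idiom used in run() above; the array values are illustrative:

```
import numpy as np

lc_number = np.array([0, 0, 0, 1, 1, 2, 2, 2])  # sorted light-curve labels, one per visit
detected = np.array([1, 0, 1, 1, 1, 0, 0, 1])

ulc_number = np.unique(lc_number)
left = np.searchsorted(lc_number, ulc_number)                 # first index of each group
right = np.searchsorted(lc_number, ulc_number, side="right")  # one past the last index

for lc, le, ri in zip(ulc_number, left, right):
    print(lc, detected[le:ri].sum())  # per-light-curve detection counts: 2, 2, 1
```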