diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4013d9c45..96b4babb7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
 # OMC3 Changelog
 
+#### 2024-08-14 - v0.15.3 - _jdilly_
+
+- Fixed:
+  - Add DOROS BPMs to `twiss.dat`.
+  - Some Pandas `FutureWarning`s, `DeprecationWarning`s and `PerformanceWarning`s
+
 #### 2024-08-14 - v0.15.2 - _fesoubel_, _jdilly_
 
 - Fixed:
diff --git a/omc3/__init__.py b/omc3/__init__.py
index 2f58dcd98..28cfcf713 100644
--- a/omc3/__init__.py
+++ b/omc3/__init__.py
@@ -11,7 +11,7 @@
 __title__ = "omc3"
 __description__ = "An accelerator physics tools package for the OMC team at CERN."
 __url__ = "https://github.com/pylhc/omc3"
-__version__ = "0.15.2"
+__version__ = "0.15.3"
 __author__ = "pylhc"
 __author_email__ = "pylhc@github.com"
 __license__ = "MIT"
diff --git a/omc3/harpy/frequency.py b/omc3/harpy/frequency.py
index a9018d951..135bb744e 100644
--- a/omc3/harpy/frequency.py
+++ b/omc3/harpy/frequency.py
@@ -9,7 +9,6 @@
 analysis. Also searches for resonances in the calculated spectra.
 
 """
-from collections import OrderedDict
 from numbers import Number
 
 import numpy as np
@@ -136,7 +135,7 @@ def find_resonances(tunes, nturns, plane, spectra, order_resonances):
 
     """
     resonance_lines = _get_resonance_lines(order_resonances)
-    df = pd.DataFrame(index=spectra["FREQS"].index, columns=OrderedDict())
+    df = pd.DataFrame(index=spectra["FREQS"].index, dtype=pd.Float64Dtype())
     resonances_freqs = _compute_resonances_with_freqs(plane, tunes, resonance_lines)
     if tunes[2] > 0.0:
         resonances_freqs.update(_compute_resonances_with_freqs("Z", tunes, resonance_lines))
@@ -145,10 +144,12 @@ def find_resonances(tunes, nturns, plane, spectra, order_resonances):
         max_coefs, max_freqs = _search_highest_coefs(resonances_freqs[resonance], tolerance,
                                                      spectra["FREQS"], spectra["COEFFS"])
         resstr = _get_resonance_suffix(resonance)
-        df[f"{COL_FREQ}{resstr}"], df[f"{COL_AMP}{resstr}"], df[f"{COL_PHASE}{resstr}"] = _get_freqs_amps_phases(
-            max_freqs, max_coefs, resonances_freqs[resonance])
+        columns = [f"{COL_FREQ}{resstr}", f"{COL_AMP}{resstr}", f"{COL_PHASE}{resstr}"]
+        df_resonance = _get_freqs_amps_phases(max_freqs, max_coefs, resonances_freqs[resonance])
+        df_resonance.columns = columns
+        df.loc[:, columns] = df_resonance
 
-        df[f"{COL_PHASE}{resstr}"] = _realign_phases(df.loc[:, f"{COL_PHASE}{resstr}"].to_numpy(),
+        df.loc[:, f"{COL_PHASE}{resstr}"] = _realign_phases(df.loc[:, f"{COL_PHASE}{resstr}"].to_numpy(),
                                                      df.loc[:, f"{COL_FREQ}{resstr}"].to_numpy(), nturns)
 
     return df
@@ -161,25 +162,34 @@ def _get_main_resonances(tunes, spectra, plane, tolerance, df):
         raise ValueError(f"No main {plane} resonances found, "
                          f"try to increase the tolerance or adjust the tunes")
     bad_bpms_by_tune = spectra["COEFFS"].loc[max_coefs == 0.].index
-    df[f"{COL_TUNE}{plane}"], df[f"{COL_AMP}{plane}"], df[f"{COL_MU}{plane}"] = _get_freqs_amps_phases(
-        max_freqs, max_coefs, freq)
+    columns = [f"{COL_TUNE}{plane}", f"{COL_AMP}{plane}", f"{COL_MU}{plane}"]
+    df_main = _get_freqs_amps_phases(max_freqs, max_coefs, freq)
+    df_main.columns = columns
+    df.loc[:, columns] = df_main
     if plane != "Z":
         df = df.loc[df.index.difference(bad_bpms_by_tune)]
     return df, bad_bpms_by_tune
 
 
 def _calculate_natural_tunes(spectra, nattunes, tolerance, plane):
-    df = pd.DataFrame(index=spectra["FREQS"].index, columns=OrderedDict())
+    columns = [f"{COL_NATTUNE}{plane}", f"{COL_NATAMP}{plane}", f"{COL_NATMU}{plane}"]
     x, y, _ = nattunes
     freq = x % 1 if plane == "X" else y % 1
     max_coefs, max_freqs = _search_highest_coefs(freq, tolerance, spectra["FREQS"], spectra["COEFFS"])
-    df[f"{COL_NATTUNE}{plane}"], df[f"{COL_NATAMP}{plane}"], df[f"{COL_NATMU}{plane}"] = _get_freqs_amps_phases(
-        max_freqs, max_coefs, freq)
+    df = _get_freqs_amps_phases(max_freqs, max_coefs, freq)
+    df.columns = columns
     return df
 
 
-def _get_freqs_amps_phases(max_freqs, max_coefs, freq):
-    return max_freqs, np.abs(max_coefs), np.sign(0.5 - freq) * np.angle(max_coefs) / PI2
+def _get_freqs_amps_phases(max_freqs: pd.Series, max_coefs: pd.Series, freq: float) -> pd.DataFrame:
+    return pd.DataFrame(
+        {
+            f"{COL_FREQ}": max_freqs,
+            f"{COL_AMP}": np.abs(max_coefs),
+            f"{COL_PHASE}": np.sign(0.5 - freq) * np.angle(max_coefs) / PI2,
+        },
+        dtype=pd.Float64Dtype(),
+    )
 
 
 def _realign_phases(phase_data, freq_data, nturns):
diff --git a/omc3/harpy/handler.py b/omc3/harpy/handler.py
index ffc54cd39..2be2ba28b 100644
--- a/omc3/harpy/handler.py
+++ b/omc3/harpy/handler.py
@@ -184,29 +184,35 @@ def _get_output_path_without_suffix(output_dir, file_path):
     return join(output_dir, basename(file_path))
 
 
-def _rescale_amps_to_main_line_and_compute_noise(panda, plane):
+def _rescale_amps_to_main_line_and_compute_noise(df: pd.DataFrame, plane: str) -> pd.DataFrame:
     """
     TODO follows non-transpararent convention
     TODO the consequent analysis has to be changed if removed
     """
-    cols = [col for col in panda.columns.to_numpy() if col.startswith(COL_AMP)]
+    cols = [col for col in df.columns.to_numpy() if col.startswith(COL_AMP)]
     cols.remove(f"{COL_AMP}{plane}")
-    panda.loc[:, cols] = panda.loc[:, cols].div(panda.loc[:, f"{COL_AMP}{plane}"], axis="index")
-    amps = panda.loc[:, f"{COL_AMP}{plane}"].to_numpy()
+    df.loc[:, cols] = df.loc[:, cols].div(df.loc[:, f"{COL_AMP}{plane}"], axis="index")
+    amps = df.loc[:, f"{COL_AMP}{plane}"].to_numpy()
     # Division by two for backwards compatibility with Drive, i.e. the unit is [2mm]
     # TODO later remove
-    panda[f"{COL_AMP}{plane}"] = panda.loc[:, f"{COL_AMP}{plane}"].to_numpy() / 2
-    if f"{COL_NATAMP}{plane}" in panda.columns:
-        panda[f"{COL_NATAMP}{plane}"] = panda.loc[:, f"{COL_NATAMP}{plane}"].to_numpy() / 2
-
-    if np.max(panda.loc[:, 'NOISE'].to_numpy()) == 0.0:
-        return panda  # Do not calculated errors when no noise was calculated
-    noise_scaled = panda.loc[:, 'NOISE'] / amps
-    panda.loc[:, "NOISE_SCALED"] = noise_scaled
-    panda.loc[:, f"{COL_ERR}{COL_AMP}{plane}"] = panda.loc[:, 'NOISE']
-    if f"{COL_NATTUNE}{plane}" in panda.columns:
-        panda.loc[:, f"{COL_ERR}{COL_NATAMP}{plane}"] = panda.loc[:, 'NOISE']
-    for col in cols:
-        this_amp = panda.loc[:, col]
-        panda.loc[:, f"{COL_ERR}{col}"] = noise_scaled * np.sqrt(1 + np.square(this_amp))
-    return panda
+    df[f"{COL_AMP}{plane}"] = df.loc[:, f"{COL_AMP}{plane}"].to_numpy() / 2
+    if f"{COL_NATAMP}{plane}" in df.columns:
+        df[f"{COL_NATAMP}{plane}"] = df.loc[:, f"{COL_NATAMP}{plane}"].to_numpy() / 2
+
+    if np.max(df.loc[:, 'NOISE'].to_numpy()) == 0.0:
+        return df  # Do not calculated errors when no noise was calculated
+    noise_scaled = df.loc[:, 'NOISE'] / amps
+    df.loc[:, "NOISE_SCALED"] = noise_scaled
+    df.loc[:, f"{COL_ERR}{COL_AMP}{plane}"] = df.loc[:, 'NOISE']
+    if f"{COL_NATTUNE}{plane}" in df.columns:
+        df.loc[:, f"{COL_ERR}{COL_NATAMP}{plane}"] = df.loc[:, 'NOISE']
+
+    # Create dedicated dataframe with error columns to assign later (cleaner
+    # and faster than assigning individual columns)
+    df_amp = pd.DataFrame(
+        data={f"{COL_ERR}{col}": noise_scaled * np.sqrt(1 + np.square(df.loc[:, col])) for col in cols},
+        index=df.index,
+        dtype=pd.Float64Dtype()
+    )
+    df.loc[:, df_amp.columns] = df_amp
+    return df
diff --git a/omc3/model/madx_macros/general.macros.madx b/omc3/model/madx_macros/general.macros.madx
index caa41000c..269bbd4e5 100644
--- a/omc3/model/madx_macros/general.macros.madx
+++ b/omc3/model/madx_macros/general.macros.madx
@@ -21,6 +21,7 @@ select_monitors(): macro = {
                     k1l, k1sl, k2l, k3l, k4l, wx, wy, phix,
                     phiy, dmux, dmuy, keyword, dbx, dby,
                     r11, r12, r21, r22;
+    select, flag=twiss, pattern="_DOROS$";
 }
 
 
diff --git a/omc3/optics_measurements/beta_from_phase.py b/omc3/optics_measurements/beta_from_phase.py
index 9bdd9cdde..94210b676 100644
--- a/omc3/optics_measurements/beta_from_phase.py
+++ b/omc3/optics_measurements/beta_from_phase.py
@@ -321,7 +321,7 @@ def _assign_uncertainties(twiss_full, errordefspath):
     """
     LOGGER.debug("Start creating uncertainty information")
     errdefs = tfs.read(errordefspath)
-    twiss_full = twiss_full.assign(UNC=False, dK1=0, KdS=0, mKdS=0, dX=0, BPMdS=0)
+    twiss_full = twiss_full.assign(UNC=False, dK1=0.0, KdS=0.0, mKdS=0.0, dX=0.0, BPMdS=0.0)
     # loop over uncertainty definitions, fill the respective columns, set UNC to true
     for indx in errdefs.index:
         patt = errdefs.loc[indx, "PATTERN"]
diff --git a/omc3/optics_measurements/kick.py b/omc3/optics_measurements/kick.py
index 96ef47e15..91d22cdb3 100644
--- a/omc3/optics_measurements/kick.py
+++ b/omc3/optics_measurements/kick.py
@@ -5,7 +5,6 @@
 This module contains kick functionality of ``optics_measurements``.
 It provides functions to compute kick actions.
 """
-from collections import OrderedDict
 from contextlib import suppress
 from os.path import join
 
@@ -63,7 +62,8 @@
 def _get_kick(measure_input, files, plane):
     load_columns, calc_columns, column_types = _get_column_mapping(plane)
     kick_frame = pd.DataFrame(data=0., index=range(len(files[plane])),
-                              columns=list(load_columns.keys()) + calc_columns)
+                              columns=list(column_types.keys()))
+    kick_frame = kick_frame.astype(column_types)
 
     for i, df in enumerate(files[plane]):
         # load data directly from file
@@ -131,7 +131,7 @@ def _get_model_arc_betas(measure_input, plane):
 
 def _get_column_mapping(plane):
     plane_number = PLANE_TO_NUM[plane]
-    load_columns = OrderedDict([
+    load_columns = dict([
         (TIME, "TIME"),
         (DPP, DPP),
         (DPPAMP, DPPAMP),
diff --git a/omc3/optics_measurements/phase.py b/omc3/optics_measurements/phase.py
index f969e275f..827fa0960 100644
--- a/omc3/optics_measurements/phase.py
+++ b/omc3/optics_measurements/phase.py
@@ -244,8 +244,7 @@ def write_special(meas_input, phase_advances, plane_tune, plane):
         'BPM2',
         f'BPM_PHASE{plane}',
         f'BPM_{ERR}PHASE{plane}',]
-    special_phase_df = pd.DataFrame(columns=special_phase_columns)
-
+    to_concat_rows = []
     for elem1, elem2 in accel.important_phase_advances():
         mus1 = elements.loc[elem1, f"MU{plane}"] - elements.loc[:, f"MU{plane}"]
         minmu1 = abs(mus1.loc[meas.index]).idxmin()
@@ -260,8 +259,7 @@ def write_special(meas_input, phase_advances, plane_tune, plane):
         elems_to_bpms = -mus1.loc[minmu1] - mus2.loc[minmu2]
         ph_result = ((bpm_phase_advance + elems_to_bpms) * bd)
         model_value = (model_value * bd) % 1
-        new_row = pd.DataFrame(
-            dict(zip(special_phase_columns, [
+        new_row = pd.DataFrame([[
             elem1,
             elem2,
             ph_result % 1,
@@ -274,11 +272,12 @@ def write_special(meas_input, phase_advances, plane_tune, plane):
             minmu2,
             bpm_phase_advance,
             elems_to_bpms,
-            ])),
-            index=[0]
+            ]],
+            columns=special_phase_columns,
         )
+        to_concat_rows.append(new_row)
 
-        special_phase_df = pd.concat([special_phase_df, new_row], axis="index", ignore_index=True)
+    special_phase_df = pd.concat(to_concat_rows, axis="index", ignore_index=True)
 
     tfs.write(Path(meas_input.outputdir) / f"{SPECIAL_PHASE_NAME}{plane.lower()}{EXT}",
               special_phase_df)
diff --git a/omc3/optics_measurements/rdt.py b/omc3/optics_measurements/rdt.py
index 4bcf589b2..21110307c 100644
--- a/omc3/optics_measurements/rdt.py
+++ b/omc3/optics_measurements/rdt.py
@@ -240,7 +240,8 @@ def fitting(x, f):
     for i, bpm_rdt_data in enumerate(line_amp):
         popt, pcov = curve_fit(fitting, kick_data, bpm_rdt_data, p0=guess[i])
         amps[i] = popt[0]
-        err_amps[i] = np.sqrt(pcov)[0] if np.isfinite(np.sqrt(pcov)[0]) else 0.  # if single file is used, the error is reported as Inf, which is then overwritten with 0
+        sqrt_pcov = np.sqrt(pcov).flat[0]
+        err_amps[i] = sqrt_pcov if np.isfinite(sqrt_pcov) else 0.  # if single file is used, the error is reported as Inf, which is then overwritten with 0
     return amps, err_amps
 
 
diff --git a/omc3/tune_analysis/bbq_tools.py b/omc3/tune_analysis/bbq_tools.py
index 1616293e4..94d3716ae 100644
--- a/omc3/tune_analysis/bbq_tools.py
+++ b/omc3/tune_analysis/bbq_tools.py
@@ -144,7 +144,7 @@ def _get_interpolated_moving_average(data_series: pd.Series, clean_mask: Union[p
 
     try:
         # 'interpolate' fills nan based on index/values of neighbours
-        data = data.interpolate("index").fillna(method="bfill").fillna(method="ffill")
+        data = data.interpolate("index").bfill().ffill()
     except TypeError as e:
         raise TypeError("Interpolation failed. "
                         "Usually due to a dtype format that is not properly recognized.") from e
@@ -152,12 +152,10 @@ def _get_interpolated_moving_average(data_series: pd.Series, clean_mask: Union[p
     shift = -int((length-1)/2)  # Shift average to middle value
 
     # calculate mean and fill NaNs at the ends
-    data_mav = data.rolling(length).mean().shift(shift).fillna(
-        method="bfill").fillna(method="ffill")
+    data_mav = data.rolling(length).mean().shift(shift).bfill().ffill()
 
     # calculate deviation to the moving average and fill NaNs at the ends
-    std_mav = np.sqrt(((data-data_mav)**2).rolling(length).mean().shift(shift).fillna(
-        method="bfill").fillna(method="ffill"))
+    std_mav = np.sqrt(((data-data_mav)**2).rolling(length).mean().shift(shift).bfill().ffill())
 
     err_mav = std_mav / np.sqrt(length)
     if is_datetime_index:
diff --git a/pyproject.toml b/pyproject.toml
index 8fb5b4213..e3b1828de 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -109,5 +109,5 @@ markers = [
     "cern_network: tests that require access to afs or the technical network",
 ]
 # Helpful for pytest-debugging (leave commented out on commit):
-# log_cli=true
-# log_level=DEBUG
+#log_cli = true
+#log_cli_level = "DEBUG"
diff --git a/tests/unit/test_model_creator.py b/tests/unit/test_model_creator.py
index a71f5bc2f..5ff2cf0b8 100644
--- a/tests/unit/test_model_creator.py
+++ b/tests/unit/test_model_creator.py
@@ -4,12 +4,13 @@ from pathlib import Path
 
 import pytest
+import tfs
 
 from omc3.model.accelerators.accelerator import AcceleratorDefinitionError, AccExcitationMode
 from omc3.model.constants import TWISS_AC_DAT, TWISS_ADT_DAT, TWISS_DAT, TWISS_ELEMENTS_DAT, PATHFETCHER
 from omc3.model.manager import get_accelerator
 from omc3.model.model_creators.lhc_model_creator import LhcBestKnowledgeCreator, LhcModelCreator
 from omc3.model_creator import create_instance_and_model
-from omc3.model.model_creators.lhc_model_creator import LhcModelCreator
+from omc3.optics_measurements.constants import NAME
 
 INPUTS = Path(__file__).parent.parent / "inputs"
 LHC_30CM_MODIFIERS = [Path("R2023a_A30cmC30cmA10mL200cm.madx")]
@@ -156,6 +157,11 @@ def test_lhc_creation_nominal_driven(tmp_path, acc_models_lhc_2023):
     )
     check_accel_from_dir_vs_options(tmp_path, accel_opt, accel, required_keys=["beam", "year"])
 
+    # quick check for DOROS BPMs
+    for twiss_name in (TWISS_DAT, TWISS_ELEMENTS_DAT):
+        df_twiss = tfs.read(tmp_path / twiss_name, index=NAME)
+        assert any(df_twiss.index.str.match(r"BPM.+_DOROS$"))
+
     # checks that should fail
     with pytest.raises(AcceleratorDefinitionError) as excinfo:
diff --git a/tests/unit/test_rdt_magnet_order.py b/tests/unit/test_rdt_magnet_order.py
index d96c98af0..6ab42e803 100644
--- a/tests/unit/test_rdt_magnet_order.py
+++ b/tests/unit/test_rdt_magnet_order.py
@@ -1,18 +1,10 @@
 import os
-import random
-import string
-import tempfile
-import itertools
 
-import numpy as np
-import pandas as pd
 import pytest
 import tfs
 from pathlib import Path
 
-import turn_by_turn as tbt
 
-from omc3.definitions.constants import PLANES
 from omc3.hole_in_one import hole_in_one_entrypoint
 
 INPUTS = Path(__file__).parent.parent / "inputs"