Skip to content

Commit

Permalink
Reviewed updates
Browse files Browse the repository at this point in the history
  • Loading branch information
jgray-19 committed Jan 6, 2025
1 parent 94b3414 commit 31bbff8
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 16 deletions.
1 change: 0 additions & 1 deletion tests/test_madng.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ def _original_simulation_data() -> TbtData:
bpm3_p2_x = np.array([-0.002414213831, 0.0009999991309, 0.002414214191])
bpm3_p2_y = np.array([-0.0004142133507,-0.001000000149, 0.0004142129907])

print(pd.DataFrame(index=names, data=[bpm1_p1_x, bpm2_p1_x, bpm3_p1_x]))
matrix = [
TransverseData( # first particle
X=pd.DataFrame(index=names, data=[bpm1_p1_x, bpm2_p1_x, bpm3_p1_x]),
Expand Down
29 changes: 14 additions & 15 deletions turn_by_turn/madng.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,45 +35,45 @@ def read_tbt(file_path: str | Path) -> TbtData:
Returns:
A ``TbTData`` object with the loaded data.
"""
df = tfs.read(file_path)
LOGGER.debug("Starting to read TBT data from dataframe")
df = tfs.read(file_path)

nturns = int(df.iloc[-1].loc[TURN])
npart = int(df.iloc[-1].loc[PARTICLE_ID])
LOGGER.info(f"Number of turns: {nturns}, Number of particles: {npart}")

# Get the unique BPMs and number of BPMs
bpms = df[NAME].unique()
nbpms = len(bpms)
# Get the names of all of the observed points (probably just BPMs, maybe other devices)
observe_points = df.loc[df[TURN] == 1][NAME].to_numpy()
num_observables = len(observe_points) # Number of BPMs (or observed points)

# Set the index to the particle ID
df.set_index([PARTICLE_ID], inplace=True)
df = df.set_index([PARTICLE_ID])

matrices = []
bunch_ids = range(1, npart + 1) # Particle IDs start from 1 (not 0)
for particle_id in bunch_ids:
LOGGER.info(f"Processing particle ID: {particle_id}")

# Filter the dataframe for the current particle
df_particle = df.loc[particle_id]
        df_particle = df.loc[particle_id].copy()  # Copy: set_index below would otherwise act on a slice of df — confirm intent

# Check if the number of BPMs is consistent for all particles/turns (i.e. no lost particles)
if len(df_particle[NAME]) / nturns != nbpms:
# Check if the number of observed points is consistent for all particles/turns (i.e. no lost particles)
if len(df_particle[NAME]) / nturns != num_observables:
raise ValueError(
"The number of BPMs is not consistent for all particles/turns. Simulation may have lost particles."
"The number of BPMs (or observed points) is not consistent for all particles/turns. Simulation may have lost particles."
)

# Set the index to the element index, which are unique for every BPM and turn
df_particle.set_index([ELEMENT_INDEX], inplace=True)
# Set the index to the element index, which are unique for every observable and turn
df_particle = df_particle.set_index([ELEMENT_INDEX])

# Create a dictionary of the TransverseData fields
tracking_data_dict = {
plane: pd.DataFrame(
index=bpms,
index=observe_points,
data=df_particle[plane.lower()] # MAD-NG uses lower case field names
.to_numpy()
.reshape(nbpms, nturns, order="F"),
# ^ Number of BPMs x Number of turns, Fortran order (So that the BPMs are the rows)
.reshape(num_observables, nturns, order="F"),
# ^ Number of Observables x Number of turns, Fortran order (So that the observables are the rows)
)
for plane in TransverseData.fieldnames() # X, Y
}
Expand All @@ -83,5 +83,4 @@ def read_tbt(file_path: str | Path) -> TbtData:
matrices.append(TransverseData(**tracking_data_dict))

LOGGER.debug("Finished reading TBT data")
# Should we also provide date? (jgray 2024)
return TbtData(matrices=matrices, bunch_ids=list(bunch_ids), nturns=nturns)

0 comments on commit 31bbff8

Please sign in to comment.