Merging revision 2
pSpitzner committed May 23, 2023
1 parent 0deb849 commit e612d81
Showing 6 changed files with 2,954 additions and 83 deletions.
ana/ana_helper.py: 106 changes (81 additions, 25 deletions)
@@ -2,7 +2,7 @@
 # @Author: F. Paul Spitzner
 # @Email: [email protected]
 # @Created: 2021-03-10 13:23:16
-# @Last Modified: 2022-12-14 12:08:27
+# @Last Modified: 2023-05-05 14:05:08
 # ------------------------------------------------------------------------------ #
 # Here we collect all functions for importing and analyzing the data.
 # A central idea is that for every simulated/experimental trial, we have a
@@ -16,7 +16,6 @@
 # ------------------------------------------------------------------------------ #


-
 import os
 import sys
 import glob
@@ -183,14 +182,54 @@ def prepare_file(
     h5f["ana.neuron_ids"] = neuron_ids

     # make sure that the 2d_spikes representation is nan-padded, requires loading!
-    try:
-        spikes = h5f["data.spiketimes"][:]
-        if spikes is None:
-            raise ValueError
-        spikes[spikes == 0] = np.nan
-        h5f["data.spiketimes"] = spikes
-    except:
-        log.info("No spikes in file, plotting and analysing dynamics will not work.")
+    spikes = h5f["data.spiketimes"][:]
+    if spikes is None:
+        log.warning("No spikes in file, plotting and analysing dynamics won't work.")
+    elif np.any(np.isnan(spikes)):
+        log.debug("spikes seem to be nan-padded, no conversion needed.")
+    else:
+        # make sure the format matches.
+        # from simulations, I had a hard time padding with nans in the hdf5 files,
+        # but this needs a workaround, because in rare cases 0 might be a real spike time.
+
+        # in essence, this is what we want to do, but it is not as robust:
+        # spikes[spikes == 0] = np.nan
+        try:
+            # new approach: we expect that spike times always increase,
+            # until they are padded. detect padding by looking for non-increasing values.
+            diff = np.diff(spikes, axis=1) < 0
+
+            # use scipy label to find consecutive patches; has to be done for each neuron,
+            # otherwise it connects patches across neurons.
+            from scipy.ndimage import label
+
+            for n_id in range(0, num_n):
+                patches, _ = label(diff[n_id, :])
+                _, change_pt_indices = np.unique(patches, return_index=True)
+                if len(change_pt_indices) == 1:
+                    if spikes[n_id, 0] == 0:
+                        # edge case: everything is zero
+                        spikes[n_id, :] = np.nan
+                    else:
+                        # all increasing and non-zero, all good
+                        continue
+                elif len(change_pt_indices) == 2:
+                    # second half needs padding
+                    spikes[n_id, change_pt_indices[1] + 1 :] = np.nan
+                elif len(change_pt_indices) > 2:
+                    # this should not happen
+                    log.error(
+                        f"More than two change points found for spikes of neuron {n_id}"
+                    )
+                    raise ValueError
+
+            h5f["data.spiketimes"] = spikes
+
+        except Exception as e:
+            log.error(e)
+            log.warning(
+                "Spike conversion failed, plotting and analysing dynamics won't work."
+            )

     # # now we need to load things. [:] loads to ram and makes everything else faster
     # # convert spikes in the convenient nested (2d) format, first dim neuron,
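To see the change-point logic in action, here is a minimal standalone sketch (illustrative only, not part of the commit); it assumes the same zero-padded 2d layout that the hunk above converts:

import numpy as np
from scipy.ndimage import label

# toy spikes array: one row per neuron, zero-padded at the end
spikes = np.array([
    [0.1, 0.5, 1.2, 0.0, 0.0],  # trailing zeros are padding
    [0.0, 0.0, 0.0, 0.0, 0.0],  # edge case: silent neuron, all zeros
    [0.2, 0.9, 1.4, 2.0, 3.3],  # fully active, nothing to pad
])

# padding starts where spike times stop increasing
diff = np.diff(spikes, axis=1) < 0

for n_id in range(spikes.shape[0]):
    patches, _ = label(diff[n_id, :])
    _, change_pt_indices = np.unique(patches, return_index=True)
    if len(change_pt_indices) == 1:
        if spikes[n_id, 0] == 0:
            spikes[n_id, :] = np.nan  # everything zero -> all padding
    elif len(change_pt_indices) == 2:
        spikes[n_id, change_pt_indices[1] + 1 :] = np.nan  # nan-pad the tail

print(spikes)
# [[0.1 0.5 1.2 nan nan]
#  [nan nan nan nan nan]
#  [0.2 0.9 1.4 2.  3.3]]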
@@ -325,7 +364,13 @@ def load_experimental_files(path_prefix, condition="1_pre_"):
         log.error(f"No fluorescence data found for {path_prefix} {condition}")
         # log.exception(e)

-    return prepare_file(h5f)
+    # default preprocessing
+    h5f = prepare_file(h5f)
+
+    # overwrite some details,
+    # e.g. the stimulation description, maybe even as an optional argument
+
+    return h5f


 def prepare_minimal(spikes):
@@ -345,7 +390,7 @@ def prepare_minimal(spikes):
     h5f["data.spiketimes"] = spikes
     h5f["data.neuron_module_id"] = np.zeros(num_n, dtype=int)

-    prepare_file(h5f);
+    prepare_file(h5f)
     return h5f

@@ -1130,19 +1175,12 @@ def find_rij_pairs(h5f, rij=None, pairing="within_modules", which="neurons"):
                 res.append(rij[i, j])

         elif "within_group" in pairing:
-            if (
-                h5f[key][i] in mods_a
-                and h5f[key][j] in mods_a
-            ):
+            if h5f[key][i] in mods_a and h5f[key][j] in mods_a:
                 res.append(rij[i, j])

         elif "across_groups" in pairing:
-            if (
-                h5f[key][i] in mods_a
-                and h5f[key][j] in mods_b
-            ) or (
-                h5f[key][i] in mods_b
-                and h5f[key][j] in mods_a
+            if (h5f[key][i] in mods_a and h5f[key][j] in mods_b) or (
+                h5f[key][i] in mods_b and h5f[key][j] in mods_a
             ):
                 res.append(rij[i, j])

@@ -1219,6 +1257,7 @@ def find_state_variable(h5f, variable, write_to_h5f=True, return_res=False):
     if return_res:
         return states

+
 def find_module_resources_at_burst_begin(h5f, write_to_h5f=True, return_res=False):
     """
     Find what was the level of resources in every module when bursts started.
@@ -1255,7 +1294,7 @@ def get_module_resources_at_times(h5f, times):
     if "ana.adaptation" not in h5f.keypaths():
         find_module_level_adaptation(h5f)

-    res = np.ones((len(h5f["ana.mods"]), len(times)))*np.nan
+    res = np.ones((len(h5f["ana.mods"]), len(times))) * np.nan

     dt = h5f["ana.adaptation.dt"]

@@ -1295,6 +1334,7 @@ def find_module_level_adaptation(h5f):

     h5f[f"ana.adaptation.dt"] = dt

+
 def find_rij_within_across(h5f):
     """
     We already have a nice way to get the rij in `find_functional_complexity`.
@@ -2612,7 +2652,14 @@ def get_threshold_from_logisi_distribution(list_of_isi, area_fraction=0.3):


 def pd_bootstrap(
-    df, obs, sample_size=None, num_boot=500, f_within_sample=np.nanmean, f_across_samples=np.nanmean, percentiles=None
+    df,
+    obs,
+    sample_size=None,
+    num_boot=500,
+    f_within_sample=np.nanmean,
+    f_across_samples=np.nanmean,
+    percentiles=None,
+    return_samples=False,
 ):
     """
     bootstrap across all rows of a dataframe to get the mean across
@@ -2624,8 +2671,13 @@
     sample_size: int or None, default (None) for samples that are as large
         as the original dataframe (number of rows)
     num_boot : int, how many bootstrap samples to generate
-    func : function, default np.nanmean is used to calculate the estimate
+    f_within_sample : function, default np.nanmean is used to calculate the estimate
         for each sample
+    f_across_samples : function, default np.nanmean is used to calculate the estimate
+        (mid) across samples
+    return_samples : bool, default False,
+        if True, return the sample results instead of the across-sample estimates.
+        (f_within_sample is still applied to each sample.)
     percentiles : list of floats
         the percentiles to return. default is [2.5, 50, 97.5]

@@ -2651,6 +2703,9 @@

         resampled_estimates.append(f_within_sample(sample_df[obs]))

+    if return_samples:
+        return resampled_estimates
+
     mid = f_across_samples(resampled_estimates)
     std = np.std(resampled_estimates, ddof=1)
     q = np.percentile(resampled_estimates, percentiles)
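For reference, a hypothetical usage sketch of the extended pd_bootstrap; the DataFrame and the "rij" column are made up for illustration, and the exact shape of the default return value lies outside the hunks shown here:

import numpy as np
import pandas as pd

# hypothetical data: one row per observation, "rij" as the observable column
df = pd.DataFrame({"rij": np.random.default_rng(42).uniform(0, 1, size=200)})

# default behaviour: an across-sample summary built from mid, std and the percentiles
summary = pd_bootstrap(df, obs="rij", num_boot=500)

# new in this revision: return the raw per-sample estimates instead,
# e.g. to plot the full bootstrap distribution
sample_means = pd_bootstrap(df, obs="rij", num_boot=500, return_samples=True)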
Expand Down Expand Up @@ -2869,6 +2924,7 @@ def sequences_from_area(h5f, area_threshold=0.1):
h5f["ana.bursts.system_level.module_sequences"] = sequences
h5f["ana.bursts.event_sizes"] = event_sizes


def sequence_histogram(ids, sequences=None):

"""
Expand Down
ana/movie_business.py: 6 changes (3 additions, 3 deletions)
@@ -2,7 +2,7 @@
 # @Author: F. Paul Spitzner
 # @Email: [email protected]
 # @Created: 2020-01-24 13:43:39
-# @Last Modified: 2023-03-02 19:42:33
+# @Last Modified: 2023-05-11 10:55:48
 # ------------------------------------------------------------------------------- #
 # Classes needed to create a movie of the network.
 # ------------------------------------------------------------------------------- #
@@ -31,8 +31,8 @@

 from bitsandbobs.plt import alpha_to_solid_on_bg

-theme_bg = "black"
-# theme_bg = "white"
+# theme_bg = "black"
+theme_bg = "white"
 if theme_bg == "black":
     plt.style.use("dark_background")
     # use custom colors to match the paper
ana/paper_movies.py: 4 changes (2 additions, 2 deletions)
@@ -2,7 +2,7 @@
 # @Author: F. Paul Spitzner
 # @Email: [email protected]
 # @Created: 2022-06-22 10:12:19
-# @Last Modified: 2023-03-02 22:16:45
+# @Last Modified: 2023-05-11 10:55:26
 # ------------------------------------------------------------------------------ #
 # This guy uses movie_business.py to create movies from hdf5 files.
 # tweak main() and run from console.
@@ -357,7 +357,7 @@ def make_a_movie(
 def comparison_simulation(
     input_top="../dat/simulations/lif/raw/highres_stim=02_k=3_kin=30_jA=45.0_jG=50.0_jM=15.0_tD=20.0_rate=80.0_stimrate=0.0_rep=001.hdf5",
     input_bot="../dat/simulations/lif/raw/highres_stim=02_k=3_kin=30_jA=45.0_jG=50.0_jM=15.0_tD=20.0_rate=80.0_stimrate=20.0_rep=001.hdf5",
-    output_path="../mov/simulation_test.mp4",
+    output_path="../mov/simulation_comparison_k3_rep1.mp4",
     title=None,
     show_time=False,
     movie_duration=20,
(diffs for the remaining 3 changed files not shown)
