# estimate.py (493 lines, 400 loc, 20.8 KB)
import argparse
from sys import exit, stderr, argv, path
from os.path import isfile, isdir, realpath, dirname, exists
import ast
import yaml
import numpy as np

ESTIMATOR_DIR = dirname(realpath(__file__))
path.insert(1, '{}/src'.format(ESTIMATOR_DIR))

import hde_utils as utl
import hde_visualization as vsl

__version__ = "unknown"
from _version import __version__

EXIT_SUCCESS = 0
EXIT_FAILURE = 1
def do_main_analysis(spike_times, spike_times_optimization, spike_times_validation,
                     analysis_file, settings):
    """
    Determine the history dependence of a neuron's activity based on
    spike time data.
    """

    utl.save_spike_times_stats(analysis_file, spike_times, **settings)

    if settings['cross_validated_optimization']:
        settings['cross_val'] = 'h1'  # first half of the data
        utl.save_history_dependence_for_embeddings(analysis_file,
                                                   spike_times_optimization,
                                                   **settings)

        settings['cross_val'] = 'h2'  # second half of the data
        utl.save_history_dependence_for_embeddings(analysis_file,
                                                   spike_times_validation,
                                                   **settings)
        utl.compute_CIs(analysis_file, spike_times, target_R='R_max', **settings)
    else:
        settings['cross_val'] = None
        utl.save_history_dependence_for_embeddings(analysis_file,
                                                   spike_times, **settings)
        utl.compute_CIs(analysis_file, spike_times, target_R='R_max', **settings)
def compute_CIs(spike_times, analysis_file, settings):
    """
    Compute bootstrap replications of the history-dependence estimate
    which can be used to obtain confidence intervals.
    """

    if settings['cross_validated_optimization']:
        settings['cross_val'] = 'h2'  # second half of the data
    else:
        settings['cross_val'] = None

    utl.compute_CIs(analysis_file, spike_times, target_R='R_tot', **settings)
    utl.compute_CIs(analysis_file, spike_times, target_R='nonessential', **settings)
# def perform_permutation_test(analysis_file, settings):
#     """
#     Perform a permutation test to check whether the history dependence
#     in the target neuron is significantly different from zero.
#     """
#
#     utl.perform_permutation_test(analysis_file, **settings)
def analyse_auto_MI(spike_times, analysis_file, settings):
    """
    Compute the auto mutual information in the neuron's activity, a
    measure closely related to history dependence.
    """

    utl.analyse_auto_MI(analysis_file, spike_times, **settings)
def create_CSV_files(analysis_file,
                     csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file,
                     analysis_num, settings):
    """
    Export the data resulting from the analysis as csv files.
    """

    if settings['cross_validated_optimization']:
        settings['cross_val'] = 'h2'  # second half of the data
    else:
        settings['cross_val'] = None

    utl.create_CSV_files(analysis_file,
                         csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file,
                         analysis_num, **settings)
def produce_plots(spike_times, csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file,
                  settings):
    """
    Produce plots that visualize the results.
    """

    vsl.produce_plots(spike_times,
                      csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file,
                      **settings)
# parse arguments received via the command line and check for validity
def parse_arguments(arguments, defined_tasks, defined_estimation_methods):
    """
    Parse the arguments passed to the script via the command line.
    Import settings from file, do some sanity checks to avoid faulty runs.
    """

    # parse arguments
    parser = argparse.ArgumentParser(
        description="""
    History dependence estimator, v. {}

    Estimate the history dependence and temporal depth of a single
    neuron, based on information-theoretical measures for spike time
    data, as presented in (Rudelt et al, in prep.) [1]. Parameters
    can be passed via the command line or through files, where command
    line options are prioritised over those passed by file. (If none
    are supplied, settings are read from the 'default.yaml' file.) A
    user new to this tool is encouraged to run

        python3 estimate.py sample_data/spike_times.dat -o sample_output.pdf \\
            -s settings/test.yaml

    to test the functionality of this tool. A more detailed
    description can be found in the guide provided with the tool [2].

    [1]: L. Rudelt, D. G. Marx, M. Wibral, V. Priesemann: Embedding
         optimization reveals long-lasting history dependence in
         neural spiking activity (in prep.)
    [2]: https://github.com/Priesemann-Group/hdestimator
    """.format(__version__),
        formatter_class=argparse.RawDescriptionHelpFormatter)

    optional_arguments = parser._action_groups.pop()
    required_arguments = parser.add_argument_group("required arguments")

    required_arguments.add_argument('spike_times_file', action="store",
                                    help="Define file from which to read spike times and on which to perform the analysis. The file should contain one spike time per line.",
                                    nargs='+')
    optional_arguments.add_argument("-t", "--task", metavar="TASK", action="store",
                                    help="Define task to be performed. One of {}. Per default, the full analysis is performed.".format(defined_tasks),
                                    default="full-analysis")
    optional_arguments.add_argument("-e", "--estimation-method", metavar="EST_METHOD", action="store",
                                    help="Specify estimation method for the analysis, one of {}.".format(defined_estimation_methods))
    optional_arguments.add_argument("-h5", "--hdf5-dataset", action="store",
                                    help="Load data stored in a dataset in a hdf5 file.",
                                    nargs='+')
    optional_arguments.add_argument("-o", "--output", metavar="IMAGE_FILE", action="store",
                                    help="Save the output image to file.")
    optional_arguments.add_argument("-p", "--persistent", action="store_true",
                                    help="Save the analysis to file. If an existing analysis is found, read it from file.")
    optional_arguments.add_argument("-s", "--settings-file", metavar="SETTINGS_FILE", action="store",
                                    help="Specify yaml file from which to load custom settings.")
    optional_arguments.add_argument("-l", "--label", metavar="LABEL", action="store",
                                    help="Include a label in the output to classify the analysis.")
    # optional_arguments.add_argument("-v", "--verbose", action="store_true",
    #                                 help="Print more info at run time.")
    optional_arguments.add_argument('--version', action='version',
                                    version='hdestimator v. {}'.format(__version__),
                                    help="Show version of the tool and exit.")
    parser._action_groups.append(optional_arguments)

    args = parser.parse_args(arguments)
    # check that parsed arguments are valid
    task = args.task.lower()
    spike_times_file_names = args.spike_times_file

    task_found = False
    task_full_name = ""
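    # The task may be abbreviated to any unambiguous prefix: 'hist', for
    # example, resolves to 'history-dependence', whereas 'c' is rejected
    # because both 'confidence-intervals' and 'csv-files' start with it.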
    for defined_task in defined_tasks:
        if defined_task.startswith(task):
            if not task_found:
                task_found = True
                task_full_name = defined_task
            else:
                print("Task could not be uniquely determined. Task must be one of {}. Aborting.".format(defined_tasks),
                      file=stderr, flush=True)
                exit(EXIT_FAILURE)
    task = task_full_name

    if task not in defined_tasks:
        print("Task must be one of {}. Aborting.".format(defined_tasks),
              file=stderr, flush=True)
        exit(EXIT_FAILURE)

    for spike_times_file_name in spike_times_file_names:
        if not exists(spike_times_file_name):
            print("Spike times file {} not found. Aborting.".format(spike_times_file_name),
                  file=stderr, flush=True)
            exit(EXIT_FAILURE)

    spike_times = utl.get_spike_times_from_file(spike_times_file_names,
                                                args.hdf5_dataset)

    if not isinstance(spike_times, np.ndarray):
        print("Error loading spike times. Aborting.",
              file=stderr, flush=True)
        exit(EXIT_FAILURE)
    elif not len(spike_times) > 0:
        print("Spike times are empty. Aborting.",
              file=stderr, flush=True)
        exit(EXIT_FAILURE)
    #
    # PARSE SETTINGS
    #

    # create default settings file if it does not exist:
    if not isfile('{}/settings/default.yaml'.format(ESTIMATOR_DIR)):
        utl.create_default_settings_file(ESTIMATOR_DIR)

    # load default settings
    with open('{}/settings/default.yaml'.format(ESTIMATOR_DIR), 'r') as default_settings_file:
        settings = yaml.load(default_settings_file, Loader=yaml.BaseLoader)

    # overwrite default settings with custom ones
    if args.settings_file is not None:
        if not isfile(args.settings_file):
            print("Error: Settings file {} not found. Aborting.".format(args.settings_file),
                  file=stderr, flush=True)
            exit(EXIT_FAILURE)
        with open(args.settings_file, 'r') as custom_settings_file:
            custom_settings = yaml.load(custom_settings_file, Loader=yaml.BaseLoader)
        for setting_key in settings:
            if setting_key in custom_settings:
                settings[setting_key] = custom_settings[setting_key]

    if args.persistent:
        settings['persistent_analysis'] = "True"

    # if args.verbose:
    #     settings['verbose_output'] = "True"
    # else:
    settings['verbose_output'] = "False"

    if args.estimation_method is not None:
        settings['estimation_method'] = args.estimation_method

    if 'block_length_l' not in settings:
        settings['block_length_l'] = "None"
    # check that required settings are defined
    required_parameters = ['embedding_past_range_set', 'embedding_number_of_bins_set',
                           'embedding_scaling_exponent_set', 'embedding_step_size',
                           'bbc_tolerance', 'timescale_minimum_past_range',
                           'number_of_bootstraps_R_max', 'number_of_bootstraps_R_tot',
                           'number_of_bootstraps_nonessential',
                           'block_length_l',
                           'bootstrap_CI_percentile_lo',
                           'bootstrap_CI_percentile_hi',
                           # 'number_of_permutations',
                           'auto_MI_bin_size_set',
                           'auto_MI_max_delay']
    required_settings = ['estimation_method', 'plot_AIS',
                         'ANALYSIS_DIR', 'persistent_analysis',
                         'cross_validated_optimization',
                         'return_averaged_R',
                         'bootstrap_CI_use_sd',
                         'verbose_output',
                         'plot_settings', 'plot_color'] + required_parameters

    for required_setting in required_settings:
        if required_setting not in settings:
            print("Error in settings file: {} is not defined. Aborting.".format(required_setting),
                  file=stderr, flush=True)
            exit(EXIT_FAILURE)
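    # Illustration only (hypothetical values): a custom settings file passed via
    # -s needs to contain just the keys it overrides, e.g.
    #
    #     estimation_method : bbc
    #     embedding_step_size : 0.005
    #     number_of_bootstraps_R_max : 250
    #
    # every key that is not overridden keeps its value from settings/default.yaml.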
    # sanity check for the settings
    if settings['estimation_method'] not in defined_estimation_methods:
        print("Error: estimation_method must be one of {}. Aborting.".format(defined_estimation_methods),
              file=stderr, flush=True)
        exit(EXIT_FAILURE)
    # evaluate settings (turn strings into booleans etc if applicable)
    for setting_key in ['persistent_analysis',
                        'verbose_output',
                        'cross_validated_optimization',
                        'return_averaged_R',
                        'bootstrap_CI_use_sd',
                        'plot_AIS']:
        settings[setting_key] = ast.literal_eval(settings[setting_key])
    for plot_setting in settings['plot_settings']:
        try:
            settings['plot_settings'][plot_setting] \
                = ast.literal_eval(settings['plot_settings'][plot_setting])
        except:
            continue
    for parameter_key in required_parameters:
        if isinstance(settings[parameter_key], list):
            settings[parameter_key] = [ast.literal_eval(element)
                                       for element in settings[parameter_key]]
        elif parameter_key == 'embedding_scaling_exponent_set' \
                and isinstance(settings['embedding_scaling_exponent_set'], dict):
            # embedding_scaling_exponent_set can be passed either as a
            # list, in which case it is evaluated as such, or it can be
            # passed by specifying three parameters that determine how
            # many scaling exponents should be used. In the latter case, the
            # uniform embedding as well as the embedding for which
            # the first bin has a length of min_first_bin_size (in
            # seconds) are used, as well as linearly spaced scaling
            # factors in between, such that in total
            # number_of_scalings scalings are used
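            # Illustration (values made up): in the settings file the set can be
            # given directly as a list, e.g.
            #     embedding_scaling_exponent_set : [0.0, 0.25, 0.5, 1.0]
            # or as a mapping with the three parameters described above; the
            # exact key names and defaults are those in settings/default.yaml.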
            for key in settings['embedding_scaling_exponent_set']:
                settings['embedding_scaling_exponent_set'][key] \
                    = ast.literal_eval(settings['embedding_scaling_exponent_set'][key])
        else:
            settings[parameter_key] = ast.literal_eval(settings[parameter_key])
    # The Cython implementation uses 64 bit unsigned integers for the symbols,
    # so we allow at most 62 past bins (the embedding window adds one more bin,
    # giving 63 bits in total, which still fits into 64 bits)
    if max(settings['embedding_number_of_bins_set']) > 62:
        print("Error: Max number of bins too large; use less than 63. Aborting.",
              file=stderr, flush=True)
        exit(EXIT_FAILURE)

    # If R_tot is computed as an average over Rs, no confidence interval can be estimated
    if settings['return_averaged_R']:
        settings['number_of_bootstraps_R_tot'] = 0
    # if the user specifies a file in which to store the output image:
    # store this in settings
    if args.output is not None:
        settings['output_image'] = args.output

    # If the user wants to store the data, do so in a dedicated directory below the
    # ANALYSIS_DIR passed via settings (here it is also checked whether there is an
    # existing analysis, for which the hash sum of the content of the spike times
    # file must match).
    #
    # If the user does not want to store the data, a temporary file is created and
    # then deleted after the program finishes.
    #
    # For most tasks an existing analysis file is expected.
    if settings['persistent_analysis']:
        if not isdir(settings['ANALYSIS_DIR']):
            print("Error: {} not found. Aborting.".format(settings['ANALYSIS_DIR']),
                  file=stderr, flush=True)
            exit(EXIT_FAILURE)
        analysis_dir, analysis_num, existing_analysis_found \
            = utl.get_or_create_analysis_dir(spike_times,
                                             spike_times_file_names,
                                             settings['ANALYSIS_DIR'])
        settings['ANALYSIS_DIR'] = analysis_dir
    else:
        analysis_num = "temp"

    analysis_file = utl.get_analysis_file(settings['persistent_analysis'],
                                          settings['ANALYSIS_DIR'])

    # sanity check for tasks
    if task != "full-analysis" and not settings['persistent_analysis']:
        print("Error. Setting 'persistent_analysis' is set to 'False' and task is not 'full-analysis'. This would produce no output. Aborting.",
              file=stderr, flush=True)
        exit(EXIT_FAILURE)
if task in ["confidence-intervals",
# "permutation-test",
"csv-files"]:
if settings['cross_validated_optimization']:
required_dir = "h2_embeddings"
else:
required_dir = "embeddings"
if not required_dir in analysis_file.keys():
print("Error. No existing analysis found. Please run the 'history-dependence' task first. Aborting.", file=stderr, flush=True)
exit(EXIT_FAILURE)
csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file \
= utl.get_CSV_files(task,
settings['persistent_analysis'],
settings['ANALYSIS_DIR'])
if task == "plots":
for csv_file in [csv_stats_file,
csv_histdep_data_file,
csv_auto_MI_data_file]:
if csv_file == None:
print("Error. CSV files not found and needed to produce plots. Please run the 'csv-files' task first. Aborting.", file=stderr, flush=True)
exit(EXIT_FAILURE)
    # label for the output
    if args.label is not None:
        settings['label'] = args.label
    else:
        if 'label' not in settings:
            settings['label'] = ""

    if "," in settings['label']:
        new_label = ""
        for char in settings['label']:
            if not char == ",":
                new_label += char
            else:
                new_label += ";"
        settings['label'] = new_label
        print("Warning: Invalid label '{}'. It may not contain any commas, as this conflicts with the CSV file format. The commas have been replaced by semicolons.".format(settings['label']),
              file=stderr, flush=True)

    # for cross-validation
    # split up data in two halves
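    # Toy illustration (made-up numbers): for spt = [0.1, 2.0, 5.0, 9.9] the
    # half time is (9.9 - 0.1) / 2 = 4.9, so the optimization half becomes
    # [0.1, 2.0] and the validation half becomes [5.0, 9.9] - 4.9 = [0.1, 5.0].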
    spike_times_optimization = []
    spike_times_validation = []
    if settings['cross_validated_optimization']:
        for spt in spike_times:
            spt_half_time = (spt[-1] - spt[0]) / 2
            spt_optimization = spt[spt < spt_half_time]
            spt_validation = spt[spt >= spt_half_time] \
                - spt_half_time
            spike_times_optimization += [spt_optimization]
            spike_times_validation += [spt_validation]
    else:
        for spt in spike_times:
            spike_times_optimization += [spt]
            spike_times_validation += [spt]
    spike_times_optimization = np.array(spike_times_optimization)
    spike_times_validation = np.array(spike_times_validation)

    return task, spike_times, spike_times_optimization, spike_times_validation, \
        analysis_file, csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file, analysis_num, \
        settings
def main(arguments):
    """
    Parse arguments and settings and then run selected tasks.
    """

    # definitions
    defined_tasks = ["history-dependence",
                     "confidence-intervals",
                     # "permutation-test",
                     "auto-mi",
                     "csv-files",
                     "plots",
                     "full-analysis"]
    defined_estimation_methods = ['bbc', 'shuffling', 'all']

    # get task and target (parse arguments and check for validity)
    task, spike_times, spike_times_optimization, spike_times_validation, \
        analysis_file, csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file, analysis_num, \
        settings = parse_arguments(arguments,
                                   defined_tasks,
                                   defined_estimation_methods)

    if settings['estimation_method'] == 'all':
        estimation_methods = ['bbc', 'shuffling']
    else:
        estimation_methods = [settings['estimation_method']]

    # now perform tasks as specified by the parsed arguments
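    # For 'full-analysis' the steps below run in order: history-dependence,
    # confidence-intervals, auto-mi, csv-files, plots; for any other task only
    # the matching step is executed (once per estimation method).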
    for estimation_method in estimation_methods:
        settings['estimation_method'] = estimation_method

        if task == "history-dependence" or task == "full-analysis":
            do_main_analysis(spike_times, spike_times_optimization, spike_times_validation,
                             analysis_file, settings)

        if task == "confidence-intervals" or task == "full-analysis":
            compute_CIs(spike_times_validation, analysis_file, settings)

        # if task == "permutation-test" or task == "full-analysis":
        #     perform_permutation_test(analysis_file, settings)

        if task == "auto-mi" or task == "full-analysis":
            analyse_auto_MI(spike_times, analysis_file, settings)

        if task == "csv-files" or task == "full-analysis":
            create_CSV_files(analysis_file,
                             csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file,
                             analysis_num, settings)

        if task == "plots" or task == "full-analysis":
            produce_plots(spike_times,
                          csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file,
                          settings)

    for f in [analysis_file,
              csv_stats_file,
              csv_histdep_data_file,
              csv_auto_MI_data_file]:
        if f is not None:
            f.close()

    return EXIT_SUCCESS
if __name__ == "__main__":
if len(argv) == 1:
argv += ["-h"]
exit(main(argv[1:]))
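# Example invocations (the first is taken from the help text above, the second is
# a hypothetical combination of the flags defined in parse_arguments):
#   python3 estimate.py sample_data/spike_times.dat -o sample_output.pdf -s settings/test.yaml
#   python3 estimate.py sample_data/spike_times.dat -t history-dependence -p -e bbc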