"""
========================
Study configuration file
========================
Configuration parameters and global variable values for the study.
Authors: José C. García Alanis <alanis.jcg@gmail.com>
License: BSD (3-clause)
"""
import os
from os import path as op
import platform
import multiprocessing
import argparse
import numpy as np
from utils import FileNames
from mne.channels import make_standard_montage
###############################################################################
class LoggingFormat:
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
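# Illustrative use of the ANSI codes above (how messages are actually printed
# elsewhere in the pipeline is an assumption):
#     print(LoggingFormat.BOLD + LoggingFormat.GREEN
#           + 'Processing done.' + LoggingFormat.END)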
###############################################################################
# Use a parser to handle command line arguments
parser = argparse.ArgumentParser(description='Parse command line arguments '
                                             'for pre-processing of EEG data.')
parser.add_argument('subject',
                    metavar='sub###',
                    help='The subject to process',
                    type=int)
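# Illustrative downstream usage (the processing scripts, not this file, would
# typically call something like):
#     args = parser.parse_args()
#     subject = args.subject  # e.g. invoked as: python <script>.py 3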
# Determine which user is running the scripts on which machine. Set the path
# to where the data is stored and determine how many CPUs to use for analysis.
node = platform.node()  # machine name
system = platform.system()  # operating system
# You may want to add your machine to this list
if 'Jose' in node and 'n' in system:
    # iMac at work
    data_dir = '../data'
    n_jobs = 2  # This station has 4 cores (we'll use 2).
elif 'jose' in node and 'x' in system:
    # PC at home
    data_dir = '../data'
    n_jobs = 'cuda'  # Use NVIDIA CUDA GPU processing
elif 'ma04' in node:
    data_dir = '../data'
    n_jobs = 2
else:
    # Defaults
    data_dir = '../data'
    n_jobs = 1

# Make sure BLAS uses the right number of cores
use_cores = multiprocessing.cpu_count() // 2
if use_cores < 2:
    use_cores = 1
os.environ['OMP_NUM_THREADS'] = str(use_cores)
###############################################################################
# Relevant parameters for the analysis.
sampling_rate = 256.0 # Hz
task_name = 'dpxtt'
task_description = 'DPX, effects of time on task'
# EEG channel names and locations
montage = make_standard_montage('standard_1020')
# channels to be excluded from import
exclude = ['EXG5', 'EXG6', 'EXG7', 'EXG8']
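# Illustrative downstream use of `montage` and `exclude` (a sketch, assuming a
# BioSemi .bdf import in the processing scripts; the exact call lives there):
#     raw = mne.io.read_raw_bdf('<path-to-bdf>', preload=True, exclude=exclude)
#     raw.set_montage(montage)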
# subjects to use for analysis
subjects = np.arange(1, 28)
# relevant events in the paradigm
event_ids = {'correct_target_button': 13,
             'correct_non_target_button': 12,
             'incorrect_target_button': 113,
             'incorrect_non_target_button': 112,
             'cue_0': 70,
             'cue_1': 71,
             'cue_2': 72,
             'cue_3': 73,
             'cue_4': 74,
             'cue_5': 75,
             'probe_0': 76,
             'probe_1': 77,
             'probe_2': 78,
             'probe_3': 79,
             'probe_4': 80,
             'probe_5': 81,
             'start_record': 127,
             'pause_record': 245}
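# Sketch of how these event codes are typically turned into epochs (an
# assumption; 'Status' is the usual BioSemi stim channel, tmin/tmax are
# illustrative):
#     events = mne.find_events(raw, stim_channel='Status')
#     epochs = mne.Epochs(raw, events, event_id=event_ids,
#                         tmin=-0.5, tmax=1.0, preload=True)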
###############################################################################
# Templates for filenames
#
# This part of the config file uses the FileNames class. It provides a small
# wrapper around string.format() to keep track of a list of filenames.
# See fnames.py for details on how this class works.
fname = FileNames()
# directories to use for input and output
fname.add('data_dir', data_dir)
fname.add('bids_data', '{data_dir}/sub-{subject:03d}')
fname.add('subject_demographics',
          '{data_dir}/subject_data/subject_demographics.tsv')
fname.add('sourcedata_dir', '{data_dir}/sourcedata')
fname.add('derivatives_dir', '{data_dir}/derivatives')
fname.add('reports_dir', '{derivatives_dir}/reports')
fname.add('results', '{derivatives_dir}/results')
fname.add('rt', '{results}/rt')
fname.add('figures', '{results}/figures')
fname.add('tables', '{results}/tables')
fname.add('rois', '{results}/rois')
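# Illustrative expansion (assuming FileNames substitutes previously registered
# entries recursively, with data_dir = '../data'; see fnames.py):
#     fname.figures()  # -> '../data/derivatives/results/figures'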
# The paths for data file input
# fname.add('source',
#           '{sourcedata_dir}/sub-{subject:02d}/eeg/sub-{subject:02d}_dpx_eeg.bdf')  # noqa
# alternative:
def source_file(files, source_type, subject):
    if source_type == 'eeg':
        return op.join(files.sourcedata_dir,
                       'sub-%02d/%s/sub-%02d_dualreg_eeg.bdf'
                       % (subject, source_type, subject))
    elif source_type == 'demo':
        return op.join(files.sourcedata_dir,
                       'sub-%02d/%s/sub-%02d_dualreg_demographics.tsv'
                       % (subject, source_type, subject))
# create full path for data file input
fname.add('source', source_file)
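# Illustrative call (assuming FileNames passes itself as the first argument to
# function-style entries, as the `files` parameter above suggests):
#     fname.source(source_type='eeg', subject=1)
#     # -> something like '../data/sourcedata/sub-01/eeg/sub-01_dualreg_eeg.bdf'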
# The paths that are produced by the analysis steps
def output_path(path, processing_step, subject, file_type):
    path = op.join(path.derivatives_dir, processing_step, 'sub-%03d' % subject)
    os.makedirs(path, exist_ok=True)
    return op.join(path,
                   'sub-%03d-%s-%s' % (subject, processing_step, file_type))
# The full path for data file output
fname.add('output', output_path)
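# Illustrative call (the processing step and file type names are made up here,
# and the expansion assumes data_dir = '../data'):
#     fname.output(processing_step='repair_bads', subject=1,
#                  file_type='epo.fif')
#     # -> '../data/derivatives/repair_bads/sub-001/sub-001-repair_bads-epo.fif'
# Note that output_path also creates the output directory if it is missing.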
# The paths that are produced by the report step
def report_path(path, subject):
    h5_path = op.join(path.reports_dir, 'sub-%03d.h5' % subject)
    html_path = op.join(path.reports_dir, 'sub-%03d-report.html' % subject)
    return h5_path, html_path
# The full path for report file output
fname.add('report', report_path)
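# Illustrative call (returns both report paths as a tuple, assuming
# data_dir = '../data'):
#     h5_file, html_file = fname.report(subject=1)
#     # -> ('../data/derivatives/reports/sub-001.h5',
#     #     '../data/derivatives/reports/sub-001-report.html')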
# Files produced by system check and validator
fname.add('system_check', './system_check.txt')
fname.add('validator', './validator.txt')